cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

atl1.c (100097B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
      4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
      5 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
      6 *
      7 * Derived from Intel e1000 driver
      8 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
      9 *
     10 * Contact Information:
     11 * Xiong Huang <xiong.huang@atheros.com>
     12 * Jie Yang <jie.yang@atheros.com>
     13 * Chris Snook <csnook@redhat.com>
     14 * Jay Cliburn <jcliburn@gmail.com>
     15 *
     16 * This version is adapted from the Attansic reference driver.
     17 *
     18 * TODO:
     19 * Add more ethtool functions.
     20 * Fix abstruse irq enable/disable condition described here:
     21 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
     22 *
     23 * NEEDS TESTING:
     24 * VLAN
     25 * multicast
     26 * promiscuous mode
     27 * interrupt coalescing
     28 * SMP torture testing
     29 */
     30
     31#include <linux/atomic.h>
     32#include <asm/byteorder.h>
     33
     34#include <linux/compiler.h>
     35#include <linux/crc32.h>
     36#include <linux/delay.h>
     37#include <linux/dma-mapping.h>
     38#include <linux/etherdevice.h>
     39#include <linux/hardirq.h>
     40#include <linux/if_ether.h>
     41#include <linux/if_vlan.h>
     42#include <linux/in.h>
     43#include <linux/interrupt.h>
     44#include <linux/ip.h>
     45#include <linux/irqflags.h>
     46#include <linux/irqreturn.h>
     47#include <linux/jiffies.h>
     48#include <linux/mii.h>
     49#include <linux/module.h>
     50#include <linux/net.h>
     51#include <linux/netdevice.h>
     52#include <linux/pci.h>
     53#include <linux/pci_ids.h>
     54#include <linux/pm.h>
     55#include <linux/skbuff.h>
     56#include <linux/slab.h>
     57#include <linux/spinlock.h>
     58#include <linux/string.h>
     59#include <linux/tcp.h>
     60#include <linux/timer.h>
     61#include <linux/types.h>
     62#include <linux/workqueue.h>
     63
     64#include <net/checksum.h>
     65
     66#include "atl1.h"
     67
     68MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
     69	      "Chris Snook <csnook@redhat.com>, "
     70	      "Jay Cliburn <jcliburn@gmail.com>");
     71MODULE_LICENSE("GPL");
     72
     73/* Temporary hack for merging atl1 and atl2 */
     74#include "atlx.c"
     75
     76static const struct ethtool_ops atl1_ethtool_ops;
     77
     78/*
     79 * This is the only thing that needs to be changed to adjust the
     80 * maximum number of ports that the driver can manage.
     81 */
     82#define ATL1_MAX_NIC 4
     83
     84#define OPTION_UNSET    -1
     85#define OPTION_DISABLED 0
     86#define OPTION_ENABLED  1
     87
     88#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
     89
     90/*
     91 * Interrupt Moderate Timer in units of 2 us
     92 *
     93 * Valid Range: 10-65535
     94 *
     95 * Default Value: 100 (200us)
     96 */
     97static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
     98static unsigned int num_int_mod_timer;
     99module_param_array_named(int_mod_timer, int_mod_timer, int,
    100	&num_int_mod_timer, 0);
    101MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
    102
    103#define DEFAULT_INT_MOD_CNT	100	/* 200us */
    104#define MAX_INT_MOD_CNT		65000
    105#define MIN_INT_MOD_CNT		50
    106
    107struct atl1_option {
    108	enum { enable_option, range_option, list_option } type;
    109	char *name;
    110	char *err;
    111	int def;
    112	union {
    113		struct {	/* range_option info */
    114			int min;
    115			int max;
    116		} r;
    117		struct {	/* list_option info */
    118			int nr;
    119			struct atl1_opt_list {
    120				int i;
    121				char *str;
    122			} *p;
    123		} l;
    124	} arg;
    125};
    126
    127static int atl1_validate_option(int *value, struct atl1_option *opt,
    128				struct pci_dev *pdev)
    129{
    130	if (*value == OPTION_UNSET) {
    131		*value = opt->def;
    132		return 0;
    133	}
    134
    135	switch (opt->type) {
    136	case enable_option:
    137		switch (*value) {
    138		case OPTION_ENABLED:
    139			dev_info(&pdev->dev, "%s enabled\n", opt->name);
    140			return 0;
    141		case OPTION_DISABLED:
    142			dev_info(&pdev->dev, "%s disabled\n", opt->name);
    143			return 0;
    144		}
    145		break;
    146	case range_option:
    147		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
    148			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
    149				*value);
    150			return 0;
    151		}
    152		break;
    153	case list_option:{
    154			int i;
    155			struct atl1_opt_list *ent;
    156
    157			for (i = 0; i < opt->arg.l.nr; i++) {
    158				ent = &opt->arg.l.p[i];
    159				if (*value == ent->i) {
    160					if (ent->str[0] != '\0')
    161						dev_info(&pdev->dev, "%s\n",
    162							ent->str);
    163					return 0;
    164				}
    165			}
    166		}
    167		break;
    168
    169	default:
    170		break;
    171	}
    172
    173	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
    174		opt->name, *value, opt->err);
    175	*value = opt->def;
    176	return -1;
    177}
    178
    179/**
    180 * atl1_check_options - Range Checking for Command Line Parameters
    181 * @adapter: board private structure
    182 *
    183 * This routine checks all command line parameters for valid user
    184 * input.  If an invalid value is given, or if no user specified
    185 * value exists, a default value is used.  The final value is stored
    186 * in a variable in the adapter structure.
    187 */
    188static void atl1_check_options(struct atl1_adapter *adapter)
    189{
    190	struct pci_dev *pdev = adapter->pdev;
    191	int bd = adapter->bd_number;
    192	if (bd >= ATL1_MAX_NIC) {
    193		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
    194		dev_notice(&pdev->dev, "using defaults for all values\n");
    195	}
    196	{			/* Interrupt Moderate Timer */
    197		struct atl1_option opt = {
    198			.type = range_option,
    199			.name = "Interrupt Moderator Timer",
    200			.err = "using default of "
    201				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
    202			.def = DEFAULT_INT_MOD_CNT,
    203			.arg = {.r = {.min = MIN_INT_MOD_CNT,
    204					.max = MAX_INT_MOD_CNT} }
    205		};
    206		int val;
    207		if (num_int_mod_timer > bd) {
    208			val = int_mod_timer[bd];
    209			atl1_validate_option(&val, &opt, pdev);
    210			adapter->imt = (u16) val;
    211		} else
    212			adapter->imt = (u16) (opt.def);
    213	}
    214}
    215
    216/*
    217 * atl1_pci_tbl - PCI Device ID Table
    218 */
    219static const struct pci_device_id atl1_pci_tbl[] = {
    220	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
    221	/* required last entry */
    222	{0,}
    223};
    224MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
    225
    226static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
    227	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
    228
    229static int debug = -1;
    230module_param(debug, int, 0);
    231MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
    232
    233/*
    234 * Reset the transmit and receive units; mask and clear all interrupts.
    235 * hw - Struct containing variables accessed by shared code
    236 * return : 0  or  idle status (if error)
    237 */
    238static s32 atl1_reset_hw(struct atl1_hw *hw)
    239{
    240	struct pci_dev *pdev = hw->back->pdev;
    241	struct atl1_adapter *adapter = hw->back;
    242	u32 icr;
    243	int i;
    244
    245	/*
    246	 * Clear Interrupt mask to stop board from generating
    247	 * interrupts & Clear any pending interrupt events
    248	 */
    249	/*
    250	 * atlx_irq_disable(adapter);
    251	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
    252	 */
    253
    254	/*
    255	 * Issue Soft Reset to the MAC.  This will reset the chip's
    256	 * transmit, receive, DMA.  It will not effect
    257	 * the current PCI configuration.  The global reset bit is self-
    258	 * clearing, and should clear within a microsecond.
    259	 */
    260	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
    261	ioread32(hw->hw_addr + REG_MASTER_CTRL);
    262
    263	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
    264	ioread16(hw->hw_addr + REG_PHY_ENABLE);
    265
    266	/* delay about 1ms */
    267	msleep(1);
    268
    269	/* Wait at least 10ms for All module to be Idle */
    270	for (i = 0; i < 10; i++) {
    271		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
    272		if (!icr)
    273			break;
    274		/* delay 1 ms */
    275		msleep(1);
    276		/* FIXME: still the right way to do this? */
    277		cpu_relax();
    278	}
    279
    280	if (icr) {
    281		if (netif_msg_hw(adapter))
    282			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
    283		return icr;
    284	}
    285
    286	return 0;
    287}
    288
    289/* function about EEPROM
    290 *
    291 * check_eeprom_exist
    292 * return 0 if eeprom exist
    293 */
    294static int atl1_check_eeprom_exist(struct atl1_hw *hw)
    295{
    296	u32 value;
    297	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
    298	if (value & SPI_FLASH_CTRL_EN_VPD) {
    299		value &= ~SPI_FLASH_CTRL_EN_VPD;
    300		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
    301	}
    302
    303	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
    304	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
    305}
    306
    307static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
    308{
    309	int i;
    310	u32 control;
    311
    312	if (offset & 3)
    313		/* address do not align */
    314		return false;
    315
    316	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
    317	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
    318	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
    319	ioread32(hw->hw_addr + REG_VPD_CAP);
    320
    321	for (i = 0; i < 10; i++) {
    322		msleep(2);
    323		control = ioread32(hw->hw_addr + REG_VPD_CAP);
    324		if (control & VPD_CAP_VPD_FLAG)
    325			break;
    326	}
    327	if (control & VPD_CAP_VPD_FLAG) {
    328		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
    329		return true;
    330	}
    331	/* timeout */
    332	return false;
    333}
    334
    335/*
    336 * Reads the value from a PHY register
    337 * hw - Struct containing variables accessed by shared code
    338 * reg_addr - address of the PHY register to read
    339 */
    340static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
    341{
    342	u32 val;
    343	int i;
    344
    345	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
    346		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
    347		MDIO_CLK_SEL_SHIFT;
    348	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
    349	ioread32(hw->hw_addr + REG_MDIO_CTRL);
    350
    351	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
    352		udelay(2);
    353		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
    354		if (!(val & (MDIO_START | MDIO_BUSY)))
    355			break;
    356	}
    357	if (!(val & (MDIO_START | MDIO_BUSY))) {
    358		*phy_data = (u16) val;
    359		return 0;
    360	}
    361	return ATLX_ERR_PHY;
    362}
    363
    364#define CUSTOM_SPI_CS_SETUP	2
    365#define CUSTOM_SPI_CLK_HI	2
    366#define CUSTOM_SPI_CLK_LO	2
    367#define CUSTOM_SPI_CS_HOLD	2
    368#define CUSTOM_SPI_CS_HI	3
    369
    370static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
    371{
    372	int i;
    373	u32 value;
    374
    375	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
    376	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
    377
    378	value = SPI_FLASH_CTRL_WAIT_READY |
    379	    (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
    380	    SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
    381					     SPI_FLASH_CTRL_CLK_HI_MASK) <<
    382	    SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
    383					   SPI_FLASH_CTRL_CLK_LO_MASK) <<
    384	    SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
    385					   SPI_FLASH_CTRL_CS_HOLD_MASK) <<
    386	    SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
    387					    SPI_FLASH_CTRL_CS_HI_MASK) <<
    388	    SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
    389	    SPI_FLASH_CTRL_INS_SHIFT;
    390
    391	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
    392
    393	value |= SPI_FLASH_CTRL_START;
    394	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
    395	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
    396
    397	for (i = 0; i < 10; i++) {
    398		msleep(1);
    399		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
    400		if (!(value & SPI_FLASH_CTRL_START))
    401			break;
    402	}
    403
    404	if (value & SPI_FLASH_CTRL_START)
    405		return false;
    406
    407	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);
    408
    409	return true;
    410}
    411
    412/*
    413 * get_permanent_address
    414 * return 0 if get valid mac address,
    415 */
    416static int atl1_get_permanent_address(struct atl1_hw *hw)
    417{
    418	u32 addr[2];
    419	u32 i, control;
    420	u16 reg;
    421	u8 eth_addr[ETH_ALEN];
    422	bool key_valid;
    423
    424	if (is_valid_ether_addr(hw->perm_mac_addr))
    425		return 0;
    426
    427	/* init */
    428	addr[0] = addr[1] = 0;
    429
    430	if (!atl1_check_eeprom_exist(hw)) {
    431		reg = 0;
    432		key_valid = false;
    433		/* Read out all EEPROM content */
    434		i = 0;
    435		while (1) {
    436			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
    437				if (key_valid) {
    438					if (reg == REG_MAC_STA_ADDR)
    439						addr[0] = control;
    440					else if (reg == (REG_MAC_STA_ADDR + 4))
    441						addr[1] = control;
    442					key_valid = false;
    443				} else if ((control & 0xff) == 0x5A) {
    444					key_valid = true;
    445					reg = (u16) (control >> 16);
    446				} else
    447					break;
    448			} else
    449				/* read error */
    450				break;
    451			i += 4;
    452		}
    453
    454		*(u32 *) &eth_addr[2] = swab32(addr[0]);
    455		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
    456		if (is_valid_ether_addr(eth_addr)) {
    457			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
    458			return 0;
    459		}
    460	}
    461
    462	/* see if SPI FLAGS exist ? */
    463	addr[0] = addr[1] = 0;
    464	reg = 0;
    465	key_valid = false;
    466	i = 0;
    467	while (1) {
    468		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
    469			if (key_valid) {
    470				if (reg == REG_MAC_STA_ADDR)
    471					addr[0] = control;
    472				else if (reg == (REG_MAC_STA_ADDR + 4))
    473					addr[1] = control;
    474				key_valid = false;
    475			} else if ((control & 0xff) == 0x5A) {
    476				key_valid = true;
    477				reg = (u16) (control >> 16);
    478			} else
    479				/* data end */
    480				break;
    481		} else
    482			/* read error */
    483			break;
    484		i += 4;
    485	}
    486
    487	*(u32 *) &eth_addr[2] = swab32(addr[0]);
    488	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
    489	if (is_valid_ether_addr(eth_addr)) {
    490		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
    491		return 0;
    492	}
    493
    494	/*
    495	 * On some motherboards, the MAC address is written by the
    496	 * BIOS directly to the MAC register during POST, and is
    497	 * not stored in eeprom.  If all else thus far has failed
    498	 * to fetch the permanent MAC address, try reading it directly.
    499	 */
    500	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
    501	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
    502	*(u32 *) &eth_addr[2] = swab32(addr[0]);
    503	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
    504	if (is_valid_ether_addr(eth_addr)) {
    505		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
    506		return 0;
    507	}
    508
    509	return 1;
    510}
    511
    512/*
    513 * Reads the adapter's MAC address from the EEPROM
    514 * hw - Struct containing variables accessed by shared code
    515 */
    516static s32 atl1_read_mac_addr(struct atl1_hw *hw)
    517{
    518	s32 ret = 0;
    519	u16 i;
    520
    521	if (atl1_get_permanent_address(hw)) {
    522		eth_random_addr(hw->perm_mac_addr);
    523		ret = 1;
    524	}
    525
    526	for (i = 0; i < ETH_ALEN; i++)
    527		hw->mac_addr[i] = hw->perm_mac_addr[i];
    528	return ret;
    529}
    530
    531/*
    532 * Hashes an address to determine its location in the multicast table
    533 * hw - Struct containing variables accessed by shared code
    534 * mc_addr - the multicast address to hash
    535 *
    536 * atl1_hash_mc_addr
    537 *  purpose
    538 *      set hash value for a multicast address
    539 *      hash calcu processing :
    540 *          1. calcu 32bit CRC for multicast address
    541 *          2. reverse crc with MSB to LSB
    542 */
    543static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
    544{
    545	u32 crc32, value = 0;
    546	int i;
    547
    548	crc32 = ether_crc_le(6, mc_addr);
    549	for (i = 0; i < 32; i++)
    550		value |= (((crc32 >> i) & 1) << (31 - i));
    551
    552	return value;
    553}
    554
    555/*
    556 * Sets the bit in the multicast table corresponding to the hash value.
    557 * hw - Struct containing variables accessed by shared code
    558 * hash_value - Multicast address hash value
    559 */
    560static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
    561{
    562	u32 hash_bit, hash_reg;
    563	u32 mta;
    564
    565	/*
    566	 * The HASH Table  is a register array of 2 32-bit registers.
    567	 * It is treated like an array of 64 bits.  We want to set
    568	 * bit BitArray[hash_value]. So we figure out what register
    569	 * the bit is in, read it, OR in the new bit, then write
    570	 * back the new value.  The register is determined by the
    571	 * upper 7 bits of the hash value and the bit within that
    572	 * register are determined by the lower 5 bits of the value.
    573	 */
    574	hash_reg = (hash_value >> 31) & 0x1;
    575	hash_bit = (hash_value >> 26) & 0x1F;
    576	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
    577	mta |= (1 << hash_bit);
    578	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
    579}
    580
    581/*
    582 * Writes a value to a PHY register
    583 * hw - Struct containing variables accessed by shared code
    584 * reg_addr - address of the PHY register to write
    585 * data - data to write to the PHY
    586 */
    587static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
    588{
    589	int i;
    590	u32 val;
    591
    592	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
    593	    (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
    594	    MDIO_SUP_PREAMBLE |
    595	    MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
    596	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
    597	ioread32(hw->hw_addr + REG_MDIO_CTRL);
    598
    599	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
    600		udelay(2);
    601		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
    602		if (!(val & (MDIO_START | MDIO_BUSY)))
    603			break;
    604	}
    605
    606	if (!(val & (MDIO_START | MDIO_BUSY)))
    607		return 0;
    608
    609	return ATLX_ERR_PHY;
    610}
    611
    612/*
    613 * Make L001's PHY out of Power Saving State (bug)
    614 * hw - Struct containing variables accessed by shared code
    615 * when power on, L001's PHY always on Power saving State
    616 * (Gigabit Link forbidden)
    617 */
    618static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
    619{
    620	s32 ret;
    621	ret = atl1_write_phy_reg(hw, 29, 0x0029);
    622	if (ret)
    623		return ret;
    624	return atl1_write_phy_reg(hw, 30, 0);
    625}
    626
    627/*
    628 * Resets the PHY and make all config validate
    629 * hw - Struct containing variables accessed by shared code
    630 *
    631 * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
    632 */
    633static s32 atl1_phy_reset(struct atl1_hw *hw)
    634{
    635	struct pci_dev *pdev = hw->back->pdev;
    636	struct atl1_adapter *adapter = hw->back;
    637	s32 ret_val;
    638	u16 phy_data;
    639
    640	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
    641	    hw->media_type == MEDIA_TYPE_1000M_FULL)
    642		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
    643	else {
    644		switch (hw->media_type) {
    645		case MEDIA_TYPE_100M_FULL:
    646			phy_data =
    647			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
    648			    MII_CR_RESET;
    649			break;
    650		case MEDIA_TYPE_100M_HALF:
    651			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
    652			break;
    653		case MEDIA_TYPE_10M_FULL:
    654			phy_data =
    655			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
    656			break;
    657		default:
    658			/* MEDIA_TYPE_10M_HALF: */
    659			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
    660			break;
    661		}
    662	}
    663
    664	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
    665	if (ret_val) {
    666		u32 val;
    667		int i;
    668		/* pcie serdes link may be down! */
    669		if (netif_msg_hw(adapter))
    670			dev_dbg(&pdev->dev, "pcie phy link down\n");
    671
    672		for (i = 0; i < 25; i++) {
    673			msleep(1);
    674			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
    675			if (!(val & (MDIO_START | MDIO_BUSY)))
    676				break;
    677		}
    678
    679		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
    680			if (netif_msg_hw(adapter))
    681				dev_warn(&pdev->dev,
    682					"pcie link down at least 25ms\n");
    683			return ret_val;
    684		}
    685	}
    686	return 0;
    687}
    688
    689/*
    690 * Configures PHY autoneg and flow control advertisement settings
    691 * hw - Struct containing variables accessed by shared code
    692 */
    693static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
    694{
    695	s32 ret_val;
    696	s16 mii_autoneg_adv_reg;
    697	s16 mii_1000t_ctrl_reg;
    698
    699	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
    700	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
    701
    702	/* Read the MII 1000Base-T Control Register (Address 9). */
    703	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
    704
    705	/*
    706	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
    707	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
    708	 * the  1000Base-T Control Register (Address 9).
    709	 */
    710	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
    711	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
    712
    713	/*
    714	 * Need to parse media_type  and set up
    715	 * the appropriate PHY registers.
    716	 */
    717	switch (hw->media_type) {
    718	case MEDIA_TYPE_AUTO_SENSOR:
    719		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
    720					MII_AR_10T_FD_CAPS |
    721					MII_AR_100TX_HD_CAPS |
    722					MII_AR_100TX_FD_CAPS);
    723		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
    724		break;
    725
    726	case MEDIA_TYPE_1000M_FULL:
    727		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
    728		break;
    729
    730	case MEDIA_TYPE_100M_FULL:
    731		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
    732		break;
    733
    734	case MEDIA_TYPE_100M_HALF:
    735		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
    736		break;
    737
    738	case MEDIA_TYPE_10M_FULL:
    739		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
    740		break;
    741
    742	default:
    743		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
    744		break;
    745	}
    746
    747	/* flow control fixed to enable all */
    748	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
    749
    750	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
    751	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
    752
    753	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
    754	if (ret_val)
    755		return ret_val;
    756
    757	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
    758	if (ret_val)
    759		return ret_val;
    760
    761	return 0;
    762}
    763
    764/*
    765 * Configures link settings.
    766 * hw - Struct containing variables accessed by shared code
    767 * Assumes the hardware has previously been reset and the
    768 * transmitter and receiver are not enabled.
    769 */
    770static s32 atl1_setup_link(struct atl1_hw *hw)
    771{
    772	struct pci_dev *pdev = hw->back->pdev;
    773	struct atl1_adapter *adapter = hw->back;
    774	s32 ret_val;
    775
    776	/*
    777	 * Options:
    778	 *  PHY will advertise value(s) parsed from
    779	 *  autoneg_advertised and fc
    780	 *  no matter what autoneg is , We will not wait link result.
    781	 */
    782	ret_val = atl1_phy_setup_autoneg_adv(hw);
    783	if (ret_val) {
    784		if (netif_msg_link(adapter))
    785			dev_dbg(&pdev->dev,
    786				"error setting up autonegotiation\n");
    787		return ret_val;
    788	}
    789	/* SW.Reset , En-Auto-Neg if needed */
    790	ret_val = atl1_phy_reset(hw);
    791	if (ret_val) {
    792		if (netif_msg_link(adapter))
    793			dev_dbg(&pdev->dev, "error resetting phy\n");
    794		return ret_val;
    795	}
    796	hw->phy_configured = true;
    797	return ret_val;
    798}
    799
    800static void atl1_init_flash_opcode(struct atl1_hw *hw)
    801{
    802	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
    803		/* Atmel */
    804		hw->flash_vendor = 0;
    805
    806	/* Init OP table */
    807	iowrite8(flash_table[hw->flash_vendor].cmd_program,
    808		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
    809	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
    810		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
    811	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
    812		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
    813	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
    814		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
    815	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
    816		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
    817	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
    818		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
    819	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
    820		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
    821	iowrite8(flash_table[hw->flash_vendor].cmd_read,
    822		hw->hw_addr + REG_SPI_FLASH_OP_READ);
    823}
    824
    825/*
    826 * Performs basic configuration of the adapter.
    827 * hw - Struct containing variables accessed by shared code
    828 * Assumes that the controller has previously been reset and is in a
    829 * post-reset uninitialized state. Initializes multicast table,
    830 * and  Calls routines to setup link
    831 * Leaves the transmit and receive units disabled and uninitialized.
    832 */
    833static s32 atl1_init_hw(struct atl1_hw *hw)
    834{
    835	u32 ret_val = 0;
    836
    837	/* Zero out the Multicast HASH table */
    838	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
    839	/* clear the old settings from the multicast hash table */
    840	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
    841
    842	atl1_init_flash_opcode(hw);
    843
    844	if (!hw->phy_configured) {
    845		/* enable GPHY LinkChange Interrupt */
    846		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
    847		if (ret_val)
    848			return ret_val;
    849		/* make PHY out of power-saving state */
    850		ret_val = atl1_phy_leave_power_saving(hw);
    851		if (ret_val)
    852			return ret_val;
    853		/* Call a subroutine to configure the link */
    854		ret_val = atl1_setup_link(hw);
    855	}
    856	return ret_val;
    857}
    858
    859/*
    860 * Detects the current speed and duplex settings of the hardware.
    861 * hw - Struct containing variables accessed by shared code
    862 * speed - Speed of the connection
    863 * duplex - Duplex setting of the connection
    864 */
    865static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
    866{
    867	struct pci_dev *pdev = hw->back->pdev;
    868	struct atl1_adapter *adapter = hw->back;
    869	s32 ret_val;
    870	u16 phy_data;
    871
    872	/* ; --- Read   PHY Specific Status Register (17) */
    873	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
    874	if (ret_val)
    875		return ret_val;
    876
    877	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
    878		return ATLX_ERR_PHY_RES;
    879
    880	switch (phy_data & MII_ATLX_PSSR_SPEED) {
    881	case MII_ATLX_PSSR_1000MBS:
    882		*speed = SPEED_1000;
    883		break;
    884	case MII_ATLX_PSSR_100MBS:
    885		*speed = SPEED_100;
    886		break;
    887	case MII_ATLX_PSSR_10MBS:
    888		*speed = SPEED_10;
    889		break;
    890	default:
    891		if (netif_msg_hw(adapter))
    892			dev_dbg(&pdev->dev, "error getting speed\n");
    893		return ATLX_ERR_PHY_SPEED;
    894	}
    895	if (phy_data & MII_ATLX_PSSR_DPLX)
    896		*duplex = FULL_DUPLEX;
    897	else
    898		*duplex = HALF_DUPLEX;
    899
    900	return 0;
    901}
    902
    903static void atl1_set_mac_addr(struct atl1_hw *hw)
    904{
    905	u32 value;
    906	/*
    907	 * 00-0B-6A-F6-00-DC
    908	 * 0:  6AF600DC   1: 000B
    909	 * low dword
    910	 */
    911	value = (((u32) hw->mac_addr[2]) << 24) |
    912	    (((u32) hw->mac_addr[3]) << 16) |
    913	    (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
    914	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
    915	/* high dword */
    916	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
    917	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
    918}
    919
    920/**
    921 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
    922 * @adapter: board private structure to initialize
    923 *
    924 * atl1_sw_init initializes the Adapter private data structure.
    925 * Fields are initialized based on PCI device information and
    926 * OS network device settings (MTU size).
    927 */
    928static int atl1_sw_init(struct atl1_adapter *adapter)
    929{
    930	struct atl1_hw *hw = &adapter->hw;
    931	struct net_device *netdev = adapter->netdev;
    932
    933	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
    934	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
    935
    936	adapter->wol = 0;
    937	device_set_wakeup_enable(&adapter->pdev->dev, false);
    938	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
    939	adapter->ict = 50000;		/* 100ms */
    940	adapter->link_speed = SPEED_0;	/* hardware init */
    941	adapter->link_duplex = FULL_DUPLEX;
    942
    943	hw->phy_configured = false;
    944	hw->preamble_len = 7;
    945	hw->ipgt = 0x60;
    946	hw->min_ifg = 0x50;
    947	hw->ipgr1 = 0x40;
    948	hw->ipgr2 = 0x60;
    949	hw->max_retry = 0xf;
    950	hw->lcol = 0x37;
    951	hw->jam_ipg = 7;
    952	hw->rfd_burst = 8;
    953	hw->rrd_burst = 8;
    954	hw->rfd_fetch_gap = 1;
    955	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
    956	hw->rx_jumbo_lkah = 1;
    957	hw->rrd_ret_timer = 16;
    958	hw->tpd_burst = 4;
    959	hw->tpd_fetch_th = 16;
    960	hw->txf_burst = 0x100;
    961	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
    962	hw->tpd_fetch_gap = 1;
    963	hw->rcb_value = atl1_rcb_64;
    964	hw->dma_ord = atl1_dma_ord_enh;
    965	hw->dmar_block = atl1_dma_req_256;
    966	hw->dmaw_block = atl1_dma_req_256;
    967	hw->cmb_rrd = 4;
    968	hw->cmb_tpd = 4;
    969	hw->cmb_rx_timer = 1;	/* about 2us */
    970	hw->cmb_tx_timer = 1;	/* about 2us */
    971	hw->smb_timer = 100000;	/* about 200ms */
    972
    973	spin_lock_init(&adapter->lock);
    974	spin_lock_init(&adapter->mb_lock);
    975
    976	return 0;
    977}
    978
    979static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
    980{
    981	struct atl1_adapter *adapter = netdev_priv(netdev);
    982	u16 result;
    983
    984	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
    985
    986	return result;
    987}
    988
    989static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
    990	int val)
    991{
    992	struct atl1_adapter *adapter = netdev_priv(netdev);
    993
    994	atl1_write_phy_reg(&adapter->hw, reg_num, val);
    995}
    996
    997static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
    998{
    999	struct atl1_adapter *adapter = netdev_priv(netdev);
   1000	unsigned long flags;
   1001	int retval;
   1002
   1003	if (!netif_running(netdev))
   1004		return -EINVAL;
   1005
   1006	spin_lock_irqsave(&adapter->lock, flags);
   1007	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
   1008	spin_unlock_irqrestore(&adapter->lock, flags);
   1009
   1010	return retval;
   1011}
   1012
   1013/**
   1014 * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
   1015 * @adapter: board private structure
   1016 *
   1017 * Return 0 on success, negative on failure
   1018 */
   1019static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
   1020{
   1021	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   1022	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1023	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
   1024	struct atl1_ring_header *ring_header = &adapter->ring_header;
   1025	struct pci_dev *pdev = adapter->pdev;
   1026	int size;
   1027	u8 offset = 0;
   1028
   1029	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
   1030	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
   1031	if (unlikely(!tpd_ring->buffer_info)) {
   1032		if (netif_msg_drv(adapter))
   1033			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
   1034				size);
   1035		goto err_nomem;
   1036	}
   1037	rfd_ring->buffer_info =
   1038		(tpd_ring->buffer_info + tpd_ring->count);
   1039
   1040	/*
   1041	 * real ring DMA buffer
   1042	 * each ring/block may need up to 8 bytes for alignment, hence the
   1043	 * additional 40 bytes tacked onto the end.
   1044	 */
   1045	ring_header->size =
   1046		sizeof(struct tx_packet_desc) * tpd_ring->count
   1047		+ sizeof(struct rx_free_desc) * rfd_ring->count
   1048		+ sizeof(struct rx_return_desc) * rrd_ring->count
   1049		+ sizeof(struct coals_msg_block)
   1050		+ sizeof(struct stats_msg_block)
   1051		+ 40;
   1052
   1053	ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
   1054					       &ring_header->dma, GFP_KERNEL);
   1055	if (unlikely(!ring_header->desc)) {
   1056		if (netif_msg_drv(adapter))
   1057			dev_err(&pdev->dev, "dma_alloc_coherent failed\n");
   1058		goto err_nomem;
   1059	}
   1060
   1061	/* init TPD ring */
   1062	tpd_ring->dma = ring_header->dma;
   1063	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
   1064	tpd_ring->dma += offset;
   1065	tpd_ring->desc = (u8 *) ring_header->desc + offset;
   1066	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
   1067
   1068	/* init RFD ring */
   1069	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
   1070	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
   1071	rfd_ring->dma += offset;
   1072	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
   1073	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
   1074
   1075
   1076	/* init RRD ring */
   1077	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
   1078	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
   1079	rrd_ring->dma += offset;
   1080	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
   1081	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
   1082
   1083
   1084	/* init CMB */
   1085	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
   1086	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
   1087	adapter->cmb.dma += offset;
   1088	adapter->cmb.cmb = (struct coals_msg_block *)
   1089		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
   1090
   1091	/* init SMB */
   1092	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
   1093	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
   1094	adapter->smb.dma += offset;
   1095	adapter->smb.smb = (struct stats_msg_block *)
   1096		((u8 *) adapter->cmb.cmb +
   1097		(sizeof(struct coals_msg_block) + offset));
   1098
   1099	return 0;
   1100
   1101err_nomem:
   1102	kfree(tpd_ring->buffer_info);
   1103	return -ENOMEM;
   1104}
   1105
   1106static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
   1107{
   1108	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   1109	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1110	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
   1111
   1112	atomic_set(&tpd_ring->next_to_use, 0);
   1113	atomic_set(&tpd_ring->next_to_clean, 0);
   1114
   1115	rfd_ring->next_to_clean = 0;
   1116	atomic_set(&rfd_ring->next_to_use, 0);
   1117
   1118	rrd_ring->next_to_use = 0;
   1119	atomic_set(&rrd_ring->next_to_clean, 0);
   1120}
   1121
   1122/**
   1123 * atl1_clean_rx_ring - Free RFD Buffers
   1124 * @adapter: board private structure
   1125 */
   1126static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
   1127{
   1128	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1129	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
   1130	struct atl1_buffer *buffer_info;
   1131	struct pci_dev *pdev = adapter->pdev;
   1132	unsigned long size;
   1133	unsigned int i;
   1134
   1135	/* Free all the Rx ring sk_buffs */
   1136	for (i = 0; i < rfd_ring->count; i++) {
   1137		buffer_info = &rfd_ring->buffer_info[i];
   1138		if (buffer_info->dma) {
   1139			dma_unmap_page(&pdev->dev, buffer_info->dma,
   1140				       buffer_info->length, DMA_FROM_DEVICE);
   1141			buffer_info->dma = 0;
   1142		}
   1143		if (buffer_info->skb) {
   1144			dev_kfree_skb(buffer_info->skb);
   1145			buffer_info->skb = NULL;
   1146		}
   1147	}
   1148
   1149	size = sizeof(struct atl1_buffer) * rfd_ring->count;
   1150	memset(rfd_ring->buffer_info, 0, size);
   1151
   1152	/* Zero out the descriptor ring */
   1153	memset(rfd_ring->desc, 0, rfd_ring->size);
   1154
   1155	rfd_ring->next_to_clean = 0;
   1156	atomic_set(&rfd_ring->next_to_use, 0);
   1157
   1158	rrd_ring->next_to_use = 0;
   1159	atomic_set(&rrd_ring->next_to_clean, 0);
   1160}
   1161
   1162/**
   1163 * atl1_clean_tx_ring - Free Tx Buffers
   1164 * @adapter: board private structure
   1165 */
   1166static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
   1167{
   1168	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   1169	struct atl1_buffer *buffer_info;
   1170	struct pci_dev *pdev = adapter->pdev;
   1171	unsigned long size;
   1172	unsigned int i;
   1173
   1174	/* Free all the Tx ring sk_buffs */
   1175	for (i = 0; i < tpd_ring->count; i++) {
   1176		buffer_info = &tpd_ring->buffer_info[i];
   1177		if (buffer_info->dma) {
   1178			dma_unmap_page(&pdev->dev, buffer_info->dma,
   1179				       buffer_info->length, DMA_TO_DEVICE);
   1180			buffer_info->dma = 0;
   1181		}
   1182	}
   1183
   1184	for (i = 0; i < tpd_ring->count; i++) {
   1185		buffer_info = &tpd_ring->buffer_info[i];
   1186		if (buffer_info->skb) {
   1187			dev_kfree_skb_any(buffer_info->skb);
   1188			buffer_info->skb = NULL;
   1189		}
   1190	}
   1191
   1192	size = sizeof(struct atl1_buffer) * tpd_ring->count;
   1193	memset(tpd_ring->buffer_info, 0, size);
   1194
   1195	/* Zero out the descriptor ring */
   1196	memset(tpd_ring->desc, 0, tpd_ring->size);
   1197
   1198	atomic_set(&tpd_ring->next_to_use, 0);
   1199	atomic_set(&tpd_ring->next_to_clean, 0);
   1200}
   1201
   1202/**
   1203 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
   1204 * @adapter: board private structure
   1205 *
   1206 * Free all transmit software resources
   1207 */
   1208static void atl1_free_ring_resources(struct atl1_adapter *adapter)
   1209{
   1210	struct pci_dev *pdev = adapter->pdev;
   1211	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   1212	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1213	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
   1214	struct atl1_ring_header *ring_header = &adapter->ring_header;
   1215
   1216	atl1_clean_tx_ring(adapter);
   1217	atl1_clean_rx_ring(adapter);
   1218
   1219	kfree(tpd_ring->buffer_info);
   1220	dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc,
   1221			  ring_header->dma);
   1222
   1223	tpd_ring->buffer_info = NULL;
   1224	tpd_ring->desc = NULL;
   1225	tpd_ring->dma = 0;
   1226
   1227	rfd_ring->buffer_info = NULL;
   1228	rfd_ring->desc = NULL;
   1229	rfd_ring->dma = 0;
   1230
   1231	rrd_ring->desc = NULL;
   1232	rrd_ring->dma = 0;
   1233
   1234	adapter->cmb.dma = 0;
   1235	adapter->cmb.cmb = NULL;
   1236
   1237	adapter->smb.dma = 0;
   1238	adapter->smb.smb = NULL;
   1239}
   1240
   1241static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
   1242{
   1243	u32 value;
   1244	struct atl1_hw *hw = &adapter->hw;
   1245	struct net_device *netdev = adapter->netdev;
   1246	/* Config MAC CTRL Register */
   1247	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
   1248	/* duplex */
   1249	if (FULL_DUPLEX == adapter->link_duplex)
   1250		value |= MAC_CTRL_DUPLX;
   1251	/* speed */
   1252	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
   1253			 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
   1254		  MAC_CTRL_SPEED_SHIFT);
   1255	/* flow control */
   1256	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
   1257	/* PAD & CRC */
   1258	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
   1259	/* preamble length */
   1260	value |= (((u32) adapter->hw.preamble_len
   1261		   & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
   1262	/* vlan */
   1263	__atlx_vlan_mode(netdev->features, &value);
   1264	/* rx checksum
   1265	   if (adapter->rx_csum)
   1266	   value |= MAC_CTRL_RX_CHKSUM_EN;
   1267	 */
   1268	/* filter mode */
   1269	value |= MAC_CTRL_BC_EN;
   1270	if (netdev->flags & IFF_PROMISC)
   1271		value |= MAC_CTRL_PROMIS_EN;
   1272	else if (netdev->flags & IFF_ALLMULTI)
   1273		value |= MAC_CTRL_MC_ALL_EN;
   1274	/* value |= MAC_CTRL_LOOPBACK; */
   1275	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
   1276}
   1277
   1278static u32 atl1_check_link(struct atl1_adapter *adapter)
   1279{
   1280	struct atl1_hw *hw = &adapter->hw;
   1281	struct net_device *netdev = adapter->netdev;
   1282	u32 ret_val;
   1283	u16 speed, duplex, phy_data;
   1284	int reconfig = 0;
   1285
   1286	/* MII_BMSR must read twice */
   1287	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
   1288	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
   1289	if (!(phy_data & BMSR_LSTATUS)) {
   1290		/* link down */
   1291		if (netif_carrier_ok(netdev)) {
   1292			/* old link state: Up */
   1293			if (netif_msg_link(adapter))
   1294				dev_info(&adapter->pdev->dev, "link is down\n");
   1295			adapter->link_speed = SPEED_0;
   1296			netif_carrier_off(netdev);
   1297		}
   1298		return 0;
   1299	}
   1300
   1301	/* Link Up */
   1302	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
   1303	if (ret_val)
   1304		return ret_val;
   1305
   1306	switch (hw->media_type) {
   1307	case MEDIA_TYPE_1000M_FULL:
   1308		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
   1309			reconfig = 1;
   1310		break;
   1311	case MEDIA_TYPE_100M_FULL:
   1312		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
   1313			reconfig = 1;
   1314		break;
   1315	case MEDIA_TYPE_100M_HALF:
   1316		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
   1317			reconfig = 1;
   1318		break;
   1319	case MEDIA_TYPE_10M_FULL:
   1320		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
   1321			reconfig = 1;
   1322		break;
   1323	case MEDIA_TYPE_10M_HALF:
   1324		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
   1325			reconfig = 1;
   1326		break;
   1327	}
   1328
   1329	/* link result is our setting */
   1330	if (!reconfig) {
   1331		if (adapter->link_speed != speed ||
   1332		    adapter->link_duplex != duplex) {
   1333			adapter->link_speed = speed;
   1334			adapter->link_duplex = duplex;
   1335			atl1_setup_mac_ctrl(adapter);
   1336			if (netif_msg_link(adapter))
   1337				dev_info(&adapter->pdev->dev,
   1338					"%s link is up %d Mbps %s\n",
   1339					netdev->name, adapter->link_speed,
   1340					adapter->link_duplex == FULL_DUPLEX ?
   1341					"full duplex" : "half duplex");
   1342		}
   1343		if (!netif_carrier_ok(netdev)) {
   1344			/* Link down -> Up */
   1345			netif_carrier_on(netdev);
   1346		}
   1347		return 0;
   1348	}
   1349
   1350	/* change original link status */
   1351	if (netif_carrier_ok(netdev)) {
   1352		adapter->link_speed = SPEED_0;
   1353		netif_carrier_off(netdev);
   1354		netif_stop_queue(netdev);
   1355	}
   1356
   1357	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
   1358	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
   1359		switch (hw->media_type) {
   1360		case MEDIA_TYPE_100M_FULL:
   1361			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
   1362			           MII_CR_RESET;
   1363			break;
   1364		case MEDIA_TYPE_100M_HALF:
   1365			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
   1366			break;
   1367		case MEDIA_TYPE_10M_FULL:
   1368			phy_data =
   1369			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
   1370			break;
   1371		default:
   1372			/* MEDIA_TYPE_10M_HALF: */
   1373			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
   1374			break;
   1375		}
   1376		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
   1377		return 0;
   1378	}
   1379
   1380	/* auto-neg, insert timer to re-config phy */
   1381	if (!adapter->phy_timer_pending) {
   1382		adapter->phy_timer_pending = true;
   1383		mod_timer(&adapter->phy_config_timer,
   1384			  round_jiffies(jiffies + 3 * HZ));
   1385	}
   1386
   1387	return 0;
   1388}
   1389
   1390static void set_flow_ctrl_old(struct atl1_adapter *adapter)
   1391{
   1392	u32 hi, lo, value;
   1393
   1394	/* RFD Flow Control */
   1395	value = adapter->rfd_ring.count;
   1396	hi = value / 16;
   1397	if (hi < 2)
   1398		hi = 2;
   1399	lo = value * 7 / 8;
   1400
   1401	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
   1402		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
   1403	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
   1404
   1405	/* RRD Flow Control */
   1406	value = adapter->rrd_ring.count;
   1407	lo = value / 16;
   1408	hi = value * 7 / 8;
   1409	if (lo < 2)
   1410		lo = 2;
   1411	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
   1412		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
   1413	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
   1414}
   1415
   1416static void set_flow_ctrl_new(struct atl1_hw *hw)
   1417{
   1418	u32 hi, lo, value;
   1419
   1420	/* RXF Flow Control */
   1421	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
   1422	lo = value / 16;
   1423	if (lo < 192)
   1424		lo = 192;
   1425	hi = value * 7 / 8;
   1426	if (hi < lo)
   1427		hi = lo + 16;
   1428	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
   1429		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
   1430	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
   1431
   1432	/* RRD Flow Control */
   1433	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
   1434	lo = value / 8;
   1435	hi = value * 7 / 8;
   1436	if (lo < 2)
   1437		lo = 2;
   1438	if (hi < lo)
   1439		hi = lo + 3;
   1440	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
   1441		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
   1442	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
   1443}
   1444
   1445/**
   1446 * atl1_configure - Configure Transmit&Receive Unit after Reset
   1447 * @adapter: board private structure
   1448 *
   1449 * Configure the Tx /Rx unit of the MAC after a reset.
   1450 */
   1451static u32 atl1_configure(struct atl1_adapter *adapter)
   1452{
   1453	struct atl1_hw *hw = &adapter->hw;
   1454	u32 value;
   1455
   1456	/* clear interrupt status */
   1457	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
   1458
   1459	/* set MAC Address */
   1460	value = (((u32) hw->mac_addr[2]) << 24) |
   1461		(((u32) hw->mac_addr[3]) << 16) |
   1462		(((u32) hw->mac_addr[4]) << 8) |
   1463		(((u32) hw->mac_addr[5]));
   1464	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
   1465	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
   1466	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
   1467
   1468	/* tx / rx ring */
   1469
   1470	/* HI base address */
   1471	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
   1472		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
   1473	/* LO base address */
   1474	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
   1475		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
   1476	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
   1477		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
   1478	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
   1479		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
   1480	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
   1481		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
   1482	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
   1483		hw->hw_addr + REG_DESC_SMB_ADDR_LO);
   1484
   1485	/* element count */
   1486	value = adapter->rrd_ring.count;
   1487	value <<= 16;
   1488	value += adapter->rfd_ring.count;
   1489	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
   1490	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
   1491		REG_DESC_TPD_RING_SIZE);
   1492
   1493	/* Load Ptr */
   1494	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
   1495
   1496	/* config Mailbox */
   1497	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
   1498		  & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
   1499		((atomic_read(&adapter->rrd_ring.next_to_clean)
   1500		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
   1501		((atomic_read(&adapter->rfd_ring.next_to_use)
   1502		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
   1503	iowrite32(value, hw->hw_addr + REG_MAILBOX);
   1504
   1505	/* config IPG/IFG */
   1506	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
   1507		 << MAC_IPG_IFG_IPGT_SHIFT) |
   1508		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
   1509		<< MAC_IPG_IFG_MIFG_SHIFT) |
   1510		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
   1511		<< MAC_IPG_IFG_IPGR1_SHIFT) |
   1512		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
   1513		<< MAC_IPG_IFG_IPGR2_SHIFT);
   1514	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
   1515
   1516	/* config  Half-Duplex Control */
   1517	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
   1518		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
   1519		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
   1520		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
   1521		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
   1522		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
   1523		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
   1524	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
   1525
   1526	/* set Interrupt Moderator Timer */
   1527	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
   1528	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
   1529
   1530	/* set Interrupt Clear Timer */
   1531	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
   1532
   1533	/* set max frame size hw will accept */
   1534	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);
   1535
   1536	/* jumbo size & rrd retirement timer */
   1537	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
   1538		 << RXQ_JMBOSZ_TH_SHIFT) |
   1539		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
   1540		<< RXQ_JMBO_LKAH_SHIFT) |
   1541		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
   1542		<< RXQ_RRD_TIMER_SHIFT);
   1543	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
   1544
   1545	/* Flow Control */
   1546	switch (hw->dev_rev) {
   1547	case 0x8001:
   1548	case 0x9001:
   1549	case 0x9002:
   1550	case 0x9003:
   1551		set_flow_ctrl_old(adapter);
   1552		break;
   1553	default:
   1554		set_flow_ctrl_new(hw);
   1555		break;
   1556	}
   1557
   1558	/* config TXQ */
   1559	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
   1560		 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
   1561		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
   1562		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
   1563		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
   1564		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
   1565		TXQ_CTRL_EN;
   1566	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
   1567
   1568	/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
   1569	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
   1570		<< TX_JUMBO_TASK_TH_SHIFT) |
   1571		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
   1572		<< TX_TPD_MIN_IPG_SHIFT);
   1573	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
   1574
   1575	/* config RXQ */
   1576	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
   1577		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
   1578		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
   1579		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
   1580		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
   1581		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
   1582		RXQ_CTRL_EN;
   1583	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
   1584
   1585	/* config DMA Engine */
   1586	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
   1587		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
   1588		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
   1589		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
   1590		DMA_CTRL_DMAW_EN;
   1591	value |= (u32) hw->dma_ord;
   1592	if (atl1_rcb_128 == hw->rcb_value)
   1593		value |= DMA_CTRL_RCB_VALUE;
   1594	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
   1595
   1596	/* config CMB / SMB */
   1597	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
   1598		hw->cmb_tpd : adapter->tpd_ring.count;
   1599	value <<= 16;
   1600	value |= hw->cmb_rrd;
   1601	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
   1602	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
   1603	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
   1604	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
   1605
   1606	/* --- enable CMB / SMB */
   1607	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
   1608	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
   1609
   1610	value = ioread32(adapter->hw.hw_addr + REG_ISR);
   1611	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
   1612		value = 1;	/* config failed */
   1613	else
   1614		value = 0;
   1615
   1616	/* clear all interrupt status */
   1617	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
   1618	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
   1619	return value;
   1620}
   1621
   1622/*
   1623 * atl1_pcie_patch - Patch for PCIE module
   1624 */
   1625static void atl1_pcie_patch(struct atl1_adapter *adapter)
   1626{
   1627	u32 value;
   1628
   1629	/* much vendor magic here */
   1630	value = 0x6500;
   1631	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
   1632	/* pcie flow control mode change */
   1633	value = ioread32(adapter->hw.hw_addr + 0x1008);
   1634	value |= 0x8000;
   1635	iowrite32(value, adapter->hw.hw_addr + 0x1008);
   1636}
   1637
   1638/*
   1639 * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
   1640 * on PCI Command register is disable.
   1641 * The function enable this bit.
   1642 * Brackett, 2006/03/15
   1643 */
   1644static void atl1_via_workaround(struct atl1_adapter *adapter)
   1645{
   1646	unsigned long value;
   1647
   1648	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
   1649	if (value & PCI_COMMAND_INTX_DISABLE)
   1650		value &= ~PCI_COMMAND_INTX_DISABLE;
   1651	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
   1652}
   1653
   1654static void atl1_inc_smb(struct atl1_adapter *adapter)
   1655{
   1656	struct net_device *netdev = adapter->netdev;
   1657	struct stats_msg_block *smb = adapter->smb.smb;
   1658
   1659	u64 new_rx_errors = smb->rx_frag +
   1660			    smb->rx_fcs_err +
   1661			    smb->rx_len_err +
   1662			    smb->rx_sz_ov +
   1663			    smb->rx_rxf_ov +
   1664			    smb->rx_rrd_ov +
   1665			    smb->rx_align_err;
   1666	u64 new_tx_errors = smb->tx_late_col +
   1667			    smb->tx_abort_col +
   1668			    smb->tx_underrun +
   1669			    smb->tx_trunc;
   1670
   1671	/* Fill out the OS statistics structure */
   1672	adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors;
   1673	adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors;
   1674	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
   1675	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
   1676	adapter->soft_stats.multicast += smb->rx_mcast;
   1677	adapter->soft_stats.collisions += smb->tx_1_col +
   1678					  smb->tx_2_col +
   1679					  smb->tx_late_col +
   1680					  smb->tx_abort_col;
   1681
   1682	/* Rx Errors */
   1683	adapter->soft_stats.rx_errors += new_rx_errors;
   1684	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
   1685	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
   1686	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
   1687	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
   1688
   1689	adapter->soft_stats.rx_pause += smb->rx_pause;
   1690	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
   1691	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
   1692
   1693	/* Tx Errors */
   1694	adapter->soft_stats.tx_errors += new_tx_errors;
   1695	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
   1696	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
   1697	adapter->soft_stats.tx_window_errors += smb->tx_late_col;
   1698
   1699	adapter->soft_stats.excecol += smb->tx_abort_col;
   1700	adapter->soft_stats.deffer += smb->tx_defer;
   1701	adapter->soft_stats.scc += smb->tx_1_col;
   1702	adapter->soft_stats.mcc += smb->tx_2_col;
   1703	adapter->soft_stats.latecol += smb->tx_late_col;
   1704	adapter->soft_stats.tx_underrun += smb->tx_underrun;
   1705	adapter->soft_stats.tx_trunc += smb->tx_trunc;
   1706	adapter->soft_stats.tx_pause += smb->tx_pause;
   1707
   1708	netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
   1709	netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
   1710	netdev->stats.multicast = adapter->soft_stats.multicast;
   1711	netdev->stats.collisions = adapter->soft_stats.collisions;
   1712	netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
   1713	netdev->stats.rx_length_errors =
   1714		adapter->soft_stats.rx_length_errors;
   1715	netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
   1716	netdev->stats.rx_frame_errors =
   1717		adapter->soft_stats.rx_frame_errors;
   1718	netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
   1719	netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov;
   1720	netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
   1721	netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
   1722	netdev->stats.tx_aborted_errors =
   1723		adapter->soft_stats.tx_aborted_errors;
   1724	netdev->stats.tx_window_errors =
   1725		adapter->soft_stats.tx_window_errors;
   1726	netdev->stats.tx_carrier_errors =
   1727		adapter->soft_stats.tx_carrier_errors;
   1728
   1729	netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
   1730	netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
   1731}
   1732
   1733static void atl1_update_mailbox(struct atl1_adapter *adapter)
   1734{
   1735	unsigned long flags;
   1736	u32 tpd_next_to_use;
   1737	u32 rfd_next_to_use;
   1738	u32 rrd_next_to_clean;
   1739	u32 value;
   1740
   1741	spin_lock_irqsave(&adapter->mb_lock, flags);
   1742
   1743	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
   1744	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
   1745	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
   1746
   1747	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
   1748		MB_RFD_PROD_INDX_SHIFT) |
   1749		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
   1750		MB_RRD_CONS_INDX_SHIFT) |
   1751		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
   1752		MB_TPD_PROD_INDX_SHIFT);
   1753	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
   1754
   1755	spin_unlock_irqrestore(&adapter->mb_lock, flags);
   1756}
   1757
   1758static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
   1759	struct rx_return_desc *rrd, u16 offset)
   1760{
   1761	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1762
   1763	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
   1764		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
   1765		if (++rfd_ring->next_to_clean == rfd_ring->count) {
   1766			rfd_ring->next_to_clean = 0;
   1767		}
   1768	}
   1769}
   1770
   1771static void atl1_update_rfd_index(struct atl1_adapter *adapter,
   1772	struct rx_return_desc *rrd)
   1773{
   1774	u16 num_buf;
   1775
   1776	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
   1777		adapter->rx_buffer_len;
   1778	if (rrd->num_buf == num_buf)
   1779		/* clean alloc flag for bad rrd */
   1780		atl1_clean_alloc_flag(adapter, rrd, num_buf);
   1781}
   1782
   1783static void atl1_rx_checksum(struct atl1_adapter *adapter,
   1784	struct rx_return_desc *rrd, struct sk_buff *skb)
   1785{
   1786	struct pci_dev *pdev = adapter->pdev;
   1787
   1788	/*
   1789	 * The L1 hardware contains a bug that erroneously sets the
   1790	 * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a
   1791	 * fragmented IP packet is received, even though the packet
   1792	 * is perfectly valid and its checksum is correct. There's
   1793	 * no way to distinguish between one of these good packets
   1794	 * and a packet that actually contains a TCP/UDP checksum
   1795	 * error, so all we can do is allow it to be handed up to
   1796	 * the higher layers and let it be sorted out there.
   1797	 */
   1798
   1799	skb_checksum_none_assert(skb);
   1800
   1801	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
   1802		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
   1803					ERR_FLAG_CODE | ERR_FLAG_OV)) {
   1804			adapter->hw_csum_err++;
   1805			if (netif_msg_rx_err(adapter))
   1806				dev_printk(KERN_DEBUG, &pdev->dev,
   1807					"rx checksum error\n");
   1808			return;
   1809		}
   1810	}
   1811
   1812	/* not IPv4 */
   1813	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
   1814		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
   1815		return;
   1816
   1817	/* IPv4 packet */
   1818	if (likely(!(rrd->err_flg &
   1819		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
   1820		skb->ip_summed = CHECKSUM_UNNECESSARY;
   1821		adapter->hw_csum_good++;
   1822		return;
   1823	}
   1824}
   1825
   1826/**
   1827 * atl1_alloc_rx_buffers - Replace used receive buffers
   1828 * @adapter: address of board private structure
   1829 */
   1830static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
   1831{
   1832	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1833	struct pci_dev *pdev = adapter->pdev;
   1834	struct page *page;
   1835	unsigned long offset;
   1836	struct atl1_buffer *buffer_info, *next_info;
   1837	struct sk_buff *skb;
   1838	u16 num_alloc = 0;
   1839	u16 rfd_next_to_use, next_next;
   1840	struct rx_free_desc *rfd_desc;
   1841
   1842	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
   1843	if (++next_next == rfd_ring->count)
   1844		next_next = 0;
   1845	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
   1846	next_info = &rfd_ring->buffer_info[next_next];
   1847
   1848	while (!buffer_info->alloced && !next_info->alloced) {
   1849		if (buffer_info->skb) {
   1850			buffer_info->alloced = 1;
   1851			goto next;
   1852		}
   1853
   1854		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
   1855
   1856		skb = netdev_alloc_skb_ip_align(adapter->netdev,
   1857						adapter->rx_buffer_len);
   1858		if (unlikely(!skb)) {
   1859			/* Better luck next round */
   1860			adapter->soft_stats.rx_dropped++;
   1861			break;
   1862		}
   1863
   1864		buffer_info->alloced = 1;
   1865		buffer_info->skb = skb;
   1866		buffer_info->length = (u16) adapter->rx_buffer_len;
   1867		page = virt_to_page(skb->data);
   1868		offset = offset_in_page(skb->data);
   1869		buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
   1870						adapter->rx_buffer_len,
   1871						DMA_FROM_DEVICE);
   1872		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
   1873		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
   1874		rfd_desc->coalese = 0;
   1875
   1876next:
   1877		rfd_next_to_use = next_next;
   1878		if (unlikely(++next_next == rfd_ring->count))
   1879			next_next = 0;
   1880
   1881		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
   1882		next_info = &rfd_ring->buffer_info[next_next];
   1883		num_alloc++;
   1884	}
   1885
   1886	if (num_alloc) {
   1887		/*
   1888		 * Force memory writes to complete before letting h/w
   1889		 * know there are new descriptors to fetch.  (Only
   1890		 * applicable for weak-ordered memory model archs,
   1891		 * such as IA-64).
   1892		 */
   1893		wmb();
   1894		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
   1895	}
   1896	return num_alloc;
   1897}
   1898
   1899static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
   1900{
   1901	int i, count;
   1902	u16 length;
   1903	u16 rrd_next_to_clean;
   1904	u32 value;
   1905	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
   1906	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
   1907	struct atl1_buffer *buffer_info;
   1908	struct rx_return_desc *rrd;
   1909	struct sk_buff *skb;
   1910
   1911	count = 0;
   1912
   1913	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
   1914
   1915	while (count < budget) {
   1916		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
   1917		i = 1;
   1918		if (likely(rrd->xsz.valid)) {	/* packet valid */
   1919chk_rrd:
   1920			/* check rrd status */
   1921			if (likely(rrd->num_buf == 1))
   1922				goto rrd_ok;
   1923			else if (netif_msg_rx_err(adapter)) {
   1924				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1925					"unexpected RRD buffer count\n");
   1926				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1927					"rx_buf_len = %d\n",
   1928					adapter->rx_buffer_len);
   1929				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1930					"RRD num_buf = %d\n",
   1931					rrd->num_buf);
   1932				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1933					"RRD pkt_len = %d\n",
   1934					rrd->xsz.xsum_sz.pkt_size);
   1935				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1936					"RRD pkt_flg = 0x%08X\n",
   1937					rrd->pkt_flg);
   1938				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1939					"RRD err_flg = 0x%08X\n",
   1940					rrd->err_flg);
   1941				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1942					"RRD vlan_tag = 0x%08X\n",
   1943					rrd->vlan_tag);
   1944			}
   1945
   1946			/* rrd seems to be bad */
   1947			if (unlikely(i-- > 0)) {
   1948				/* rrd may not be DMAed completely */
   1949				udelay(1);
   1950				goto chk_rrd;
   1951			}
   1952			/* bad rrd */
   1953			if (netif_msg_rx_err(adapter))
   1954				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   1955					"bad RRD\n");
   1956			/* see if update RFD index */
   1957			if (rrd->num_buf > 1)
   1958				atl1_update_rfd_index(adapter, rrd);
   1959
   1960			/* update rrd */
   1961			rrd->xsz.valid = 0;
   1962			if (++rrd_next_to_clean == rrd_ring->count)
   1963				rrd_next_to_clean = 0;
   1964			count++;
   1965			continue;
   1966		} else {	/* current rrd still not be updated */
   1967
   1968			break;
   1969		}
   1970rrd_ok:
   1971		/* clean alloc flag for bad rrd */
   1972		atl1_clean_alloc_flag(adapter, rrd, 0);
   1973
   1974		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
   1975		if (++rfd_ring->next_to_clean == rfd_ring->count)
   1976			rfd_ring->next_to_clean = 0;
   1977
   1978		/* update rrd next to clean */
   1979		if (++rrd_next_to_clean == rrd_ring->count)
   1980			rrd_next_to_clean = 0;
   1981		count++;
   1982
   1983		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
   1984			if (!(rrd->err_flg &
   1985				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
   1986				| ERR_FLAG_LEN))) {
   1987				/* packet error, don't need upstream */
   1988				buffer_info->alloced = 0;
   1989				rrd->xsz.valid = 0;
   1990				continue;
   1991			}
   1992		}
   1993
   1994		/* Good Receive */
   1995		dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
   1996			       buffer_info->length, DMA_FROM_DEVICE);
   1997		buffer_info->dma = 0;
   1998		skb = buffer_info->skb;
   1999		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
   2000
   2001		skb_put(skb, length - ETH_FCS_LEN);
   2002
   2003		/* Receive Checksum Offload */
   2004		atl1_rx_checksum(adapter, rrd, skb);
   2005		skb->protocol = eth_type_trans(skb, adapter->netdev);
   2006
   2007		if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
   2008			u16 vlan_tag = (rrd->vlan_tag >> 4) |
   2009					((rrd->vlan_tag & 7) << 13) |
   2010					((rrd->vlan_tag & 8) << 9);
   2011
   2012			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
   2013		}
   2014		netif_receive_skb(skb);
   2015
   2016		/* let protocol layer free skb */
   2017		buffer_info->skb = NULL;
   2018		buffer_info->alloced = 0;
   2019		rrd->xsz.valid = 0;
   2020	}
   2021
   2022	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
   2023
   2024	atl1_alloc_rx_buffers(adapter);
   2025
   2026	/* update mailbox ? */
   2027	if (count) {
   2028		u32 tpd_next_to_use;
   2029		u32 rfd_next_to_use;
   2030
   2031		spin_lock(&adapter->mb_lock);
   2032
   2033		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
   2034		rfd_next_to_use =
   2035		    atomic_read(&adapter->rfd_ring.next_to_use);
   2036		rrd_next_to_clean =
   2037		    atomic_read(&adapter->rrd_ring.next_to_clean);
   2038		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
   2039			MB_RFD_PROD_INDX_SHIFT) |
   2040                        ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
   2041			MB_RRD_CONS_INDX_SHIFT) |
   2042                        ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
   2043			MB_TPD_PROD_INDX_SHIFT);
   2044		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
   2045		spin_unlock(&adapter->mb_lock);
   2046	}
   2047
   2048	return count;
   2049}
   2050
   2051static int atl1_intr_tx(struct atl1_adapter *adapter)
   2052{
   2053	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   2054	struct atl1_buffer *buffer_info;
   2055	u16 sw_tpd_next_to_clean;
   2056	u16 cmb_tpd_next_to_clean;
   2057	int count = 0;
   2058
   2059	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
   2060	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
   2061
   2062	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
   2063		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
   2064		if (buffer_info->dma) {
   2065			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
   2066				       buffer_info->length, DMA_TO_DEVICE);
   2067			buffer_info->dma = 0;
   2068		}
   2069
   2070		if (buffer_info->skb) {
   2071			dev_consume_skb_irq(buffer_info->skb);
   2072			buffer_info->skb = NULL;
   2073		}
   2074
   2075		if (++sw_tpd_next_to_clean == tpd_ring->count)
   2076			sw_tpd_next_to_clean = 0;
   2077
   2078		count++;
   2079	}
   2080	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
   2081
   2082	if (netif_queue_stopped(adapter->netdev) &&
   2083	    netif_carrier_ok(adapter->netdev))
   2084		netif_wake_queue(adapter->netdev);
   2085
   2086	return count;
   2087}
   2088
   2089static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
   2090{
   2091	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
   2092	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
   2093	return (next_to_clean > next_to_use) ?
   2094		next_to_clean - next_to_use - 1 :
   2095		tpd_ring->count + next_to_clean - next_to_use - 1;
   2096}
   2097
   2098static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
   2099		    struct tx_packet_desc *ptpd)
   2100{
   2101	u8 hdr_len, ip_off;
   2102	u32 real_len;
   2103
   2104	if (skb_shinfo(skb)->gso_size) {
   2105		int err;
   2106
   2107		err = skb_cow_head(skb, 0);
   2108		if (err < 0)
   2109			return err;
   2110
   2111		if (skb->protocol == htons(ETH_P_IP)) {
   2112			struct iphdr *iph = ip_hdr(skb);
   2113
   2114			real_len = (((unsigned char *)iph - skb->data) +
   2115				ntohs(iph->tot_len));
   2116			if (real_len < skb->len)
   2117				pskb_trim(skb, real_len);
   2118			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
   2119			if (skb->len == hdr_len) {
   2120				iph->check = 0;
   2121				tcp_hdr(skb)->check =
   2122					~csum_tcpudp_magic(iph->saddr,
   2123					iph->daddr, tcp_hdrlen(skb),
   2124					IPPROTO_TCP, 0);
   2125				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
   2126					TPD_IPHL_SHIFT;
   2127				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
   2128					TPD_TCPHDRLEN_MASK) <<
   2129					TPD_TCPHDRLEN_SHIFT;
   2130				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
   2131				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
   2132				return 1;
   2133			}
   2134
   2135			iph->check = 0;
   2136			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
   2137					iph->daddr, 0, IPPROTO_TCP, 0);
   2138			ip_off = (unsigned char *)iph -
   2139				(unsigned char *) skb_network_header(skb);
   2140			if (ip_off == 8) /* 802.3-SNAP frame */
   2141				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
   2142			else if (ip_off != 0)
   2143				return -2;
   2144
   2145			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
   2146				TPD_IPHL_SHIFT;
   2147			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
   2148				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
   2149			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
   2150				TPD_MSS_MASK) << TPD_MSS_SHIFT;
   2151			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
   2152			return 3;
   2153		}
   2154	}
   2155	return 0;
   2156}
   2157
   2158static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
   2159	struct tx_packet_desc *ptpd)
   2160{
   2161	u8 css, cso;
   2162
   2163	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
   2164		css = skb_checksum_start_offset(skb);
   2165		cso = css + (u8) skb->csum_offset;
   2166		if (unlikely(css & 0x1)) {
   2167			/* L1 hardware requires an even number here */
   2168			if (netif_msg_tx_err(adapter))
   2169				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   2170					"payload offset not an even number\n");
   2171			return -1;
   2172		}
   2173		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
   2174			TPD_PLOADOFFSET_SHIFT;
   2175		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
   2176			TPD_CCSUMOFFSET_SHIFT;
   2177		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
   2178		return true;
   2179	}
   2180	return 0;
   2181}
   2182
   2183static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
   2184	struct tx_packet_desc *ptpd)
   2185{
   2186	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   2187	struct atl1_buffer *buffer_info;
   2188	u16 buf_len = skb->len;
   2189	struct page *page;
   2190	unsigned long offset;
   2191	unsigned int nr_frags;
   2192	unsigned int f;
   2193	int retval;
   2194	u16 next_to_use;
   2195	u16 data_len;
   2196	u8 hdr_len;
   2197
   2198	buf_len -= skb->data_len;
   2199	nr_frags = skb_shinfo(skb)->nr_frags;
   2200	next_to_use = atomic_read(&tpd_ring->next_to_use);
   2201	buffer_info = &tpd_ring->buffer_info[next_to_use];
   2202	BUG_ON(buffer_info->skb);
   2203	/* put skb in last TPD */
   2204	buffer_info->skb = NULL;
   2205
   2206	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
   2207	if (retval) {
   2208		/* TSO */
   2209		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
   2210		buffer_info->length = hdr_len;
   2211		page = virt_to_page(skb->data);
   2212		offset = offset_in_page(skb->data);
   2213		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
   2214						offset, hdr_len,
   2215						DMA_TO_DEVICE);
   2216
   2217		if (++next_to_use == tpd_ring->count)
   2218			next_to_use = 0;
   2219
   2220		if (buf_len > hdr_len) {
   2221			int i, nseg;
   2222
   2223			data_len = buf_len - hdr_len;
   2224			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
   2225				ATL1_MAX_TX_BUF_LEN;
   2226			for (i = 0; i < nseg; i++) {
   2227				buffer_info =
   2228				    &tpd_ring->buffer_info[next_to_use];
   2229				buffer_info->skb = NULL;
   2230				buffer_info->length =
   2231				    (ATL1_MAX_TX_BUF_LEN >=
   2232				     data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
   2233				data_len -= buffer_info->length;
   2234				page = virt_to_page(skb->data +
   2235					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
   2236				offset = offset_in_page(skb->data +
   2237					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
   2238				buffer_info->dma = dma_map_page(&adapter->pdev->dev,
   2239								page, offset,
   2240								buffer_info->length,
   2241								DMA_TO_DEVICE);
   2242				if (++next_to_use == tpd_ring->count)
   2243					next_to_use = 0;
   2244			}
   2245		}
   2246	} else {
   2247		/* not TSO */
   2248		buffer_info->length = buf_len;
   2249		page = virt_to_page(skb->data);
   2250		offset = offset_in_page(skb->data);
   2251		buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
   2252						offset, buf_len,
   2253						DMA_TO_DEVICE);
   2254		if (++next_to_use == tpd_ring->count)
   2255			next_to_use = 0;
   2256	}
   2257
   2258	for (f = 0; f < nr_frags; f++) {
   2259		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
   2260		u16 i, nseg;
   2261
   2262		buf_len = skb_frag_size(frag);
   2263
   2264		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
   2265			ATL1_MAX_TX_BUF_LEN;
   2266		for (i = 0; i < nseg; i++) {
   2267			buffer_info = &tpd_ring->buffer_info[next_to_use];
   2268			BUG_ON(buffer_info->skb);
   2269
   2270			buffer_info->skb = NULL;
   2271			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
   2272				ATL1_MAX_TX_BUF_LEN : buf_len;
   2273			buf_len -= buffer_info->length;
   2274			buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
   2275				frag, i * ATL1_MAX_TX_BUF_LEN,
   2276				buffer_info->length, DMA_TO_DEVICE);
   2277
   2278			if (++next_to_use == tpd_ring->count)
   2279				next_to_use = 0;
   2280		}
   2281	}
   2282
   2283	/* last tpd's buffer-info */
   2284	buffer_info->skb = skb;
   2285}
   2286
   2287static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
   2288       struct tx_packet_desc *ptpd)
   2289{
   2290	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   2291	struct atl1_buffer *buffer_info;
   2292	struct tx_packet_desc *tpd;
   2293	u16 j;
   2294	u32 val;
   2295	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
   2296
   2297	for (j = 0; j < count; j++) {
   2298		buffer_info = &tpd_ring->buffer_info[next_to_use];
   2299		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
   2300		if (tpd != ptpd)
   2301			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
   2302		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
   2303		tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
   2304		tpd->word2 |= (cpu_to_le16(buffer_info->length) &
   2305			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
   2306
   2307		/*
   2308		 * if this is the first packet in a TSO chain, set
   2309		 * TPD_HDRFLAG, otherwise, clear it.
   2310		 */
   2311		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
   2312			TPD_SEGMENT_EN_MASK;
   2313		if (val) {
   2314			if (!j)
   2315				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
   2316			else
   2317				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
   2318		}
   2319
   2320		if (j == (count - 1))
   2321			tpd->word3 |= 1 << TPD_EOP_SHIFT;
   2322
   2323		if (++next_to_use == tpd_ring->count)
   2324			next_to_use = 0;
   2325	}
   2326	/*
   2327	 * Force memory writes to complete before letting h/w
   2328	 * know there are new descriptors to fetch.  (Only
   2329	 * applicable for weak-ordered memory model archs,
   2330	 * such as IA-64).
   2331	 */
   2332	wmb();
   2333
   2334	atomic_set(&tpd_ring->next_to_use, next_to_use);
   2335}
   2336
   2337static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
   2338					 struct net_device *netdev)
   2339{
   2340	struct atl1_adapter *adapter = netdev_priv(netdev);
   2341	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
   2342	int len;
   2343	int tso;
   2344	int count = 1;
   2345	int ret_val;
   2346	struct tx_packet_desc *ptpd;
   2347	u16 vlan_tag;
   2348	unsigned int nr_frags = 0;
   2349	unsigned int mss = 0;
   2350	unsigned int f;
   2351	unsigned int proto_hdr_len;
   2352
   2353	len = skb_headlen(skb);
   2354
   2355	if (unlikely(skb->len <= 0)) {
   2356		dev_kfree_skb_any(skb);
   2357		return NETDEV_TX_OK;
   2358	}
   2359
   2360	nr_frags = skb_shinfo(skb)->nr_frags;
   2361	for (f = 0; f < nr_frags; f++) {
   2362		unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
   2363		count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
   2364			 ATL1_MAX_TX_BUF_LEN;
   2365	}
   2366
   2367	mss = skb_shinfo(skb)->gso_size;
   2368	if (mss) {
   2369		if (skb->protocol == htons(ETH_P_IP)) {
   2370			proto_hdr_len = (skb_transport_offset(skb) +
   2371					 tcp_hdrlen(skb));
   2372			if (unlikely(proto_hdr_len > len)) {
   2373				dev_kfree_skb_any(skb);
   2374				return NETDEV_TX_OK;
   2375			}
   2376			/* need additional TPD ? */
   2377			if (proto_hdr_len != len)
   2378				count += (len - proto_hdr_len +
   2379					ATL1_MAX_TX_BUF_LEN - 1) /
   2380					ATL1_MAX_TX_BUF_LEN;
   2381		}
   2382	}
   2383
   2384	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
   2385		/* not enough descriptors */
   2386		netif_stop_queue(netdev);
   2387		if (netif_msg_tx_queued(adapter))
   2388			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   2389				"tx busy\n");
   2390		return NETDEV_TX_BUSY;
   2391	}
   2392
   2393	ptpd = ATL1_TPD_DESC(tpd_ring,
   2394		(u16) atomic_read(&tpd_ring->next_to_use));
   2395	memset(ptpd, 0, sizeof(struct tx_packet_desc));
   2396
   2397	if (skb_vlan_tag_present(skb)) {
   2398		vlan_tag = skb_vlan_tag_get(skb);
   2399		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
   2400			((vlan_tag >> 9) & 0x8);
   2401		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
   2402		ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
   2403			TPD_VLANTAG_SHIFT;
   2404	}
   2405
   2406	tso = atl1_tso(adapter, skb, ptpd);
   2407	if (tso < 0) {
   2408		dev_kfree_skb_any(skb);
   2409		return NETDEV_TX_OK;
   2410	}
   2411
   2412	if (!tso) {
   2413		ret_val = atl1_tx_csum(adapter, skb, ptpd);
   2414		if (ret_val < 0) {
   2415			dev_kfree_skb_any(skb);
   2416			return NETDEV_TX_OK;
   2417		}
   2418	}
   2419
   2420	atl1_tx_map(adapter, skb, ptpd);
   2421	atl1_tx_queue(adapter, count, ptpd);
   2422	atl1_update_mailbox(adapter);
   2423	return NETDEV_TX_OK;
   2424}
   2425
   2426static int atl1_rings_clean(struct napi_struct *napi, int budget)
   2427{
   2428	struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
   2429	int work_done = atl1_intr_rx(adapter, budget);
   2430
   2431	if (atl1_intr_tx(adapter))
   2432		work_done = budget;
   2433
   2434	/* Let's come again to process some more packets */
   2435	if (work_done >= budget)
   2436		return work_done;
   2437
   2438	napi_complete_done(napi, work_done);
   2439	/* re-enable Interrupt */
   2440	if (likely(adapter->int_enabled))
   2441		atlx_imr_set(adapter, IMR_NORMAL_MASK);
   2442	return work_done;
   2443}
   2444
   2445static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
   2446{
   2447	if (!napi_schedule_prep(&adapter->napi))
   2448		/* It is possible in case even the RX/TX ints are disabled via IMR
   2449		 * register the ISR bits are set anyway (but do not produce IRQ).
   2450		 * To handle such situation the napi functions used to check is
   2451		 * something scheduled or not.
   2452		 */
   2453		return 0;
   2454
   2455	__napi_schedule(&adapter->napi);
   2456
   2457	/*
   2458	 * Disable RX/TX ints via IMR register if it is
   2459	 * allowed. NAPI handler must reenable them in same
   2460	 * way.
   2461	 */
   2462	if (!adapter->int_enabled)
   2463		return 1;
   2464
   2465	atlx_imr_set(adapter, IMR_NORXTX_MASK);
   2466	return 1;
   2467}
   2468
   2469/**
   2470 * atl1_intr - Interrupt Handler
   2471 * @irq: interrupt number
   2472 * @data: pointer to a network interface device structure
   2473 */
   2474static irqreturn_t atl1_intr(int irq, void *data)
   2475{
   2476	struct atl1_adapter *adapter = netdev_priv(data);
   2477	u32 status;
   2478
   2479	status = adapter->cmb.cmb->int_stats;
   2480	if (!status)
   2481		return IRQ_NONE;
   2482
   2483	/* clear CMB interrupt status at once,
   2484	 * but leave rx/tx interrupt status in case it should be dropped
   2485	 * only if rx/tx processing queued. In other case interrupt
   2486	 * can be lost.
   2487	 */
   2488	adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
   2489
   2490	if (status & ISR_GPHY)	/* clear phy status */
   2491		atlx_clear_phy_int(adapter);
   2492
   2493	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
   2494	iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
   2495
   2496	/* check if SMB intr */
   2497	if (status & ISR_SMB)
   2498		atl1_inc_smb(adapter);
   2499
   2500	/* check if PCIE PHY Link down */
   2501	if (status & ISR_PHY_LINKDOWN) {
   2502		if (netif_msg_intr(adapter))
   2503			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   2504				"pcie phy link down %x\n", status);
   2505		if (netif_running(adapter->netdev)) {	/* reset MAC */
   2506			atlx_irq_disable(adapter);
   2507			schedule_work(&adapter->reset_dev_task);
   2508			return IRQ_HANDLED;
   2509		}
   2510	}
   2511
   2512	/* check if DMA read/write error ? */
   2513	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
   2514		if (netif_msg_intr(adapter))
   2515			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
   2516				"pcie DMA r/w error (status = 0x%x)\n",
   2517				status);
   2518		atlx_irq_disable(adapter);
   2519		schedule_work(&adapter->reset_dev_task);
   2520		return IRQ_HANDLED;
   2521	}
   2522
   2523	/* link event */
   2524	if (status & ISR_GPHY) {
   2525		adapter->soft_stats.tx_carrier_errors++;
   2526		atl1_check_for_link(adapter);
   2527	}
   2528
   2529	/* transmit or receive event */
   2530	if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
   2531	    atl1_sched_rings_clean(adapter))
   2532		adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
   2533					      ~(ISR_CMB_TX | ISR_CMB_RX);
   2534
   2535	/* rx exception */
   2536	if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
   2537		ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
   2538		ISR_HOST_RRD_OV))) {
   2539		if (netif_msg_intr(adapter))
   2540			dev_printk(KERN_DEBUG,
   2541				&adapter->pdev->dev,
   2542				"rx exception, ISR = 0x%x\n",
   2543				status);
   2544		atl1_sched_rings_clean(adapter);
   2545	}
   2546
   2547	/* re-enable Interrupt */
   2548	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
   2549	return IRQ_HANDLED;
   2550}
   2551
   2552
   2553/**
   2554 * atl1_phy_config - Timer Call-back
   2555 * @t: timer_list containing pointer to netdev cast into an unsigned long
   2556 */
   2557static void atl1_phy_config(struct timer_list *t)
   2558{
   2559	struct atl1_adapter *adapter = from_timer(adapter, t,
   2560						  phy_config_timer);
   2561	struct atl1_hw *hw = &adapter->hw;
   2562	unsigned long flags;
   2563
   2564	spin_lock_irqsave(&adapter->lock, flags);
   2565	adapter->phy_timer_pending = false;
   2566	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
   2567	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
   2568	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
   2569	spin_unlock_irqrestore(&adapter->lock, flags);
   2570}
   2571
   2572/*
   2573 * Orphaned vendor comment left intact here:
   2574 * <vendor comment>
   2575 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
   2576 * will assert. We do soft reset <0x1400=1> according
   2577 * with the SPEC. BUT, it seemes that PCIE or DMA
   2578 * state-machine will not be reset. DMAR_TO_INT will
   2579 * assert again and again.
   2580 * </vendor comment>
   2581 */
   2582
   2583static int atl1_reset(struct atl1_adapter *adapter)
   2584{
   2585	int ret;
   2586	ret = atl1_reset_hw(&adapter->hw);
   2587	if (ret)
   2588		return ret;
   2589	return atl1_init_hw(&adapter->hw);
   2590}
   2591
   2592static s32 atl1_up(struct atl1_adapter *adapter)
   2593{
   2594	struct net_device *netdev = adapter->netdev;
   2595	int err;
   2596	int irq_flags = 0;
   2597
   2598	/* hardware has been reset, we need to reload some things */
   2599	atlx_set_multi(netdev);
   2600	atl1_init_ring_ptrs(adapter);
   2601	atlx_restore_vlan(adapter);
   2602	err = atl1_alloc_rx_buffers(adapter);
   2603	if (unlikely(!err))
   2604		/* no RX BUFFER allocated */
   2605		return -ENOMEM;
   2606
   2607	if (unlikely(atl1_configure(adapter))) {
   2608		err = -EIO;
   2609		goto err_up;
   2610	}
   2611
   2612	err = pci_enable_msi(adapter->pdev);
   2613	if (err) {
   2614		if (netif_msg_ifup(adapter))
   2615			dev_info(&adapter->pdev->dev,
   2616				"Unable to enable MSI: %d\n", err);
   2617		irq_flags |= IRQF_SHARED;
   2618	}
   2619
   2620	err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
   2621			netdev->name, netdev);
   2622	if (unlikely(err))
   2623		goto err_up;
   2624
   2625	napi_enable(&adapter->napi);
   2626	atlx_irq_enable(adapter);
   2627	atl1_check_link(adapter);
   2628	netif_start_queue(netdev);
   2629	return 0;
   2630
   2631err_up:
   2632	pci_disable_msi(adapter->pdev);
   2633	/* free rx_buffers */
   2634	atl1_clean_rx_ring(adapter);
   2635	return err;
   2636}
   2637
   2638static void atl1_down(struct atl1_adapter *adapter)
   2639{
   2640	struct net_device *netdev = adapter->netdev;
   2641
   2642	napi_disable(&adapter->napi);
   2643	netif_stop_queue(netdev);
   2644	del_timer_sync(&adapter->phy_config_timer);
   2645	adapter->phy_timer_pending = false;
   2646
   2647	atlx_irq_disable(adapter);
   2648	free_irq(adapter->pdev->irq, netdev);
   2649	pci_disable_msi(adapter->pdev);
   2650	atl1_reset_hw(&adapter->hw);
   2651	adapter->cmb.cmb->int_stats = 0;
   2652
   2653	adapter->link_speed = SPEED_0;
   2654	adapter->link_duplex = -1;
   2655	netif_carrier_off(netdev);
   2656
   2657	atl1_clean_tx_ring(adapter);
   2658	atl1_clean_rx_ring(adapter);
   2659}
   2660
   2661static void atl1_reset_dev_task(struct work_struct *work)
   2662{
   2663	struct atl1_adapter *adapter =
   2664		container_of(work, struct atl1_adapter, reset_dev_task);
   2665	struct net_device *netdev = adapter->netdev;
   2666
   2667	netif_device_detach(netdev);
   2668	atl1_down(adapter);
   2669	atl1_up(adapter);
   2670	netif_device_attach(netdev);
   2671}
   2672
   2673/**
   2674 * atl1_change_mtu - Change the Maximum Transfer Unit
   2675 * @netdev: network interface device structure
   2676 * @new_mtu: new value for maximum frame size
   2677 *
   2678 * Returns 0 on success, negative on failure
   2679 */
   2680static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
   2681{
   2682	struct atl1_adapter *adapter = netdev_priv(netdev);
   2683	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
   2684
   2685	adapter->hw.max_frame_size = max_frame;
   2686	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
   2687	adapter->rx_buffer_len = (max_frame + 7) & ~7;
   2688	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
   2689
   2690	netdev->mtu = new_mtu;
   2691	if (netif_running(netdev)) {
   2692		atl1_down(adapter);
   2693		atl1_up(adapter);
   2694	}
   2695
   2696	return 0;
   2697}
   2698
   2699/**
   2700 * atl1_open - Called when a network interface is made active
   2701 * @netdev: network interface device structure
   2702 *
   2703 * Returns 0 on success, negative value on failure
   2704 *
   2705 * The open entry point is called when a network interface is made
   2706 * active by the system (IFF_UP).  At this point all resources needed
   2707 * for transmit and receive operations are allocated, the interrupt
   2708 * handler is registered with the OS, the watchdog timer is started,
   2709 * and the stack is notified that the interface is ready.
   2710 */
   2711static int atl1_open(struct net_device *netdev)
   2712{
   2713	struct atl1_adapter *adapter = netdev_priv(netdev);
   2714	int err;
   2715
   2716	netif_carrier_off(netdev);
   2717
   2718	/* allocate transmit descriptors */
   2719	err = atl1_setup_ring_resources(adapter);
   2720	if (err)
   2721		return err;
   2722
   2723	err = atl1_up(adapter);
   2724	if (err)
   2725		goto err_up;
   2726
   2727	return 0;
   2728
   2729err_up:
   2730	atl1_reset(adapter);
   2731	return err;
   2732}
   2733
   2734/**
   2735 * atl1_close - Disables a network interface
   2736 * @netdev: network interface device structure
   2737 *
   2738 * Returns 0, this is not allowed to fail
   2739 *
   2740 * The close entry point is called when an interface is de-activated
   2741 * by the OS.  The hardware is still under the drivers control, but
   2742 * needs to be disabled.  A global MAC reset is issued to stop the
   2743 * hardware, and all transmit and receive resources are freed.
   2744 */
   2745static int atl1_close(struct net_device *netdev)
   2746{
   2747	struct atl1_adapter *adapter = netdev_priv(netdev);
   2748	atl1_down(adapter);
   2749	atl1_free_ring_resources(adapter);
   2750	return 0;
   2751}
   2752
   2753#ifdef CONFIG_PM_SLEEP
   2754static int atl1_suspend(struct device *dev)
   2755{
   2756	struct net_device *netdev = dev_get_drvdata(dev);
   2757	struct atl1_adapter *adapter = netdev_priv(netdev);
   2758	struct atl1_hw *hw = &adapter->hw;
   2759	u32 ctrl = 0;
   2760	u32 wufc = adapter->wol;
   2761	u32 val;
   2762	u16 speed;
   2763	u16 duplex;
   2764
   2765	netif_device_detach(netdev);
   2766	if (netif_running(netdev))
   2767		atl1_down(adapter);
   2768
   2769	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
   2770	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
   2771	val = ctrl & BMSR_LSTATUS;
   2772	if (val)
   2773		wufc &= ~ATLX_WUFC_LNKC;
   2774	if (!wufc)
   2775		goto disable_wol;
   2776
   2777	if (val) {
   2778		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
   2779		if (val) {
   2780			if (netif_msg_ifdown(adapter))
   2781				dev_printk(KERN_DEBUG, dev,
   2782					"error getting speed/duplex\n");
   2783			goto disable_wol;
   2784		}
   2785
   2786		ctrl = 0;
   2787
   2788		/* enable magic packet WOL */
   2789		if (wufc & ATLX_WUFC_MAG)
   2790			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
   2791		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
   2792		ioread32(hw->hw_addr + REG_WOL_CTRL);
   2793
   2794		/* configure the mac */
   2795		ctrl = MAC_CTRL_RX_EN;
   2796		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
   2797			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
   2798		if (duplex == FULL_DUPLEX)
   2799			ctrl |= MAC_CTRL_DUPLX;
   2800		ctrl |= (((u32)adapter->hw.preamble_len &
   2801			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
   2802		__atlx_vlan_mode(netdev->features, &ctrl);
   2803		if (wufc & ATLX_WUFC_MAG)
   2804			ctrl |= MAC_CTRL_BC_EN;
   2805		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
   2806		ioread32(hw->hw_addr + REG_MAC_CTRL);
   2807
   2808		/* poke the PHY */
   2809		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
   2810		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
   2811		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
   2812		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
   2813	} else {
   2814		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
   2815		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
   2816		ioread32(hw->hw_addr + REG_WOL_CTRL);
   2817		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
   2818		ioread32(hw->hw_addr + REG_MAC_CTRL);
   2819		hw->phy_configured = false;
   2820	}
   2821
   2822	return 0;
   2823
   2824 disable_wol:
   2825	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
   2826	ioread32(hw->hw_addr + REG_WOL_CTRL);
   2827	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
   2828	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
   2829	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
   2830	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
   2831	hw->phy_configured = false;
   2832
   2833	return 0;
   2834}
   2835
   2836static int atl1_resume(struct device *dev)
   2837{
   2838	struct net_device *netdev = dev_get_drvdata(dev);
   2839	struct atl1_adapter *adapter = netdev_priv(netdev);
   2840
   2841	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
   2842
   2843	atl1_reset_hw(&adapter->hw);
   2844
   2845	if (netif_running(netdev)) {
   2846		adapter->cmb.cmb->int_stats = 0;
   2847		atl1_up(adapter);
   2848	}
   2849	netif_device_attach(netdev);
   2850
   2851	return 0;
   2852}
   2853#endif
   2854
   2855static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
   2856
   2857static void atl1_shutdown(struct pci_dev *pdev)
   2858{
   2859	struct net_device *netdev = pci_get_drvdata(pdev);
   2860	struct atl1_adapter *adapter = netdev_priv(netdev);
   2861
   2862#ifdef CONFIG_PM_SLEEP
   2863	atl1_suspend(&pdev->dev);
   2864#endif
   2865	pci_wake_from_d3(pdev, adapter->wol);
   2866	pci_set_power_state(pdev, PCI_D3hot);
   2867}
   2868
   2869#ifdef CONFIG_NET_POLL_CONTROLLER
   2870static void atl1_poll_controller(struct net_device *netdev)
   2871{
   2872	disable_irq(netdev->irq);
   2873	atl1_intr(netdev->irq, netdev);
   2874	enable_irq(netdev->irq);
   2875}
   2876#endif
   2877
   2878static const struct net_device_ops atl1_netdev_ops = {
   2879	.ndo_open		= atl1_open,
   2880	.ndo_stop		= atl1_close,
   2881	.ndo_start_xmit		= atl1_xmit_frame,
   2882	.ndo_set_rx_mode	= atlx_set_multi,
   2883	.ndo_validate_addr	= eth_validate_addr,
   2884	.ndo_set_mac_address	= atl1_set_mac,
   2885	.ndo_change_mtu		= atl1_change_mtu,
   2886	.ndo_fix_features	= atlx_fix_features,
   2887	.ndo_set_features	= atlx_set_features,
   2888	.ndo_eth_ioctl		= atlx_ioctl,
   2889	.ndo_tx_timeout		= atlx_tx_timeout,
   2890#ifdef CONFIG_NET_POLL_CONTROLLER
   2891	.ndo_poll_controller	= atl1_poll_controller,
   2892#endif
   2893};
   2894
   2895/**
   2896 * atl1_probe - Device Initialization Routine
   2897 * @pdev: PCI device information struct
   2898 * @ent: entry in atl1_pci_tbl
   2899 *
   2900 * Returns 0 on success, negative on failure
   2901 *
   2902 * atl1_probe initializes an adapter identified by a pci_dev structure.
   2903 * The OS initialization, configuring of the adapter private structure,
   2904 * and a hardware reset occur.
   2905 */
   2906static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
   2907{
   2908	struct net_device *netdev;
   2909	struct atl1_adapter *adapter;
   2910	static int cards_found = 0;
   2911	int err;
   2912
   2913	err = pci_enable_device(pdev);
   2914	if (err)
   2915		return err;
   2916
   2917	/*
   2918	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
   2919	 * shared register for the high 32 bits, so only a single, aligned,
   2920	 * 4 GB physical address range can be used at a time.
   2921	 *
   2922	 * Supporting 64-bit DMA on this hardware is more trouble than it's
   2923	 * worth.  It is far easier to limit to 32-bit DMA than update
   2924	 * various kernel subsystems to support the mechanics required by a
   2925	 * fixed-high-32-bit system.
   2926	 */
   2927	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
   2928	if (err) {
   2929		dev_err(&pdev->dev, "no usable DMA configuration\n");
   2930		goto err_dma;
   2931	}
   2932	/*
   2933	 * Mark all PCI regions associated with PCI device
   2934	 * pdev as being reserved by owner atl1_driver_name
   2935	 */
   2936	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
   2937	if (err)
   2938		goto err_request_regions;
   2939
   2940	/*
   2941	 * Enables bus-mastering on the device and calls
   2942	 * pcibios_set_master to do the needed arch specific settings
   2943	 */
   2944	pci_set_master(pdev);
   2945
   2946	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
   2947	if (!netdev) {
   2948		err = -ENOMEM;
   2949		goto err_alloc_etherdev;
   2950	}
   2951	SET_NETDEV_DEV(netdev, &pdev->dev);
   2952
   2953	pci_set_drvdata(pdev, netdev);
   2954	adapter = netdev_priv(netdev);
   2955	adapter->netdev = netdev;
   2956	adapter->pdev = pdev;
   2957	adapter->hw.back = adapter;
   2958	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
   2959
   2960	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
   2961	if (!adapter->hw.hw_addr) {
   2962		err = -EIO;
   2963		goto err_pci_iomap;
   2964	}
   2965	/* get device revision number */
   2966	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
   2967		(REG_MASTER_CTRL + 2));
   2968
   2969	/* set default ring resource counts */
   2970	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
   2971	adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
   2972
   2973	adapter->mii.dev = netdev;
   2974	adapter->mii.mdio_read = mdio_read;
   2975	adapter->mii.mdio_write = mdio_write;
   2976	adapter->mii.phy_id_mask = 0x1f;
   2977	adapter->mii.reg_num_mask = 0x1f;
   2978
   2979	netdev->netdev_ops = &atl1_netdev_ops;
   2980	netdev->watchdog_timeo = 5 * HZ;
   2981	netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
   2982
   2983	netdev->ethtool_ops = &atl1_ethtool_ops;
   2984	adapter->bd_number = cards_found;
   2985
   2986	/* setup the private structure */
   2987	err = atl1_sw_init(adapter);
   2988	if (err)
   2989		goto err_common;
   2990
   2991	netdev->features = NETIF_F_HW_CSUM;
   2992	netdev->features |= NETIF_F_SG;
   2993	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
   2994
   2995	netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
   2996			      NETIF_F_HW_VLAN_CTAG_RX;
   2997
   2998	/* is this valid? see atl1_setup_mac_ctrl() */
   2999	netdev->features |= NETIF_F_RXCSUM;
   3000
   3001	/* MTU range: 42 - 10218 */
   3002	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
   3003	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
   3004			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
   3005
   3006	/*
   3007	 * patch for some L1 of old version,
   3008	 * the final version of L1 may not need these
   3009	 * patches
   3010	 */
   3011	/* atl1_pcie_patch(adapter); */
   3012
   3013	/* really reset GPHY core */
   3014	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
   3015
   3016	/*
   3017	 * reset the controller to
   3018	 * put the device in a known good starting state
   3019	 */
   3020	if (atl1_reset_hw(&adapter->hw)) {
   3021		err = -EIO;
   3022		goto err_common;
   3023	}
   3024
   3025	/* copy the MAC address out of the EEPROM */
   3026	if (atl1_read_mac_addr(&adapter->hw)) {
   3027		/* mark random mac */
   3028		netdev->addr_assign_type = NET_ADDR_RANDOM;
   3029	}
   3030	eth_hw_addr_set(netdev, adapter->hw.mac_addr);
   3031
   3032	if (!is_valid_ether_addr(netdev->dev_addr)) {
   3033		err = -EIO;
   3034		goto err_common;
   3035	}
   3036
   3037	atl1_check_options(adapter);
   3038
   3039	/* pre-init the MAC, and setup link */
   3040	err = atl1_init_hw(&adapter->hw);
   3041	if (err) {
   3042		err = -EIO;
   3043		goto err_common;
   3044	}
   3045
   3046	atl1_pcie_patch(adapter);
   3047	/* assume we have no link for now */
   3048	netif_carrier_off(netdev);
   3049
   3050	timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
   3051	adapter->phy_timer_pending = false;
   3052
   3053	INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
   3054
   3055	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
   3056
   3057	err = register_netdev(netdev);
   3058	if (err)
   3059		goto err_common;
   3060
   3061	cards_found++;
   3062	atl1_via_workaround(adapter);
   3063	return 0;
   3064
   3065err_common:
   3066	pci_iounmap(pdev, adapter->hw.hw_addr);
   3067err_pci_iomap:
   3068	free_netdev(netdev);
   3069err_alloc_etherdev:
   3070	pci_release_regions(pdev);
   3071err_dma:
   3072err_request_regions:
   3073	pci_disable_device(pdev);
   3074	return err;
   3075}
   3076
   3077/**
   3078 * atl1_remove - Device Removal Routine
   3079 * @pdev: PCI device information struct
   3080 *
   3081 * atl1_remove is called by the PCI subsystem to alert the driver
   3082 * that it should release a PCI device.  The could be caused by a
   3083 * Hot-Plug event, or because the driver is going to be removed from
   3084 * memory.
   3085 */
   3086static void atl1_remove(struct pci_dev *pdev)
   3087{
   3088	struct net_device *netdev = pci_get_drvdata(pdev);
   3089	struct atl1_adapter *adapter;
   3090	/* Device not available. Return. */
   3091	if (!netdev)
   3092		return;
   3093
   3094	adapter = netdev_priv(netdev);
   3095
   3096	/*
   3097	 * Some atl1 boards lack persistent storage for their MAC, and get it
   3098	 * from the BIOS during POST.  If we've been messing with the MAC
   3099	 * address, we need to save the permanent one.
   3100	 */
   3101	if (!ether_addr_equal_unaligned(adapter->hw.mac_addr,
   3102					adapter->hw.perm_mac_addr)) {
   3103		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
   3104			ETH_ALEN);
   3105		atl1_set_mac_addr(&adapter->hw);
   3106	}
   3107
   3108	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
   3109	unregister_netdev(netdev);
   3110	pci_iounmap(pdev, adapter->hw.hw_addr);
   3111	pci_release_regions(pdev);
   3112	free_netdev(netdev);
   3113	pci_disable_device(pdev);
   3114}
   3115
   3116static struct pci_driver atl1_driver = {
   3117	.name = ATLX_DRIVER_NAME,
   3118	.id_table = atl1_pci_tbl,
   3119	.probe = atl1_probe,
   3120	.remove = atl1_remove,
   3121	.shutdown = atl1_shutdown,
   3122	.driver.pm = &atl1_pm_ops,
   3123};
   3124
   3125struct atl1_stats {
   3126	char stat_string[ETH_GSTRING_LEN];
   3127	int sizeof_stat;
   3128	int stat_offset;
   3129};
   3130
   3131#define ATL1_STAT(m) \
   3132	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
   3133
   3134static struct atl1_stats atl1_gstrings_stats[] = {
   3135	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
   3136	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
   3137	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
   3138	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
   3139	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
   3140	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
   3141	{"multicast", ATL1_STAT(soft_stats.multicast)},
   3142	{"collisions", ATL1_STAT(soft_stats.collisions)},
   3143	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
   3144	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
   3145	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
   3146	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
   3147	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
   3148	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
   3149	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
   3150	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
   3151	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
   3152	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
   3153	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
   3154	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
   3155	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
   3156	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
   3157	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
   3158	{"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
   3159	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
   3160	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
   3161	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
   3162	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
   3163	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
   3164};
   3165
   3166static void atl1_get_ethtool_stats(struct net_device *netdev,
   3167	struct ethtool_stats *stats, u64 *data)
   3168{
   3169	struct atl1_adapter *adapter = netdev_priv(netdev);
   3170	int i;
   3171	char *p;
   3172
   3173	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
   3174		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
   3175		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
   3176			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
   3177	}
   3178
   3179}
   3180
   3181static int atl1_get_sset_count(struct net_device *netdev, int sset)
   3182{
   3183	switch (sset) {
   3184	case ETH_SS_STATS:
   3185		return ARRAY_SIZE(atl1_gstrings_stats);
   3186	default:
   3187		return -EOPNOTSUPP;
   3188	}
   3189}
   3190
   3191static int atl1_get_link_ksettings(struct net_device *netdev,
   3192				   struct ethtool_link_ksettings *cmd)
   3193{
   3194	struct atl1_adapter *adapter = netdev_priv(netdev);
   3195	struct atl1_hw *hw = &adapter->hw;
   3196	u32 supported, advertising;
   3197
   3198	supported = (SUPPORTED_10baseT_Half |
   3199			   SUPPORTED_10baseT_Full |
   3200			   SUPPORTED_100baseT_Half |
   3201			   SUPPORTED_100baseT_Full |
   3202			   SUPPORTED_1000baseT_Full |
   3203			   SUPPORTED_Autoneg | SUPPORTED_TP);
   3204	advertising = ADVERTISED_TP;
   3205	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3206	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
   3207		advertising |= ADVERTISED_Autoneg;
   3208		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
   3209			advertising |= ADVERTISED_Autoneg;
   3210			advertising |=
   3211			    (ADVERTISED_10baseT_Half |
   3212			     ADVERTISED_10baseT_Full |
   3213			     ADVERTISED_100baseT_Half |
   3214			     ADVERTISED_100baseT_Full |
   3215			     ADVERTISED_1000baseT_Full);
   3216		} else
   3217			advertising |= (ADVERTISED_1000baseT_Full);
   3218	}
   3219	cmd->base.port = PORT_TP;
   3220	cmd->base.phy_address = 0;
   3221
   3222	if (netif_carrier_ok(adapter->netdev)) {
   3223		u16 link_speed, link_duplex;
   3224		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
   3225		cmd->base.speed = link_speed;
   3226		if (link_duplex == FULL_DUPLEX)
   3227			cmd->base.duplex = DUPLEX_FULL;
   3228		else
   3229			cmd->base.duplex = DUPLEX_HALF;
   3230	} else {
   3231		cmd->base.speed = SPEED_UNKNOWN;
   3232		cmd->base.duplex = DUPLEX_UNKNOWN;
   3233	}
   3234	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3235	    hw->media_type == MEDIA_TYPE_1000M_FULL)
   3236		cmd->base.autoneg = AUTONEG_ENABLE;
   3237	else
   3238		cmd->base.autoneg = AUTONEG_DISABLE;
   3239
   3240	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
   3241						supported);
   3242	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
   3243						advertising);
   3244
   3245	return 0;
   3246}
   3247
   3248static int atl1_set_link_ksettings(struct net_device *netdev,
   3249				   const struct ethtool_link_ksettings *cmd)
   3250{
   3251	struct atl1_adapter *adapter = netdev_priv(netdev);
   3252	struct atl1_hw *hw = &adapter->hw;
   3253	u16 phy_data;
   3254	int ret_val = 0;
   3255	u16 old_media_type = hw->media_type;
   3256
   3257	if (netif_running(adapter->netdev)) {
   3258		if (netif_msg_link(adapter))
   3259			dev_dbg(&adapter->pdev->dev,
   3260				"ethtool shutting down adapter\n");
   3261		atl1_down(adapter);
   3262	}
   3263
   3264	if (cmd->base.autoneg == AUTONEG_ENABLE)
   3265		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
   3266	else {
   3267		u32 speed = cmd->base.speed;
   3268		if (speed == SPEED_1000) {
   3269			if (cmd->base.duplex != DUPLEX_FULL) {
   3270				if (netif_msg_link(adapter))
   3271					dev_warn(&adapter->pdev->dev,
   3272						"1000M half is invalid\n");
   3273				ret_val = -EINVAL;
   3274				goto exit_sset;
   3275			}
   3276			hw->media_type = MEDIA_TYPE_1000M_FULL;
   3277		} else if (speed == SPEED_100) {
   3278			if (cmd->base.duplex == DUPLEX_FULL)
   3279				hw->media_type = MEDIA_TYPE_100M_FULL;
   3280			else
   3281				hw->media_type = MEDIA_TYPE_100M_HALF;
   3282		} else {
   3283			if (cmd->base.duplex == DUPLEX_FULL)
   3284				hw->media_type = MEDIA_TYPE_10M_FULL;
   3285			else
   3286				hw->media_type = MEDIA_TYPE_10M_HALF;
   3287		}
   3288	}
   3289
   3290	if (atl1_phy_setup_autoneg_adv(hw)) {
   3291		ret_val = -EINVAL;
   3292		if (netif_msg_link(adapter))
   3293			dev_warn(&adapter->pdev->dev,
   3294				"invalid ethtool speed/duplex setting\n");
   3295		goto exit_sset;
   3296	}
   3297	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3298	    hw->media_type == MEDIA_TYPE_1000M_FULL)
   3299		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
   3300	else {
   3301		switch (hw->media_type) {
   3302		case MEDIA_TYPE_100M_FULL:
   3303			phy_data =
   3304			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
   3305			    MII_CR_RESET;
   3306			break;
   3307		case MEDIA_TYPE_100M_HALF:
   3308			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
   3309			break;
   3310		case MEDIA_TYPE_10M_FULL:
   3311			phy_data =
   3312			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
   3313			break;
   3314		default:
   3315			/* MEDIA_TYPE_10M_HALF: */
   3316			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
   3317			break;
   3318		}
   3319	}
   3320	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
   3321exit_sset:
   3322	if (ret_val)
   3323		hw->media_type = old_media_type;
   3324
   3325	if (netif_running(adapter->netdev)) {
   3326		if (netif_msg_link(adapter))
   3327			dev_dbg(&adapter->pdev->dev,
   3328				"ethtool starting adapter\n");
   3329		atl1_up(adapter);
   3330	} else if (!ret_val) {
   3331		if (netif_msg_link(adapter))
   3332			dev_dbg(&adapter->pdev->dev,
   3333				"ethtool resetting adapter\n");
   3334		atl1_reset(adapter);
   3335	}
   3336	return ret_val;
   3337}
   3338
   3339static void atl1_get_drvinfo(struct net_device *netdev,
   3340	struct ethtool_drvinfo *drvinfo)
   3341{
   3342	struct atl1_adapter *adapter = netdev_priv(netdev);
   3343
   3344	strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
   3345	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
   3346		sizeof(drvinfo->bus_info));
   3347}
   3348
   3349static void atl1_get_wol(struct net_device *netdev,
   3350	struct ethtool_wolinfo *wol)
   3351{
   3352	struct atl1_adapter *adapter = netdev_priv(netdev);
   3353
   3354	wol->supported = WAKE_MAGIC;
   3355	wol->wolopts = 0;
   3356	if (adapter->wol & ATLX_WUFC_MAG)
   3357		wol->wolopts |= WAKE_MAGIC;
   3358}
   3359
   3360static int atl1_set_wol(struct net_device *netdev,
   3361	struct ethtool_wolinfo *wol)
   3362{
   3363	struct atl1_adapter *adapter = netdev_priv(netdev);
   3364
   3365	if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
   3366		WAKE_ARP | WAKE_MAGICSECURE))
   3367		return -EOPNOTSUPP;
   3368	adapter->wol = 0;
   3369	if (wol->wolopts & WAKE_MAGIC)
   3370		adapter->wol |= ATLX_WUFC_MAG;
   3371
   3372	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
   3373
   3374	return 0;
   3375}
   3376
   3377static u32 atl1_get_msglevel(struct net_device *netdev)
   3378{
   3379	struct atl1_adapter *adapter = netdev_priv(netdev);
   3380	return adapter->msg_enable;
   3381}
   3382
   3383static void atl1_set_msglevel(struct net_device *netdev, u32 value)
   3384{
   3385	struct atl1_adapter *adapter = netdev_priv(netdev);
   3386	adapter->msg_enable = value;
   3387}
   3388
   3389static int atl1_get_regs_len(struct net_device *netdev)
   3390{
   3391	return ATL1_REG_COUNT * sizeof(u32);
   3392}
   3393
   3394static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
   3395	void *p)
   3396{
   3397	struct atl1_adapter *adapter = netdev_priv(netdev);
   3398	struct atl1_hw *hw = &adapter->hw;
   3399	unsigned int i;
   3400	u32 *regbuf = p;
   3401
   3402	for (i = 0; i < ATL1_REG_COUNT; i++) {
   3403		/*
   3404		 * This switch statement avoids reserved regions
   3405		 * of register space.
   3406		 */
   3407		switch (i) {
   3408		case 6 ... 9:
   3409		case 14:
   3410		case 29 ... 31:
   3411		case 34 ... 63:
   3412		case 75 ... 127:
   3413		case 136 ... 1023:
   3414		case 1027 ... 1087:
   3415		case 1091 ... 1151:
   3416		case 1194 ... 1195:
   3417		case 1200 ... 1201:
   3418		case 1206 ... 1213:
   3419		case 1216 ... 1279:
   3420		case 1290 ... 1311:
   3421		case 1323 ... 1343:
   3422		case 1358 ... 1359:
   3423		case 1368 ... 1375:
   3424		case 1378 ... 1383:
   3425		case 1388 ... 1391:
   3426		case 1393 ... 1395:
   3427		case 1402 ... 1403:
   3428		case 1410 ... 1471:
   3429		case 1522 ... 1535:
   3430			/* reserved region; don't read it */
   3431			regbuf[i] = 0;
   3432			break;
   3433		default:
   3434			/* unreserved region */
   3435			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
   3436		}
   3437	}
   3438}
   3439
   3440static void atl1_get_ringparam(struct net_device *netdev,
   3441			       struct ethtool_ringparam *ring,
   3442			       struct kernel_ethtool_ringparam *kernel_ring,
   3443			       struct netlink_ext_ack *extack)
   3444{
   3445	struct atl1_adapter *adapter = netdev_priv(netdev);
   3446	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
   3447	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
   3448
   3449	ring->rx_max_pending = ATL1_MAX_RFD;
   3450	ring->tx_max_pending = ATL1_MAX_TPD;
   3451	ring->rx_pending = rxdr->count;
   3452	ring->tx_pending = txdr->count;
   3453}
   3454
   3455static int atl1_set_ringparam(struct net_device *netdev,
   3456			      struct ethtool_ringparam *ring,
   3457			      struct kernel_ethtool_ringparam *kernel_ring,
   3458			      struct netlink_ext_ack *extack)
   3459{
   3460	struct atl1_adapter *adapter = netdev_priv(netdev);
   3461	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
   3462	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
   3463	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
   3464
   3465	struct atl1_tpd_ring tpd_old, tpd_new;
   3466	struct atl1_rfd_ring rfd_old, rfd_new;
   3467	struct atl1_rrd_ring rrd_old, rrd_new;
   3468	struct atl1_ring_header rhdr_old, rhdr_new;
   3469	struct atl1_smb smb;
   3470	struct atl1_cmb cmb;
   3471	int err;
   3472
   3473	tpd_old = adapter->tpd_ring;
   3474	rfd_old = adapter->rfd_ring;
   3475	rrd_old = adapter->rrd_ring;
   3476	rhdr_old = adapter->ring_header;
   3477
   3478	if (netif_running(adapter->netdev))
   3479		atl1_down(adapter);
   3480
   3481	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
   3482	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
   3483			rfdr->count;
   3484	rfdr->count = (rfdr->count + 3) & ~3;
   3485	rrdr->count = rfdr->count;
   3486
   3487	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
   3488	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
   3489			tpdr->count;
   3490	tpdr->count = (tpdr->count + 3) & ~3;
   3491
   3492	if (netif_running(adapter->netdev)) {
   3493		/* try to get new resources before deleting old */
   3494		err = atl1_setup_ring_resources(adapter);
   3495		if (err)
   3496			goto err_setup_ring;
   3497
   3498		/*
   3499		 * save the new, restore the old in order to free it,
   3500		 * then restore the new back again
   3501		 */
   3502
   3503		rfd_new = adapter->rfd_ring;
   3504		rrd_new = adapter->rrd_ring;
   3505		tpd_new = adapter->tpd_ring;
   3506		rhdr_new = adapter->ring_header;
   3507		adapter->rfd_ring = rfd_old;
   3508		adapter->rrd_ring = rrd_old;
   3509		adapter->tpd_ring = tpd_old;
   3510		adapter->ring_header = rhdr_old;
   3511		/*
   3512		 * Save SMB and CMB, since atl1_free_ring_resources
   3513		 * will clear them.
   3514		 */
   3515		smb = adapter->smb;
   3516		cmb = adapter->cmb;
   3517		atl1_free_ring_resources(adapter);
   3518		adapter->rfd_ring = rfd_new;
   3519		adapter->rrd_ring = rrd_new;
   3520		adapter->tpd_ring = tpd_new;
   3521		adapter->ring_header = rhdr_new;
   3522		adapter->smb = smb;
   3523		adapter->cmb = cmb;
   3524
   3525		err = atl1_up(adapter);
   3526		if (err)
   3527			return err;
   3528	}
   3529	return 0;
   3530
   3531err_setup_ring:
   3532	adapter->rfd_ring = rfd_old;
   3533	adapter->rrd_ring = rrd_old;
   3534	adapter->tpd_ring = tpd_old;
   3535	adapter->ring_header = rhdr_old;
   3536	atl1_up(adapter);
   3537	return err;
   3538}
   3539
   3540static void atl1_get_pauseparam(struct net_device *netdev,
   3541	struct ethtool_pauseparam *epause)
   3542{
   3543	struct atl1_adapter *adapter = netdev_priv(netdev);
   3544	struct atl1_hw *hw = &adapter->hw;
   3545
   3546	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3547	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
   3548		epause->autoneg = AUTONEG_ENABLE;
   3549	} else {
   3550		epause->autoneg = AUTONEG_DISABLE;
   3551	}
   3552	epause->rx_pause = 1;
   3553	epause->tx_pause = 1;
   3554}
   3555
   3556static int atl1_set_pauseparam(struct net_device *netdev,
   3557	struct ethtool_pauseparam *epause)
   3558{
   3559	struct atl1_adapter *adapter = netdev_priv(netdev);
   3560	struct atl1_hw *hw = &adapter->hw;
   3561
   3562	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3563	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
   3564		epause->autoneg = AUTONEG_ENABLE;
   3565	} else {
   3566		epause->autoneg = AUTONEG_DISABLE;
   3567	}
   3568
   3569	epause->rx_pause = 1;
   3570	epause->tx_pause = 1;
   3571
   3572	return 0;
   3573}
   3574
   3575static void atl1_get_strings(struct net_device *netdev, u32 stringset,
   3576	u8 *data)
   3577{
   3578	u8 *p = data;
   3579	int i;
   3580
   3581	switch (stringset) {
   3582	case ETH_SS_STATS:
   3583		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
   3584			memcpy(p, atl1_gstrings_stats[i].stat_string,
   3585				ETH_GSTRING_LEN);
   3586			p += ETH_GSTRING_LEN;
   3587		}
   3588		break;
   3589	}
   3590}
   3591
   3592static int atl1_nway_reset(struct net_device *netdev)
   3593{
   3594	struct atl1_adapter *adapter = netdev_priv(netdev);
   3595	struct atl1_hw *hw = &adapter->hw;
   3596
   3597	if (netif_running(netdev)) {
   3598		u16 phy_data;
   3599		atl1_down(adapter);
   3600
   3601		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
   3602			hw->media_type == MEDIA_TYPE_1000M_FULL) {
   3603			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
   3604		} else {
   3605			switch (hw->media_type) {
   3606			case MEDIA_TYPE_100M_FULL:
   3607				phy_data = MII_CR_FULL_DUPLEX |
   3608					MII_CR_SPEED_100 | MII_CR_RESET;
   3609				break;
   3610			case MEDIA_TYPE_100M_HALF:
   3611				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
   3612				break;
   3613			case MEDIA_TYPE_10M_FULL:
   3614				phy_data = MII_CR_FULL_DUPLEX |
   3615					MII_CR_SPEED_10 | MII_CR_RESET;
   3616				break;
   3617			default:
   3618				/* MEDIA_TYPE_10M_HALF */
   3619				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
   3620			}
   3621		}
   3622		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
   3623		atl1_up(adapter);
   3624	}
   3625	return 0;
   3626}
   3627
   3628static const struct ethtool_ops atl1_ethtool_ops = {
   3629	.get_drvinfo		= atl1_get_drvinfo,
   3630	.get_wol		= atl1_get_wol,
   3631	.set_wol		= atl1_set_wol,
   3632	.get_msglevel		= atl1_get_msglevel,
   3633	.set_msglevel		= atl1_set_msglevel,
   3634	.get_regs_len		= atl1_get_regs_len,
   3635	.get_regs		= atl1_get_regs,
   3636	.get_ringparam		= atl1_get_ringparam,
   3637	.set_ringparam		= atl1_set_ringparam,
   3638	.get_pauseparam		= atl1_get_pauseparam,
   3639	.set_pauseparam		= atl1_set_pauseparam,
   3640	.get_link		= ethtool_op_get_link,
   3641	.get_strings		= atl1_get_strings,
   3642	.nway_reset		= atl1_nway_reset,
   3643	.get_ethtool_stats	= atl1_get_ethtool_stats,
   3644	.get_sset_count		= atl1_get_sset_count,
   3645	.get_link_ksettings	= atl1_get_link_ksettings,
   3646	.set_link_ksettings	= atl1_set_link_ksettings,
   3647};
   3648
   3649module_pci_driver(atl1_driver);