cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atl1e_main.c (69890B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 */

#include "atl1e.h"

char atl1e_driver_name[] = "ATL1E";
#define PCI_DEVICE_ID_ATTANSIC_L1E      0x1026
/*
 * atl1e_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id atl1e_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
	/* required last entry */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);

MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);

static const u16
atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
	{REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
	{REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
	{REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
};

static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
{
	REG_RXF0_BASE_ADDR_HI,
	REG_RXF1_BASE_ADDR_HI,
	REG_RXF2_BASE_ADDR_HI,
	REG_RXF3_BASE_ADDR_HI
};

static const u16
atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
	{REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
	{REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
	{REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
};

static const u16
atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_MB0_LO,  REG_HOST_RXF0_MB1_LO},
	{REG_HOST_RXF1_MB0_LO,  REG_HOST_RXF1_MB1_LO},
	{REG_HOST_RXF2_MB0_LO,  REG_HOST_RXF2_MB1_LO},
	{REG_HOST_RXF3_MB0_LO,  REG_HOST_RXF3_MB1_LO}
};

static const u16 atl1e_pay_load_size[] = {
	128, 256, 512, 1024, 2048, 4096,
};
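
/*
 * DMA burst lengths in bytes, indexed by the atl1e_dma_req_block enum
 * value kept in hw->dmar_block / hw->dmaw_block (assuming the enum
 * values run 0..5 in table order); see the lookup in
 * atl1e_configure_tx() below.
 */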

/**
 * atl1e_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
{
	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
		AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
		AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
		AT_WRITE_FLUSH(&adapter->hw);
	}
}
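
/*
 * Interrupt masking is reference counted through adapter->irq_sem
 * (initialized to 1, i.e. masked, in atl1e_sw_init()): each
 * atl1e_irq_disable() increments the count and clears IMR, and each
 * atl1e_irq_enable() decrements it, only unmasking once the count
 * reaches zero, so nested disable/enable pairs stay balanced.
 * Illustrative sequence:
 *
 *	atl1e_irq_disable(adapter);	irq_sem 1 -> 2, IMR = 0
 *	atl1e_irq_enable(adapter);	irq_sem 2 -> 1, still masked
 *	atl1e_irq_enable(adapter);	irq_sem 1 -> 0, IMR = IMR_NORMAL_MASK
 */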

/**
 * atl1e_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * atl1e_irq_reset - reset interrupt configuration on the NIC
 * @adapter: board private structure
 */
static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
{
	atomic_set(&adapter->irq_sem, 0);
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
}

/**
 * atl1e_phy_config - Timer Call-back
 * @t: timer list containing pointer to netdev cast into an unsigned long
 */
static void atl1e_phy_config(struct timer_list *t)
{
	struct atl1e_adapter *adapter = from_timer(adapter, t,
						   phy_config_timer);
	struct atl1e_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_restart_autoneg(hw);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);
	atl1e_down(adapter);
	atl1e_up(adapter);
	clear_bit(__AT_RESETTING, &adapter->flags);
}

static void atl1e_reset_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	adapter = container_of(work, struct atl1e_adapter, reset_task);

	atl1e_reinit_locked(adapter);
}

static int atl1e_check_link(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u16 speed, duplex, phy_data;

	/* MII_BMSR must be read twice */
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	if ((phy_data & BMSR_LSTATUS) == 0) {
		/* link down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = AT_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else {
		/* Link Up */
		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
		if (unlikely(err))
			return err;

		/* link result is our setting */
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed  = speed;
			adapter->link_duplex = duplex;
			atl1e_setup_mac_ctrl(adapter);
			netdev_info(netdev,
				    "NIC Link is Up <%d Mbps %s Duplex>\n",
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half");
		}

		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
	return 0;
}

/**
 * atl1e_link_chg_task - deal with link change event out of interrupt context
 * @work: work struct with driver info
 */
static void atl1e_link_chg_task(struct work_struct *work)
{
	struct atl1e_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl1e_adapter, link_chg_task);
	spin_lock_irqsave(&adapter->mdio_lock, flags);
	atl1e_check_link(adapter);
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}

static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 phy_data = 0;
	u16 link_up = 0;

	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->mdio_lock);
	link_up = phy_data & BMSR_LSTATUS;
	/* notify upper layer link down ASAP */
	if (!link_up) {
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			netdev_info(netdev, "NIC Link is Down\n");
			adapter->link_speed = SPEED_0;
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}

static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
	del_timer_sync(&adapter->phy_config_timer);
}

static void atl1e_cancel_work(struct atl1e_adapter *adapter)
{
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->link_chg_task);
}

/**
 * atl1e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: the index of the hanging queue
 */
static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/**
 * atl1e_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1e_set_multi(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 mac_ctrl_data = 0;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);

	if (netdev->flags & IFF_PROMISC) {
		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
	} else if (netdev->flags & IFF_ALLMULTI) {
		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
	} else {
		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
	}

	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

	/* clear the old settings from the multicast hash table */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* compute each mc address's hash value, and put it into the hash table */
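	/*
	 * atl1e_hash_mc_addr() and atl1e_hash_set() (in atl1e_hw.c) pick
	 * one bit per address in the 64-bit filter cleared by the two
	 * writes above (REG_RX_HASH_TABLE spans two 32-bit words), so
	 * distinct addresses may share a bit and imperfect matches are
	 * filtered further up the stack.
	 */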
	netdev_for_each_mc_addr(ha, netdev) {
		hash_value = atl1e_hash_mc_addr(hw, ha->addr);
		atl1e_hash_set(hw, hash_value);
	}
}

static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
{

	if (features & NETIF_F_RXALL) {
		/* enable RX of ALL frames */
		*mac_ctrl_data |= MAC_CTRL_DBG;
	} else {
		/* disable RX of ALL frames */
		*mac_ctrl_data &= ~MAC_CTRL_DBG;
	}
}

static void atl1e_rx_mode(struct net_device *netdev,
	netdev_features_t features)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 mac_ctrl_data = 0;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	atl1e_irq_disable(adapter);
	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
	__atl1e_rx_mode(features, &mac_ctrl_data);
	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
	atl1e_irq_enable(adapter);
}


static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* enable VLAN tag insert/strip */
		*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
	} else {
		/* disable VLAN tag insert/strip */
		*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
	}
}

static void atl1e_vlan_mode(struct net_device *netdev,
	netdev_features_t features)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u32 mac_ctrl_data = 0;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	atl1e_irq_disable(adapter);
	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
	__atl1e_vlan_mode(features, &mac_ctrl_data);
	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
	atl1e_irq_enable(adapter);
}

static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
	netdev_dbg(adapter->netdev, "%s\n", __func__);
	atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}

/**
 * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netif_running(netdev))
		return -EBUSY;

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	atl1e_hw_set_mac_addr(&adapter->hw);

	return 0;
}

static netdev_features_t atl1e_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same
	 * state as the rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int atl1e_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		atl1e_vlan_mode(netdev, features);

	if (changed & NETIF_F_RXALL)
		atl1e_rx_mode(netdev, features);


	return 0;
}

/**
 * atl1e_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* set MTU */
	if (netif_running(netdev)) {
		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
			msleep(1);
		netdev->mtu = new_mtu;
		adapter->hw.max_frame_size = new_mtu;
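		/*
		 * rx_jumbo_th is kept in 8-byte (qword) units; rounding
		 * up with "+ 7 >> 3" converts the byte count. E.g. for
		 * new_mtu = 1500, max_frame = 1500 + 14 + 4 + 4 = 1522
		 * bytes, so rx_jumbo_th = (1522 + 7) >> 3 = 191 qwords.
		 */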
		adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
		atl1e_down(adapter);
		atl1e_up(adapter);
		clear_bit(__AT_RESETTING, &adapter->flags);
	}
	return 0;
}

/*
 *  caller should hold mdio_lock
 */
static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
	return result;
}

static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
			     int reg_num, int val)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (atl1e_write_phy_reg(&adapter->hw,
				reg_num & MDIO_REG_ADDR_MASK, val))
		netdev_err(netdev, "write phy register failed\n");
}

static int atl1e_mii_ioctl(struct net_device *netdev,
			   struct ifreq *ifr, int cmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int retval = 0;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 0;
		break;

	case SIOCGMIIREG:
		if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				    &data->val_out)) {
			retval = -EIO;
			goto out;
		}
		break;

	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F)) {
			retval = -EFAULT;
			goto out;
		}

		netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
			   data->reg_num, data->val_in);
		if (atl1e_write_phy_reg(&adapter->hw,
				     data->reg_num, data->val_in)) {
			retval = -EIO;
			goto out;
		}
		break;

	default:
		retval = -EOPNOTSUPP;
		break;
	}
out:
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
	return retval;

}

static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return atl1e_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static void atl1e_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
	cmd |=  (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * some motherboards' BIOS (PXE/EFI) drivers may leave PME set
	 * when they transfer control to the OS (Windows/Linux), so we
	 * should clear this bit before the NIC can work normally
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
	msleep(1);
}

/**
 * atl1e_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 */
static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
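	/*
	 * Nothing to allocate here: this driver embeds its single
	 * tx_ring and rx_ring directly in struct atl1e_adapter, so the
	 * hook only exists for symmetry with multi-queue drivers.
	 */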
	return 0;
}

/**
 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int atl1e_sw_init(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw   = &adapter->hw;
	struct pci_dev	*pdev = adapter->pdev;
	u32 phy_status_data = 0;

	adapter->wol = 0;
	adapter->link_speed = SPEED_0;   /* hardware init */
	adapter->link_duplex = FULL_DUPLEX;
	adapter->num_rx_queues = 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id  = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
	/* nic type */
	if (hw->revision_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (phy_status_data & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

	if (phy_status_data & PHY_STATUS_EMI_CA)
		hw->emi_ca = true;
	else
		hw->emi_ca = false;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->max_frame_size = adapter->netdev->mtu;
	hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
				VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;

	hw->rrs_type = atl1e_rrs_disable;
	hw->indirect_tab = 0;
	hw->base_cpu = 0;

	/* need confirm */

	hw->ict = 50000;                 /* 100ms */
	hw->smb_timer = 200000;          /* 200ms  */
	hw->tpd_burst = 5;
	hw->rrd_thresh = 1;
	hw->tpd_thresh = adapter->tx_ring.count / 2;
	hw->rx_count_down = 4;  /* 2us resolution */
	hw->tx_count_down = hw->imt * 4 / 3;
	hw->dmar_block = atl1e_dma_req_1024;
	hw->dmaw_block = atl1e_dma_req_1024;
	hw->dmar_dly_cnt = 15;
	hw->dmaw_dly_cnt = 4;

	if (atl1e_alloc_queues(adapter)) {
		netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->mdio_lock);

	set_bit(__AT_DOWN, &adapter->flags);

	return 0;
}

/**
 * atl1e_clean_tx_ring - Free Tx-skb
 * @adapter: board private structure
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	struct pci_dev *pdev = adapter->pdev;
	u16 index, ring_count;

	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
		return;

	ring_count = tx_ring->count;
	/* first unmap dma */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				dma_unmap_single(&pdev->dev, tx_buffer->dma,
						 tx_buffer->length,
						 DMA_TO_DEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				dma_unmap_page(&pdev->dev, tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			tx_buffer->dma = 0;
		}
	}
	/* second free skb */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->skb) {
			dev_kfree_skb_any(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}
	}
	/* Zero out Tx-buffers */
	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
				ring_count);
	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
				ring_count);
}

/**
 * atl1e_clean_rx_ring - Free rx-reservation skbs
 * @adapter: board private structure
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring =
		&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
	u16 i, j;


	if (adapter->ring_vir_addr == NULL)
		return;
	/* Zero out the descriptor ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			if (rx_page_desc[i].rx_page[j].addr != NULL) {
				memset(rx_page_desc[i].rx_page[j].addr, 0,
						rx_ring->real_page_size);
			}
		}
	}
}

static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
{
	*ring_size = ((u32)(adapter->tx_ring.count *
		     sizeof(struct atl1e_tpd_desc) + 7
			/* tx ring, qword align */
		     + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
			adapter->num_rx_queues + 31
			/* rx ring,  32 bytes align */
		     + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
			sizeof(u32) + 3));
			/* tx, rx cmd, dword align   */
}
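
/*
 * Worked example (illustrative, assuming a 16-byte TPD descriptor, a
 * 64-entry tx ring and the single RX queue set up in atl1e_sw_init()):
 * 64 * 16 + 7 bytes for the TPD ring plus qword-align slack,
 * + real_page_size * 2 + 31 for the two RXF pages plus 32-byte-align
 * slack, + (1 + 2) * 4 + 3 for the TX CMB word and the two RX
 * write-offset words plus dword-align slack. All of it is carved out of
 * one dma_alloc_coherent() block in atl1e_setup_ring_resources().
 */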

static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring = NULL;

	rx_ring = &adapter->rx_ring;

	rx_ring->real_page_size = adapter->rx_ring.page_size
				 + adapter->hw.max_frame_size
				 + ETH_HLEN + VLAN_HLEN
				 + ETH_FCS_LEN;
	rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
	atl1e_cal_ring_size(adapter, &adapter->ring_size);

	adapter->ring_vir_addr = NULL;
	adapter->rx_ring.desc = NULL;
	rwlock_init(&adapter->tx_ring.tx_lock);
}

/*
 * Read / Write Ptr Initialize:
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;
	rx_page_desc = rx_ring->rx_page_desc;

	tx_ring->next_to_use = 0;
	atomic_set(&tx_ring->next_to_clean, 0);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_page_desc[i].rx_using  = 0;
		rx_page_desc[i].rx_nxseq = 0;
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			*rx_page_desc[i].rx_page[j].write_offset_addr = 0;
			rx_page_desc[i].rx_page[j].read_offset = 0;
		}
	}
}

/**
 * atl1e_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);

	if (adapter->ring_vir_addr) {
		dma_free_coherent(&pdev->dev, adapter->ring_size,
				  adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}

	if (adapter->tx_ring.tx_buffer) {
		kfree(adapter->tx_ring.tx_buffer);
		adapter->tx_ring.tx_buffer = NULL;
	}
}

/**
 * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1e_tx_ring *tx_ring;
	struct atl1e_rx_ring *rx_ring;
	struct atl1e_rx_page_desc  *rx_page_desc;
	int size, i, j;
	u32 offset = 0;
	int err = 0;

	if (adapter->ring_vir_addr != NULL)
		return 0; /* already allocated */

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;

	/* real ring DMA buffer */

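	/*
	 * Layout of the single coherent block carved up below
	 * (offsets grow downward):
	 *
	 *	+---------------------------------------------+
	 *	| TPD tx descriptor ring (8-byte aligned)     |
	 *	+---------------------------------------------+
	 *	| RXF pages, 2 per RX queue (32-byte aligned) |
	 *	+---------------------------------------------+
	 *	| TX CMB word (written back by hardware)      |
	 *	+---------------------------------------------+
	 *	| RX write-offset words, 1 per RXF page       |
	 *	+---------------------------------------------+
	 */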
	size = adapter->ring_size;
	adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev,
						    adapter->ring_size,
						    &adapter->ring_dma, GFP_KERNEL);
	if (adapter->ring_vir_addr == NULL) {
		netdev_err(adapter->netdev,
			   "dma_alloc_coherent failed, size = D%d\n", size);
		return -ENOMEM;
	}

	rx_page_desc = rx_ring->rx_page_desc;

	/* Init TPD Ring */
	tx_ring->dma = roundup(adapter->ring_dma, 8);
	offset = tx_ring->dma - adapter->ring_dma;
	tx_ring->desc = adapter->ring_vir_addr + offset;
	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
	tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
	if (tx_ring->tx_buffer == NULL) {
		err = -ENOMEM;
		goto failed;
	}

	/* Init RXF-Pages */
	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
	offset = roundup(offset, 32);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].addr =
				adapter->ring_vir_addr + offset;
			offset += rx_ring->real_page_size;
		}
	}

	/* Init CMB dma address */
	tx_ring->cmb_dma = adapter->ring_dma + offset;
	tx_ring->cmb = adapter->ring_vir_addr + offset;
	offset += sizeof(u32);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			rx_page_desc[i].rx_page[j].write_offset_dma =
				adapter->ring_dma + offset;
			rx_page_desc[i].rx_page[j].write_offset_addr =
				adapter->ring_vir_addr + offset;
			offset += sizeof(u32);
		}
	}

	if (unlikely(offset > adapter->ring_size)) {
		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
			   offset, adapter->ring_size);
		err = -1;
		goto failed;
	}

	return 0;
failed:
	if (adapter->ring_vir_addr != NULL) {
		dma_free_coherent(&pdev->dev, adapter->ring_size,
				  adapter->ring_vir_addr, adapter->ring_dma);
		adapter->ring_vir_addr = NULL;
	}
	return err;
}

static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{

	struct atl1e_hw *hw = &adapter->hw;
	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
			(u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));

	rx_page_desc = rx_ring->rx_page_desc;
	/* RXF Page Physical address / Page Length */
	for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
		AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
				 (u32)((adapter->ring_dma &
				 AT_DMA_HI_ADDR_MASK) >> 32));
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			u32 page_phy_addr;
			u32 offset_phy_addr;

			page_phy_addr = rx_page_desc[i].rx_page[j].dma;
			offset_phy_addr =
				   rx_page_desc[i].rx_page[j].write_offset_dma;

			AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
					page_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
					offset_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
		}
	}
	/* Page Length */
	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
	/* Load all of base address above */
	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
}

static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dev_ctrl_data = 0;
	u32 max_pay_load = 0;
	u32 jumbo_thresh = 0;
	u32 extra_size = 0;     /* Jumbo frame threshold in QWORD unit */

	/* configure TXQ param */
	if (hw->nic_type != athr_l2e_revB) {
		extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		if (hw->max_frame_size <= 1500) {
			jumbo_thresh = hw->max_frame_size + extra_size;
		} else if (hw->max_frame_size < 6*1024) {
			jumbo_thresh =
				(hw->max_frame_size + extra_size) * 2 / 3;
		} else {
			jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
		}
		AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
	}

	dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);

	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
			DEVICE_CTRL_MAX_PAYLOAD_MASK;

	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);

	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);

	if (hw->nic_type != athr_l2e_revB)
		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
			      atl1e_pay_load_size[hw->dmar_block]);
	/* enable TXQ */
	AT_WRITE_REGW(hw, REG_TXQ_CTRL,
			(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
			 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
			| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
}

static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 rxf_len  = 0;
	u32 rxf_low  = 0;
	u32 rxf_high = 0;
	u32 rxf_thresh_data = 0;
	u32 rxq_ctrl_data = 0;

	if (hw->nic_type != athr_l2e_revB) {
		AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
			      (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
			      RXQ_JMBOSZ_TH_SHIFT |
			      (1 & RXQ_JMBO_LKAH_MASK) <<
			      RXQ_JMBO_LKAH_SHIFT));

		rxf_len  = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
		rxf_high = rxf_len * 4 / 5;
		rxf_low  = rxf_len / 5;
		rxf_thresh_data = ((rxf_high  & RXQ_RXF_PAUSE_TH_HI_MASK)
				  << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
				  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
				  << RXQ_RXF_PAUSE_TH_LO_SHIFT);

		AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
	}

	/* RRS */
	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);

	if (hw->rrs_type & atl1e_rrs_ipv4)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;

	if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;

	if (hw->rrs_type & atl1e_rrs_ipv6)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;

	if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;

	if (hw->rrs_type != atl1e_rrs_disable)
		rxq_ctrl_data |=
			(RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);

	rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
			 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;

	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
}

static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	u32 dma_ctrl_data = 0;

	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;

	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
}

static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
{
	u32 value;
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN |
		MAC_CTRL_RX_EN;

	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
			  MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
			  MAC_CTRL_SPEED_SHIFT);
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	value |= (((u32)adapter->hw.preamble_len &
		  MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);

	__atl1e_vlan_mode(netdev->features, &value);

	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	if (netdev->features & NETIF_F_RXALL)
		value |= MAC_CTRL_DBG;
	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}

/**
 * atl1e_configure - Configure Transmit&Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx /Rx unit of the MAC after a reset.
 */
static int atl1e_configure(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;

	u32 intr_status_data = 0;

	/* clear interrupt status */
	AT_WRITE_REG(hw, REG_ISR, ~0);

	/* 1. set MAC Address */
	atl1e_hw_set_mac_addr(hw);

	/* 2. Init the Multicast HASH table (done by set_multi) */

	/* 3. Clear any WOL status */
	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
	 *    High 32bits memory */
	atl1e_configure_des_ring(adapter);

	/* 5. set Interrupt Moderator Timer */
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
			MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);

	/* 6. rx/tx threshold to trig interrupt */
	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);

	/* 7. set Interrupt Clear Timer */
	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);

	/* 8. set MTU */
	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
			VLAN_HLEN + ETH_FCS_LEN);

	/* 9. config TXQ early tx threshold */
	atl1e_configure_tx(adapter);

	/* 10. config RXQ */
	atl1e_configure_rx(adapter);

	/* 11. config  DMA Engine */
	atl1e_configure_dma(adapter);

	/* 12. smb timer to trig interrupt */
	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);

	intr_status_data = AT_READ_REG(hw, REG_ISR);
	if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
		netdev_err(adapter->netdev,
			   "atl1e_configure failed, PCIE phy link down\n");
		return -1;
	}

	AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
	return 0;
}

/**
 * atl1e_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 */
static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw_stats  *hw_stats = &adapter->hw_stats;
	struct net_device_stats *net_stats = &netdev->stats;

	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_1_col +
				hw_stats->tx_2_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors  = hw_stats->rx_frag +
				hw_stats->rx_fcs_err +
				hw_stats->rx_len_err +
				hw_stats->rx_sz_ov +
				hw_stats->rx_rrd_ov +
				hw_stats->rx_align_err +
				hw_stats->rx_rxf_ov;

	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;

	return net_stats;
}

static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
{
	u16 hw_reg_addr = 0;
	unsigned long *stats_item = NULL;

	/* update rx status */
	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.rx_ok;
	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
	/* update tx status */
	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
	stats_item  = &adapter->hw_stats.tx_ok;
	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
		stats_item++;
		hw_reg_addr += 4;
	}
}

static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
{
	u16 phy_data;

	spin_lock(&adapter->mdio_lock);
	atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
	spin_unlock(&adapter->mdio_lock);
}

static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);

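	/*
	 * Hardware advances its TPD consumer index as it finishes
	 * sending; walk the software next_to_clean pointer up to that
	 * index, unmapping each buffer and releasing its skb on the way.
	 */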
	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		if (tx_buffer->dma) {
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				dma_unmap_single(&adapter->pdev->dev,
						 tx_buffer->dma,
						 tx_buffer->length,
						 DMA_TO_DEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				dma_unmap_page(&adapter->pdev->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			tx_buffer->dma = 0;
		}

		if (tx_buffer->skb) {
			dev_consume_skb_irq(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}

		if (++next_to_clean == tx_ring->count)
			next_to_clean = 0;
	}

	atomic_set(&tx_ring->next_to_clean, next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
			netif_carrier_ok(adapter->netdev)) {
		netif_wake_queue(adapter->netdev);
	}

	return true;
}

/**
 * atl1e_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1e_intr(int irq, void *data)
{
	struct net_device *netdev  = data;
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	int max_ints = AT_MAX_INT_WORK;
	int handled = IRQ_NONE;
	u32 status;

	do {
		status = AT_READ_REG(hw, REG_ISR);
		if ((status & IMR_NORMAL_MASK) == 0 ||
				(status & ISR_DIS_INT) != 0) {
			if (max_ints != AT_MAX_INT_WORK)
				handled = IRQ_HANDLED;
			break;
		}
		/* link event */
		if (status & ISR_GPHY)
			atl1e_clear_phy_int(adapter);
		/* Ack ISR */
		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

		handled = IRQ_HANDLED;
		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			netdev_err(adapter->netdev,
				   "pcie phy linkdown %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				atl1e_irq_reset(adapter);
				schedule_work(&adapter->reset_task);
				break;
			}
		}

		/* check if DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			netdev_err(adapter->netdev,
				   "PCIE DMA RW error (status = 0x%x)\n",
				   status);
			atl1e_irq_reset(adapter);
			schedule_work(&adapter->reset_task);
			break;
		}

		if (status & ISR_SMB)
			atl1e_update_hw_stats(adapter);

		/* link event */
		if (status & (ISR_GPHY | ISR_MANUAL)) {
			netdev->stats.tx_carrier_errors++;
			atl1e_link_chg_event(adapter);
			break;
		}

		/* transmit event */
		if (status & ISR_TX_EVENT)
			atl1e_clean_tx_irq(adapter);

		if (status & ISR_RX_EVENT) {
			/*
			 * disable rx interrupts, without
			 * the synchronize_irq bit
			 */
			AT_WRITE_REG(hw, REG_IMR,
				     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
			AT_WRITE_FLUSH(hw);
			if (likely(napi_schedule_prep(
				   &adapter->napi)))
				__napi_schedule(&adapter->napi);
		}
	} while (--max_ints > 0);
	/* re-enable interrupt */
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);

	return handled;
}

static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
{
	u8 *packet = (u8 *)(prrs + 1);
	struct iphdr *iph;
	u16 head_len = ETH_HLEN;
	u16 pkt_flags;
	u16 err_flags;

	skb_checksum_none_assert(skb);
	pkt_flags = prrs->pkt_flag;
	err_flags = prrs->err_flag;
	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
		((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
		if (pkt_flags & RRS_IS_IPV4) {
			if (pkt_flags & RRS_IS_802_3)
				head_len += 8;
			iph = (struct iphdr *) (packet + head_len);
			if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
				goto hw_xsum;
		}
		if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}
	}

hw_xsum:
	return;
}

static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
					       u8 que)
{
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
	u8 rx_using = rx_page_desc[que].rx_using;

	return &(rx_page_desc[que].rx_page[rx_using]);
}

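/*
 * RX uses a double-buffered page scheme rather than a conventional
 * descriptor ring: each queue owns two large DMA pages. Hardware fills
 * the "rx_using" page with back-to-back (RRS header + frame) records
 * and keeps that page's write_offset word (in coherent memory) up to
 * date; the driver consumes records by advancing read_offset. When a
 * page is fully consumed, the driver rearms it through its VLD register
 * and flips rx_using to the other page.
 */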
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
		   int *work_done, int work_to_do)
{
	struct net_device *netdev  = adapter->netdev;
	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
	struct sk_buff *skb = NULL;
	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
	u32 packet_size, write_offset;
	struct atl1e_recv_ret_status *prrs;

	write_offset = *(rx_page->write_offset_addr);
	if (likely(rx_page->read_offset < write_offset)) {
		do {
			if (*work_done >= work_to_do)
				break;
			(*work_done)++;
			/* get new packet's rrs */
			prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
						 rx_page->read_offset);
			/* check sequence number */
			if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
				netdev_err(netdev,
					   "rx sequence number error (rx=%d) (expect=%d)\n",
					   prrs->seq_num,
					   rx_page_desc[que].rx_nxseq);
				rx_page_desc[que].rx_nxseq++;
				/* just for debug use */
				AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
					     (((u32)prrs->seq_num) << 16) |
					     rx_page_desc[que].rx_nxseq);
				goto fatal_err;
			}
			rx_page_desc[que].rx_nxseq++;

			/* error packet */
			if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
			    !(netdev->features & NETIF_F_RXALL)) {
				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
					RRS_ERR_TRUNC)) {
				/* hardware error, discard this packet*/
					netdev_err(netdev,
						   "rx packet desc error %x\n",
						   *((u32 *)prrs + 1));
					goto skip_pkt;
				}
			}

			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
					RRS_PKT_SIZE_MASK);
			if (likely(!(netdev->features & NETIF_F_RXFCS)))
				packet_size -= 4; /* CRC */

			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
			if (skb == NULL)
				goto skip_pkt;

			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
			skb_put(skb, packet_size);
			skb->protocol = eth_type_trans(skb, netdev);
			atl1e_rx_checksum(adapter, skb, prrs);

			if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
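				/*
				 * The RRS stores the 802.1Q TCI with its
				 * fields rotated: VID in bits 15:4, CFI
				 * in bit 3 and priority in bits 2:0. The
				 * shifts below reassemble the standard
				 * TCI layout (PRI 15:13, CFI 12,
				 * VID 11:0) that
				 * __vlan_hwaccel_put_tag() expects.
				 */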
				u16 vlan_tag = (prrs->vtag >> 4) |
					       ((prrs->vtag & 7) << 13) |
					       ((prrs->vtag & 8) << 9);
				netdev_dbg(netdev,
					   "RXD VLAN TAG<RRD>=0x%04x\n",
					   prrs->vtag);
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
			}
			napi_gro_receive(&adapter->napi, skb);

skip_pkt:
	/* skip current packet whether it's ok or not. */
			rx_page->read_offset +=
				(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
				RRS_PKT_SIZE_MASK) +
				sizeof(struct atl1e_recv_ret_status) + 31) &
						0xFFFFFFE0);

			if (rx_page->read_offset >= rx_ring->page_size) {
				/* mark this page clean */
				u16 reg_addr;
				u8  rx_using;

				rx_page->read_offset =
					*(rx_page->write_offset_addr) = 0;
				rx_using = rx_page_desc[que].rx_using;
				reg_addr =
					atl1e_rx_page_vld_regs[que][rx_using];
				AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
				rx_page_desc[que].rx_using ^= 1;
				rx_page = atl1e_get_rx_page(adapter, que);
			}
			write_offset = *(rx_page->write_offset_addr);
		} while (rx_page->read_offset < write_offset);
	}

	return;

fatal_err:
	if (!test_bit(__AT_DOWN, &adapter->flags))
		schedule_work(&adapter->reset_task);
}

/**
 * atl1e_clean - NAPI Rx polling callback
 * @napi: napi info
 * @budget: number of packets to clean
 */
static int atl1e_clean(struct napi_struct *napi, int budget)
{
	struct atl1e_adapter *adapter =
			container_of(napi, struct atl1e_adapter, napi);
	u32 imr_data;
	int work_done = 0;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(adapter->netdev))
		goto quit_polling;

	atl1e_clean_rx_irq(adapter, 0, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
quit_polling:
		napi_complete_done(napi, work_done);
		imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
		AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
		/* test debug */
		if (test_bit(__AT_DOWN, &adapter->flags)) {
			atomic_dec(&adapter->irq_sem);
			netdev_err(adapter->netdev,
				   "atl1e_clean is called when AT_DOWN\n");
		}
		/* reenable RX intr */
		/*atl1e_irq_enable(adapter); */

	}
	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void atl1e_netpoll(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	atl1e_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;
	u16 next_to_clean = 0;

	next_to_clean = atomic_read(&tx_ring->next_to_clean);
	next_to_use   = tx_ring->next_to_use;

	return (u16)(next_to_clean > next_to_use) ?
		(next_to_clean - next_to_use - 1) :
		(tx_ring->count + next_to_clean - next_to_use - 1);
}
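
/*
 * Ring occupancy example: with count = 64, next_to_clean = 10 and
 * next_to_use = 50, 64 + 10 - 50 - 1 = 23 descriptors are free; one
 * slot is always kept unused so a full ring can be told apart from an
 * empty one.
 */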

/*
 * get next usable tpd
 * Note: should call atl1e_tpd_avail to make sure
 * there is enough tpd to use
 */
static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	u16 next_to_use = 0;

	next_to_use = tx_ring->next_to_use;
	if (++tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
	return &tx_ring->desc[next_to_use];
}

static struct atl1e_tx_buffer *
atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;

	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
}

/* Calculate the number of transmit packet descriptors (TPDs) needed */
static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
{
	int i = 0;
	u16 tpd_req = 1;
	u16 fg_size = 0;
	u16 proto_hdr_len = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
	}

	if (skb_is_gso(skb)) {
		if (skb->protocol == htons(ETH_P_IP) ||
		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
			proto_hdr_len = skb_transport_offset(skb) +
					tcp_hdrlen(skb);
			if (proto_hdr_len < skb_headlen(skb)) {
				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
					   MAX_TX_BUF_LEN - 1) >>
					   MAX_TX_BUF_SHIFT);
			}
		}

	}
	return tpd_req;
}
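
/*
 * Example (with MAX_TX_BUF_LEN assumed to be the driver's per-buffer
 * DMA limit, e.g. 8 KiB): a non-GSO skb with a linear part and one
 * 20 KiB fragment needs 1 + ceil(20480 / 8192) = 1 + 3 = 4 TPDs.
 */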
   1624
   1625static int atl1e_tso_csum(struct atl1e_adapter *adapter,
   1626		       struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
   1627{
   1628	unsigned short offload_type;
   1629	u8 hdr_len;
   1630	u32 real_len;
   1631
   1632	if (skb_is_gso(skb)) {
   1633		int err;
   1634
   1635		err = skb_cow_head(skb, 0);
   1636		if (err < 0)
   1637			return err;
   1638
   1639		offload_type = skb_shinfo(skb)->gso_type;
   1640
   1641		if (offload_type & SKB_GSO_TCPV4) {
   1642			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
   1643					+ ntohs(ip_hdr(skb)->tot_len));
   1644
   1645			if (real_len < skb->len)
   1646				pskb_trim(skb, real_len);
   1647
   1648			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
   1649			if (unlikely(skb->len == hdr_len)) {
   1650				/* only checksumming is needed */
   1651				netdev_warn(adapter->netdev,
   1652					    "IPv4 TSO with zero payload data\n");
   1653				goto check_sum;
   1654			} else {
   1655				ip_hdr(skb)->check = 0;
   1656				ip_hdr(skb)->tot_len = 0;
   1657				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
   1658							ip_hdr(skb)->saddr,
   1659							ip_hdr(skb)->daddr,
   1660							0, IPPROTO_TCP, 0);
   1661				tpd->word3 |= (ip_hdr(skb)->ihl &
   1662					TDP_V4_IPHL_MASK) <<
   1663					TPD_V4_IPHL_SHIFT;
   1664				tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
   1665					TPD_TCPHDRLEN_MASK) <<
   1666					TPD_TCPHDRLEN_SHIFT;
   1667				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
   1668					TPD_MSS_MASK) << TPD_MSS_SHIFT;
   1669				tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
   1670			}
   1671			return 0;
   1672		}
   1673	}
   1674
   1675check_sum:
   1676	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
   1677		u8 css, cso;
   1678
   1679		cso = skb_checksum_start_offset(skb);
   1680		if (unlikely(cso & 0x1)) {
   1681			netdev_err(adapter->netdev,
   1682				   "payload checksum start offset must be even\n");
   1683			return -EINVAL;
   1684		} else {
   1685			css = cso + skb->csum_offset;
   1686			tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
   1687					TPD_PLOADOFFSET_SHIFT;
   1688			tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
   1689					TPD_CCSUMOFFSET_SHIFT;
   1690			tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
   1691		}
   1692	}
   1693
   1694	return 0;
   1695}
   1696
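       /*
        * DMA-map the skb and attach the mappings to TPDs: for TSO the
        * protocol headers get a descriptor of their own, the rest of the
        * linear head is split into MAX_TX_BUF_LEN-sized chunks, and every
        * page fragment is mapped with skb_frag_dma_map().  If any mapping
        * fails, all mappings made so far are unwound and next_to_use is
        * rolled back, leaving the ring exactly as it was found.
        */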
   1697static int atl1e_tx_map(struct atl1e_adapter *adapter,
   1698			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
   1699{
   1700	struct atl1e_tpd_desc *use_tpd = NULL;
   1701	struct atl1e_tx_buffer *tx_buffer = NULL;
   1702	u16 buf_len = skb_headlen(skb);
   1703	u16 map_len = 0;
   1704	u16 mapped_len = 0;
   1705	u16 hdr_len = 0;
   1706	u16 nr_frags;
   1707	u16 f;
   1708	int segment;
   1709	int ring_start = adapter->tx_ring.next_to_use;
   1710	int ring_end;
   1711
   1712	nr_frags = skb_shinfo(skb)->nr_frags;
   1713	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
   1714	if (segment) {
   1715		/* TSO */
   1716		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
   1717		use_tpd = tpd;
   1718
   1719		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
   1720		tx_buffer->length = map_len;
   1721		tx_buffer->dma = dma_map_single(&adapter->pdev->dev,
   1722						skb->data, hdr_len,
   1723						DMA_TO_DEVICE);
   1724		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
   1725			return -ENOSPC;
   1726
   1727		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
   1728		mapped_len += map_len;
   1729		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
   1730		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
   1731			((cpu_to_le32(tx_buffer->length) &
   1732			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
   1733	}
   1734
   1735	while (mapped_len < buf_len) {
   1736		/* mapped_len == 0 means we should use the first tpd,
   1737		 * which is given by the caller */
   1738		if (mapped_len == 0) {
   1739			use_tpd = tpd;
   1740		} else {
   1741			use_tpd = atl1e_get_tpd(adapter);
   1742			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
   1743		}
   1744		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
   1745		tx_buffer->skb = NULL;
   1746
   1747		tx_buffer->length = map_len =
   1748			((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
   1749			MAX_TX_BUF_LEN : (buf_len - mapped_len);
   1750		tx_buffer->dma =
   1751			dma_map_single(&adapter->pdev->dev,
   1752				       skb->data + mapped_len, map_len,
   1753				       DMA_TO_DEVICE);
   1754
   1755		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
   1756			/* We need to unwind the mappings we've done */
   1757			ring_end = adapter->tx_ring.next_to_use;
   1758			adapter->tx_ring.next_to_use = ring_start;
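       			/* atl1e_get_tpd() advances next_to_use as a side
       			 * effect, so after rewinding to ring_start the loop
       			 * below revisits every descriptor mapped so far */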
   1759			while (adapter->tx_ring.next_to_use != ring_end) {
   1760				tpd = atl1e_get_tpd(adapter);
   1761				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
   1762				dma_unmap_single(&adapter->pdev->dev,
   1763						 tx_buffer->dma,
   1764						 tx_buffer->length,
   1765						 DMA_TO_DEVICE);
   1766			}
   1767			/* Reset the tx rings next pointer */
   1768			adapter->tx_ring.next_to_use = ring_start;
   1769			return -ENOSPC;
   1770		}
   1771
   1772		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
   1773		mapped_len  += map_len;
   1774		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
   1775		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
   1776			((cpu_to_le32(tx_buffer->length) &
   1777			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
   1778	}
   1779
   1780	for (f = 0; f < nr_frags; f++) {
   1781		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
   1782		u16 i;
   1783		u16 seg_num;
   1784
   1785		buf_len = skb_frag_size(frag);
   1786
   1787		seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
   1788		for (i = 0; i < seg_num; i++) {
   1789			use_tpd = atl1e_get_tpd(adapter);
   1790			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
   1791
   1792			tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
   1793			BUG_ON(tx_buffer->skb);
   1794
   1795			tx_buffer->skb = NULL;
   1796			tx_buffer->length =
   1797				(buf_len > MAX_TX_BUF_LEN) ?
   1798				MAX_TX_BUF_LEN : buf_len;
   1799			buf_len -= tx_buffer->length;
   1800
   1801			tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
   1802							  frag,
   1803							  (i * MAX_TX_BUF_LEN),
   1804							  tx_buffer->length,
   1805							  DMA_TO_DEVICE);
   1806
   1807			if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
   1808				/* We need to unwind the mappings we've done */
   1809				ring_end = adapter->tx_ring.next_to_use;
   1810				adapter->tx_ring.next_to_use = ring_start;
   1811				while (adapter->tx_ring.next_to_use != ring_end) {
   1812					tpd = atl1e_get_tpd(adapter);
   1813					tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
   1814					dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
   1815						       tx_buffer->length, DMA_TO_DEVICE);
   1816				}
   1817
   1818				/* Reset the ring next to use pointer */
   1819				adapter->tx_ring.next_to_use = ring_start;
   1820				return -ENOSPC;
   1821			}
   1822
   1823			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
   1824			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
   1825			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
   1826					((cpu_to_le32(tx_buffer->length) &
   1827					TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
   1828		}
   1829	}
   1830
   1831	if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
   1832		/* note: this first tpd carries only the packet headers */
   1833		tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
   1834
   1835	/* mark the last tpd as end of packet */
   1836	use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
   1837	/* The last buffer info contains the skb address,
   1838	 * so it will be freed after unmap */
   1839	tx_buffer->skb = skb;
   1840	return 0;
   1841}
   1842
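       /*
        * Hand the filled descriptors over to the NIC by publishing the new
        * producer index; once the mailbox register is written, the device
        * owns every TPD up to (but not including) next_to_use.
        */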
   1843static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
   1844			   struct atl1e_tpd_desc *tpd)
   1845{
   1846	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
   1847	/* Force memory writes to complete before letting h/w
   1848	 * know there are new descriptors to fetch.  (Only
   1849	 * applicable for weak-ordered memory model archs,
   1850	 * such as IA-64). */
   1851	wmb();
   1852	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
   1853}
   1854
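       /*
        * Main transmit entry point.  If the ring cannot hold the whole
        * packet, the queue is stopped before returning NETDEV_TX_BUSY so
        * the stack keeps the skb and retries after the TX cleanup path
        * wakes the queue again.
        */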
   1855static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
   1856					  struct net_device *netdev)
   1857{
   1858	struct atl1e_adapter *adapter = netdev_priv(netdev);
   1859	u16 tpd_req = 1;
   1860	struct atl1e_tpd_desc *tpd;
   1861
   1862	if (test_bit(__AT_DOWN, &adapter->flags)) {
   1863		dev_kfree_skb_any(skb);
   1864		return NETDEV_TX_OK;
   1865	}
   1866
   1867	if (unlikely(skb->len <= 0)) {
   1868		dev_kfree_skb_any(skb);
   1869		return NETDEV_TX_OK;
   1870	}
   1871	tpd_req = atl1e_cal_tdp_req(skb);
   1872
   1873	if (atl1e_tpd_avail(adapter) < tpd_req) {
   1874		/* not enough descriptors, just stop the queue */
   1875		netif_stop_queue(netdev);
   1876		return NETDEV_TX_BUSY;
   1877	}
   1878
   1879	tpd = atl1e_get_tpd(adapter);
   1880
   1881	if (skb_vlan_tag_present(skb)) {
   1882		u16 vlan_tag = skb_vlan_tag_get(skb);
   1883		u16 atl1e_vlan_tag;
   1884
   1885		tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
   1886		AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
   1887		tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
   1888				TPD_VLAN_SHIFT;
   1889	}
   1890
   1891	if (skb->protocol == htons(ETH_P_8021Q))
   1892		tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
   1893
   1894	if (skb_network_offset(skb) != ETH_HLEN)
   1895		tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */
   1896
   1897	/* do TSO and check sum */
   1898	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
   1899		dev_kfree_skb_any(skb);
   1900		return NETDEV_TX_OK;
   1901	}
   1902
   1903	if (atl1e_tx_map(adapter, skb, tpd)) {
   1904		dev_kfree_skb_any(skb);
   1905		goto out;
   1906	}
   1907
   1908	atl1e_tx_queue(adapter, tpd_req, tpd);
   1909out:
   1910	return NETDEV_TX_OK;
   1911}
   1912
   1913static void atl1e_free_irq(struct atl1e_adapter *adapter)
   1914{
   1915	struct net_device *netdev = adapter->netdev;
   1916
   1917	free_irq(adapter->pdev->irq, netdev);
   1918}
   1919
   1920static int atl1e_request_irq(struct atl1e_adapter *adapter)
   1921{
   1922	struct pci_dev    *pdev   = adapter->pdev;
   1923	struct net_device *netdev = adapter->netdev;
   1924	int err = 0;
   1925
   1926	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
   1927			  netdev);
   1928	if (err) {
   1929		netdev_dbg(adapter->netdev,
   1930			   "Unable to allocate interrupt, error: %d\n", err);
   1931		return err;
   1932	}
   1933	netdev_dbg(netdev, "atl1e_request_irq OK\n");
   1934	return err;
   1935}
   1936
   1937int atl1e_up(struct atl1e_adapter *adapter)
   1938{
   1939	struct net_device *netdev = adapter->netdev;
   1940	int err = 0;
   1941	u32 val;
   1942
   1943	/* hardware has been reset, we need to reload some things */
   1944	err = atl1e_init_hw(&adapter->hw);
   1945	if (err) {
   1946		err = -EIO;
   1947		return err;
   1948	}
   1949	atl1e_init_ring_ptrs(adapter);
   1950	atl1e_set_multi(netdev);
   1951	atl1e_restore_vlan(adapter);
   1952
   1953	if (atl1e_configure(adapter)) {
   1954		err = -EIO;
   1955		goto err_up;
   1956	}
   1957
   1958	clear_bit(__AT_DOWN, &adapter->flags);
   1959	napi_enable(&adapter->napi);
   1960	atl1e_irq_enable(adapter);
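       	/* request a manual interrupt (MASTER_CTRL_MANUAL_INT) so the ISR
       	 * runs once right away and services anything already pending */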
   1961	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
   1962	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
   1963		      val | MASTER_CTRL_MANUAL_INT);
   1964
   1965err_up:
   1966	return err;
   1967}
   1968
   1969void atl1e_down(struct atl1e_adapter *adapter)
   1970{
   1971	struct net_device *netdev = adapter->netdev;
   1972
   1973	/* signal that we're down so the interrupt handler does not
   1974	 * reschedule our watchdog timer */
   1975	set_bit(__AT_DOWN, &adapter->flags);
   1976
   1977	netif_stop_queue(netdev);
   1978
   1979	/* reset MAC to disable all RX/TX */
   1980	atl1e_reset_hw(&adapter->hw);
   1981	msleep(1);
   1982
   1983	napi_disable(&adapter->napi);
   1984	atl1e_del_timer(adapter);
   1985	atl1e_irq_disable(adapter);
   1986
   1987	netif_carrier_off(netdev);
   1988	adapter->link_speed = SPEED_0;
   1989	adapter->link_duplex = -1;
   1990	atl1e_clean_tx_ring(adapter);
   1991	atl1e_clean_rx_ring(adapter);
   1992}
   1993
   1994/**
   1995 * atl1e_open - Called when a network interface is made active
   1996 * @netdev: network interface device structure
   1997 *
   1998 * Returns 0 on success, negative value on failure
   1999 *
   2000 * The open entry point is called when a network interface is made
   2001 * active by the system (IFF_UP).  At this point all resources needed
   2002 * for transmit and receive operations are allocated, the interrupt
   2003 * handler is registered with the OS, the watchdog timer is started,
   2004 * and the stack is notified that the interface is ready.
   2005 */
   2006static int atl1e_open(struct net_device *netdev)
   2007{
   2008	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2009	int err;
   2010
   2011	/* disallow open during test */
   2012	if (test_bit(__AT_TESTING, &adapter->flags))
   2013		return -EBUSY;
   2014
   2015	/* allocate rx/tx dma buffer & descriptors */
   2016	atl1e_init_ring_resources(adapter);
   2017	err = atl1e_setup_ring_resources(adapter);
   2018	if (unlikely(err))
   2019		return err;
   2020
   2021	err = atl1e_request_irq(adapter);
   2022	if (unlikely(err))
   2023		goto err_req_irq;
   2024
   2025	err = atl1e_up(adapter);
   2026	if (unlikely(err))
   2027		goto err_up;
   2028
   2029	return 0;
   2030
   2031err_up:
   2032	atl1e_free_irq(adapter);
   2033err_req_irq:
   2034	atl1e_free_ring_resources(adapter);
   2035	atl1e_reset_hw(&adapter->hw);
   2036
   2037	return err;
   2038}
   2039
   2040/**
   2041 * atl1e_close - Disables a network interface
   2042 * @netdev: network interface device structure
   2043 *
   2044 * Returns 0, this is not allowed to fail
   2045 *
   2046 * The close entry point is called when an interface is de-activated
   2047 * by the OS.  The hardware is still under the drivers control, but
   2048 * needs to be disabled.  A global MAC reset is issued to stop the
   2049 * hardware, and all transmit and receive resources are freed.
   2050 */
   2051static int atl1e_close(struct net_device *netdev)
   2052{
   2053	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2054
   2055	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
   2056	atl1e_down(adapter);
   2057	atl1e_free_irq(adapter);
   2058	atl1e_free_ring_resources(adapter);
   2059
   2060	return 0;
   2061}
   2062
   2063static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
   2064{
   2065	struct net_device *netdev = pci_get_drvdata(pdev);
   2066	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2067	struct atl1e_hw *hw = &adapter->hw;
   2068	u32 ctrl = 0;
   2069	u32 mac_ctrl_data = 0;
   2070	u32 wol_ctrl_data = 0;
   2071	u16 mii_advertise_data = 0;
   2072	u16 mii_bmsr_data = 0;
   2073	u16 mii_intr_status_data = 0;
   2074	u32 wufc = adapter->wol;
   2075	u32 i;
   2076#ifdef CONFIG_PM
   2077	int retval = 0;
   2078#endif
   2079
   2080	if (netif_running(netdev)) {
   2081		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
   2082		atl1e_down(adapter);
   2083	}
   2084	netif_device_detach(netdev);
   2085
   2086#ifdef CONFIG_PM
   2087	retval = pci_save_state(pdev);
   2088	if (retval)
   2089		return retval;
   2090#endif
   2091
   2092	if (wufc) {
   2093		/* get link status (read BMSR twice: the link bit is latched) */
   2094		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
   2095		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
   2096
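       		/* advertise only 10 Mb/s half duplex: the slowest mode is
       		 * enough to receive wake-up frames and keeps PHY power
       		 * consumption at its minimum while suspended */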
   2097		mii_advertise_data = ADVERTISE_10HALF;
   2098
   2099		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
   2100		    (atl1e_write_phy_reg(hw,
   2101			   MII_ADVERTISE, mii_advertise_data) != 0) ||
   2102		    (atl1e_phy_commit(hw)) != 0) {
   2103			netdev_dbg(adapter->netdev, "set phy register failed\n");
   2104			goto wol_dis;
   2105		}
   2106
   2107		hw->phy_configured = false; /* re-init PHY when resume */
   2108
   2109		/* turn on magic packet wol */
   2110		if (wufc & AT_WUFC_MAG)
   2111			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
   2112
   2113		if (wufc & AT_WUFC_LNKC) {
   2114			/* if the original link was up, just wait for it to come back */
   2115			if (mii_bmsr_data & BMSR_LSTATUS) {
   2116				for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
   2117					msleep(100);
   2118					atl1e_read_phy_reg(hw, MII_BMSR,
   2119							&mii_bmsr_data);
   2120					if (mii_bmsr_data & BMSR_LSTATUS)
   2121						break;
   2122				}
   2123
   2124				if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
   2125					netdev_dbg(adapter->netdev,
   2126						   "Link may change during suspend\n");
   2127			}
   2128			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
   2129			/* only link up can wake up */
   2130			if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
   2131				netdev_dbg(adapter->netdev,
   2132					   "write phy register failed\n");
   2133				goto wol_dis;
   2134			}
   2135		}
   2136		/* clear phy interrupt */
   2137		atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
   2138		/* Config MAC Ctrl register */
   2139		mac_ctrl_data = MAC_CTRL_RX_EN;
   2140		/* set to 10/100M half duplex */
   2141		mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
   2142		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
   2143				 MAC_CTRL_PRMLEN_MASK) <<
   2144				 MAC_CTRL_PRMLEN_SHIFT);
   2145
   2146		__atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
   2147
   2148		/* a magic packet may arrive in a broadcast, multicast or unicast frame */
   2149		if (wufc & AT_WUFC_MAG)
   2150			mac_ctrl_data |= MAC_CTRL_BC_EN;
   2151
   2152		netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
   2153			   mac_ctrl_data);
   2154
   2155		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
   2156		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
   2157		/* pcie patch */
   2158		ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
   2159		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
   2160		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
   2161		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
   2162		goto suspend_exit;
   2163	}
   2164wol_dis:
   2165
   2166	/* WOL disabled */
   2167	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
   2168
   2169	/* pcie patch */
   2170	ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
   2171	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
   2172	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
   2173
   2174	atl1e_force_ps(hw);
   2175	hw->phy_configured = false; /* re-init PHY when resume */
   2176
   2177	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
   2178
   2179suspend_exit:
   2180
   2181	if (netif_running(netdev))
   2182		atl1e_free_irq(adapter);
   2183
   2184	pci_disable_device(pdev);
   2185
   2186	pci_set_power_state(pdev, pci_choose_state(pdev, state));
   2187
   2188	return 0;
   2189}
   2190
   2191#ifdef CONFIG_PM
   2192static int atl1e_resume(struct pci_dev *pdev)
   2193{
   2194	struct net_device *netdev = pci_get_drvdata(pdev);
   2195	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2196	int err;
   2197
   2198	pci_set_power_state(pdev, PCI_D0);
   2199	pci_restore_state(pdev);
   2200
   2201	err = pci_enable_device(pdev);
   2202	if (err) {
   2203		netdev_err(adapter->netdev,
   2204			   "Cannot enable PCI device from suspend\n");
   2205		return err;
   2206	}
   2207
   2208	pci_set_master(pdev);
   2209
   2210	AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
   2211
   2212	pci_enable_wake(pdev, PCI_D3hot, 0);
   2213	pci_enable_wake(pdev, PCI_D3cold, 0);
   2214
   2215	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
   2216
   2217	if (netif_running(netdev)) {
   2218		err = atl1e_request_irq(adapter);
   2219		if (err)
   2220			return err;
   2221	}
   2222
   2223	atl1e_reset_hw(&adapter->hw);
   2224
   2225	if (netif_running(netdev))
   2226		atl1e_up(adapter);
   2227
   2228	netif_device_attach(netdev);
   2229
   2230	return 0;
   2231}
   2232#endif
   2233
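       /*
        * Shutdown reuses the suspend path so that Wake-on-LAN, if
        * configured, is armed before the machine powers off.
        */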
   2234static void atl1e_shutdown(struct pci_dev *pdev)
   2235{
   2236	atl1e_suspend(pdev, PMSG_SUSPEND);
   2237}
   2238
   2239static const struct net_device_ops atl1e_netdev_ops = {
   2240	.ndo_open		= atl1e_open,
   2241	.ndo_stop		= atl1e_close,
   2242	.ndo_start_xmit		= atl1e_xmit_frame,
   2243	.ndo_get_stats		= atl1e_get_stats,
   2244	.ndo_set_rx_mode	= atl1e_set_multi,
   2245	.ndo_validate_addr	= eth_validate_addr,
   2246	.ndo_set_mac_address	= atl1e_set_mac_addr,
   2247	.ndo_fix_features	= atl1e_fix_features,
   2248	.ndo_set_features	= atl1e_set_features,
   2249	.ndo_change_mtu		= atl1e_change_mtu,
   2250	.ndo_eth_ioctl		= atl1e_ioctl,
   2251	.ndo_tx_timeout		= atl1e_tx_timeout,
   2252#ifdef CONFIG_NET_POLL_CONTROLLER
   2253	.ndo_poll_controller	= atl1e_netpoll,
   2254#endif
   2255
   2256};
   2257
   2258static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
   2259{
   2260	SET_NETDEV_DEV(netdev, &pdev->dev);
   2261	pci_set_drvdata(pdev, netdev);
   2262
   2263	netdev->netdev_ops = &atl1e_netdev_ops;
   2264
   2265	netdev->watchdog_timeo = AT_TX_WATCHDOG;
   2266	/* MTU range: 42 - 8170 */
   2267	netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
   2268	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
   2269			  (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
   2270	atl1e_set_ethtool_ops(netdev);
   2271
   2272	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
   2273			      NETIF_F_HW_VLAN_CTAG_RX;
   2274	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
   2275	/* not enabled by default */
   2276	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
   2277	return 0;
   2278}
   2279
   2280/**
   2281 * atl1e_probe - Device Initialization Routine
   2282 * @pdev: PCI device information struct
   2283 * @ent: entry in atl1e_pci_tbl
   2284 *
   2285 * Returns 0 on success, negative on failure
   2286 *
   2287 * atl1e_probe initializes an adapter identified by a pci_dev structure.
   2288 * The OS initialization, configuring of the adapter private structure,
   2289 * and a hardware reset occur.
   2290 */
   2291static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
   2292{
   2293	struct net_device *netdev;
   2294	struct atl1e_adapter *adapter = NULL;
   2295	static int cards_found;
   2296
   2297	int err = 0;
   2298
   2299	err = pci_enable_device(pdev);
   2300	if (err)
   2301		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
   2302
   2303	/*
   2304	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
   2305	 * shared register for the high 32 bits, so only a single, aligned,
   2306	 * 4 GB physical address range can be used at a time.
   2307	 *
   2308	 * Supporting 64-bit DMA on this hardware is more trouble than it's
   2309	 * worth.  It is far easier to limit to 32-bit DMA than update
   2310	 * various kernel subsystems to support the mechanics required by a
   2311	 * fixed-high-32-bit system.
   2312	 */
   2313	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
   2314	if (err) {
   2315		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
   2316		goto err_dma;
   2317	}
   2318
   2319	err = pci_request_regions(pdev, atl1e_driver_name);
   2320	if (err) {
   2321		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
   2322		goto err_pci_reg;
   2323	}
   2324
   2325	pci_set_master(pdev);
   2326
   2327	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
   2328	if (netdev == NULL) {
   2329		err = -ENOMEM;
   2330		goto err_alloc_etherdev;
   2331	}
   2332
   2333	err = atl1e_init_netdev(netdev, pdev);
   2334	if (err) {
   2335		netdev_err(netdev, "init netdevice failed\n");
   2336		goto err_init_netdev;
   2337	}
   2338	adapter = netdev_priv(netdev);
   2339	adapter->bd_number = cards_found;
   2340	adapter->netdev = netdev;
   2341	adapter->pdev = pdev;
   2342	adapter->hw.adapter = adapter;
   2343	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
   2344	if (!adapter->hw.hw_addr) {
   2345		err = -EIO;
   2346		netdev_err(netdev, "cannot map device registers\n");
   2347		goto err_ioremap;
   2348	}
   2349
   2350	/* init mii data */
   2351	adapter->mii.dev = netdev;
   2352	adapter->mii.mdio_read  = atl1e_mdio_read;
   2353	adapter->mii.mdio_write = atl1e_mdio_write;
   2354	adapter->mii.phy_id_mask = 0x1f;
   2355	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
   2356
   2357	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
   2358
   2359	timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
   2360
   2361	/* get user settings */
   2362	atl1e_check_options(adapter);
   2363	/*
   2364	 * Make sure the PCI command register has memory space
   2365	 * access, bus mastering and INTx enabled, and clear any
   2366	 * stale power-management (PME) state that the BIOS may
   2367	 * have left behind.
   2368	 */
   2369	atl1e_setup_pcicmd(pdev);
   2370	/* setup the private structure */
   2371	err = atl1e_sw_init(adapter);
   2372	if (err) {
   2373		netdev_err(netdev, "net device private data init failed\n");
   2374		goto err_sw_init;
   2375	}
   2376
   2377	/* Init GPHY as early as possible due to power saving issue  */
   2378	atl1e_phy_init(&adapter->hw);
   2379	/* reset the controller to
   2380	 * put the device in a known good starting state */
   2381	err = atl1e_reset_hw(&adapter->hw);
   2382	if (err) {
   2383		err = -EIO;
   2384		goto err_reset;
   2385	}
   2386
   2387	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
   2388		err = -EIO;
   2389		netdev_err(netdev, "get mac address failed\n");
   2390		goto err_eeprom;
   2391	}
   2392
   2393	eth_hw_addr_set(netdev, adapter->hw.mac_addr);
   2394	netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
   2395
   2396	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
   2397	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
   2398	netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE);
   2399	err = register_netdev(netdev);
   2400	if (err) {
   2401		netdev_err(netdev, "register netdevice failed\n");
   2402		goto err_register;
   2403	}
   2404
   2405	/* assume we have no link for now */
   2406	netif_stop_queue(netdev);
   2407	netif_carrier_off(netdev);
   2408
   2409	cards_found++;
   2410
   2411	return 0;
   2412
   2413err_reset:
   2414err_register:
   2415err_sw_init:
   2416err_eeprom:
   2417	pci_iounmap(pdev, adapter->hw.hw_addr);
   2418err_init_netdev:
   2419err_ioremap:
   2420	free_netdev(netdev);
   2421err_alloc_etherdev:
   2422	pci_release_regions(pdev);
   2423err_pci_reg:
   2424err_dma:
   2425	pci_disable_device(pdev);
   2426	return err;
   2427}
   2428
   2429/**
   2430 * atl1e_remove - Device Removal Routine
   2431 * @pdev: PCI device information struct
   2432 *
   2433 * atl1e_remove is called by the PCI subsystem to alert the driver
   2434 * that it should release a PCI device.  This could be caused by a
   2435 * Hot-Plug event, or because the driver is going to be removed from
   2436 * memory.
   2437 */
   2438static void atl1e_remove(struct pci_dev *pdev)
   2439{
   2440	struct net_device *netdev = pci_get_drvdata(pdev);
   2441	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2442
   2443	/*
   2444	 * flush_scheduled_work() may reschedule our watchdog task, so
   2445	 * explicitly disable watchdog tasks from being rescheduled
   2446	 */
   2447	set_bit(__AT_DOWN, &adapter->flags);
   2448
   2449	atl1e_del_timer(adapter);
   2450	atl1e_cancel_work(adapter);
   2451
   2452	unregister_netdev(netdev);
   2453	atl1e_free_ring_resources(adapter);
   2454	atl1e_force_ps(&adapter->hw);
   2455	pci_iounmap(pdev, adapter->hw.hw_addr);
   2456	pci_release_regions(pdev);
   2457	free_netdev(netdev);
   2458	pci_disable_device(pdev);
   2459}
   2460
   2461/**
   2462 * atl1e_io_error_detected - called when PCI error is detected
   2463 * @pdev: Pointer to PCI device
   2464 * @state: The current pci connection state
   2465 *
   2466 * This function is called after a PCI bus error affecting
   2467 * this device has been detected.
   2468 */
   2469static pci_ers_result_t
   2470atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
   2471{
   2472	struct net_device *netdev = pci_get_drvdata(pdev);
   2473	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2474
   2475	netif_device_detach(netdev);
   2476
   2477	if (state == pci_channel_io_perm_failure)
   2478		return PCI_ERS_RESULT_DISCONNECT;
   2479
   2480	if (netif_running(netdev))
   2481		atl1e_down(adapter);
   2482
   2483	pci_disable_device(pdev);
   2484
   2485	/* Request a slot reset. */
   2486	return PCI_ERS_RESULT_NEED_RESET;
   2487}
   2488
   2489/**
   2490 * atl1e_io_slot_reset - called after the pci bus has been reset.
   2491 * @pdev: Pointer to PCI device
   2492 *
   2493 * Restart the card from scratch, as if from a cold-boot. Implementation
   2494 * resembles the first-half of the e1000_resume routine.
   2495 */
   2496static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
   2497{
   2498	struct net_device *netdev = pci_get_drvdata(pdev);
   2499	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2500
   2501	if (pci_enable_device(pdev)) {
   2502		netdev_err(adapter->netdev,
   2503			   "Cannot re-enable PCI device after reset\n");
   2504		return PCI_ERS_RESULT_DISCONNECT;
   2505	}
   2506	pci_set_master(pdev);
   2507
   2508	pci_enable_wake(pdev, PCI_D3hot, 0);
   2509	pci_enable_wake(pdev, PCI_D3cold, 0);
   2510
   2511	atl1e_reset_hw(&adapter->hw);
   2512
   2513	return PCI_ERS_RESULT_RECOVERED;
   2514}
   2515
   2516/**
   2517 * atl1e_io_resume - called when traffic can start flowing again.
   2518 * @pdev: Pointer to PCI device
   2519 *
   2520 * This callback is called when the error recovery driver tells us that
   2521 * it's OK to resume normal operation. Implementation resembles the
   2522 * second-half of the atl1e_resume routine.
   2523 */
   2524static void atl1e_io_resume(struct pci_dev *pdev)
   2525{
   2526	struct net_device *netdev = pci_get_drvdata(pdev);
   2527	struct atl1e_adapter *adapter = netdev_priv(netdev);
   2528
   2529	if (netif_running(netdev)) {
   2530		if (atl1e_up(adapter)) {
   2531			netdev_err(adapter->netdev,
   2532				   "can't bring device back up after reset\n");
   2533			return;
   2534		}
   2535	}
   2536
   2537	netif_device_attach(netdev);
   2538}
   2539
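       /*
        * PCI error recovery (AER) sequence: the PCI core first calls
        * .error_detected, then .slot_reset after the link has been reset,
        * and finally .resume once traffic may flow again; the callbacks
        * above implement these three steps.
        */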
   2540static const struct pci_error_handlers atl1e_err_handler = {
   2541	.error_detected = atl1e_io_error_detected,
   2542	.slot_reset = atl1e_io_slot_reset,
   2543	.resume = atl1e_io_resume,
   2544};
   2545
   2546static struct pci_driver atl1e_driver = {
   2547	.name     = atl1e_driver_name,
   2548	.id_table = atl1e_pci_tbl,
   2549	.probe    = atl1e_probe,
   2550	.remove   = atl1e_remove,
   2551	/* Power Management Hooks */
   2552#ifdef CONFIG_PM
   2553	.suspend  = atl1e_suspend,
   2554	.resume   = atl1e_resume,
   2555#endif
   2556	.shutdown = atl1e_shutdown,
   2557	.err_handler = &atl1e_err_handler
   2558};
   2559
   2560module_pci_driver(atl1e_driver);