cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixgb_main.c (61400B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2008 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
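
/* Received frames no larger than copybreak are copied into a freshly
 * allocated skb so the original DMA buffer can be recycled immediately,
 * trading a memcpy on small packets for better buffer utilization (the
 * check itself sits in the receive clean-up path of this file). */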

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = ixgb_remove,
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	pr_info("%s\n", ixgb_driver_string);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
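	/* IMS is a write-1-to-set interrupt mask: set bits unmask the
	 * corresponding causes, while its counterpart IMC (used in
	 * ixgb_irq_disable above) is write-1-to-clear, per the usual
	 * Intel NIC register convention */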
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = true;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

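	/* MSI vectors are never shared, so IRQF_SHARED is dropped once
	 * MSI is enabled; otherwise fall back to a shared legacy IRQ */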
	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

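	/* MFS holds the maximum frame size in its upper 16 bits (hence
	 * the shift); anything beyond the standard Ethernet maximum also
	 * needs the jumbo frame enable (JFE) bit set in CTRL0 */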
	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
	/*
	 * Tx VLAN insertion does not work per HW design when Rx stripping is
	 * disabled.
	 */
	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev)) {
		ixgb_down(adapter, true);
		ixgb_up(adapter);
		ixgb_set_speed_duplex(netdev);
	} else {
		ixgb_reset(adapter);
	}

	return 0;
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_set_rx_mode	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
	.ndo_fix_features	= ixgb_fix_features,
	.ndo_set_features	= ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	u8 addr[ETH_ALEN];
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		pr_err("No usable DMA configuration, aborting\n");
		goto err_dma_mask;
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

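	/* BAR 0 (mapped above) is the memory-mapped register space; scan
	 * the remaining BARs for an optional I/O port mapping */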
	for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->hw_features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->vlan_features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 16114 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
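	/* assuming IXGB_MAX_JUMBO_FRAME_SIZE is 0x3F00 (16128), as defined
	 * in the ixgb headers, this works out to 16128 - 14 = 16114,
	 * matching the range noted above */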

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, addr);
	eth_hw_addr_set(netdev, addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) {
		hw->mac_type = ixgb_82597;
	} else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

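	/* pick the smallest hardware buffer size that can hold
	 * rx_buffer_len (the max frame size plus the 8-byte errata pad
	 * set up in ixgb_sw_init) */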
	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to a hardware errata with RXDCTL, we are unable to use any of
	 * its performance enhancing features without causing other subtle
	 * bugs, including receive length corruption at high data rates
	 * (WTHRESH > 0) and/or receive descriptor ring irregularities
	 * (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* no need to clear these here; they are always re-initialized
	 * on the transmit path:
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 *mta = kmalloc_array(ETH_ALEN,
					IXGB_MAX_NUM_MULTICAST_ADDRESSES,
					GFP_ATOMIC);
		u8 *addr;
		if (!mta)
			goto alloc_failed;

		IXGB_WRITE_REG(hw, RCTL, rctl);

		addr = mta;
		netdev_for_each_mc_addr(ha, netdev) {
			memcpy(addr, ha->addr, ETH_ALEN);
			addr += ETH_ALEN;
		}

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
		kfree(mta);
	}

alloc_failed:
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/

static void
ixgb_watchdog(struct timer_list *t)
{
	struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				    "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
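		/* seed the TCP checksum with the pseudo-header sum computed
		 * over a zero payload length; the hardware fills in the real
		 * per-segment length when it performs the segmentation */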
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;
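		/* offsets just computed: *css = where checksumming starts,
		 * *cso = where the checksum is inserted, *cse = last byte to
		 * sum (0 means "through the end of the packet") */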

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count)
			i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count)
			i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
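		/* the 4 bytes trimmed off here are picked up by the next
		 * loop iteration as a separate tiny descriptor, which acts
		 * as the sentinel */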

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				skb_frag_dma_map(&pdev->dev, frag, offset, size,
						 DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count)
			i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
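/* Worked example (assuming 4 KiB pages, where MAX_SKB_FRAGS is 17):
 * TXD_USE_COUNT(S) is DIV_ROUND_UP(S, 16384), so the linear area needs
 * one descriptor, each page-sized frag needs TXD_USE_COUNT(PAGE_SIZE) = 1,
 * and DESC_NEEDED = 1 + 17 * 1 + 1 (context) + 1 (sentinel) = 20. */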

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int count = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = skb_vlan_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb, first);

	if (count) {
		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
		/* Make sure there is space in the ring for the next send. */
		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		adapter->tx_ring.buffer_info[first].time_stamp = 0;
		adapter->tx_ring.next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue hanging (unused)
 **/

static void
ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
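		/* the hardware keeps 64-bit counters as 32-bit low/high
		 * register pairs; broadcasts are counted as multicasts as
		 * well, so they are subtracted back out below */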
   1594		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
   1595		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
   1596		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
   1597		u64 bcast = ((u64)bcast_h << 32) | bcast_l;
   1598
   1599		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
   1600		/* fix up multicast stats by removing broadcasts */
   1601		if (multi >= bcast)
   1602			multi -= bcast;
   1603
   1604		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
   1605		adapter->stats.mprch += (multi >> 32);
   1606		adapter->stats.bprcl += bcast_l;
   1607		adapter->stats.bprch += bcast_h;
   1608	} else {
   1609		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
   1610		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
   1611		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
   1612		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
   1613	}
   1614	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
   1615	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
   1616	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
   1617	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
   1618	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
   1619	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
   1620	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
   1621	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
   1622	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
   1623	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
   1624	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
   1625	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
   1626	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
   1627	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
   1628	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
   1629	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
   1630	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
   1631	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
   1632	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
   1633	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
   1634	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
   1635	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
   1636	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
   1637	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
   1638	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
   1639	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
   1640	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
   1641	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
   1642	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
   1643	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
   1644	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
   1645	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
   1646	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
   1647	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
   1648	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
   1649	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
   1650	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
   1651	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
   1652	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
   1653	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
   1654	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
   1655	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
   1656	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
   1657	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
   1658	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
   1659	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
   1660	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
   1661	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
   1662	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
   1663	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
   1664	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
   1665	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
   1666	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
   1667	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
   1668	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
   1669	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
   1670
   1671	/* Fill out the OS statistics structure */
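	/* note: the packet/octet counters are 64-bit low/high register
	 * pairs; only the low 32-bit halves (the ...l registers) are
	 * reported to the stack below */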
   1672
   1673	netdev->stats.rx_packets = adapter->stats.gprcl;
   1674	netdev->stats.tx_packets = adapter->stats.gptcl;
   1675	netdev->stats.rx_bytes = adapter->stats.gorcl;
   1676	netdev->stats.tx_bytes = adapter->stats.gotcl;
   1677	netdev->stats.multicast = adapter->stats.mprcl;
   1678	netdev->stats.collisions = 0;
   1679
	/* ignore RLEC, as it also reports errors for padded (< 64 byte)
	 * frames that carry a length in the type/len field */
   1682	netdev->stats.rx_errors =
   1683	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
   1684	    adapter->stats.ruc +
   1685	    adapter->stats.roc /*+ adapter->stats.rlec */  +
   1686	    adapter->stats.icbc +
   1687	    adapter->stats.ecbc + adapter->stats.mpc;
   1688
   1689	/* see above
   1690	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
   1691	 */
   1692
   1693	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
   1694	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
   1695	netdev->stats.rx_missed_errors = adapter->stats.mpc;
   1696	netdev->stats.rx_over_errors = adapter->stats.mpc;
   1697
   1698	netdev->stats.tx_errors = 0;
   1699	netdev->stats.rx_frame_errors = 0;
   1700	netdev->stats.tx_aborted_errors = 0;
   1701	netdev->stats.tx_carrier_errors = 0;
   1702	netdev->stats.tx_fifo_errors = 0;
   1703	netdev->stats.tx_heartbeat_errors = 0;
   1704	netdev->stats.tx_window_errors = 0;
   1705}
   1706
   1707#define IXGB_MAX_INTR 10
   1708/**
   1709 * ixgb_intr - Interrupt Handler
   1710 * @irq: interrupt number
   1711 * @data: pointer to a network interface device structure
   1712 **/
   1713
   1714static irqreturn_t
   1715ixgb_intr(int irq, void *data)
   1716{
   1717	struct net_device *netdev = data;
   1718	struct ixgb_adapter *adapter = netdev_priv(netdev);
   1719	struct ixgb_hw *hw = &adapter->hw;
   1720	u32 icr = IXGB_READ_REG(hw, ICR);
   1721
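	/* ICR is read-to-clear: the read above both fetches and acks the
	 * pending causes, so a zero value means another device sharing
	 * the interrupt line raised it */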
   1722	if (unlikely(!icr))
   1723		return IRQ_NONE;  /* Not our interrupt */
   1724
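	/* on a link-state change or RX sequence error, kick the watchdog
	 * immediately so the link status is re-evaluated */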
   1725	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
   1726		if (!test_bit(__IXGB_DOWN, &adapter->flags))
   1727			mod_timer(&adapter->watchdog_timer, jiffies);
   1728
   1729	if (napi_schedule_prep(&adapter->napi)) {
   1730
		/* Disable interrupts and register for poll. The flush
		 * of the posted write is intentionally left out.
		 */
   1734
   1735		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
   1736		__napi_schedule(&adapter->napi);
   1737	}
   1738	return IRQ_HANDLED;
   1739}
   1740
   1741/**
   1742 * ixgb_clean - NAPI Rx polling callback
   1743 * @napi: napi struct pointer
   1744 * @budget: max number of receives to clean
   1745 **/
   1746
   1747static int
   1748ixgb_clean(struct napi_struct *napi, int budget)
   1749{
   1750	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
   1751	int work_done = 0;
   1752
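	/* Tx completions are not budgeted; only Rx packets advance
	 * work_done toward the NAPI budget */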
   1753	ixgb_clean_tx_irq(adapter);
   1754	ixgb_clean_rx_irq(adapter, &work_done, budget);
   1755
   1756	/* If budget not fully consumed, exit the polling mode */
   1757	if (work_done < budget) {
   1758		napi_complete_done(napi, work_done);
   1759		if (!test_bit(__IXGB_DOWN, &adapter->flags))
   1760			ixgb_irq_enable(adapter);
   1761	}
   1762
   1763	return work_done;
   1764}
   1765
   1766/**
   1767 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
   1768 * @adapter: board private structure
   1769 **/
   1770
   1771static bool
   1772ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
   1773{
   1774	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
   1775	struct net_device *netdev = adapter->netdev;
   1776	struct ixgb_tx_desc *tx_desc, *eop_desc;
   1777	struct ixgb_buffer *buffer_info;
   1778	unsigned int i, eop;
   1779	bool cleaned = false;
   1780
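	/* next_to_watch records the index of a packet's last (eop)
	 * descriptor; once hardware sets its DD bit, every buffer up to
	 * and including eop can be reclaimed */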
   1781	i = tx_ring->next_to_clean;
   1782	eop = tx_ring->buffer_info[i].next_to_watch;
   1783	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
   1784
   1785	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
   1786
   1787		rmb(); /* read buffer_info after eop_desc */
   1788		for (cleaned = false; !cleaned; ) {
   1789			tx_desc = IXGB_TX_DESC(*tx_ring, i);
   1790			buffer_info = &tx_ring->buffer_info[i];
   1791
   1792			if (tx_desc->popts &
   1793			   (IXGB_TX_DESC_POPTS_TXSM |
   1794			    IXGB_TX_DESC_POPTS_IXSM))
   1795				adapter->hw_csum_tx_good++;
   1796
   1797			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
   1798
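			/* clear status, popts and vlan with one
			 * 32-bit store */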
   1799			*(u32 *)&(tx_desc->status) = 0;
   1800
   1801			cleaned = (i == eop);
			if (++i == tx_ring->count)
				i = 0;
   1803		}
   1804
   1805		eop = tx_ring->buffer_info[i].next_to_watch;
   1806		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
   1807	}
   1808
   1809	tx_ring->next_to_clean = i;
   1810
   1811	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
   1812		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
   1813		/* Make sure that anybody stopping the queue after this
   1814		 * sees the new next_to_clean. */
   1815		smp_mb();
   1816
   1817		if (netif_queue_stopped(netdev) &&
   1818		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
   1819			netif_wake_queue(netdev);
   1820			++adapter->restart_queue;
   1821		}
   1822	}
   1823
   1824	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i */
   1827		adapter->detect_tx_hung = false;
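		/* declare a hang if the eop descriptor has been pending
		 * for more than a second (HZ) while the Tx unit is not
		 * paused by flow control (STATUS.TXOFF) */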
   1828		if (tx_ring->buffer_info[eop].time_stamp &&
   1829		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
   1830		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
   1831		        IXGB_STATUS_TXOFF)) {
   1832			/* detected Tx unit hang */
   1833			netif_err(adapter, drv, adapter->netdev,
   1834				  "Detected Tx Unit Hang\n"
   1835				  "  TDH                  <%x>\n"
   1836				  "  TDT                  <%x>\n"
   1837				  "  next_to_use          <%x>\n"
   1838				  "  next_to_clean        <%x>\n"
   1839				  "buffer_info[next_to_clean]\n"
   1840				  "  time_stamp           <%lx>\n"
   1841				  "  next_to_watch        <%x>\n"
   1842				  "  jiffies              <%lx>\n"
   1843				  "  next_to_watch.status <%x>\n",
   1844				  IXGB_READ_REG(&adapter->hw, TDH),
   1845				  IXGB_READ_REG(&adapter->hw, TDT),
   1846				  tx_ring->next_to_use,
   1847				  tx_ring->next_to_clean,
   1848				  tx_ring->buffer_info[eop].time_stamp,
   1849				  eop,
   1850				  jiffies,
   1851				  eop_desc->status);
   1852			netif_stop_queue(netdev);
   1853		}
   1854	}
   1855
   1856	return cleaned;
   1857}
   1858
   1859/**
   1860 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
   1861 * @adapter: board private structure
   1862 * @rx_desc: receive descriptor
   1863 * @skb: socket buffer with received data
   1864 **/
   1865
   1866static void
   1867ixgb_rx_checksum(struct ixgb_adapter *adapter,
   1868                 struct ixgb_rx_desc *rx_desc,
   1869                 struct sk_buff *skb)
   1870{
	/* If the Ignore Checksum bit is set or the TCP checksum has
	 * not been calculated, leave verification to the stack.
	 */
   1874	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
   1875	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
   1876		skb_checksum_none_assert(skb);
   1877		return;
   1878	}
   1879
   1880	/* At this point we know the hardware did the TCP checksum */
   1881	/* now look at the TCP checksum error bit */
   1882	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
   1883		/* let the stack verify checksum errors */
   1884		skb_checksum_none_assert(skb);
   1885		adapter->hw_csum_rx_error++;
   1886	} else {
   1887		/* TCP checksum is good */
   1888		skb->ip_summed = CHECKSUM_UNNECESSARY;
   1889		adapter->hw_csum_rx_good++;
   1890	}
   1891}
   1892
   1893/*
   1894 * this should improve performance for small packets with large amounts
   1895 * of reassembly being done in the stack
   1896 */
   1897static void ixgb_check_copybreak(struct napi_struct *napi,
   1898				 struct ixgb_buffer *buffer_info,
   1899				 u32 length, struct sk_buff **skb)
   1900{
   1901	struct sk_buff *new_skb;
   1902
   1903	if (length > copybreak)
   1904		return;
   1905
   1906	new_skb = napi_alloc_skb(napi, length);
   1907	if (!new_skb)
   1908		return;
   1909
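	/* copy starting NET_IP_ALIGN bytes before the data so the new
	 * skb keeps the same IP header alignment as the original */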
   1910	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
   1911				       (*skb)->data - NET_IP_ALIGN,
   1912				       length + NET_IP_ALIGN);
   1913	/* save the skb in buffer_info as good */
   1914	buffer_info->skb = *skb;
   1915	*skb = new_skb;
   1916}
   1917
   1918/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output pointer to the number of packets cleaned
   1922 * @work_to_do: how much work we can complete
   1923 **/
   1924
   1925static bool
   1926ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
   1927{
   1928	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
   1929	struct net_device *netdev = adapter->netdev;
   1930	struct pci_dev *pdev = adapter->pdev;
   1931	struct ixgb_rx_desc *rx_desc, *next_rxd;
   1932	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
   1933	u32 length;
   1934	unsigned int i, j;
   1935	int cleaned_count = 0;
   1936	bool cleaned = false;
   1937
   1938	i = rx_ring->next_to_clean;
   1939	rx_desc = IXGB_RX_DESC(*rx_ring, i);
   1940	buffer_info = &rx_ring->buffer_info[i];
   1941
   1942	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
   1943		struct sk_buff *skb;
   1944		u8 status;
   1945
   1946		if (*work_done >= work_to_do)
   1947			break;
   1948
   1949		(*work_done)++;
   1950		rmb();	/* read descriptor and rx_buffer_info after status DD */
   1951		status = rx_desc->status;
   1952		skb = buffer_info->skb;
   1953		buffer_info->skb = NULL;
   1954
   1955		prefetch(skb->data - NET_IP_ALIGN);
   1956
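		/* prefetch the next descriptor and the buffer_info entry
		 * beyond it to hide memory latency on later iterations */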
   1957		if (++i == rx_ring->count)
   1958			i = 0;
   1959		next_rxd = IXGB_RX_DESC(*rx_ring, i);
   1960		prefetch(next_rxd);
   1961
   1962		j = i + 1;
   1963		if (j == rx_ring->count)
   1964			j = 0;
   1965		next2_buffer = &rx_ring->buffer_info[j];
   1966		prefetch(next2_buffer);
   1967
   1968		next_buffer = &rx_ring->buffer_info[i];
   1969
   1970		cleaned = true;
   1971		cleaned_count++;
   1972
   1973		dma_unmap_single(&pdev->dev,
   1974				 buffer_info->dma,
   1975				 buffer_info->length,
   1976				 DMA_FROM_DEVICE);
   1977		buffer_info->dma = 0;
   1978
   1979		length = le16_to_cpu(rx_desc->length);
   1980		rx_desc->length = 0;
   1981
   1982		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
   1983
   1984			/* All receives must fit into a single buffer */
   1985
   1986			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
   1987				 length);
   1988
   1989			dev_kfree_skb_irq(skb);
   1990			goto rxdesc_done;
   1991		}
   1992
   1993		if (unlikely(rx_desc->errors &
   1994		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
   1995		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
   1996			dev_kfree_skb_irq(skb);
   1997			goto rxdesc_done;
   1998		}
   1999
   2000		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
   2001
   2002		/* Good Receive */
   2003		skb_put(skb, length);
   2004
   2005		/* Receive Checksum Offload */
   2006		ixgb_rx_checksum(adapter, rx_desc, skb);
   2007
   2008		skb->protocol = eth_type_trans(skb, netdev);
   2009		if (status & IXGB_RX_DESC_STATUS_VP)
   2010			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
   2011				       le16_to_cpu(rx_desc->special));
   2012
   2013		netif_receive_skb(skb);
   2014
   2015rxdesc_done:
		/* clean up the descriptor; it might be written over by hw */
   2017		rx_desc->status = 0;
   2018
		/* return some buffers to hardware; one at a time is too slow */
   2020		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
   2021			ixgb_alloc_rx_buffers(adapter, cleaned_count);
   2022			cleaned_count = 0;
   2023		}
   2024
   2025		/* use prefetched values */
   2026		rx_desc = next_rxd;
   2027		buffer_info = next_buffer;
   2028	}
   2029
   2030	rx_ring->next_to_clean = i;
   2031
   2032	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
   2033	if (cleaned_count)
   2034		ixgb_alloc_rx_buffers(adapter, cleaned_count);
   2035
   2036	return cleaned;
   2037}
   2038
   2039/**
   2040 * ixgb_alloc_rx_buffers - Replace used receive buffers
   2041 * @adapter: address of board private structure
   2042 * @cleaned_count: how many buffers to allocate
   2043 **/
   2044
   2045static void
   2046ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
   2047{
   2048	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
   2049	struct net_device *netdev = adapter->netdev;
   2050	struct pci_dev *pdev = adapter->pdev;
   2051	struct ixgb_rx_desc *rx_desc;
   2052	struct ixgb_buffer *buffer_info;
   2053	struct sk_buff *skb;
   2054	unsigned int i;
   2055	long cleancount;
   2056
   2057	i = rx_ring->next_to_use;
   2058	buffer_info = &rx_ring->buffer_info[i];
   2059	cleancount = IXGB_DESC_UNUSED(rx_ring);
   2060
   2062	/* leave three descriptors unused */
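	/* (a head/tail gap avoids the ring full vs. empty ambiguity;
	 *  three appears to be chosen as an extra safety margin) */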
   2063	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! it's good for you */
   2065		skb = buffer_info->skb;
   2066		if (skb) {
   2067			skb_trim(skb, 0);
   2068			goto map_skb;
   2069		}
   2070
   2071		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
   2072		if (unlikely(!skb)) {
   2073			/* Better luck next round */
   2074			adapter->alloc_rx_buff_failed++;
   2075			break;
   2076		}
   2077
   2078		buffer_info->skb = skb;
   2079		buffer_info->length = adapter->rx_buffer_len;
   2080map_skb:
   2081		buffer_info->dma = dma_map_single(&pdev->dev,
   2082		                                  skb->data,
   2083		                                  adapter->rx_buffer_len,
   2084						  DMA_FROM_DEVICE);
   2085		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
   2086			adapter->alloc_rx_buff_failed++;
   2087			break;
   2088		}
   2089
   2090		rx_desc = IXGB_RX_DESC(*rx_ring, i);
   2091		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee the DD bit is not set before h/w gets the
		 * descriptor; this is the rest of the workaround for the
		 * h/w double writeback. */
   2095		rx_desc->status = 0;
   2096
   2098		if (++i == rx_ring->count)
   2099			i = 0;
   2100		buffer_info = &rx_ring->buffer_info[i];
   2101	}
   2102
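	/* the tail register is written as the last filled index, one
	 * before next_to_use, hence the decrement below */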
   2103	if (likely(rx_ring->next_to_use != i)) {
   2104		rx_ring->next_to_use = i;
   2105		if (unlikely(i-- == 0))
   2106			i = (rx_ring->count - 1);
   2107
   2108		/* Force memory writes to complete before letting h/w
   2109		 * know there are new descriptors to fetch.  (Only
   2110		 * applicable for weak-ordered memory model archs, such
   2111		 * as IA-64). */
   2112		wmb();
   2113		IXGB_WRITE_REG(&adapter->hw, RDT, i);
   2114	}
   2115}
   2116
   2117static void
   2118ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
   2119{
   2120	u32 ctrl;
   2121
   2122	/* enable VLAN tag insert/strip */
   2123	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
   2124	ctrl |= IXGB_CTRL0_VME;
   2125	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
   2126}
   2127
   2128static void
   2129ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
   2130{
   2131	u32 ctrl;
   2132
   2133	/* disable VLAN tag insert/strip */
   2134	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
   2135	ctrl &= ~IXGB_CTRL0_VME;
   2136	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
   2137}
   2138
   2139static int
   2140ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
   2141{
   2142	struct ixgb_adapter *adapter = netdev_priv(netdev);
   2143	u32 vfta, index;
   2144
   2145	/* add VID to filter table */
   2146
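	/* the 12-bit VID indexes a 4096-bit table: the upper 7 bits
	 * select one of 128 32-bit VFTA registers and the lower 5 bits
	 * select the bit within it */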
   2147	index = (vid >> 5) & 0x7F;
   2148	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
   2149	vfta |= (1 << (vid & 0x1F));
   2150	ixgb_write_vfta(&adapter->hw, index, vfta);
   2151	set_bit(vid, adapter->active_vlans);
   2152
   2153	return 0;
   2154}
   2155
   2156static int
   2157ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
   2158{
   2159	struct ixgb_adapter *adapter = netdev_priv(netdev);
   2160	u32 vfta, index;
   2161
   2162	/* remove VID from filter table */
   2163
   2164	index = (vid >> 5) & 0x7F;
   2165	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
   2166	vfta &= ~(1 << (vid & 0x1F));
   2167	ixgb_write_vfta(&adapter->hw, index, vfta);
   2168	clear_bit(vid, adapter->active_vlans);
   2169
   2170	return 0;
   2171}
   2172
   2173static void
   2174ixgb_restore_vlan(struct ixgb_adapter *adapter)
   2175{
   2176	u16 vid;
   2177
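	/* re-add every VLAN tracked in active_vlans, e.g. after a reset
	 * has cleared the hardware filter table */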
   2178	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
   2179		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
   2180}
   2181
   2182/**
   2183 * ixgb_io_error_detected - called when PCI error is detected
   2184 * @pdev:    pointer to pci device with error
   2185 * @state:   pci channel state after error
   2186 *
   2187 * This callback is called by the PCI subsystem whenever
   2188 * a PCI bus error is detected.
   2189 */
   2190static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
   2191                                               pci_channel_state_t state)
   2192{
   2193	struct net_device *netdev = pci_get_drvdata(pdev);
   2194	struct ixgb_adapter *adapter = netdev_priv(netdev);
   2195
   2196	netif_device_detach(netdev);
   2197
   2198	if (state == pci_channel_io_perm_failure)
   2199		return PCI_ERS_RESULT_DISCONNECT;
   2200
   2201	if (netif_running(netdev))
   2202		ixgb_down(adapter, true);
   2203
   2204	pci_disable_device(pdev);
   2205
   2206	/* Request a slot reset. */
   2207	return PCI_ERS_RESULT_NEED_RESET;
   2208}
   2209
   2210/**
   2211 * ixgb_io_slot_reset - called after the pci bus has been reset.
   2212 * @pdev: pointer to pci device with error
   2213 *
   2214 * This callback is called after the PCI bus has been reset.
   2215 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the ixgb_probe() routine.
   2218 */
   2219static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
   2220{
   2221	struct net_device *netdev = pci_get_drvdata(pdev);
   2222	struct ixgb_adapter *adapter = netdev_priv(netdev);
   2223	u8 addr[ETH_ALEN];
   2224
   2225	if (pci_enable_device(pdev)) {
   2226		netif_err(adapter, probe, adapter->netdev,
   2227			  "Cannot re-enable PCI device after reset\n");
   2228		return PCI_ERS_RESULT_DISCONNECT;
   2229	}
   2230
   2231	/* Perform card reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) != 0)
   2233		return PCI_ERS_RESULT_RECOVERED;
   2234
   2235	pci_set_master(pdev);
   2236
   2237	netif_carrier_off(netdev);
   2238	netif_stop_queue(netdev);
   2239	ixgb_reset(adapter);
   2240
   2241	/* Make sure the EEPROM is good */
   2242	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
   2243		netif_err(adapter, probe, adapter->netdev,
   2244			  "After reset, the EEPROM checksum is not valid\n");
   2245		return PCI_ERS_RESULT_DISCONNECT;
   2246	}
   2247	ixgb_get_ee_mac_addr(&adapter->hw, addr);
   2248	eth_hw_addr_set(netdev, addr);
   2249	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
   2250
   2251	if (!is_valid_ether_addr(netdev->perm_addr)) {
   2252		netif_err(adapter, probe, adapter->netdev,
   2253			  "After reset, invalid MAC address\n");
   2254		return PCI_ERS_RESULT_DISCONNECT;
   2255	}
   2256
   2257	return PCI_ERS_RESULT_RECOVERED;
   2258}
   2259
   2260/**
 * ixgb_io_resume - called when it's OK to resume normal operations
   2262 * @pdev: pointer to pci device with error
   2263 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second half
   2266 * of the ixgb_probe() routine.
   2267 */
   2268static void ixgb_io_resume(struct pci_dev *pdev)
   2269{
   2270	struct net_device *netdev = pci_get_drvdata(pdev);
   2271	struct ixgb_adapter *adapter = netdev_priv(netdev);
   2272
   2273	pci_set_master(pdev);
   2274
   2275	if (netif_running(netdev)) {
   2276		if (ixgb_up(adapter)) {
   2277			pr_err("can't bring device back up after reset\n");
   2278			return;
   2279		}
   2280	}
   2281
   2282	netif_device_attach(netdev);
   2283	mod_timer(&adapter->watchdog_timer, jiffies);
   2284}
   2285
   2286/* ixgb_main.c */