cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qla3xxx.c (103276B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * QLogic QLA3xxx NIC HBA Driver
      4 * Copyright (c)  2003-2006 QLogic Corporation
      5 */
      6
      7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      8
      9#include <linux/kernel.h>
     10#include <linux/types.h>
     11#include <linux/module.h>
     12#include <linux/list.h>
     13#include <linux/pci.h>
     14#include <linux/dma-mapping.h>
     15#include <linux/sched.h>
     16#include <linux/slab.h>
     17#include <linux/dmapool.h>
     18#include <linux/mempool.h>
     19#include <linux/spinlock.h>
     20#include <linux/kthread.h>
     21#include <linux/interrupt.h>
     22#include <linux/errno.h>
     23#include <linux/ioport.h>
     24#include <linux/ip.h>
     25#include <linux/in.h>
     26#include <linux/if_arp.h>
     27#include <linux/if_ether.h>
     28#include <linux/netdevice.h>
     29#include <linux/etherdevice.h>
     30#include <linux/ethtool.h>
     31#include <linux/skbuff.h>
     32#include <linux/rtnetlink.h>
     33#include <linux/if_vlan.h>
     34#include <linux/delay.h>
     35#include <linux/mm.h>
     36#include <linux/prefetch.h>
     37
     38#include "qla3xxx.h"
     39
     40#define DRV_NAME	"qla3xxx"
     41#define DRV_STRING	"QLogic ISP3XXX Network Driver"
     42#define DRV_VERSION	"v2.03.00-k5"
     43
     44static const char ql3xxx_driver_name[] = DRV_NAME;
     45static const char ql3xxx_driver_version[] = DRV_VERSION;
     46
     47#define TIMED_OUT_MSG							\
     48"Timed out waiting for management port to get free before issuing command\n"
     49
     50MODULE_AUTHOR("QLogic Corporation");
     51MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
     52MODULE_LICENSE("GPL");
     53MODULE_VERSION(DRV_VERSION);
     54
     55static const u32 default_msg
     56    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
     57    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
     58
     59static int debug = -1;		/* defaults above */
     60module_param(debug, int, 0);
     61MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
     62
     63static int msi;
     64module_param(msi, int, 0);
     65MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
     66
     67static const struct pci_device_id ql3xxx_pci_tbl[] = {
     68	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
     69	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
     70	/* required last entry */
     71	{0,}
     72};
     73
     74MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
     75
     76/*
     77 *  These are the known PHYs which are used
     78 */
     79enum PHY_DEVICE_TYPE {
     80   PHY_TYPE_UNKNOWN   = 0,
     81   PHY_VITESSE_VSC8211,
     82   PHY_AGERE_ET1011C,
     83   MAX_PHY_DEV_TYPES
     84};
     85
     86struct PHY_DEVICE_INFO {
     87	const enum PHY_DEVICE_TYPE	phyDevice;
     88	const u32		phyIdOUI;
     89	const u16		phyIdModel;
     90	const char		*name;
     91};
     92
     93static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
     94	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
     95	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
     96	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
     97};
     98
     99
    100/*
    101 * Caller must take hw_lock.
    102 */
    103static int ql_sem_spinlock(struct ql3_adapter *qdev,
    104			    u32 sem_mask, u32 sem_bits)
    105{
    106	struct ql3xxx_port_registers __iomem *port_regs =
    107		qdev->mem_map_registers;
    108	u32 value;
    109	unsigned int seconds = 3;
    110
    111	do {
    112		writel((sem_mask | sem_bits),
    113		       &port_regs->CommonRegs.semaphoreReg);
    114		value = readl(&port_regs->CommonRegs.semaphoreReg);
    115		if ((value & (sem_mask >> 16)) == sem_bits)
    116			return 0;
    117		mdelay(1000);
    118	} while (--seconds);
    119	return -1;
    120}
    121
    122static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
    123{
    124	struct ql3xxx_port_registers __iomem *port_regs =
    125		qdev->mem_map_registers;
    126	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
    127	readl(&port_regs->CommonRegs.semaphoreReg);
    128}
    129
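        /*
         * Single, non-blocking attempt to acquire a hardware semaphore.
         * Returns nonzero if the semaphore was obtained.
         */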
    130static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
    131{
    132	struct ql3xxx_port_registers __iomem *port_regs =
    133		qdev->mem_map_registers;
    134	u32 value;
    135
    136	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
    137	value = readl(&port_regs->CommonRegs.semaphoreReg);
    138	return ((value & (sem_mask >> 16)) == sem_bits);
    139}
    140
    141/*
    142 * Caller holds hw_lock.
    143 */
    144static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
    145{
    146	int i = 0;
    147
    148	do {
    149		if (ql_sem_lock(qdev,
    150				QL_DRVR_SEM_MASK,
    151				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
    152				 * 2) << 1)) {
    153			netdev_printk(KERN_DEBUG, qdev->ndev,
    154				      "driver lock acquired\n");
    155			return 1;
    156		}
    157		mdelay(1000);
    158	} while (++i < 10);
    159
    160	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
    161	return 0;
    162}
    163
    164static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
    165{
    166	struct ql3xxx_port_registers __iomem *port_regs =
    167		qdev->mem_map_registers;
    168
    169	writel(((ISP_CONTROL_NP_MASK << 16) | page),
    170			&port_regs->CommonRegs.ispControlStatus);
    171	readl(&port_regs->CommonRegs.ispControlStatus);
    172	qdev->current_page = page;
    173}
    174
    175static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
    176{
    177	u32 value;
    178	unsigned long hw_flags;
    179
    180	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    181	value = readl(reg);
    182	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    183
    184	return value;
    185}
    186
    187static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
    188{
    189	return readl(reg);
    190}
    191
    192static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
    193{
    194	u32 value;
    195	unsigned long hw_flags;
    196
    197	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    198
    199	if (qdev->current_page != 0)
    200		ql_set_register_page(qdev, 0);
    201	value = readl(reg);
    202
    203	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    204	return value;
    205}
    206
    207static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
    208{
    209	if (qdev->current_page != 0)
    210		ql_set_register_page(qdev, 0);
    211	return readl(reg);
    212}
    213
    214static void ql_write_common_reg_l(struct ql3_adapter *qdev,
    215				u32 __iomem *reg, u32 value)
    216{
    217	unsigned long hw_flags;
    218
    219	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    220	writel(value, reg);
    221	readl(reg);
    222	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    223}
    224
    225static void ql_write_common_reg(struct ql3_adapter *qdev,
    226				u32 __iomem *reg, u32 value)
    227{
    228	writel(value, reg);
    229	readl(reg);
    230}
    231
    232static void ql_write_nvram_reg(struct ql3_adapter *qdev,
    233				u32 __iomem *reg, u32 value)
    234{
    235	writel(value, reg);
    236	readl(reg);
    237	udelay(1);
    238}
    239
    240static void ql_write_page0_reg(struct ql3_adapter *qdev,
    241			       u32 __iomem *reg, u32 value)
    242{
    243	if (qdev->current_page != 0)
    244		ql_set_register_page(qdev, 0);
    245	writel(value, reg);
    246	readl(reg);
    247}
    248
    249/*
    250 * Caller holds hw_lock. Only called during init.
    251 */
    252static void ql_write_page1_reg(struct ql3_adapter *qdev,
    253			       u32 __iomem *reg, u32 value)
    254{
    255	if (qdev->current_page != 1)
    256		ql_set_register_page(qdev, 1);
    257	writel(value, reg);
    258	readl(reg);
    259}
    260
    261/*
    262 * Caller holds hw_lock. Only called during init.
    263 */
    264static void ql_write_page2_reg(struct ql3_adapter *qdev,
    265			       u32 __iomem *reg, u32 value)
    266{
    267	if (qdev->current_page != 2)
    268		ql_set_register_page(qdev, 2);
    269	writel(value, reg);
    270	readl(reg);
    271}
    272
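        /*
         * The upper 16 bits of the interrupt mask register select which
         * bits are written; the lower 16 bits carry the new values.
         */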
    273static void ql_disable_interrupts(struct ql3_adapter *qdev)
    274{
    275	struct ql3xxx_port_registers __iomem *port_regs =
    276		qdev->mem_map_registers;
    277
    278	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
    279			    (ISP_IMR_ENABLE_INT << 16));
    280
    281}
    282
    283static void ql_enable_interrupts(struct ql3_adapter *qdev)
    284{
    285	struct ql3xxx_port_registers __iomem *port_regs =
    286		qdev->mem_map_registers;
    287
    288	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
    289			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
    290
    291}
    292
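        /*
         * Return a large receive buffer to the free list; if it has no
         * skb attached, allocate and DMA-map a replacement.
         */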
    293static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
    294					    struct ql_rcv_buf_cb *lrg_buf_cb)
    295{
    296	dma_addr_t map;
    297	int err;
    298	lrg_buf_cb->next = NULL;
    299
    300	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
    301		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
    302	} else {
    303		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
    304		qdev->lrg_buf_free_tail = lrg_buf_cb;
    305	}
    306
    307	if (!lrg_buf_cb->skb) {
    308		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
    309						   qdev->lrg_buffer_len);
    310		if (unlikely(!lrg_buf_cb->skb)) {
    311			qdev->lrg_buf_skb_check++;
    312		} else {
    313			/*
    314			 * We save some space to copy the ethhdr from first
    315			 * buffer
    316			 */
    317			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
    318			map = dma_map_single(&qdev->pdev->dev,
    319					     lrg_buf_cb->skb->data,
    320					     qdev->lrg_buffer_len - QL_HEADER_SPACE,
    321					     DMA_FROM_DEVICE);
    322			err = dma_mapping_error(&qdev->pdev->dev, map);
    323			if (err) {
    324				netdev_err(qdev->ndev,
    325					   "PCI mapping failed with error: %d\n",
    326					   err);
    327				dev_kfree_skb(lrg_buf_cb->skb);
    328				lrg_buf_cb->skb = NULL;
    329
    330				qdev->lrg_buf_skb_check++;
    331				return;
    332			}
    333
    334			lrg_buf_cb->buf_phy_addr_low =
    335			    cpu_to_le32(LS_64BITS(map));
    336			lrg_buf_cb->buf_phy_addr_high =
    337			    cpu_to_le32(MS_64BITS(map));
    338			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
    339			dma_unmap_len_set(lrg_buf_cb, maplen,
    340					  qdev->lrg_buffer_len -
    341					  QL_HEADER_SPACE);
    342		}
    343	}
    344
    345	qdev->lrg_buf_free_count++;
    346}
    347
    348static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
    349							   *qdev)
    350{
    351	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
    352
    353	if (lrg_buf_cb != NULL) {
    354		qdev->lrg_buf_free_head = lrg_buf_cb->next;
    355		if (qdev->lrg_buf_free_head == NULL)
    356			qdev->lrg_buf_free_tail = NULL;
    357		qdev->lrg_buf_free_count--;
    358	}
    359
    360	return lrg_buf_cb;
    361}
    362
    363static u32 addrBits = EEPROM_NO_ADDR_BITS;
    364static u32 dataBits = EEPROM_NO_DATA_BITS;
    365
    366static void fm93c56a_deselect(struct ql3_adapter *qdev);
    367static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
    368			    unsigned short *value);
    369
    370/*
    371 * Caller holds hw_lock.
    372 */
    373static void fm93c56a_select(struct ql3_adapter *qdev)
    374{
    375	struct ql3xxx_port_registers __iomem *port_regs =
    376			qdev->mem_map_registers;
    377	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
    378
    379	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
    380	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
    381}
    382
    383/*
    384 * Caller holds hw_lock.
    385 */
    386static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
    387{
    388	int i;
    389	u32 mask;
    390	u32 dataBit;
    391	u32 previousBit;
    392	struct ql3xxx_port_registers __iomem *port_regs =
    393			qdev->mem_map_registers;
    394	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
    395
    396	/* Clock in a zero, then do the start bit */
    397	ql_write_nvram_reg(qdev, spir,
    398			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    399			    AUBURN_EEPROM_DO_1));
    400	ql_write_nvram_reg(qdev, spir,
    401			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    402			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
    403	ql_write_nvram_reg(qdev, spir,
    404			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    405			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
    406
    407	mask = 1 << (FM93C56A_CMD_BITS - 1);
    408	/* Force the previous data bit to be different */
    409	previousBit = 0xffff;
    410	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
    411		dataBit = (cmd & mask)
    412			? AUBURN_EEPROM_DO_1
    413			: AUBURN_EEPROM_DO_0;
    414		if (previousBit != dataBit) {
    415			/* If the bit changed, change the DO state to match */
    416			ql_write_nvram_reg(qdev, spir,
    417					   (ISP_NVRAM_MASK |
    418					    qdev->eeprom_cmd_data | dataBit));
    419			previousBit = dataBit;
    420		}
    421		ql_write_nvram_reg(qdev, spir,
    422				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    423				    dataBit | AUBURN_EEPROM_CLK_RISE));
    424		ql_write_nvram_reg(qdev, spir,
    425				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    426				    dataBit | AUBURN_EEPROM_CLK_FALL));
    427		cmd = cmd << 1;
    428	}
    429
    430	mask = 1 << (addrBits - 1);
    431	/* Force the previous data bit to be different */
    432	previousBit = 0xffff;
    433	for (i = 0; i < addrBits; i++) {
    434		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
    435			: AUBURN_EEPROM_DO_0;
    436		if (previousBit != dataBit) {
    437			/*
    438			 * If the bit changed, then change the DO state to
    439			 * match
    440			 */
    441			ql_write_nvram_reg(qdev, spir,
    442					   (ISP_NVRAM_MASK |
    443					    qdev->eeprom_cmd_data | dataBit));
    444			previousBit = dataBit;
    445		}
    446		ql_write_nvram_reg(qdev, spir,
    447				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    448				    dataBit | AUBURN_EEPROM_CLK_RISE));
    449		ql_write_nvram_reg(qdev, spir,
    450				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    451				    dataBit | AUBURN_EEPROM_CLK_FALL));
    452		eepromAddr = eepromAddr << 1;
    453	}
    454}
    455
    456/*
    457 * Caller holds hw_lock.
    458 */
    459static void fm93c56a_deselect(struct ql3_adapter *qdev)
    460{
    461	struct ql3xxx_port_registers __iomem *port_regs =
    462			qdev->mem_map_registers;
    463	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
    464
    465	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
    466	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
    467}
    468
    469/*
    470 * Caller holds hw_lock.
    471 */
    472static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
    473{
    474	int i;
    475	u32 data = 0;
    476	u32 dataBit;
    477	struct ql3xxx_port_registers __iomem *port_regs =
    478			qdev->mem_map_registers;
    479	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
    480
    481	/* Read the data bits */
    482	/* The first bit is a dummy.  Clock right over it. */
    483	for (i = 0; i < dataBits; i++) {
    484		ql_write_nvram_reg(qdev, spir,
    485				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    486				   AUBURN_EEPROM_CLK_RISE);
    487		ql_write_nvram_reg(qdev, spir,
    488				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
    489				   AUBURN_EEPROM_CLK_FALL);
    490		dataBit = (ql_read_common_reg(qdev, spir) &
    491			   AUBURN_EEPROM_DI_1) ? 1 : 0;
    492		data = (data << 1) | dataBit;
    493	}
    494	*value = (u16)data;
    495}
    496
    497/*
    498 * Caller holds hw_lock.
    499 */
    500static void eeprom_readword(struct ql3_adapter *qdev,
    501			    u32 eepromAddr, unsigned short *value)
    502{
    503	fm93c56a_select(qdev);
    504	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
    505	fm93c56a_datain(qdev, value);
    506	fm93c56a_deselect(qdev);
    507}
    508
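        /* Assemble a MAC address from three 16-bit words and set it on the netdev. */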
    509static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
    510{
    511	__le16 buf[ETH_ALEN / 2];
    512
    513	buf[0] = cpu_to_le16(addr[0]);
    514	buf[1] = cpu_to_le16(addr[1]);
    515	buf[2] = cpu_to_le16(addr[2]);
    516	eth_hw_addr_set(ndev, (u8 *)buf);
    517}
    518
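        /*
         * Read the adapter EEPROM into nvram_data under the NVRAM
         * semaphore and verify that the 16-bit word sum is zero.
         */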
    519static int ql_get_nvram_params(struct ql3_adapter *qdev)
    520{
    521	u16 *pEEPROMData;
    522	u16 checksum = 0;
    523	u32 index;
    524	unsigned long hw_flags;
    525
    526	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    527
    528	pEEPROMData = (u16 *)&qdev->nvram_data;
    529	qdev->eeprom_cmd_data = 0;
    530	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
    531			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
    532			 2) << 10)) {
    533		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
    534		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    535		return -1;
    536	}
    537
    538	for (index = 0; index < EEPROM_SIZE; index++) {
    539		eeprom_readword(qdev, index, pEEPROMData);
    540		checksum += *pEEPROMData;
    541		pEEPROMData++;
    542	}
    543	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
    544
    545	if (checksum != 0) {
    546		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
    547			   checksum);
    548		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    549		return -1;
    550	}
    551
    552	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    553	return checksum;
    554}
    555
    556static const u32 PHYAddr[2] = {
    557	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
    558};
    559
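        /* Poll until the MII management interface is no longer busy. */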
    560static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
    561{
    562	struct ql3xxx_port_registers __iomem *port_regs =
    563			qdev->mem_map_registers;
    564	u32 temp;
    565	int count = 1000;
    566
    567	while (count) {
    568		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
    569		if (!(temp & MAC_MII_STATUS_BSY))
    570			return 0;
    571		udelay(10);
    572		count--;
    573	}
    574	return -1;
    575}
    576
    577static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
    578{
    579	struct ql3xxx_port_registers __iomem *port_regs =
    580			qdev->mem_map_registers;
    581	u32 scanControl;
    582
    583	if (qdev->numPorts > 1) {
    584		/* Auto scan will cycle through multiple ports */
    585		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
    586	} else {
    587		scanControl = MAC_MII_CONTROL_SC;
    588	}
    589
    590	/*
     591	 * Scan register 1 of the PHY/PETBI and set up to scan
     592	 * both devices. The autoscan starts from the first register
     593	 * and completes the last one before rolling over to the
     594	 * first.
    595	 */
    596	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    597			   PHYAddr[0] | MII_SCAN_REGISTER);
    598
    599	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    600			   (scanControl) |
    601			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
    602}
    603
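        /*
         * Turn off MII auto-scan and report whether it was enabled so the
         * caller can restore it afterwards.
         */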
    604static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
    605{
    606	u8 ret;
    607	struct ql3xxx_port_registers __iomem *port_regs =
    608					qdev->mem_map_registers;
    609
    610	/* See if scan mode is enabled before we turn it off */
    611	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
    612	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
    613		/* Scan is enabled */
    614		ret = 1;
    615	} else {
    616		/* Scan is disabled */
    617		ret = 0;
    618	}
    619
    620	/*
    621	 * When disabling scan mode you must first change the MII register
    622	 * address
    623	 */
    624	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    625			   PHYAddr[0] | MII_SCAN_REGISTER);
    626
    627	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    628			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
    629			     MAC_MII_CONTROL_RC) << 16));
    630
    631	return ret;
    632}
    633
    634static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
    635			       u16 regAddr, u16 value, u32 phyAddr)
    636{
    637	struct ql3xxx_port_registers __iomem *port_regs =
    638			qdev->mem_map_registers;
    639	u8 scanWasEnabled;
    640
    641	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
    642
    643	if (ql_wait_for_mii_ready(qdev)) {
    644		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    645		return -1;
    646	}
    647
    648	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    649			   phyAddr | regAddr);
    650
    651	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
    652
    653	/* Wait for write to complete 9/10/04 SJP */
    654	if (ql_wait_for_mii_ready(qdev)) {
    655		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    656		return -1;
    657	}
    658
    659	if (scanWasEnabled)
    660		ql_mii_enable_scan_mode(qdev);
    661
    662	return 0;
    663}
    664
    665static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
    666			      u16 *value, u32 phyAddr)
    667{
    668	struct ql3xxx_port_registers __iomem *port_regs =
    669			qdev->mem_map_registers;
    670	u8 scanWasEnabled;
    671	u32 temp;
    672
    673	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
    674
    675	if (ql_wait_for_mii_ready(qdev)) {
    676		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    677		return -1;
    678	}
    679
    680	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    681			   phyAddr | regAddr);
    682
    683	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    684			   (MAC_MII_CONTROL_RC << 16));
    685
    686	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    687			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
    688
    689	/* Wait for the read to complete */
    690	if (ql_wait_for_mii_ready(qdev)) {
    691		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    692		return -1;
    693	}
    694
    695	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
    696	*value = (u16) temp;
    697
    698	if (scanWasEnabled)
    699		ql_mii_enable_scan_mode(qdev);
    700
    701	return 0;
    702}
    703
    704static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
    705{
    706	struct ql3xxx_port_registers __iomem *port_regs =
    707			qdev->mem_map_registers;
    708
    709	ql_mii_disable_scan_mode(qdev);
    710
    711	if (ql_wait_for_mii_ready(qdev)) {
    712		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    713		return -1;
    714	}
    715
    716	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    717			   qdev->PHYAddr | regAddr);
    718
    719	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
    720
    721	/* Wait for write to complete. */
    722	if (ql_wait_for_mii_ready(qdev)) {
    723		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    724		return -1;
    725	}
    726
    727	ql_mii_enable_scan_mode(qdev);
    728
    729	return 0;
    730}
    731
    732static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
    733{
    734	u32 temp;
    735	struct ql3xxx_port_registers __iomem *port_regs =
    736			qdev->mem_map_registers;
    737
    738	ql_mii_disable_scan_mode(qdev);
    739
    740	if (ql_wait_for_mii_ready(qdev)) {
    741		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    742		return -1;
    743	}
    744
    745	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
    746			   qdev->PHYAddr | regAddr);
    747
    748	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    749			   (MAC_MII_CONTROL_RC << 16));
    750
    751	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
    752			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
    753
    754	/* Wait for the read to complete */
    755	if (ql_wait_for_mii_ready(qdev)) {
    756		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
    757		return -1;
    758	}
    759
    760	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
    761	*value = (u16) temp;
    762
    763	ql_mii_enable_scan_mode(qdev);
    764
    765	return 0;
    766}
    767
    768static void ql_petbi_reset(struct ql3_adapter *qdev)
    769{
    770	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
    771}
    772
    773static void ql_petbi_start_neg(struct ql3_adapter *qdev)
    774{
    775	u16 reg;
    776
    777	/* Enable Auto-negotiation sense */
    778	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
    779	reg |= PETBI_TBI_AUTO_SENSE;
    780	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
    781
    782	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
    783			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
    784
    785	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
    786			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
    787			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
    788
    789}
    790
    791static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
    792{
    793	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
    794			    PHYAddr[qdev->mac_index]);
    795}
    796
    797static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
    798{
    799	u16 reg;
    800
    801	/* Enable Auto-negotiation sense */
    802	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
    803			   PHYAddr[qdev->mac_index]);
    804	reg |= PETBI_TBI_AUTO_SENSE;
    805	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
    806			    PHYAddr[qdev->mac_index]);
    807
    808	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
    809			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
    810			    PHYAddr[qdev->mac_index]);
    811
    812	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
    813			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
    814			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
    815			    PHYAddr[qdev->mac_index]);
    816}
    817
    818static void ql_petbi_init(struct ql3_adapter *qdev)
    819{
    820	ql_petbi_reset(qdev);
    821	ql_petbi_start_neg(qdev);
    822}
    823
    824static void ql_petbi_init_ex(struct ql3_adapter *qdev)
    825{
    826	ql_petbi_reset_ex(qdev);
    827	ql_petbi_start_neg_ex(qdev);
    828}
    829
    830static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
    831{
    832	u16 reg;
    833
    834	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
    835		return 0;
    836
    837	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
    838}
    839
    840static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
    841{
    842	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
    843	/* power down device bit 11 = 1 */
    844	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
    845	/* enable diagnostic mode bit 2 = 1 */
    846	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
    847	/* 1000MB amplitude adjust (see Agere errata) */
    848	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
    849	/* 1000MB amplitude adjust (see Agere errata) */
    850	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
    851	/* 100MB amplitude adjust (see Agere errata) */
    852	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
    853	/* 100MB amplitude adjust (see Agere errata) */
    854	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
    855	/* 10MB amplitude adjust (see Agere errata) */
    856	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
    857	/* 10MB amplitude adjust (see Agere errata) */
    858	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
    859	/* point to hidden reg 0x2806 */
    860	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
    861	/* Write new PHYAD w/bit 5 set */
    862	ql_mii_write_reg_ex(qdev, 0x11,
    863			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
    864	/*
    865	 * Disable diagnostic mode bit 2 = 0
    866	 * Power up device bit 11 = 0
    867	 * Link up (on) and activity (blink)
    868	 */
    869	ql_mii_write_reg(qdev, 0x12, 0x840a);
    870	ql_mii_write_reg(qdev, 0x00, 0x1140);
    871	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
    872}
    873
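        /* Look up the PHY OUI and model number in the table of known PHYs. */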
    874static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
    875				       u16 phyIdReg0, u16 phyIdReg1)
    876{
    877	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
    878	u32   oui;
    879	u16   model;
    880	int i;
    881
    882	if (phyIdReg0 == 0xffff)
    883		return result;
    884
    885	if (phyIdReg1 == 0xffff)
    886		return result;
    887
    888	/* oui is split between two registers */
    889	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
    890
    891	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
    892
    893	/* Scan table for this PHY */
    894	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
    895		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
    896		    (model == PHY_DEVICES[i].phyIdModel)) {
    897			netdev_info(qdev->ndev, "Phy: %s\n",
    898				    PHY_DEVICES[i].name);
    899			result = PHY_DEVICES[i].phyDevice;
    900			break;
    901		}
    902	}
    903
    904	return result;
    905}
    906
    907static int ql_phy_get_speed(struct ql3_adapter *qdev)
    908{
    909	u16 reg;
    910
    911	switch (qdev->phyType) {
    912	case PHY_AGERE_ET1011C: {
    913		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
    914			return 0;
    915
    916		reg = (reg >> 8) & 3;
    917		break;
    918	}
    919	default:
    920		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
    921			return 0;
    922
    923		reg = (((reg & 0x18) >> 3) & 3);
    924	}
    925
    926	switch (reg) {
    927	case 2:
    928		return SPEED_1000;
    929	case 1:
    930		return SPEED_100;
    931	case 0:
    932		return SPEED_10;
    933	default:
    934		return -1;
    935	}
    936}
    937
    938static int ql_is_full_dup(struct ql3_adapter *qdev)
    939{
    940	u16 reg;
    941
    942	switch (qdev->phyType) {
    943	case PHY_AGERE_ET1011C: {
    944		if (ql_mii_read_reg(qdev, 0x1A, &reg))
    945			return 0;
    946
    947		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
    948	}
    949	case PHY_VITESSE_VSC8211:
    950	default: {
    951		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
    952			return 0;
    953		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
    954	}
    955	}
    956}
    957
    958static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
    959{
    960	u16 reg;
    961
    962	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
    963		return 0;
    964
    965	return (reg & PHY_NEG_PAUSE) != 0;
    966}
    967
    968static int PHY_Setup(struct ql3_adapter *qdev)
    969{
    970	u16   reg1;
    971	u16   reg2;
    972	bool  agereAddrChangeNeeded = false;
    973	u32 miiAddr = 0;
    974	int err;
    975
    976	/*  Determine the PHY we are using by reading the ID's */
    977	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
    978	if (err != 0) {
    979		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
    980		return err;
    981	}
    982
    983	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
    984	if (err != 0) {
    985		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
    986		return err;
    987	}
    988
     989	/*  Check if we have an Agere PHY */
    990	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
    991
     992		/* Determine which MII address we should be using,
     993		   based on the index of the card */
    994		if (qdev->mac_index == 0)
    995			miiAddr = MII_AGERE_ADDR_1;
    996		else
    997			miiAddr = MII_AGERE_ADDR_2;
    998
    999		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
   1000		if (err != 0) {
   1001			netdev_err(qdev->ndev,
   1002				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
   1003			return err;
   1004		}
   1005
   1006		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
   1007		if (err != 0) {
   1008			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
   1009			return err;
   1010		}
   1011
   1012		/*  We need to remember to initialize the Agere PHY */
   1013		agereAddrChangeNeeded = true;
   1014	}
   1015
   1016	/*  Determine the particular PHY we have on board to apply
   1017	    PHY specific initializations */
   1018	qdev->phyType = getPhyType(qdev, reg1, reg2);
   1019
   1020	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
   1021		/* need this here so address gets changed */
   1022		phyAgereSpecificInit(qdev, miiAddr);
   1023	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
   1024		netdev_err(qdev->ndev, "PHY is unknown\n");
   1025		return -EIO;
   1026	}
   1027
   1028	return 0;
   1029}
   1030
   1031/*
   1032 * Caller holds hw_lock.
   1033 */
   1034static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
   1035{
   1036	struct ql3xxx_port_registers __iomem *port_regs =
   1037			qdev->mem_map_registers;
   1038	u32 value;
   1039
   1040	if (enable)
   1041		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
   1042	else
   1043		value = (MAC_CONFIG_REG_PE << 16);
   1044
   1045	if (qdev->mac_index)
   1046		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
   1047	else
   1048		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
   1049}
   1050
   1051/*
   1052 * Caller holds hw_lock.
   1053 */
   1054static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
   1055{
   1056	struct ql3xxx_port_registers __iomem *port_regs =
   1057			qdev->mem_map_registers;
   1058	u32 value;
   1059
   1060	if (enable)
   1061		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
   1062	else
   1063		value = (MAC_CONFIG_REG_SR << 16);
   1064
   1065	if (qdev->mac_index)
   1066		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
   1067	else
   1068		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
   1069}
   1070
   1071/*
   1072 * Caller holds hw_lock.
   1073 */
   1074static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
   1075{
   1076	struct ql3xxx_port_registers __iomem *port_regs =
   1077			qdev->mem_map_registers;
   1078	u32 value;
   1079
   1080	if (enable)
   1081		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
   1082	else
   1083		value = (MAC_CONFIG_REG_GM << 16);
   1084
   1085	if (qdev->mac_index)
   1086		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
   1087	else
   1088		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
   1089}
   1090
   1091/*
   1092 * Caller holds hw_lock.
   1093 */
   1094static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
   1095{
   1096	struct ql3xxx_port_registers __iomem *port_regs =
   1097			qdev->mem_map_registers;
   1098	u32 value;
   1099
   1100	if (enable)
   1101		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
   1102	else
   1103		value = (MAC_CONFIG_REG_FD << 16);
   1104
   1105	if (qdev->mac_index)
   1106		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
   1107	else
   1108		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
   1109}
   1110
   1111/*
   1112 * Caller holds hw_lock.
   1113 */
   1114static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
   1115{
   1116	struct ql3xxx_port_registers __iomem *port_regs =
   1117			qdev->mem_map_registers;
   1118	u32 value;
   1119
   1120	if (enable)
   1121		value =
   1122		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
   1123		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
   1124	else
   1125		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
   1126
   1127	if (qdev->mac_index)
   1128		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
   1129	else
   1130		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
   1131}
   1132
   1133/*
   1134 * Caller holds hw_lock.
   1135 */
   1136static int ql_is_fiber(struct ql3_adapter *qdev)
   1137{
   1138	struct ql3xxx_port_registers __iomem *port_regs =
   1139			qdev->mem_map_registers;
   1140	u32 bitToCheck = 0;
   1141	u32 temp;
   1142
   1143	switch (qdev->mac_index) {
   1144	case 0:
   1145		bitToCheck = PORT_STATUS_SM0;
   1146		break;
   1147	case 1:
   1148		bitToCheck = PORT_STATUS_SM1;
   1149		break;
   1150	}
   1151
   1152	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
   1153	return (temp & bitToCheck) != 0;
   1154}
   1155
   1156static int ql_is_auto_cfg(struct ql3_adapter *qdev)
   1157{
   1158	u16 reg;
   1159	ql_mii_read_reg(qdev, 0x00, &reg);
   1160	return (reg & 0x1000) != 0;
   1161}
   1162
   1163/*
   1164 * Caller holds hw_lock.
   1165 */
   1166static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
   1167{
   1168	struct ql3xxx_port_registers __iomem *port_regs =
   1169			qdev->mem_map_registers;
   1170	u32 bitToCheck = 0;
   1171	u32 temp;
   1172
   1173	switch (qdev->mac_index) {
   1174	case 0:
   1175		bitToCheck = PORT_STATUS_AC0;
   1176		break;
   1177	case 1:
   1178		bitToCheck = PORT_STATUS_AC1;
   1179		break;
   1180	}
   1181
   1182	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
   1183	if (temp & bitToCheck) {
   1184		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
   1185		return 1;
   1186	}
   1187	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
   1188	return 0;
   1189}
   1190
   1191/*
   1192 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
   1193 */
   1194static int ql_is_neg_pause(struct ql3_adapter *qdev)
   1195{
   1196	if (ql_is_fiber(qdev))
   1197		return ql_is_petbi_neg_pause(qdev);
   1198	else
   1199		return ql_is_phy_neg_pause(qdev);
   1200}
   1201
   1202static int ql_auto_neg_error(struct ql3_adapter *qdev)
   1203{
   1204	struct ql3xxx_port_registers __iomem *port_regs =
   1205			qdev->mem_map_registers;
   1206	u32 bitToCheck = 0;
   1207	u32 temp;
   1208
   1209	switch (qdev->mac_index) {
   1210	case 0:
   1211		bitToCheck = PORT_STATUS_AE0;
   1212		break;
   1213	case 1:
   1214		bitToCheck = PORT_STATUS_AE1;
   1215		break;
   1216	}
   1217	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
   1218	return (temp & bitToCheck) != 0;
   1219}
   1220
   1221static u32 ql_get_link_speed(struct ql3_adapter *qdev)
   1222{
   1223	if (ql_is_fiber(qdev))
   1224		return SPEED_1000;
   1225	else
   1226		return ql_phy_get_speed(qdev);
   1227}
   1228
   1229static int ql_is_link_full_dup(struct ql3_adapter *qdev)
   1230{
   1231	if (ql_is_fiber(qdev))
   1232		return 1;
   1233	else
   1234		return ql_is_full_dup(qdev);
   1235}
   1236
   1237/*
   1238 * Caller holds hw_lock.
   1239 */
   1240static int ql_link_down_detect(struct ql3_adapter *qdev)
   1241{
   1242	struct ql3xxx_port_registers __iomem *port_regs =
   1243			qdev->mem_map_registers;
   1244	u32 bitToCheck = 0;
   1245	u32 temp;
   1246
   1247	switch (qdev->mac_index) {
   1248	case 0:
   1249		bitToCheck = ISP_CONTROL_LINK_DN_0;
   1250		break;
   1251	case 1:
   1252		bitToCheck = ISP_CONTROL_LINK_DN_1;
   1253		break;
   1254	}
   1255
   1256	temp =
   1257	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
   1258	return (temp & bitToCheck) != 0;
   1259}
   1260
   1261/*
   1262 * Caller holds hw_lock.
   1263 */
   1264static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
   1265{
   1266	struct ql3xxx_port_registers __iomem *port_regs =
   1267			qdev->mem_map_registers;
   1268
   1269	switch (qdev->mac_index) {
   1270	case 0:
   1271		ql_write_common_reg(qdev,
   1272				    &port_regs->CommonRegs.ispControlStatus,
   1273				    (ISP_CONTROL_LINK_DN_0) |
   1274				    (ISP_CONTROL_LINK_DN_0 << 16));
   1275		break;
   1276
   1277	case 1:
   1278		ql_write_common_reg(qdev,
   1279				    &port_regs->CommonRegs.ispControlStatus,
   1280				    (ISP_CONTROL_LINK_DN_1) |
   1281				    (ISP_CONTROL_LINK_DN_1 << 16));
   1282		break;
   1283
   1284	default:
   1285		return 1;
   1286	}
   1287
   1288	return 0;
   1289}
   1290
   1291/*
   1292 * Caller holds hw_lock.
   1293 */
   1294static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
   1295{
   1296	struct ql3xxx_port_registers __iomem *port_regs =
   1297			qdev->mem_map_registers;
   1298	u32 bitToCheck = 0;
   1299	u32 temp;
   1300
   1301	switch (qdev->mac_index) {
   1302	case 0:
   1303		bitToCheck = PORT_STATUS_F1_ENABLED;
   1304		break;
   1305	case 1:
   1306		bitToCheck = PORT_STATUS_F3_ENABLED;
   1307		break;
   1308	default:
   1309		break;
   1310	}
   1311
   1312	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
   1313	if (temp & bitToCheck) {
   1314		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
   1315			     "not link master\n");
   1316		return 0;
   1317	}
   1318
   1319	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
   1320	return 1;
   1321}
   1322
   1323static void ql_phy_reset_ex(struct ql3_adapter *qdev)
   1324{
   1325	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
   1326			    PHYAddr[qdev->mac_index]);
   1327}
   1328
   1329static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
   1330{
   1331	u16 reg;
   1332	u16 portConfiguration;
   1333
   1334	if (qdev->phyType == PHY_AGERE_ET1011C)
   1335		ql_mii_write_reg(qdev, 0x13, 0x0000);
   1336					/* turn off external loopback */
   1337
   1338	if (qdev->mac_index == 0)
   1339		portConfiguration =
   1340			qdev->nvram_data.macCfg_port0.portConfiguration;
   1341	else
   1342		portConfiguration =
   1343			qdev->nvram_data.macCfg_port1.portConfiguration;
   1344
    1345	/*  Some HBAs in the field are set to 0 and need to be
    1346	    reinterpreted with a default value */
   1347	if (portConfiguration == 0)
   1348		portConfiguration = PORT_CONFIG_DEFAULT;
   1349
   1350	/* Set the 1000 advertisements */
   1351	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
   1352			   PHYAddr[qdev->mac_index]);
   1353	reg &= ~PHY_GIG_ALL_PARAMS;
   1354
   1355	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
   1356		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
   1357			reg |= PHY_GIG_ADV_1000F;
   1358		else
   1359			reg |= PHY_GIG_ADV_1000H;
   1360	}
   1361
   1362	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
   1363			    PHYAddr[qdev->mac_index]);
   1364
   1365	/* Set the 10/100 & pause negotiation advertisements */
   1366	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
   1367			   PHYAddr[qdev->mac_index]);
   1368	reg &= ~PHY_NEG_ALL_PARAMS;
   1369
   1370	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
   1371		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
   1372
   1373	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
   1374		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
   1375			reg |= PHY_NEG_ADV_100F;
   1376
   1377		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
   1378			reg |= PHY_NEG_ADV_10F;
   1379	}
   1380
   1381	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
   1382		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
   1383			reg |= PHY_NEG_ADV_100H;
   1384
   1385		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
   1386			reg |= PHY_NEG_ADV_10H;
   1387	}
   1388
   1389	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
   1390		reg |= 1;
   1391
   1392	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
   1393			    PHYAddr[qdev->mac_index]);
   1394
   1395	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
   1396
   1397	ql_mii_write_reg_ex(qdev, CONTROL_REG,
   1398			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
   1399			    PHYAddr[qdev->mac_index]);
   1400}
   1401
   1402static void ql_phy_init_ex(struct ql3_adapter *qdev)
   1403{
   1404	ql_phy_reset_ex(qdev);
   1405	PHY_Setup(qdev);
   1406	ql_phy_start_neg_ex(qdev);
   1407}
   1408
   1409/*
   1410 * Caller holds hw_lock.
   1411 */
   1412static u32 ql_get_link_state(struct ql3_adapter *qdev)
   1413{
   1414	struct ql3xxx_port_registers __iomem *port_regs =
   1415			qdev->mem_map_registers;
   1416	u32 bitToCheck = 0;
   1417	u32 temp, linkState;
   1418
   1419	switch (qdev->mac_index) {
   1420	case 0:
   1421		bitToCheck = PORT_STATUS_UP0;
   1422		break;
   1423	case 1:
   1424		bitToCheck = PORT_STATUS_UP1;
   1425		break;
   1426	}
   1427
   1428	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
   1429	if (temp & bitToCheck)
   1430		linkState = LS_UP;
   1431	else
   1432		linkState = LS_DOWN;
   1433
   1434	return linkState;
   1435}
   1436
   1437static int ql_port_start(struct ql3_adapter *qdev)
   1438{
   1439	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1440		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
   1441			 2) << 7)) {
   1442		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
   1443		return -1;
   1444	}
   1445
   1446	if (ql_is_fiber(qdev)) {
   1447		ql_petbi_init(qdev);
   1448	} else {
   1449		/* Copper port */
   1450		ql_phy_init_ex(qdev);
   1451	}
   1452
   1453	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1454	return 0;
   1455}
   1456
   1457static int ql_finish_auto_neg(struct ql3_adapter *qdev)
   1458{
   1459
   1460	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1461		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
   1462			 2) << 7))
   1463		return -1;
   1464
   1465	if (!ql_auto_neg_error(qdev)) {
   1466		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
   1467			/* configure the MAC */
   1468			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
   1469				     "Configuring link\n");
   1470			ql_mac_cfg_soft_reset(qdev, 1);
   1471			ql_mac_cfg_gig(qdev,
   1472				       (ql_get_link_speed
   1473					(qdev) ==
   1474					SPEED_1000));
   1475			ql_mac_cfg_full_dup(qdev,
   1476					    ql_is_link_full_dup
   1477					    (qdev));
   1478			ql_mac_cfg_pause(qdev,
   1479					 ql_is_neg_pause
   1480					 (qdev));
   1481			ql_mac_cfg_soft_reset(qdev, 0);
   1482
   1483			/* enable the MAC */
   1484			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
   1485				     "Enabling mac\n");
   1486			ql_mac_enable(qdev, 1);
   1487		}
   1488
   1489		qdev->port_link_state = LS_UP;
   1490		netif_start_queue(qdev->ndev);
   1491		netif_carrier_on(qdev->ndev);
   1492		netif_info(qdev, link, qdev->ndev,
   1493			   "Link is up at %d Mbps, %s duplex\n",
   1494			   ql_get_link_speed(qdev),
   1495			   ql_is_link_full_dup(qdev) ? "full" : "half");
   1496
   1497	} else {	/* Remote error detected */
   1498
   1499		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
   1500			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
   1501				     "Remote error detected. Calling ql_port_start()\n");
   1502			/*
   1503			 * ql_port_start() is shared code and needs
    1504			 * to lock the PHY on its own.
   1505			 */
   1506			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1507			if (ql_port_start(qdev))	/* Restart port */
   1508				return -1;
   1509			return 0;
   1510		}
   1511	}
   1512	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1513	return 0;
   1514}
   1515
   1516static void ql_link_state_machine_work(struct work_struct *work)
   1517{
   1518	struct ql3_adapter *qdev =
   1519		container_of(work, struct ql3_adapter, link_state_work.work);
   1520
   1521	u32 curr_link_state;
   1522	unsigned long hw_flags;
   1523
   1524	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   1525
   1526	curr_link_state = ql_get_link_state(qdev);
   1527
   1528	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
   1529		netif_info(qdev, link, qdev->ndev,
   1530			   "Reset in progress, skip processing link state\n");
   1531
   1532		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1533
    1534		/* Restart timer on 1 second interval. */
   1535		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
   1536
   1537		return;
   1538	}
   1539
   1540	switch (qdev->port_link_state) {
   1541	default:
   1542		if (test_bit(QL_LINK_MASTER, &qdev->flags))
   1543			ql_port_start(qdev);
   1544		qdev->port_link_state = LS_DOWN;
   1545		fallthrough;
   1546
   1547	case LS_DOWN:
   1548		if (curr_link_state == LS_UP) {
   1549			netif_info(qdev, link, qdev->ndev, "Link is up\n");
   1550			if (ql_is_auto_neg_complete(qdev))
   1551				ql_finish_auto_neg(qdev);
   1552
   1553			if (qdev->port_link_state == LS_UP)
   1554				ql_link_down_detect_clear(qdev);
   1555
   1556			qdev->port_link_state = LS_UP;
   1557		}
   1558		break;
   1559
   1560	case LS_UP:
   1561		/*
   1562		 * See if the link is currently down or went down and came
   1563		 * back up
   1564		 */
   1565		if (curr_link_state == LS_DOWN) {
   1566			netif_info(qdev, link, qdev->ndev, "Link is down\n");
   1567			qdev->port_link_state = LS_DOWN;
   1568		}
   1569		if (ql_link_down_detect(qdev))
   1570			qdev->port_link_state = LS_DOWN;
   1571		break;
   1572	}
   1573	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1574
    1575	/* Restart timer on 1 second interval. */
   1576	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
   1577}
   1578
   1579/*
   1580 * Caller must take hw_lock and QL_PHY_GIO_SEM.
   1581 */
   1582static void ql_get_phy_owner(struct ql3_adapter *qdev)
   1583{
   1584	if (ql_this_adapter_controls_port(qdev))
   1585		set_bit(QL_LINK_MASTER, &qdev->flags);
   1586	else
   1587		clear_bit(QL_LINK_MASTER, &qdev->flags);
   1588}
   1589
   1590/*
   1591 * Caller must take hw_lock and QL_PHY_GIO_SEM.
   1592 */
   1593static void ql_init_scan_mode(struct ql3_adapter *qdev)
   1594{
   1595	ql_mii_enable_scan_mode(qdev);
   1596
   1597	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
   1598		if (ql_this_adapter_controls_port(qdev))
   1599			ql_petbi_init_ex(qdev);
   1600	} else {
   1601		if (ql_this_adapter_controls_port(qdev))
   1602			ql_phy_init_ex(qdev);
   1603	}
   1604}
   1605
   1606/*
   1607 * MII_Setup needs to be called before taking the PHY out of reset
   1608 * so that the management interface clock speed can be set properly.
   1609 * It would be better if we had a way to disable MDC until after the
   1610 * PHY is out of reset, but we don't have that capability.
   1611 */
   1612static int ql_mii_setup(struct ql3_adapter *qdev)
   1613{
   1614	u32 reg;
   1615	struct ql3xxx_port_registers __iomem *port_regs =
   1616			qdev->mem_map_registers;
   1617
   1618	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1619			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
   1620			 2) << 7))
   1621		return -1;
   1622
   1623	if (qdev->device_id == QL3032_DEVICE_ID)
   1624		ql_write_page0_reg(qdev,
   1625			&port_regs->macMIIMgmtControlReg, 0x0f00000);
   1626
   1627	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
   1628	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
   1629
   1630	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
   1631			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
   1632
   1633	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1634	return 0;
   1635}
   1636
   1637#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
   1638				 SUPPORTED_FIBRE |		\
   1639				 SUPPORTED_Autoneg)
   1640#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
   1641				 SUPPORTED_10baseT_Full |	\
   1642				 SUPPORTED_100baseT_Half |	\
   1643				 SUPPORTED_100baseT_Full |	\
   1644				 SUPPORTED_1000baseT_Half |	\
   1645				 SUPPORTED_1000baseT_Full |	\
   1646				 SUPPORTED_Autoneg |		\
    1647				 SUPPORTED_TP)
   1648
   1649static u32 ql_supported_modes(struct ql3_adapter *qdev)
   1650{
   1651	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
   1652		return SUPPORTED_OPTICAL_MODES;
   1653
   1654	return SUPPORTED_TP_MODES;
   1655}
   1656
   1657static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
   1658{
   1659	int status;
   1660	unsigned long hw_flags;
   1661	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   1662	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1663			    (QL_RESOURCE_BITS_BASE_CODE |
   1664			     (qdev->mac_index) * 2) << 7)) {
   1665		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1666		return 0;
   1667	}
   1668	status = ql_is_auto_cfg(qdev);
   1669	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1670	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1671	return status;
   1672}
   1673
   1674static u32 ql_get_speed(struct ql3_adapter *qdev)
   1675{
   1676	u32 status;
   1677	unsigned long hw_flags;
   1678	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   1679	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1680			    (QL_RESOURCE_BITS_BASE_CODE |
   1681			     (qdev->mac_index) * 2) << 7)) {
   1682		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1683		return 0;
   1684	}
   1685	status = ql_get_link_speed(qdev);
   1686	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1687	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1688	return status;
   1689}
   1690
   1691static int ql_get_full_dup(struct ql3_adapter *qdev)
   1692{
   1693	int status;
   1694	unsigned long hw_flags;
   1695	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   1696	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   1697			    (QL_RESOURCE_BITS_BASE_CODE |
   1698			     (qdev->mac_index) * 2) << 7)) {
   1699		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1700		return 0;
   1701	}
   1702	status = ql_is_link_full_dup(qdev);
   1703	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   1704	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   1705	return status;
   1706}
   1707
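        /* ethtool: report supported/advertised modes, port type, autoneg, speed and duplex. */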
   1708static int ql_get_link_ksettings(struct net_device *ndev,
   1709				 struct ethtool_link_ksettings *cmd)
   1710{
   1711	struct ql3_adapter *qdev = netdev_priv(ndev);
   1712	u32 supported, advertising;
   1713
   1714	supported = ql_supported_modes(qdev);
   1715
   1716	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
   1717		cmd->base.port = PORT_FIBRE;
   1718	} else {
   1719		cmd->base.port = PORT_TP;
   1720		cmd->base.phy_address = qdev->PHYAddr;
   1721	}
   1722	advertising = ql_supported_modes(qdev);
   1723	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
   1724	cmd->base.speed = ql_get_speed(qdev);
   1725	cmd->base.duplex = ql_get_full_dup(qdev);
   1726
   1727	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
   1728						supported);
   1729	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
   1730						advertising);
   1731
   1732	return 0;
   1733}
   1734
   1735static void ql_get_drvinfo(struct net_device *ndev,
   1736			   struct ethtool_drvinfo *drvinfo)
   1737{
   1738	struct ql3_adapter *qdev = netdev_priv(ndev);
   1739	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
   1740	strlcpy(drvinfo->version, ql3xxx_driver_version,
   1741		sizeof(drvinfo->version));
   1742	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
   1743		sizeof(drvinfo->bus_info));
   1744}
   1745
   1746static u32 ql_get_msglevel(struct net_device *ndev)
   1747{
   1748	struct ql3_adapter *qdev = netdev_priv(ndev);
   1749	return qdev->msg_enable;
   1750}
   1751
   1752static void ql_set_msglevel(struct net_device *ndev, u32 value)
   1753{
   1754	struct ql3_adapter *qdev = netdev_priv(ndev);
   1755	qdev->msg_enable = value;
   1756}
   1757
   1758static void ql_get_pauseparam(struct net_device *ndev,
   1759			      struct ethtool_pauseparam *pause)
   1760{
   1761	struct ql3_adapter *qdev = netdev_priv(ndev);
   1762	struct ql3xxx_port_registers __iomem *port_regs =
   1763		qdev->mem_map_registers;
   1764
   1765	u32 reg;
   1766	if (qdev->mac_index == 0)
   1767		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
   1768	else
   1769		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
   1770
   1771	pause->autoneg  = ql_get_auto_cfg_status(qdev);
   1772	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
   1773	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
   1774}
   1775
   1776static const struct ethtool_ops ql3xxx_ethtool_ops = {
   1777	.get_drvinfo = ql_get_drvinfo,
   1778	.get_link = ethtool_op_get_link,
   1779	.get_msglevel = ql_get_msglevel,
   1780	.set_msglevel = ql_set_msglevel,
   1781	.get_pauseparam = ql_get_pauseparam,
   1782	.get_link_ksettings = ql_get_link_ksettings,
   1783};
   1784
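        /*
         * Replenish skbs on the large receive buffer free list: for each
         * control block that has lost its skb, allocate a fresh one, reserve
         * QL_HEADER_SPACE and map it for DMA.  Returns 1 once the outstanding
         * deficit (lrg_buf_skb_check) is fully replenished, 0 if an allocation
         * or mapping failure leaves buffers still missing.
         */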
   1785static int ql_populate_free_queue(struct ql3_adapter *qdev)
   1786{
   1787	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
   1788	dma_addr_t map;
   1789	int err;
   1790
   1791	while (lrg_buf_cb) {
   1792		if (!lrg_buf_cb->skb) {
   1793			lrg_buf_cb->skb =
   1794				netdev_alloc_skb(qdev->ndev,
   1795						 qdev->lrg_buffer_len);
   1796			if (unlikely(!lrg_buf_cb->skb)) {
   1797				netdev_printk(KERN_DEBUG, qdev->ndev,
   1798					      "Failed netdev_alloc_skb()\n");
   1799				break;
   1800			} else {
   1801				/*
   1802				 * We save some space to copy the ethhdr from
   1803				 * first buffer
   1804				 */
   1805				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
   1806				map = dma_map_single(&qdev->pdev->dev,
   1807						     lrg_buf_cb->skb->data,
   1808						     qdev->lrg_buffer_len - QL_HEADER_SPACE,
   1809						     DMA_FROM_DEVICE);
   1810
   1811				err = dma_mapping_error(&qdev->pdev->dev, map);
   1812				if (err) {
   1813					netdev_err(qdev->ndev,
   1814						   "PCI mapping failed with error: %d\n",
   1815						   err);
   1816					dev_kfree_skb(lrg_buf_cb->skb);
   1817					lrg_buf_cb->skb = NULL;
   1818					break;
   1819				}
   1820
   1821
   1822				lrg_buf_cb->buf_phy_addr_low =
   1823					cpu_to_le32(LS_64BITS(map));
   1824				lrg_buf_cb->buf_phy_addr_high =
   1825					cpu_to_le32(MS_64BITS(map));
   1826				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
   1827				dma_unmap_len_set(lrg_buf_cb, maplen,
   1828						  qdev->lrg_buffer_len -
   1829						  QL_HEADER_SPACE);
   1830				--qdev->lrg_buf_skb_check;
   1831				if (!qdev->lrg_buf_skb_check)
   1832					return 1;
   1833			}
   1834		}
   1835		lrg_buf_cb = lrg_buf_cb->next;
   1836	}
   1837	return 0;
   1838}
   1839
   1840/*
   1841 * Caller holds hw_lock.
   1842 */
   1843static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
   1844{
   1845	struct ql3xxx_port_registers __iomem *port_regs =
   1846		qdev->mem_map_registers;
   1847
   1848	if (qdev->small_buf_release_cnt >= 16) {
   1849		while (qdev->small_buf_release_cnt >= 16) {
   1850			qdev->small_buf_q_producer_index++;
   1851
   1852			if (qdev->small_buf_q_producer_index ==
   1853			    NUM_SBUFQ_ENTRIES)
   1854				qdev->small_buf_q_producer_index = 0;
   1855			qdev->small_buf_release_cnt -= 8;
   1856		}
   1857		wmb();
   1858		writel_relaxed(qdev->small_buf_q_producer_index,
   1859			       &port_regs->CommonRegs.rxSmallQProducerIndex);
   1860	}
   1861}
   1862
   1863/*
   1864 * Caller holds hw_lock.
   1865 */
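        /*
         * Re-post completed large receive buffers to the hardware: buffers
         * are pulled from the free list eight at a time, each batch of eight
         * address/length pairs advances the producer index by one, and the
         * index wraps at num_lbufq_entries before being written to the
         * rxLargeQProducerIndex register.
         */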
   1866static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
   1867{
   1868	struct bufq_addr_element *lrg_buf_q_ele;
   1869	int i;
   1870	struct ql_rcv_buf_cb *lrg_buf_cb;
   1871	struct ql3xxx_port_registers __iomem *port_regs =
   1872		qdev->mem_map_registers;
   1873
   1874	if ((qdev->lrg_buf_free_count >= 8) &&
   1875	    (qdev->lrg_buf_release_cnt >= 16)) {
   1876
   1877		if (qdev->lrg_buf_skb_check)
   1878			if (!ql_populate_free_queue(qdev))
   1879				return;
   1880
   1881		lrg_buf_q_ele = qdev->lrg_buf_next_free;
   1882
   1883		while ((qdev->lrg_buf_release_cnt >= 16) &&
   1884		       (qdev->lrg_buf_free_count >= 8)) {
   1885
   1886			for (i = 0; i < 8; i++) {
   1887				lrg_buf_cb =
   1888				    ql_get_from_lrg_buf_free_list(qdev);
   1889				lrg_buf_q_ele->addr_high =
   1890				    lrg_buf_cb->buf_phy_addr_high;
   1891				lrg_buf_q_ele->addr_low =
   1892				    lrg_buf_cb->buf_phy_addr_low;
   1893				lrg_buf_q_ele++;
   1894
   1895				qdev->lrg_buf_release_cnt--;
   1896			}
   1897
   1898			qdev->lrg_buf_q_producer_index++;
   1899
   1900			if (qdev->lrg_buf_q_producer_index ==
   1901			    qdev->num_lbufq_entries)
   1902				qdev->lrg_buf_q_producer_index = 0;
   1903
   1904			if (qdev->lrg_buf_q_producer_index ==
   1905			    (qdev->num_lbufq_entries - 1)) {
   1906				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
   1907			}
   1908		}
   1909		wmb();
   1910		qdev->lrg_buf_next_free = lrg_buf_q_ele;
   1911		writel(qdev->lrg_buf_q_producer_index,
   1912			&port_regs->CommonRegs.rxLargeQProducerIndex);
   1913	}
   1914}
   1915
   1916static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
   1917				   struct ob_mac_iocb_rsp *mac_rsp)
   1918{
   1919	struct ql_tx_buf_cb *tx_cb;
   1920	int i;
   1921
   1922	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
   1923		netdev_warn(qdev->ndev,
   1924			    "Frame too short but it was padded and sent\n");
   1925	}
   1926
   1927	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
   1928
   1929	/*  Check the transmit response flags for any errors */
   1930	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
   1931		netdev_err(qdev->ndev,
   1932			   "Frame too short to be legal, frame not sent\n");
   1933
   1934		qdev->ndev->stats.tx_errors++;
   1935		goto frame_not_sent;
   1936	}
   1937
   1938	if (tx_cb->seg_count == 0) {
   1939		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
   1940			   mac_rsp->transaction_id);
   1941
   1942		qdev->ndev->stats.tx_errors++;
   1943		goto invalid_seg_count;
   1944	}
   1945
   1946	dma_unmap_single(&qdev->pdev->dev,
   1947			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
   1948			 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
   1949	tx_cb->seg_count--;
   1950	if (tx_cb->seg_count) {
   1951		for (i = 1; i < tx_cb->seg_count; i++) {
   1952			dma_unmap_page(&qdev->pdev->dev,
   1953				       dma_unmap_addr(&tx_cb->map[i], mapaddr),
   1954				       dma_unmap_len(&tx_cb->map[i], maplen),
   1955				       DMA_TO_DEVICE);
   1956		}
   1957	}
   1958	qdev->ndev->stats.tx_packets++;
   1959	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
   1960
   1961frame_not_sent:
   1962	dev_kfree_skb_irq(tx_cb->skb);
   1963	tx_cb->skb = NULL;
   1964
   1965invalid_seg_count:
   1966	atomic_inc(&qdev->tx_count);
   1967}
   1968
   1969static void ql_get_sbuf(struct ql3_adapter *qdev)
   1970{
   1971	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
   1972		qdev->small_buf_index = 0;
   1973	qdev->small_buf_release_cnt++;
   1974}
   1975
   1976static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
   1977{
   1978	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
   1979	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
   1980	qdev->lrg_buf_release_cnt++;
   1981	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
   1982		qdev->lrg_buf_index = 0;
   1983	return lrg_buf_cb;
   1984}
   1985
   1986/*
   1987 * The difference between 3022 and 3032 for inbound completions:
   1988 * 3022 uses two buffers per completion.  The first buffer contains
   1989 * (some) header info, the second the remainder of the headers plus
   1990 * the data.  For this chip we reserve some space at the top of the
   1991 * receive buffer so that the header info in buffer one can be
    1992 * prepended to buffer two.  Buffer two is then sent up while
    1993 * buffer one is returned to the hardware to be reused.
    1994 * 3032 receives all of its data and headers in one buffer for a
   1995 * simpler process.  3032 also supports checksum verification as
   1996 * can be seen in ql_process_macip_rx_intr().
   1997 */
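        /*
         * In the handler below each MAC completion therefore consumes one
         * small buffer (the inbound address list) plus one large buffer on
         * the 3032, or two large buffers on the 3022.  The skb of the last
         * large buffer is handed to the stack via napi_gro_receive() and the
         * control block(s) are returned to the free list for re-posting.
         */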
   1998static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
   1999				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
   2000{
   2001	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
   2002	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
   2003	struct sk_buff *skb;
   2004	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
   2005
   2006	/*
   2007	 * Get the inbound address list (small buffer).
   2008	 */
   2009	ql_get_sbuf(qdev);
   2010
   2011	if (qdev->device_id == QL3022_DEVICE_ID)
   2012		lrg_buf_cb1 = ql_get_lbuf(qdev);
   2013
   2014	/* start of second buffer */
   2015	lrg_buf_cb2 = ql_get_lbuf(qdev);
   2016	skb = lrg_buf_cb2->skb;
   2017
   2018	qdev->ndev->stats.rx_packets++;
   2019	qdev->ndev->stats.rx_bytes += length;
   2020
   2021	skb_put(skb, length);
   2022	dma_unmap_single(&qdev->pdev->dev,
   2023			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
   2024			 dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
   2025	prefetch(skb->data);
   2026	skb_checksum_none_assert(skb);
   2027	skb->protocol = eth_type_trans(skb, qdev->ndev);
   2028
   2029	napi_gro_receive(&qdev->napi, skb);
   2030	lrg_buf_cb2->skb = NULL;
   2031
   2032	if (qdev->device_id == QL3022_DEVICE_ID)
   2033		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
   2034	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
   2035}
   2036
   2037static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
   2038				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
   2039{
   2040	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
   2041	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
   2042	struct sk_buff *skb1 = NULL, *skb2;
   2043	struct net_device *ndev = qdev->ndev;
   2044	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
   2045	u16 size = 0;
   2046
   2047	/*
   2048	 * Get the inbound address list (small buffer).
   2049	 */
   2050
   2051	ql_get_sbuf(qdev);
   2052
   2053	if (qdev->device_id == QL3022_DEVICE_ID) {
   2054		/* start of first buffer on 3022 */
   2055		lrg_buf_cb1 = ql_get_lbuf(qdev);
   2056		skb1 = lrg_buf_cb1->skb;
   2057		size = ETH_HLEN;
   2058		if (*((u16 *) skb1->data) != 0xFFFF)
   2059			size += VLAN_ETH_HLEN - ETH_HLEN;
   2060	}
   2061
   2062	/* start of second buffer */
   2063	lrg_buf_cb2 = ql_get_lbuf(qdev);
   2064	skb2 = lrg_buf_cb2->skb;
   2065
   2066	skb_put(skb2, length);	/* Just the second buffer length here. */
   2067	dma_unmap_single(&qdev->pdev->dev,
   2068			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
   2069			 dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
   2070	prefetch(skb2->data);
   2071
   2072	skb_checksum_none_assert(skb2);
   2073	if (qdev->device_id == QL3022_DEVICE_ID) {
   2074		/*
   2075		 * Copy the ethhdr from first buffer to second. This
   2076		 * is necessary for 3022 IP completions.
   2077		 */
   2078		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
   2079						 skb_push(skb2, size), size);
   2080	} else {
   2081		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
   2082		if (checksum &
   2083			(IB_IP_IOCB_RSP_3032_ICE |
   2084			 IB_IP_IOCB_RSP_3032_CE)) {
   2085			netdev_err(ndev,
   2086				   "%s: Bad checksum for this %s packet, checksum = %x\n",
   2087				   __func__,
   2088				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
   2089				    "TCP" : "UDP"), checksum);
   2090		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
   2091				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
   2092				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
   2093			skb2->ip_summed = CHECKSUM_UNNECESSARY;
   2094		}
   2095	}
   2096	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
   2097
   2098	napi_gro_receive(&qdev->napi, skb2);
   2099	ndev->stats.rx_packets++;
   2100	ndev->stats.rx_bytes += length;
   2101	lrg_buf_cb2->skb = NULL;
   2102
   2103	if (qdev->device_id == QL3022_DEVICE_ID)
   2104		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
   2105	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
   2106}
   2107
   2108static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
   2109{
   2110	struct net_rsp_iocb *net_rsp;
   2111	struct net_device *ndev = qdev->ndev;
   2112	int work_done = 0;
   2113
   2114	/* While there are entries in the completion queue. */
   2115	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
   2116		qdev->rsp_consumer_index) && (work_done < budget)) {
   2117
   2118		net_rsp = qdev->rsp_current;
   2119		rmb();
   2120		/*
    2121		 * Fix 3032 chip's undocumented "feature" where bit-8 is set
   2122		 * if the inbound completion is for a VLAN.
   2123		 */
   2124		if (qdev->device_id == QL3032_DEVICE_ID)
   2125			net_rsp->opcode &= 0x7f;
   2126		switch (net_rsp->opcode) {
   2127
   2128		case OPCODE_OB_MAC_IOCB_FN0:
   2129		case OPCODE_OB_MAC_IOCB_FN2:
   2130			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
   2131					       net_rsp);
   2132			break;
   2133
   2134		case OPCODE_IB_MAC_IOCB:
   2135		case OPCODE_IB_3032_MAC_IOCB:
   2136			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
   2137					       net_rsp);
   2138			work_done++;
   2139			break;
   2140
   2141		case OPCODE_IB_IP_IOCB:
   2142		case OPCODE_IB_3032_IP_IOCB:
   2143			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
   2144						 net_rsp);
   2145			work_done++;
   2146			break;
   2147		default: {
   2148			u32 *tmp = (u32 *)net_rsp;
   2149			netdev_err(ndev,
   2150				   "Hit default case, not handled!\n"
   2151				   "	dropping the packet, opcode = %x\n"
   2152				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
   2153				   net_rsp->opcode,
   2154				   (unsigned long int)tmp[0],
   2155				   (unsigned long int)tmp[1],
   2156				   (unsigned long int)tmp[2],
   2157				   (unsigned long int)tmp[3]);
   2158		}
   2159		}
   2160
   2161		qdev->rsp_consumer_index++;
   2162
   2163		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
   2164			qdev->rsp_consumer_index = 0;
   2165			qdev->rsp_current = qdev->rsp_q_virt_addr;
   2166		} else {
   2167			qdev->rsp_current++;
   2168		}
   2169
   2170	}
   2171
   2172	return work_done;
   2173}
   2174
   2175static int ql_poll(struct napi_struct *napi, int budget)
   2176{
   2177	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
   2178	struct ql3xxx_port_registers __iomem *port_regs =
   2179		qdev->mem_map_registers;
   2180	int work_done;
   2181
   2182	work_done = ql_tx_rx_clean(qdev, budget);
   2183
   2184	if (work_done < budget && napi_complete_done(napi, work_done)) {
   2185		unsigned long flags;
   2186
   2187		spin_lock_irqsave(&qdev->hw_lock, flags);
   2188		ql_update_small_bufq_prod_index(qdev);
   2189		ql_update_lrg_bufq_prod_index(qdev);
   2190		writel(qdev->rsp_consumer_index,
   2191			    &port_regs->CommonRegs.rspQConsumerIndex);
   2192		spin_unlock_irqrestore(&qdev->hw_lock, flags);
   2193
   2194		ql_enable_interrupts(qdev);
   2195	}
   2196	return work_done;
   2197}
   2198
   2199static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
   2200{
   2201
   2202	struct net_device *ndev = dev_id;
   2203	struct ql3_adapter *qdev = netdev_priv(ndev);
   2204	struct ql3xxx_port_registers __iomem *port_regs =
   2205		qdev->mem_map_registers;
   2206	u32 value;
   2207	int handled = 1;
   2208	u32 var;
   2209
   2210	value = ql_read_common_reg_l(qdev,
   2211				     &port_regs->CommonRegs.ispControlStatus);
   2212
   2213	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
   2214		spin_lock(&qdev->adapter_lock);
   2215		netif_stop_queue(qdev->ndev);
   2216		netif_carrier_off(qdev->ndev);
   2217		ql_disable_interrupts(qdev);
   2218		qdev->port_link_state = LS_DOWN;
    2219		set_bit(QL_RESET_ACTIVE, &qdev->flags);
   2220
   2221		if (value & ISP_CONTROL_FE) {
   2222			/*
   2223			 * Chip Fatal Error.
   2224			 */
   2225			var =
   2226			    ql_read_page0_reg_l(qdev,
   2227					      &port_regs->PortFatalErrStatus);
   2228			netdev_warn(ndev,
   2229				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
   2230				    var);
    2231			set_bit(QL_RESET_START, &qdev->flags);
   2232		} else {
   2233			/*
   2234			 * Soft Reset Requested.
   2235			 */
    2236			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
   2237			netdev_err(ndev,
   2238				   "Another function issued a reset to the chip. ISR value = %x\n",
   2239				   value);
   2240		}
   2241		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
   2242		spin_unlock(&qdev->adapter_lock);
   2243	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
   2244		ql_disable_interrupts(qdev);
   2245		if (likely(napi_schedule_prep(&qdev->napi)))
   2246			__napi_schedule(&qdev->napi);
   2247	} else
   2248		return IRQ_NONE;
   2249
   2250	return IRQ_RETVAL(handled);
   2251}
   2252
   2253/*
   2254 * Get the total number of segments needed for the given number of fragments.
   2255 * This is necessary because outbound address lists (OAL) will be used when
   2256 * more than two frags are given.  Each address list has 5 addr/len pairs.
    2257 * The 5th pair in each OAL is used to point to the next OAL if more frags
   2258 * are coming.  That is why the frags:segment count ratio is not linear.
   2259 */
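        /*
         * Worked example for the 3032 (the linear part of the skb always
         * takes the first segment):
         *   frags <= 2   ->  frags + 1 segments  (fits in the IOCB's ALPs)
         *   frags <= 6   ->  frags + 2           (one continuation entry)
         *   frags <= 10  ->  frags + 3           (two continuation entries)
         *   ...
         * Each extra segment is the 5th-pair continuation slot that chains
         * in another OAL.  The 3022 only does single-segment sends, so it
         * always needs exactly one segment.
         */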
   2260static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
   2261{
   2262	if (qdev->device_id == QL3022_DEVICE_ID)
   2263		return 1;
   2264
   2265	if (frags <= 2)
   2266		return frags + 1;
   2267	else if (frags <= 6)
   2268		return frags + 2;
   2269	else if (frags <= 10)
   2270		return frags + 3;
   2271	else if (frags <= 14)
   2272		return frags + 4;
   2273	else if (frags <= 18)
   2274		return frags + 5;
   2275	return -1;
   2276}
   2277
   2278static void ql_hw_csum_setup(const struct sk_buff *skb,
   2279			     struct ob_mac_iocb_req *mac_iocb_ptr)
   2280{
   2281	const struct iphdr *ip = ip_hdr(skb);
   2282
   2283	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
   2284	mac_iocb_ptr->ip_hdr_len = ip->ihl;
   2285
   2286	if (ip->protocol == IPPROTO_TCP) {
   2287		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
   2288			OB_3032MAC_IOCB_REQ_IC;
   2289	} else {
   2290		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
   2291			OB_3032MAC_IOCB_REQ_IC;
   2292	}
   2293
   2294}
   2295
   2296/*
   2297 * Map the buffers for this transmit.
   2298 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
   2299 */
   2300static int ql_send_map(struct ql3_adapter *qdev,
   2301				struct ob_mac_iocb_req *mac_iocb_ptr,
   2302				struct ql_tx_buf_cb *tx_cb,
   2303				struct sk_buff *skb)
   2304{
   2305	struct oal *oal;
   2306	struct oal_entry *oal_entry;
   2307	int len = skb_headlen(skb);
   2308	dma_addr_t map;
   2309	int err;
   2310	int completed_segs, i;
   2311	int seg_cnt, seg = 0;
   2312	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
   2313
   2314	seg_cnt = tx_cb->seg_count;
   2315	/*
   2316	 * Map the skb buffer first.
   2317	 */
   2318	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
   2319
   2320	err = dma_mapping_error(&qdev->pdev->dev, map);
   2321	if (err) {
   2322		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
   2323			   err);
   2324
   2325		return NETDEV_TX_BUSY;
   2326	}
   2327
   2328	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
   2329	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
   2330	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
   2331	oal_entry->len = cpu_to_le32(len);
   2332	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
   2333	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
   2334	seg++;
   2335
   2336	if (seg_cnt == 1) {
   2337		/* Terminate the last segment. */
   2338		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
   2339		return NETDEV_TX_OK;
   2340	}
   2341	oal = tx_cb->oal;
   2342	for (completed_segs = 0;
   2343	     completed_segs < frag_cnt;
   2344	     completed_segs++, seg++) {
   2345		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
   2346		oal_entry++;
   2347		/*
   2348		 * Check for continuation requirements.
   2349		 * It's strange but necessary.
   2350		 * Continuation entry points to outbound address list.
   2351		 */
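        		/*
        		 * seg 2 is the last ALP slot in the IOCB itself; segs 7,
        		 * 12 and 17 are the last slot of each successive OAL.
        		 * When more segments remain, that slot must carry a
        		 * pointer to the next OAL instead of frag data, hence
        		 * the extra mapping of the OAL below.
        		 */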
   2352		if ((seg == 2 && seg_cnt > 3) ||
   2353		    (seg == 7 && seg_cnt > 8) ||
   2354		    (seg == 12 && seg_cnt > 13) ||
   2355		    (seg == 17 && seg_cnt > 18)) {
   2356			map = dma_map_single(&qdev->pdev->dev, oal,
   2357					     sizeof(struct oal),
   2358					     DMA_TO_DEVICE);
   2359
   2360			err = dma_mapping_error(&qdev->pdev->dev, map);
   2361			if (err) {
   2362				netdev_err(qdev->ndev,
   2363					   "PCI mapping outbound address list with error: %d\n",
   2364					   err);
   2365				goto map_error;
   2366			}
   2367
   2368			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
   2369			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
   2370			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
   2371						     OAL_CONT_ENTRY);
   2372			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
   2373			dma_unmap_len_set(&tx_cb->map[seg], maplen,
   2374					  sizeof(struct oal));
   2375			oal_entry = (struct oal_entry *)oal;
   2376			oal++;
   2377			seg++;
   2378		}
   2379
   2380		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
   2381				       DMA_TO_DEVICE);
   2382
   2383		err = dma_mapping_error(&qdev->pdev->dev, map);
   2384		if (err) {
   2385			netdev_err(qdev->ndev,
   2386				   "PCI mapping frags failed with error: %d\n",
   2387				   err);
   2388			goto map_error;
   2389		}
   2390
   2391		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
   2392		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
   2393		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
   2394		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
   2395		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
    2396	}
   2397	/* Terminate the last segment. */
   2398	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
   2399	return NETDEV_TX_OK;
   2400
   2401map_error:
    2402	/* A PCI mapping failed, so we need to back out: traverse the OALs
    2403	 * and associated pages that have already been mapped and unmap them
    2404	 * to clean up properly.
    2405	 */
   2406
   2407	seg = 1;
   2408	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
   2409	oal = tx_cb->oal;
   2410	for (i = 0; i < completed_segs; i++, seg++) {
   2411		oal_entry++;
   2412
   2413		/*
   2414		 * Check for continuation requirements.
   2415		 * It's strange but necessary.
   2416		 */
   2417
   2418		if ((seg == 2 && seg_cnt > 3) ||
   2419		    (seg == 7 && seg_cnt > 8) ||
   2420		    (seg == 12 && seg_cnt > 13) ||
   2421		    (seg == 17 && seg_cnt > 18)) {
   2422			dma_unmap_single(&qdev->pdev->dev,
   2423					 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
   2424					 dma_unmap_len(&tx_cb->map[seg], maplen),
   2425					 DMA_TO_DEVICE);
   2426			oal++;
   2427			seg++;
   2428		}
   2429
   2430		dma_unmap_page(&qdev->pdev->dev,
   2431			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
   2432			       dma_unmap_len(&tx_cb->map[seg], maplen),
   2433			       DMA_TO_DEVICE);
   2434	}
   2435
   2436	dma_unmap_single(&qdev->pdev->dev,
   2437			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
    2438			 dma_unmap_len(&tx_cb->map[0], maplen),
   2439			 DMA_TO_DEVICE);
   2440
   2441	return NETDEV_TX_BUSY;
   2442
   2443}
   2444
   2445/*
   2446 * The difference between 3022 and 3032 sends:
   2447 * 3022 only supports a simple single segment transmission.
   2448 * 3032 supports checksumming and scatter/gather lists (fragments).
   2449 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
   2450 * in the IOCB plus a chain of outbound address lists (OAL) that
   2451 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
   2452 * will be used to point to an OAL when more ALP entries are required.
   2453 * The IOCB is always the top of the chain followed by one or more
   2454 * OALs (when necessary).
   2455 */
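        /*
         * Chain layout for a 3032 scatter/gather send, per the description
         * above:
         *
         *   IOCB:  [ALP0][ALP1][ALP2 -> OAL#1]
         *   OAL#1: [ALP][ALP][ALP][ALP][ALP -> OAL#2]
         *   OAL#2: [ALP][ALP][ALP][ALP][ALP -> ...]
         *
         * The last ALP of the IOCB or of an OAL only becomes a pointer when
         * further fragments remain; otherwise it carries data and the final
         * data entry is marked with OAL_LAST_ENTRY.
         */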
   2456static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
   2457			       struct net_device *ndev)
   2458{
   2459	struct ql3_adapter *qdev = netdev_priv(ndev);
   2460	struct ql3xxx_port_registers __iomem *port_regs =
   2461			qdev->mem_map_registers;
   2462	struct ql_tx_buf_cb *tx_cb;
   2463	u32 tot_len = skb->len;
   2464	struct ob_mac_iocb_req *mac_iocb_ptr;
   2465
   2466	if (unlikely(atomic_read(&qdev->tx_count) < 2))
   2467		return NETDEV_TX_BUSY;
   2468
   2469	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
   2470	tx_cb->seg_count = ql_get_seg_count(qdev,
   2471					     skb_shinfo(skb)->nr_frags);
   2472	if (tx_cb->seg_count == -1) {
   2473		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
   2474		return NETDEV_TX_OK;
   2475	}
   2476
   2477	mac_iocb_ptr = tx_cb->queue_entry;
   2478	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
   2479	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
   2480	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
   2481	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
   2482	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
   2483	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
   2484	tx_cb->skb = skb;
   2485	if (qdev->device_id == QL3032_DEVICE_ID &&
   2486	    skb->ip_summed == CHECKSUM_PARTIAL)
   2487		ql_hw_csum_setup(skb, mac_iocb_ptr);
   2488
   2489	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
   2490		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
   2491		return NETDEV_TX_BUSY;
   2492	}
   2493
   2494	wmb();
   2495	qdev->req_producer_index++;
   2496	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
   2497		qdev->req_producer_index = 0;
   2498	wmb();
   2499	ql_write_common_reg_l(qdev,
   2500			    &port_regs->CommonRegs.reqQProducerIndex,
   2501			    qdev->req_producer_index);
   2502
   2503	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
   2504		     "tx queued, slot %d, len %d\n",
   2505		     qdev->req_producer_index, skb->len);
   2506
   2507	atomic_dec(&qdev->tx_count);
   2508	return NETDEV_TX_OK;
   2509}
   2510
   2511static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
   2512{
   2513	qdev->req_q_size =
   2514	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
   2515
   2516	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
   2517
    2518	/* The barrier is required to ensure the request and response queue
    2519	 * address writes reach the registers.
    2520	 */
   2521	wmb();
   2522
   2523	qdev->req_q_virt_addr =
   2524	    dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
   2525			       &qdev->req_q_phy_addr, GFP_KERNEL);
   2526
   2527	if ((qdev->req_q_virt_addr == NULL) ||
   2528	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
   2529		netdev_err(qdev->ndev, "reqQ failed\n");
   2530		return -ENOMEM;
   2531	}
   2532
   2533	qdev->rsp_q_virt_addr =
   2534	    dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
   2535			       &qdev->rsp_q_phy_addr, GFP_KERNEL);
   2536
   2537	if ((qdev->rsp_q_virt_addr == NULL) ||
   2538	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
   2539		netdev_err(qdev->ndev, "rspQ allocation failed\n");
   2540		dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
   2541				  qdev->req_q_virt_addr, qdev->req_q_phy_addr);
   2542		return -ENOMEM;
   2543	}
   2544
   2545	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
   2546
   2547	return 0;
   2548}
   2549
   2550static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
   2551{
   2552	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
   2553		netdev_info(qdev->ndev, "Already done\n");
   2554		return;
   2555	}
   2556
   2557	dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
   2558			  qdev->req_q_virt_addr, qdev->req_q_phy_addr);
   2559
   2560	qdev->req_q_virt_addr = NULL;
   2561
   2562	dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
   2563			  qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
   2564
   2565	qdev->rsp_q_virt_addr = NULL;
   2566
   2567	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
   2568}
   2569
   2570static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
   2571{
   2572	/* Create Large Buffer Queue */
   2573	qdev->lrg_buf_q_size =
   2574		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
   2575	if (qdev->lrg_buf_q_size < PAGE_SIZE)
   2576		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
   2577	else
   2578		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
   2579
   2580	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
   2581				      sizeof(struct ql_rcv_buf_cb),
   2582				      GFP_KERNEL);
   2583	if (qdev->lrg_buf == NULL)
   2584		return -ENOMEM;
   2585
   2586	qdev->lrg_buf_q_alloc_virt_addr =
   2587		dma_alloc_coherent(&qdev->pdev->dev,
   2588				   qdev->lrg_buf_q_alloc_size,
   2589				   &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
   2590
   2591	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
   2592		netdev_err(qdev->ndev, "lBufQ failed\n");
   2593		return -ENOMEM;
   2594	}
   2595	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
   2596	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
   2597
   2598	/* Create Small Buffer Queue */
   2599	qdev->small_buf_q_size =
   2600		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
   2601	if (qdev->small_buf_q_size < PAGE_SIZE)
   2602		qdev->small_buf_q_alloc_size = PAGE_SIZE;
   2603	else
   2604		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
   2605
   2606	qdev->small_buf_q_alloc_virt_addr =
   2607		dma_alloc_coherent(&qdev->pdev->dev,
   2608				   qdev->small_buf_q_alloc_size,
   2609				   &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
   2610
   2611	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
   2612		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
   2613		dma_free_coherent(&qdev->pdev->dev,
   2614				  qdev->lrg_buf_q_alloc_size,
   2615				  qdev->lrg_buf_q_alloc_virt_addr,
   2616				  qdev->lrg_buf_q_alloc_phy_addr);
   2617		return -ENOMEM;
   2618	}
   2619
   2620	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
   2621	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
   2622	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
   2623	return 0;
   2624}
   2625
   2626static void ql_free_buffer_queues(struct ql3_adapter *qdev)
   2627{
   2628	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
   2629		netdev_info(qdev->ndev, "Already done\n");
   2630		return;
   2631	}
   2632	kfree(qdev->lrg_buf);
   2633	dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
   2634			  qdev->lrg_buf_q_alloc_virt_addr,
   2635			  qdev->lrg_buf_q_alloc_phy_addr);
   2636
   2637	qdev->lrg_buf_q_virt_addr = NULL;
   2638
   2639	dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
   2640			  qdev->small_buf_q_alloc_virt_addr,
   2641			  qdev->small_buf_q_alloc_phy_addr);
   2642
   2643	qdev->small_buf_q_virt_addr = NULL;
   2644
   2645	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
   2646}
   2647
   2648static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
   2649{
   2650	int i;
   2651	struct bufq_addr_element *small_buf_q_entry;
   2652
    2653	/* Currently we allocate one chunk of memory and use it for the small buffers */
   2654	qdev->small_buf_total_size =
   2655		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
   2656		 QL_SMALL_BUFFER_SIZE);
   2657
   2658	qdev->small_buf_virt_addr =
   2659		dma_alloc_coherent(&qdev->pdev->dev,
   2660				   qdev->small_buf_total_size,
   2661				   &qdev->small_buf_phy_addr, GFP_KERNEL);
   2662
   2663	if (qdev->small_buf_virt_addr == NULL) {
   2664		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
   2665		return -ENOMEM;
   2666	}
   2667
   2668	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
   2669	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
   2670
   2671	small_buf_q_entry = qdev->small_buf_q_virt_addr;
   2672
   2673	/* Initialize the small buffer queue. */
   2674	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
   2675		small_buf_q_entry->addr_high =
   2676		    cpu_to_le32(qdev->small_buf_phy_addr_high);
   2677		small_buf_q_entry->addr_low =
   2678		    cpu_to_le32(qdev->small_buf_phy_addr_low +
   2679				(i * QL_SMALL_BUFFER_SIZE));
   2680		small_buf_q_entry++;
   2681	}
   2682	qdev->small_buf_index = 0;
   2683	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
   2684	return 0;
   2685}
   2686
   2687static void ql_free_small_buffers(struct ql3_adapter *qdev)
   2688{
   2689	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
   2690		netdev_info(qdev->ndev, "Already done\n");
   2691		return;
   2692	}
   2693	if (qdev->small_buf_virt_addr != NULL) {
   2694		dma_free_coherent(&qdev->pdev->dev,
   2695				  qdev->small_buf_total_size,
   2696				  qdev->small_buf_virt_addr,
   2697				  qdev->small_buf_phy_addr);
   2698
   2699		qdev->small_buf_virt_addr = NULL;
   2700	}
   2701}
   2702
   2703static void ql_free_large_buffers(struct ql3_adapter *qdev)
   2704{
   2705	int i = 0;
   2706	struct ql_rcv_buf_cb *lrg_buf_cb;
   2707
   2708	for (i = 0; i < qdev->num_large_buffers; i++) {
   2709		lrg_buf_cb = &qdev->lrg_buf[i];
   2710		if (lrg_buf_cb->skb) {
   2711			dev_kfree_skb(lrg_buf_cb->skb);
   2712			dma_unmap_single(&qdev->pdev->dev,
   2713					 dma_unmap_addr(lrg_buf_cb, mapaddr),
   2714					 dma_unmap_len(lrg_buf_cb, maplen),
   2715					 DMA_FROM_DEVICE);
   2716			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
   2717		} else {
   2718			break;
   2719		}
   2720	}
   2721}
   2722
   2723static void ql_init_large_buffers(struct ql3_adapter *qdev)
   2724{
   2725	int i;
   2726	struct ql_rcv_buf_cb *lrg_buf_cb;
   2727	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
   2728
   2729	for (i = 0; i < qdev->num_large_buffers; i++) {
   2730		lrg_buf_cb = &qdev->lrg_buf[i];
   2731		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
   2732		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
   2733		buf_addr_ele++;
   2734	}
   2735	qdev->lrg_buf_index = 0;
   2736	qdev->lrg_buf_skb_check = 0;
   2737}
   2738
   2739static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
   2740{
   2741	int i;
   2742	struct ql_rcv_buf_cb *lrg_buf_cb;
   2743	struct sk_buff *skb;
   2744	dma_addr_t map;
   2745	int err;
   2746
   2747	for (i = 0; i < qdev->num_large_buffers; i++) {
   2748		lrg_buf_cb = &qdev->lrg_buf[i];
   2749		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
   2750
   2751		skb = netdev_alloc_skb(qdev->ndev,
   2752				       qdev->lrg_buffer_len);
   2753		if (unlikely(!skb)) {
   2754			/* Better luck next round */
   2755			netdev_err(qdev->ndev,
   2756				   "large buff alloc failed for %d bytes at index %d\n",
   2757				   qdev->lrg_buffer_len * 2, i);
   2758			ql_free_large_buffers(qdev);
   2759			return -ENOMEM;
   2760		} else {
   2761			lrg_buf_cb->index = i;
   2762			/*
   2763			 * We save some space to copy the ethhdr from first
   2764			 * buffer
   2765			 */
   2766			skb_reserve(skb, QL_HEADER_SPACE);
   2767			map = dma_map_single(&qdev->pdev->dev, skb->data,
   2768					     qdev->lrg_buffer_len - QL_HEADER_SPACE,
   2769					     DMA_FROM_DEVICE);
   2770
   2771			err = dma_mapping_error(&qdev->pdev->dev, map);
   2772			if (err) {
   2773				netdev_err(qdev->ndev,
   2774					   "PCI mapping failed with error: %d\n",
   2775					   err);
   2776				dev_kfree_skb_irq(skb);
   2777				ql_free_large_buffers(qdev);
   2778				return -ENOMEM;
   2779			}
   2780
   2781			lrg_buf_cb->skb = skb;
   2782			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
   2783			dma_unmap_len_set(lrg_buf_cb, maplen,
   2784					  qdev->lrg_buffer_len -
   2785					  QL_HEADER_SPACE);
   2786			lrg_buf_cb->buf_phy_addr_low =
   2787			    cpu_to_le32(LS_64BITS(map));
   2788			lrg_buf_cb->buf_phy_addr_high =
   2789			    cpu_to_le32(MS_64BITS(map));
   2790		}
   2791	}
   2792	return 0;
   2793}
   2794
   2795static void ql_free_send_free_list(struct ql3_adapter *qdev)
   2796{
   2797	struct ql_tx_buf_cb *tx_cb;
   2798	int i;
   2799
   2800	tx_cb = &qdev->tx_buf[0];
   2801	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
   2802		kfree(tx_cb->oal);
   2803		tx_cb->oal = NULL;
   2804		tx_cb++;
   2805	}
   2806}
   2807
   2808static int ql_create_send_free_list(struct ql3_adapter *qdev)
   2809{
   2810	struct ql_tx_buf_cb *tx_cb;
   2811	int i;
   2812	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
   2813
   2814	/* Create free list of transmit buffers */
   2815	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
   2816
   2817		tx_cb = &qdev->tx_buf[i];
   2818		tx_cb->skb = NULL;
   2819		tx_cb->queue_entry = req_q_curr;
   2820		req_q_curr++;
   2821		tx_cb->oal = kmalloc(512, GFP_KERNEL);
   2822		if (tx_cb->oal == NULL)
   2823			return -ENOMEM;
   2824	}
   2825	return 0;
   2826}
   2827
   2828static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
   2829{
   2830	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
   2831		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
   2832		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
   2833	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
   2834		/*
   2835		 * Bigger buffers, so less of them.
   2836		 */
   2837		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
   2838		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
   2839	} else {
   2840		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
   2841			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
   2842		return -ENOMEM;
   2843	}
   2844	qdev->num_large_buffers =
   2845		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
   2846	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
   2847	qdev->max_frame_size =
   2848		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
   2849
   2850	/*
   2851	 * First allocate a page of shared memory and use it for shadow
   2852	 * locations of Network Request Queue Consumer Address Register and
   2853	 * Network Completion Queue Producer Index Register
   2854	 */
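        	/*
        	 * Layout of the shadow page, as set up below: the request queue
        	 * consumer index shadow occupies the first 8 bytes and the
        	 * response queue producer index shadow the next 8, so both fit
        	 * in the single coherent page allocation that follows.
        	 */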
   2855	qdev->shadow_reg_virt_addr =
   2856		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
   2857				   &qdev->shadow_reg_phy_addr, GFP_KERNEL);
   2858
   2859	if (qdev->shadow_reg_virt_addr != NULL) {
   2860		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
   2861		qdev->req_consumer_index_phy_addr_high =
   2862			MS_64BITS(qdev->shadow_reg_phy_addr);
   2863		qdev->req_consumer_index_phy_addr_low =
   2864			LS_64BITS(qdev->shadow_reg_phy_addr);
   2865
   2866		qdev->prsp_producer_index =
   2867			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
   2868		qdev->rsp_producer_index_phy_addr_high =
   2869			qdev->req_consumer_index_phy_addr_high;
   2870		qdev->rsp_producer_index_phy_addr_low =
   2871			qdev->req_consumer_index_phy_addr_low + 8;
   2872	} else {
   2873		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
   2874		return -ENOMEM;
   2875	}
   2876
   2877	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
   2878		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
   2879		goto err_req_rsp;
   2880	}
   2881
   2882	if (ql_alloc_buffer_queues(qdev) != 0) {
   2883		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
   2884		goto err_buffer_queues;
   2885	}
   2886
   2887	if (ql_alloc_small_buffers(qdev) != 0) {
   2888		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
   2889		goto err_small_buffers;
   2890	}
   2891
   2892	if (ql_alloc_large_buffers(qdev) != 0) {
   2893		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
   2894		goto err_small_buffers;
   2895	}
   2896
   2897	/* Initialize the large buffer queue. */
   2898	ql_init_large_buffers(qdev);
   2899	if (ql_create_send_free_list(qdev))
   2900		goto err_free_list;
   2901
   2902	qdev->rsp_current = qdev->rsp_q_virt_addr;
   2903
   2904	return 0;
   2905err_free_list:
   2906	ql_free_send_free_list(qdev);
   2907err_small_buffers:
   2908	ql_free_buffer_queues(qdev);
   2909err_buffer_queues:
   2910	ql_free_net_req_rsp_queues(qdev);
   2911err_req_rsp:
   2912	dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
   2913			  qdev->shadow_reg_virt_addr,
   2914			  qdev->shadow_reg_phy_addr);
   2915
   2916	return -ENOMEM;
   2917}
   2918
   2919static void ql_free_mem_resources(struct ql3_adapter *qdev)
   2920{
   2921	ql_free_send_free_list(qdev);
   2922	ql_free_large_buffers(qdev);
   2923	ql_free_small_buffers(qdev);
   2924	ql_free_buffer_queues(qdev);
   2925	ql_free_net_req_rsp_queues(qdev);
   2926	if (qdev->shadow_reg_virt_addr != NULL) {
   2927		dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
   2928				  qdev->shadow_reg_virt_addr,
   2929				  qdev->shadow_reg_phy_addr);
   2930		qdev->shadow_reg_virt_addr = NULL;
   2931	}
   2932}
   2933
   2934static int ql_init_misc_registers(struct ql3_adapter *qdev)
   2935{
   2936	struct ql3xxx_local_ram_registers __iomem *local_ram =
   2937	    (void __iomem *)qdev->mem_map_registers;
   2938
   2939	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
   2940			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
   2941			 2) << 4))
   2942		return -1;
   2943
   2944	ql_write_page2_reg(qdev,
   2945			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
   2946
   2947	ql_write_page2_reg(qdev,
   2948			   &local_ram->maxBufletCount,
   2949			   qdev->nvram_data.bufletCount);
   2950
   2951	ql_write_page2_reg(qdev,
   2952			   &local_ram->freeBufletThresholdLow,
   2953			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
   2954			   (qdev->nvram_data.tcpWindowThreshold0));
   2955
   2956	ql_write_page2_reg(qdev,
   2957			   &local_ram->freeBufletThresholdHigh,
   2958			   qdev->nvram_data.tcpWindowThreshold50);
   2959
   2960	ql_write_page2_reg(qdev,
   2961			   &local_ram->ipHashTableBase,
   2962			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
   2963			   qdev->nvram_data.ipHashTableBaseLo);
   2964	ql_write_page2_reg(qdev,
   2965			   &local_ram->ipHashTableCount,
   2966			   qdev->nvram_data.ipHashTableSize);
   2967	ql_write_page2_reg(qdev,
   2968			   &local_ram->tcpHashTableBase,
   2969			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
   2970			   qdev->nvram_data.tcpHashTableBaseLo);
   2971	ql_write_page2_reg(qdev,
   2972			   &local_ram->tcpHashTableCount,
   2973			   qdev->nvram_data.tcpHashTableSize);
   2974	ql_write_page2_reg(qdev,
   2975			   &local_ram->ncbBase,
   2976			   (qdev->nvram_data.ncbTableBaseHi << 16) |
   2977			   qdev->nvram_data.ncbTableBaseLo);
   2978	ql_write_page2_reg(qdev,
   2979			   &local_ram->maxNcbCount,
   2980			   qdev->nvram_data.ncbTableSize);
   2981	ql_write_page2_reg(qdev,
   2982			   &local_ram->drbBase,
   2983			   (qdev->nvram_data.drbTableBaseHi << 16) |
   2984			   qdev->nvram_data.drbTableBaseLo);
   2985	ql_write_page2_reg(qdev,
   2986			   &local_ram->maxDrbCount,
   2987			   qdev->nvram_data.drbTableSize);
   2988	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
   2989	return 0;
   2990}
   2991
   2992static int ql_adapter_initialize(struct ql3_adapter *qdev)
   2993{
   2994	u32 value;
   2995	struct ql3xxx_port_registers __iomem *port_regs =
   2996		qdev->mem_map_registers;
   2997	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
   2998	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
   2999		(void __iomem *)port_regs;
   3000	u32 delay = 10;
   3001	int status = 0;
   3002
   3003	if (ql_mii_setup(qdev))
   3004		return -1;
   3005
    3006	/* Bring the PHY out of reset */
   3007	ql_write_common_reg(qdev, spir,
   3008			    (ISP_SERIAL_PORT_IF_WE |
   3009			     (ISP_SERIAL_PORT_IF_WE << 16)));
   3010	/* Give the PHY time to come out of reset. */
   3011	mdelay(100);
   3012	qdev->port_link_state = LS_DOWN;
   3013	netif_carrier_off(qdev->ndev);
   3014
   3015	/* V2 chip fix for ARS-39168. */
   3016	ql_write_common_reg(qdev, spir,
   3017			    (ISP_SERIAL_PORT_IF_SDE |
   3018			     (ISP_SERIAL_PORT_IF_SDE << 16)));
   3019
   3020	/* Request Queue Registers */
   3021	*((u32 *)(qdev->preq_consumer_index)) = 0;
   3022	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
   3023	qdev->req_producer_index = 0;
   3024
   3025	ql_write_page1_reg(qdev,
   3026			   &hmem_regs->reqConsumerIndexAddrHigh,
   3027			   qdev->req_consumer_index_phy_addr_high);
   3028	ql_write_page1_reg(qdev,
   3029			   &hmem_regs->reqConsumerIndexAddrLow,
   3030			   qdev->req_consumer_index_phy_addr_low);
   3031
   3032	ql_write_page1_reg(qdev,
   3033			   &hmem_regs->reqBaseAddrHigh,
   3034			   MS_64BITS(qdev->req_q_phy_addr));
   3035	ql_write_page1_reg(qdev,
   3036			   &hmem_regs->reqBaseAddrLow,
   3037			   LS_64BITS(qdev->req_q_phy_addr));
   3038	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
   3039
   3040	/* Response Queue Registers */
   3041	*((__le16 *) (qdev->prsp_producer_index)) = 0;
   3042	qdev->rsp_consumer_index = 0;
   3043	qdev->rsp_current = qdev->rsp_q_virt_addr;
   3044
   3045	ql_write_page1_reg(qdev,
   3046			   &hmem_regs->rspProducerIndexAddrHigh,
   3047			   qdev->rsp_producer_index_phy_addr_high);
   3048
   3049	ql_write_page1_reg(qdev,
   3050			   &hmem_regs->rspProducerIndexAddrLow,
   3051			   qdev->rsp_producer_index_phy_addr_low);
   3052
   3053	ql_write_page1_reg(qdev,
   3054			   &hmem_regs->rspBaseAddrHigh,
   3055			   MS_64BITS(qdev->rsp_q_phy_addr));
   3056
   3057	ql_write_page1_reg(qdev,
   3058			   &hmem_regs->rspBaseAddrLow,
   3059			   LS_64BITS(qdev->rsp_q_phy_addr));
   3060
   3061	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
   3062
   3063	/* Large Buffer Queue */
   3064	ql_write_page1_reg(qdev,
   3065			   &hmem_regs->rxLargeQBaseAddrHigh,
   3066			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
   3067
   3068	ql_write_page1_reg(qdev,
   3069			   &hmem_regs->rxLargeQBaseAddrLow,
   3070			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
   3071
   3072	ql_write_page1_reg(qdev,
   3073			   &hmem_regs->rxLargeQLength,
   3074			   qdev->num_lbufq_entries);
   3075
   3076	ql_write_page1_reg(qdev,
   3077			   &hmem_regs->rxLargeBufferLength,
   3078			   qdev->lrg_buffer_len);
   3079
   3080	/* Small Buffer Queue */
   3081	ql_write_page1_reg(qdev,
   3082			   &hmem_regs->rxSmallQBaseAddrHigh,
   3083			   MS_64BITS(qdev->small_buf_q_phy_addr));
   3084
   3085	ql_write_page1_reg(qdev,
   3086			   &hmem_regs->rxSmallQBaseAddrLow,
   3087			   LS_64BITS(qdev->small_buf_q_phy_addr));
   3088
   3089	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
   3090	ql_write_page1_reg(qdev,
   3091			   &hmem_regs->rxSmallBufferLength,
   3092			   QL_SMALL_BUFFER_SIZE);
   3093
   3094	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
   3095	qdev->small_buf_release_cnt = 8;
   3096	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
   3097	qdev->lrg_buf_release_cnt = 8;
   3098	qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
   3099	qdev->small_buf_index = 0;
   3100	qdev->lrg_buf_index = 0;
   3101	qdev->lrg_buf_free_count = 0;
   3102	qdev->lrg_buf_free_head = NULL;
   3103	qdev->lrg_buf_free_tail = NULL;
   3104
   3105	ql_write_common_reg(qdev,
   3106			    &port_regs->CommonRegs.
   3107			    rxSmallQProducerIndex,
   3108			    qdev->small_buf_q_producer_index);
   3109	ql_write_common_reg(qdev,
   3110			    &port_regs->CommonRegs.
   3111			    rxLargeQProducerIndex,
   3112			    qdev->lrg_buf_q_producer_index);
   3113
   3114	/*
   3115	 * Find out if the chip has already been initialized.  If it has, then
   3116	 * we skip some of the initialization.
   3117	 */
   3118	clear_bit(QL_LINK_MASTER, &qdev->flags);
   3119	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
   3120	if ((value & PORT_STATUS_IC) == 0) {
   3121
   3122		/* Chip has not been configured yet, so let it rip. */
   3123		if (ql_init_misc_registers(qdev)) {
   3124			status = -1;
   3125			goto out;
   3126		}
   3127
   3128		value = qdev->nvram_data.tcpMaxWindowSize;
   3129		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
   3130
   3131		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
   3132
   3133		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
   3134				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
   3135				 * 2) << 13)) {
   3136			status = -1;
   3137			goto out;
   3138		}
   3139		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
   3140		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
   3141				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
   3142				     16) | (INTERNAL_CHIP_SD |
   3143					    INTERNAL_CHIP_WE)));
   3144		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
   3145	}
   3146
   3147	if (qdev->mac_index)
   3148		ql_write_page0_reg(qdev,
   3149				   &port_regs->mac1MaxFrameLengthReg,
   3150				   qdev->max_frame_size);
   3151	else
   3152		ql_write_page0_reg(qdev,
   3153					   &port_regs->mac0MaxFrameLengthReg,
   3154					   qdev->max_frame_size);
   3155
   3156	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
   3157			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
   3158			 2) << 7)) {
   3159		status = -1;
   3160		goto out;
   3161	}
   3162
   3163	PHY_Setup(qdev);
   3164	ql_init_scan_mode(qdev);
   3165	ql_get_phy_owner(qdev);
   3166
   3167	/* Load the MAC Configuration */
   3168
   3169	/* Program lower 32 bits of the MAC address */
   3170	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
   3171			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
   3172	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
   3173			   ((qdev->ndev->dev_addr[2] << 24)
   3174			    | (qdev->ndev->dev_addr[3] << 16)
   3175			    | (qdev->ndev->dev_addr[4] << 8)
   3176			    | qdev->ndev->dev_addr[5]));
   3177
   3178	/* Program top 16 bits of the MAC address */
   3179	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
   3180			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
   3181	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
   3182			   ((qdev->ndev->dev_addr[0] << 8)
   3183			    | qdev->ndev->dev_addr[1]));
   3184
   3185	/* Enable Primary MAC */
   3186	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
   3187			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
   3188			    MAC_ADDR_INDIRECT_PTR_REG_PE));
   3189
   3190	/* Clear Primary and Secondary IP addresses */
   3191	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
   3192			   ((IP_ADDR_INDEX_REG_MASK << 16) |
   3193			    (qdev->mac_index << 2)));
   3194	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
   3195
   3196	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
   3197			   ((IP_ADDR_INDEX_REG_MASK << 16) |
   3198			    ((qdev->mac_index << 2) + 1)));
   3199	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
   3200
   3201	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
   3202
   3203	/* Indicate Configuration Complete */
   3204	ql_write_page0_reg(qdev,
   3205			   &port_regs->portControl,
   3206			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
   3207
   3208	do {
   3209		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
   3210		if (value & PORT_STATUS_IC)
   3211			break;
   3212		spin_unlock_irq(&qdev->hw_lock);
   3213		msleep(500);
   3214		spin_lock_irq(&qdev->hw_lock);
   3215	} while (--delay);
   3216
   3217	if (delay == 0) {
   3218		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
   3219		status = -1;
   3220		goto out;
   3221	}
   3222
   3223	/* Enable Ethernet Function */
   3224	if (qdev->device_id == QL3032_DEVICE_ID) {
   3225		value =
   3226		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
   3227		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
   3228			QL3032_PORT_CONTROL_ET);
   3229		ql_write_page0_reg(qdev, &port_regs->functionControl,
   3230				   ((value << 16) | value));
   3231	} else {
   3232		value =
   3233		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
   3234		     PORT_CONTROL_HH);
   3235		ql_write_page0_reg(qdev, &port_regs->portControl,
   3236				   ((value << 16) | value));
   3237	}
   3238
   3239
   3240out:
   3241	return status;
   3242}
   3243
   3244/*
   3245 * Caller holds hw_lock.
   3246 */
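        /*
         * Reset sequence: issue ISP_CONTROL_SR and poll for up to 5 seconds
         * for the chip to clear it, clear the Network Reset Interrupt (RI)
         * bit if it was latched, and fall back to a Force Soft Reset (FSR)
         * with another 5 second poll if the soft reset never completes.
         * Returns non-zero on timeout.
         */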
   3247static int ql_adapter_reset(struct ql3_adapter *qdev)
   3248{
   3249	struct ql3xxx_port_registers __iomem *port_regs =
   3250		qdev->mem_map_registers;
   3251	int status = 0;
   3252	u16 value;
   3253	int max_wait_time;
   3254
   3255	set_bit(QL_RESET_ACTIVE, &qdev->flags);
   3256	clear_bit(QL_RESET_DONE, &qdev->flags);
   3257
   3258	/*
   3259	 * Issue soft reset to chip.
   3260	 */
   3261	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
   3262	ql_write_common_reg(qdev,
   3263			    &port_regs->CommonRegs.ispControlStatus,
   3264			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
   3265
    3266	/* Wait up to 5 seconds for reset to complete. */
    3267	netdev_printk(KERN_DEBUG, qdev->ndev,
    3268		      "Wait up to 5 seconds for reset to complete\n");
   3269
   3270	/* Wait until the firmware tells us the Soft Reset is done */
   3271	max_wait_time = 5;
   3272	do {
   3273		value =
   3274		    ql_read_common_reg(qdev,
   3275				       &port_regs->CommonRegs.ispControlStatus);
   3276		if ((value & ISP_CONTROL_SR) == 0)
   3277			break;
   3278
   3279		mdelay(1000);
   3280	} while ((--max_wait_time));
   3281
   3282	/*
   3283	 * Also, make sure that the Network Reset Interrupt bit has been
   3284	 * cleared after the soft reset has taken place.
   3285	 */
   3286	value =
   3287	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
   3288	if (value & ISP_CONTROL_RI) {
   3289		netdev_printk(KERN_DEBUG, qdev->ndev,
   3290			      "clearing RI after reset\n");
   3291		ql_write_common_reg(qdev,
   3292				    &port_regs->CommonRegs.
   3293				    ispControlStatus,
   3294				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
   3295	}
   3296
   3297	if (max_wait_time == 0) {
   3298		/* Issue Force Soft Reset */
   3299		ql_write_common_reg(qdev,
   3300				    &port_regs->CommonRegs.
   3301				    ispControlStatus,
   3302				    ((ISP_CONTROL_FSR << 16) |
   3303				     ISP_CONTROL_FSR));
   3304		/*
   3305		 * Wait until the firmware tells us the Force Soft Reset is
   3306		 * done
   3307		 */
   3308		max_wait_time = 5;
   3309		do {
   3310			value = ql_read_common_reg(qdev,
   3311						   &port_regs->CommonRegs.
   3312						   ispControlStatus);
   3313			if ((value & ISP_CONTROL_FSR) == 0)
   3314				break;
   3315			mdelay(1000);
   3316		} while ((--max_wait_time));
   3317	}
   3318	if (max_wait_time == 0)
   3319		status = 1;
   3320
   3321	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
   3322	set_bit(QL_RESET_DONE, &qdev->flags);
   3323	return status;
   3324}
   3325
   3326static void ql_set_mac_info(struct ql3_adapter *qdev)
   3327{
   3328	struct ql3xxx_port_registers __iomem *port_regs =
   3329		qdev->mem_map_registers;
   3330	u32 value, port_status;
   3331	u8 func_number;
   3332
   3333	/* Get the function number */
   3334	value =
   3335	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
   3336	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
   3337	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
   3338	switch (value & ISP_CONTROL_FN_MASK) {
   3339	case ISP_CONTROL_FN0_NET:
   3340		qdev->mac_index = 0;
   3341		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
   3342		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
   3343		qdev->PHYAddr = PORT0_PHY_ADDRESS;
   3344		if (port_status & PORT_STATUS_SM0)
   3345			set_bit(QL_LINK_OPTICAL, &qdev->flags);
   3346		else
   3347			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
   3348		break;
   3349
   3350	case ISP_CONTROL_FN1_NET:
   3351		qdev->mac_index = 1;
   3352		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
   3353		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
   3354		qdev->PHYAddr = PORT1_PHY_ADDRESS;
   3355		if (port_status & PORT_STATUS_SM1)
   3356			set_bit(QL_LINK_OPTICAL, &qdev->flags);
   3357		else
   3358			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
   3359		break;
   3360
   3361	case ISP_CONTROL_FN0_SCSI:
   3362	case ISP_CONTROL_FN1_SCSI:
   3363	default:
   3364		netdev_printk(KERN_DEBUG, qdev->ndev,
   3365			      "Invalid function number, ispControlStatus = 0x%x\n",
   3366			      value);
   3367		break;
   3368	}
   3369	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
   3370}
   3371
   3372static void ql_display_dev_info(struct net_device *ndev)
   3373{
   3374	struct ql3_adapter *qdev = netdev_priv(ndev);
   3375	struct pci_dev *pdev = qdev->pdev;
   3376
   3377	netdev_info(ndev,
   3378		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
   3379		    DRV_NAME, qdev->index, qdev->chip_rev_id,
   3380		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
   3381		    qdev->pci_slot);
   3382	netdev_info(ndev, "%s Interface\n",
   3383		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
   3384
   3385	/*
   3386	 * Print PCI bus width/type.
   3387	 */
   3388	netdev_info(ndev, "Bus interface is %s %s\n",
   3389		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
   3390		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
   3391
    3392	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
   3393		    qdev->mem_map_registers);
   3394	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
   3395
   3396	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
   3397}
   3398
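        /*
         * Bring the interface down: stop the queue and carrier, mask and free
         * the interrupt (disabling MSI if it was enabled), stop the adapter
         * timer and NAPI, optionally soft-reset the chip under the driver
         * lock, then release the queue/DMA resources.
         */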
   3399static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
   3400{
   3401	struct net_device *ndev = qdev->ndev;
   3402	int retval = 0;
   3403
   3404	netif_stop_queue(ndev);
   3405	netif_carrier_off(ndev);
   3406
   3407	clear_bit(QL_ADAPTER_UP, &qdev->flags);
   3408	clear_bit(QL_LINK_MASTER, &qdev->flags);
   3409
   3410	ql_disable_interrupts(qdev);
   3411
   3412	free_irq(qdev->pdev->irq, ndev);
   3413
   3414	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
   3415		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
   3416		clear_bit(QL_MSI_ENABLED, &qdev->flags);
   3417		pci_disable_msi(qdev->pdev);
   3418	}
   3419
   3420	del_timer_sync(&qdev->adapter_timer);
   3421
   3422	napi_disable(&qdev->napi);
   3423
   3424	if (do_reset) {
   3425		int soft_reset;
   3426		unsigned long hw_flags;
   3427
   3428		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   3429		if (ql_wait_for_drvr_lock(qdev)) {
   3430			soft_reset = ql_adapter_reset(qdev);
   3431			if (soft_reset) {
   3432				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
   3433					   qdev->index);
   3434			}
   3435			netdev_err(ndev,
   3436				   "Releasing driver lock via chip reset\n");
   3437		} else {
   3438			netdev_err(ndev,
   3439				   "Could not acquire driver lock to do reset!\n");
   3440			retval = -1;
   3441		}
   3442		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3443	}
   3444	ql_free_mem_resources(qdev);
   3445	return retval;
   3446}
   3447
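        /*
         * Bring the interface up: allocate queue/DMA resources, enable MSI if
         * requested, request the interrupt, initialize the chip while holding
         * the driver semaphore, then start the adapter timer, NAPI and
         * interrupts.
         */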
   3448static int ql_adapter_up(struct ql3_adapter *qdev)
   3449{
   3450	struct net_device *ndev = qdev->ndev;
   3451	int err;
   3452	unsigned long irq_flags = IRQF_SHARED;
   3453	unsigned long hw_flags;
   3454
   3455	if (ql_alloc_mem_resources(qdev)) {
    3456		netdev_err(ndev, "Unable to allocate buffers\n");
   3457		return -ENOMEM;
   3458	}
   3459
   3460	if (qdev->msi) {
   3461		if (pci_enable_msi(qdev->pdev)) {
   3462			netdev_err(ndev,
   3463				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
   3464			qdev->msi = 0;
   3465		} else {
   3466			netdev_info(ndev, "MSI Enabled...\n");
   3467			set_bit(QL_MSI_ENABLED, &qdev->flags);
   3468			irq_flags &= ~IRQF_SHARED;
   3469		}
   3470	}
   3471
   3472	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
   3473			  irq_flags, ndev->name, ndev);
   3474	if (err) {
   3475		netdev_err(ndev,
   3476			   "Failed to reserve interrupt %d - already in use\n",
   3477			   qdev->pdev->irq);
   3478		goto err_irq;
   3479	}
   3480
   3481	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   3482
   3483	if (!ql_wait_for_drvr_lock(qdev)) {
   3484		netdev_err(ndev, "Could not acquire driver lock\n");
   3485		err = -ENODEV;
   3486		goto err_lock;
   3487	}
   3488
   3489	err = ql_adapter_initialize(qdev);
   3490	if (err) {
   3491		netdev_err(ndev, "Unable to initialize adapter\n");
   3492		goto err_init;
   3493	}
   3494	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
   3495
   3496	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3497
   3498	set_bit(QL_ADAPTER_UP, &qdev->flags);
   3499
   3500	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
   3501
   3502	napi_enable(&qdev->napi);
   3503	ql_enable_interrupts(qdev);
   3504	return 0;
   3505
   3506err_init:
   3507	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
   3508err_lock:
   3509	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3510	free_irq(qdev->pdev->irq, ndev);
   3511err_irq:
   3512	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
   3513		netdev_info(ndev, "calling pci_disable_msi()\n");
   3514		clear_bit(QL_MSI_ENABLED, &qdev->flags);
   3515		pci_disable_msi(qdev->pdev);
   3516	}
   3517	return err;
   3518}
   3519
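        /*
         * Restart the adapter (down, then up).  If either step fails the
         * device is closed under the RTNL lock.
         */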
   3520static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
   3521{
   3522	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
   3523		netdev_err(qdev->ndev,
   3524			   "Driver up/down cycle failed, closing device\n");
   3525		rtnl_lock();
   3526		dev_close(qdev->ndev);
   3527		rtnl_unlock();
   3528		return -1;
   3529	}
   3530	return 0;
   3531}
   3532
   3533static int ql3xxx_close(struct net_device *ndev)
   3534{
   3535	struct ql3_adapter *qdev = netdev_priv(ndev);
   3536
   3537	/*
   3538	 * Wait for device to recover from a reset.
   3539	 * (Rarely happens, but possible.)
   3540	 */
   3541	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
   3542		msleep(50);
   3543
   3544	ql_adapter_down(qdev, QL_DO_RESET);
   3545	return 0;
   3546}
   3547
   3548static int ql3xxx_open(struct net_device *ndev)
   3549{
   3550	struct ql3_adapter *qdev = netdev_priv(ndev);
   3551	return ql_adapter_up(qdev);
   3552}
   3553
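        /*
         * ndo_set_mac_address handler.  Only permitted while the interface is
         * down; programs the new address through the MAC address indirect
         * pointer/data registers under hw_lock.
         */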
   3554static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
   3555{
   3556	struct ql3_adapter *qdev = netdev_priv(ndev);
   3557	struct ql3xxx_port_registers __iomem *port_regs =
   3558			qdev->mem_map_registers;
   3559	struct sockaddr *addr = p;
   3560	unsigned long hw_flags;
   3561
   3562	if (netif_running(ndev))
   3563		return -EBUSY;
   3564
   3565	if (!is_valid_ether_addr(addr->sa_data))
   3566		return -EADDRNOTAVAIL;
   3567
   3568	eth_hw_addr_set(ndev, addr->sa_data);
   3569
   3570	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   3571	/* Program lower 32 bits of the MAC address */
   3572	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
   3573			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
   3574	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
    3575			   ((ndev->dev_addr[2] << 24) |
    3576			    (ndev->dev_addr[3] << 16) |
    3577			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
   3578
   3579	/* Program top 16 bits of the MAC address */
   3580	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
   3581			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
   3582	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
   3583			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
   3584	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3585
   3586	return 0;
   3587}
   3588
   3589static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
   3590{
   3591	struct ql3_adapter *qdev = netdev_priv(ndev);
   3592
   3593	netdev_err(ndev, "Resetting...\n");
   3594	/*
   3595	 * Stop the queues, we've got a problem.
   3596	 */
   3597	netif_stop_queue(ndev);
   3598
   3599	/*
   3600	 * Wake up the worker to process this event.
   3601	 */
   3602	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
   3603}
   3604
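        /*
         * Reset worker: free any TX buffers still mapped, clear the reset
         * indication, wait for the soft reset to complete, then cycle the
         * adapter (forcing a full reset only if the soft reset timed out).
         */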
   3605static void ql_reset_work(struct work_struct *work)
   3606{
   3607	struct ql3_adapter *qdev =
   3608		container_of(work, struct ql3_adapter, reset_work.work);
   3609	struct net_device *ndev = qdev->ndev;
   3610	u32 value;
   3611	struct ql_tx_buf_cb *tx_cb;
   3612	int max_wait_time, i;
   3613	struct ql3xxx_port_registers __iomem *port_regs =
   3614		qdev->mem_map_registers;
   3615	unsigned long hw_flags;
   3616
   3617	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
   3618	    test_bit(QL_RESET_START, &qdev->flags)) {
   3619		clear_bit(QL_LINK_MASTER, &qdev->flags);
   3620
   3621		/*
   3622		 * Loop through the active list and return the skb.
   3623		 */
   3624		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
   3625			int j;
   3626			tx_cb = &qdev->tx_buf[i];
   3627			if (tx_cb->skb) {
   3628				netdev_printk(KERN_DEBUG, ndev,
   3629					      "Freeing lost SKB\n");
   3630				dma_unmap_single(&qdev->pdev->dev,
   3631						 dma_unmap_addr(&tx_cb->map[0], mapaddr),
   3632						 dma_unmap_len(&tx_cb->map[0], maplen),
   3633						 DMA_TO_DEVICE);
   3634				for (j = 1; j < tx_cb->seg_count; j++) {
   3635					dma_unmap_page(&qdev->pdev->dev,
   3636						       dma_unmap_addr(&tx_cb->map[j], mapaddr),
   3637						       dma_unmap_len(&tx_cb->map[j], maplen),
   3638						       DMA_TO_DEVICE);
   3639				}
   3640				dev_kfree_skb(tx_cb->skb);
   3641				tx_cb->skb = NULL;
   3642			}
   3643		}
   3644
   3645		netdev_err(ndev, "Clearing NRI after reset\n");
   3646		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   3647		ql_write_common_reg(qdev,
   3648				    &port_regs->CommonRegs.
   3649				    ispControlStatus,
   3650				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
   3651		/*
    3652		 * Wait for the Soft Reset to complete.
   3653		 */
   3654		max_wait_time = 10;
   3655		do {
    3656			value = ql_read_common_reg(qdev,
    3657						   &port_regs->CommonRegs.
    3658						   ispControlStatus);
    3659
   3660			if ((value & ISP_CONTROL_SR) == 0) {
   3661				netdev_printk(KERN_DEBUG, ndev,
   3662					      "reset completed\n");
   3663				break;
   3664			}
   3665
   3666			if (value & ISP_CONTROL_RI) {
   3667				netdev_printk(KERN_DEBUG, ndev,
   3668					      "clearing NRI after reset\n");
   3669				ql_write_common_reg(qdev,
   3670						    &port_regs->
   3671						    CommonRegs.
   3672						    ispControlStatus,
   3673						    ((ISP_CONTROL_RI <<
   3674						      16) | ISP_CONTROL_RI));
   3675			}
   3676
   3677			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3678			ssleep(1);
   3679			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
   3680		} while (--max_wait_time);
   3681		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
   3682
   3683		if (value & ISP_CONTROL_SR) {
   3684
   3685			/*
   3686			 * Set the reset flags and clear the board again.
   3687			 * Nothing else to do...
   3688			 */
   3689			netdev_err(ndev,
   3690				   "Timed out waiting for reset to complete\n");
   3691			netdev_err(ndev, "Do a reset\n");
   3692			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
   3693			clear_bit(QL_RESET_START, &qdev->flags);
   3694			ql_cycle_adapter(qdev, QL_DO_RESET);
   3695			return;
   3696		}
   3697
   3698		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
   3699		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
   3700		clear_bit(QL_RESET_START, &qdev->flags);
   3701		ql_cycle_adapter(qdev, QL_NO_RESET);
   3702	}
   3703}
   3704
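        /* TX timeout worker: recover by cycling the adapter with a full reset. */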
   3705static void ql_tx_timeout_work(struct work_struct *work)
   3706{
   3707	struct ql3_adapter *qdev =
   3708		container_of(work, struct ql3_adapter, tx_timeout_work.work);
   3709
   3710	ql_cycle_adapter(qdev, QL_DO_RESET);
   3711}
   3712
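        /*
         * Read portStatus to record the chip revision, PCI bus width
         * (32/64-bit), PCI-X mode and PCI slot number.
         */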
   3713static void ql_get_board_info(struct ql3_adapter *qdev)
   3714{
   3715	struct ql3xxx_port_registers __iomem *port_regs =
   3716		qdev->mem_map_registers;
   3717	u32 value;
   3718
   3719	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
   3720
   3721	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
   3722	if (value & PORT_STATUS_64)
   3723		qdev->pci_width = 64;
   3724	else
   3725		qdev->pci_width = 32;
   3726	if (value & PORT_STATUS_X)
   3727		qdev->pci_x = 1;
   3728	else
   3729		qdev->pci_x = 0;
   3730	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
   3731}
   3732
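        /* Periodic adapter timer: kick the link state machine work. */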
   3733static void ql3xxx_timer(struct timer_list *t)
   3734{
   3735	struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
   3736	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
   3737}
   3738
   3739static const struct net_device_ops ql3xxx_netdev_ops = {
   3740	.ndo_open		= ql3xxx_open,
   3741	.ndo_start_xmit		= ql3xxx_send,
   3742	.ndo_stop		= ql3xxx_close,
   3743	.ndo_validate_addr	= eth_validate_addr,
   3744	.ndo_set_mac_address	= ql3xxx_set_mac_address,
   3745	.ndo_tx_timeout		= ql3xxx_tx_timeout,
   3746};
   3747
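        /*
         * PCI probe: enable and map the device (registers live in BAR1), set
         * up the net_device and NAPI, read MTU and MAC address from NVRAM,
         * register the interface and create the per-adapter workqueue and
         * timer used for reset, TX timeout and link state handling.
         */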
   3748static int ql3xxx_probe(struct pci_dev *pdev,
   3749			const struct pci_device_id *pci_entry)
   3750{
   3751	struct net_device *ndev = NULL;
   3752	struct ql3_adapter *qdev = NULL;
   3753	static int cards_found;
   3754	int err;
   3755
   3756	err = pci_enable_device(pdev);
   3757	if (err) {
   3758		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
   3759		goto err_out;
   3760	}
   3761
   3762	err = pci_request_regions(pdev, DRV_NAME);
   3763	if (err) {
   3764		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
   3765		goto err_out_disable_pdev;
   3766	}
   3767
   3768	pci_set_master(pdev);
   3769
   3770	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   3771	if (err) {
   3772		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
   3773		goto err_out_free_regions;
   3774	}
   3775
   3776	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
   3777	if (!ndev) {
   3778		err = -ENOMEM;
   3779		goto err_out_free_regions;
   3780	}
   3781
   3782	SET_NETDEV_DEV(ndev, &pdev->dev);
   3783
   3784	pci_set_drvdata(pdev, ndev);
   3785
   3786	qdev = netdev_priv(ndev);
   3787	qdev->index = cards_found;
   3788	qdev->ndev = ndev;
   3789	qdev->pdev = pdev;
   3790	qdev->device_id = pci_entry->device;
   3791	qdev->port_link_state = LS_DOWN;
   3792	if (msi)
   3793		qdev->msi = 1;
   3794
   3795	qdev->msg_enable = netif_msg_init(debug, default_msg);
   3796
   3797	ndev->features |= NETIF_F_HIGHDMA;
   3798	if (qdev->device_id == QL3032_DEVICE_ID)
   3799		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
   3800
   3801	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
   3802	if (!qdev->mem_map_registers) {
   3803		pr_err("%s: cannot map device registers\n", pci_name(pdev));
   3804		err = -EIO;
   3805		goto err_out_free_ndev;
   3806	}
   3807
   3808	spin_lock_init(&qdev->adapter_lock);
   3809	spin_lock_init(&qdev->hw_lock);
   3810
   3811	/* Set driver entry points */
   3812	ndev->netdev_ops = &ql3xxx_netdev_ops;
   3813	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
   3814	ndev->watchdog_timeo = 5 * HZ;
   3815
   3816	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
   3817
   3818	ndev->irq = pdev->irq;
   3819
   3820	/* make sure the EEPROM is good */
   3821	if (ql_get_nvram_params(qdev)) {
   3822		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
   3823			 __func__, qdev->index);
   3824		err = -EIO;
   3825		goto err_out_iounmap;
   3826	}
   3827
   3828	ql_set_mac_info(qdev);
   3829
   3830	/* Validate and set parameters */
   3831	if (qdev->mac_index) {
    3832		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
   3833		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
   3834	} else {
    3835		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
   3836		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
   3837	}
   3838
   3839	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
   3840
   3841	/* Record PCI bus information. */
   3842	ql_get_board_info(qdev);
   3843
   3844	/*
   3845	 * Set the Maximum Memory Read Byte Count value. We do this to handle
   3846	 * jumbo frames.
   3847	 */
   3848	if (qdev->pci_x)
   3849		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
   3850
   3851	err = register_netdev(ndev);
   3852	if (err) {
   3853		pr_err("%s: cannot register net device\n", pci_name(pdev));
   3854		goto err_out_iounmap;
   3855	}
   3856
   3857	/* we're going to reset, so assume we have no link for now */
   3858
   3859	netif_carrier_off(ndev);
   3860	netif_stop_queue(ndev);
   3861
   3862	qdev->workqueue = create_singlethread_workqueue(ndev->name);
   3863	if (!qdev->workqueue) {
   3864		unregister_netdev(ndev);
   3865		err = -ENOMEM;
   3866		goto err_out_iounmap;
   3867	}
   3868
   3869	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
   3870	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
   3871	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
   3872
   3873	timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
   3874	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
   3875
   3876	if (!cards_found) {
   3877		pr_alert("%s\n", DRV_STRING);
   3878		pr_alert("Driver name: %s, Version: %s\n",
   3879			 DRV_NAME, DRV_VERSION);
   3880	}
   3881	ql_display_dev_info(ndev);
   3882
   3883	cards_found++;
   3884	return 0;
   3885
   3886err_out_iounmap:
   3887	iounmap(qdev->mem_map_registers);
   3888err_out_free_ndev:
   3889	free_netdev(ndev);
   3890err_out_free_regions:
   3891	pci_release_regions(pdev);
   3892err_out_disable_pdev:
   3893	pci_disable_device(pdev);
   3894err_out:
   3895	return err;
   3896}
   3897
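        /*
         * PCI remove: unregister the net_device, quiesce interrupts and the
         * workqueue, then unmap registers and release PCI resources.
         */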
   3898static void ql3xxx_remove(struct pci_dev *pdev)
   3899{
   3900	struct net_device *ndev = pci_get_drvdata(pdev);
   3901	struct ql3_adapter *qdev = netdev_priv(ndev);
   3902
   3903	unregister_netdev(ndev);
   3904
   3905	ql_disable_interrupts(qdev);
   3906
   3907	if (qdev->workqueue) {
   3908		cancel_delayed_work(&qdev->reset_work);
   3909		cancel_delayed_work(&qdev->tx_timeout_work);
   3910		destroy_workqueue(qdev->workqueue);
   3911		qdev->workqueue = NULL;
   3912	}
   3913
   3914	iounmap(qdev->mem_map_registers);
   3915	pci_release_regions(pdev);
   3916	free_netdev(ndev);
   3917}
   3918
   3919static struct pci_driver ql3xxx_driver = {
   3920
   3921	.name = DRV_NAME,
   3922	.id_table = ql3xxx_pci_tbl,
   3923	.probe = ql3xxx_probe,
   3924	.remove = ql3xxx_remove,
   3925};
   3926
   3927module_pci_driver(ql3xxx_driver);