cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

via-velocity.c (96247B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * This code is derived from the VIA reference driver (copyright message
      4 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
      5 * addition to the Linux kernel.
      6 *
      7 * The code has been merged into one source file, cleaned up to follow
      8 * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
      9 * for 64bit hardware platforms.
     10 *
     11 * TODO
     12 *	rx_copybreak/alignment
     13 *	More testing
     14 *
     15 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
     16 * Additional fixes and clean up: Francois Romieu
     17 *
     18 * This source has not been verified for use in safety critical systems.
     19 *
     20 * Please direct queries about the revamped driver to the linux-kernel
     21 * list not VIA.
     22 *
     23 * Original code:
     24 *
     25 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
     26 * All rights reserved.
     27 *
     28 * Author: Chuang Liang-Shing, AJ Jiang
     29 *
     30 * Date: Jan 24, 2003
     31 *
     32 * MODULE_LICENSE("GPL");
     33 */
     34
     35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     36
     37#include <linux/module.h>
     38#include <linux/types.h>
     39#include <linux/bitops.h>
     40#include <linux/init.h>
     41#include <linux/dma-mapping.h>
     42#include <linux/mm.h>
     43#include <linux/errno.h>
     44#include <linux/ioport.h>
     45#include <linux/pci.h>
     46#include <linux/kernel.h>
     47#include <linux/netdevice.h>
     48#include <linux/etherdevice.h>
     49#include <linux/skbuff.h>
     50#include <linux/delay.h>
     51#include <linux/timer.h>
     52#include <linux/slab.h>
     53#include <linux/interrupt.h>
     54#include <linux/string.h>
     55#include <linux/wait.h>
     56#include <linux/io.h>
     57#include <linux/if.h>
     58#include <linux/uaccess.h>
     59#include <linux/proc_fs.h>
     60#include <linux/of_address.h>
     61#include <linux/of_device.h>
     62#include <linux/of_irq.h>
     63#include <linux/inetdevice.h>
     64#include <linux/platform_device.h>
     65#include <linux/reboot.h>
     66#include <linux/ethtool.h>
     67#include <linux/mii.h>
     68#include <linux/in.h>
     69#include <linux/if_arp.h>
     70#include <linux/if_vlan.h>
     71#include <linux/ip.h>
     72#include <linux/tcp.h>
     73#include <linux/udp.h>
     74#include <linux/crc-ccitt.h>
     75#include <linux/crc32.h>
     76
     77#include "via-velocity.h"
     78
     79enum velocity_bus_type {
     80	BUS_PCI,
     81	BUS_PLATFORM,
     82};
     83
     84static int velocity_nics;
     85
     86static void velocity_set_power_state(struct velocity_info *vptr, char state)
     87{
     88	void *addr = vptr->mac_regs;
     89
     90	if (vptr->pdev)
     91		pci_set_power_state(vptr->pdev, state);
     92	else
     93		writeb(state, addr + 0x154);
     94}
     95
     96/**
     97 *	mac_get_cam_mask	-	Read a CAM mask
     98 *	@regs: register block for this velocity
     99 *	@mask: buffer to store mask
    100 *
    101 *	Fetch the mask bits of the selected CAM and store them into the
    102 *	provided mask buffer.
    103 */
    104static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
    105{
    106	int i;
    107
    108	/* Select CAM mask */
    109	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    110
    111	writeb(0, &regs->CAMADDR);
    112
    113	/* read mask */
    114	for (i = 0; i < 8; i++)
    115		*mask++ = readb(&(regs->MARCAM[i]));
    116
    117	/* disable CAMEN */
    118	writeb(0, &regs->CAMADDR);
    119
    120	/* Select mar */
    121	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    122}
    123
    124/**
    125 *	mac_set_cam_mask	-	Set a CAM mask
    126 *	@regs: register block for this velocity
    127 *	@mask: CAM mask to load
    128 *
    129 *	Store a new mask into a CAM
    130 */
    131static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
    132{
    133	int i;
    134	/* Select CAM mask */
    135	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    136
    137	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
    138
    139	for (i = 0; i < 8; i++)
    140		writeb(*mask++, &(regs->MARCAM[i]));
    141
    142	/* disable CAMEN */
    143	writeb(0, &regs->CAMADDR);
    144
    145	/* Select mar */
    146	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    147}
    148
    149static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
    150{
    151	int i;
    152	/* Select CAM mask */
    153	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    154
    155	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
    156
    157	for (i = 0; i < 8; i++)
    158		writeb(*mask++, &(regs->MARCAM[i]));
    159
    160	/* disable CAMEN */
    161	writeb(0, &regs->CAMADDR);
    162
    163	/* Select mar */
    164	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    165}
    166
    167/**
    168 *	mac_set_cam	-	set CAM data
    169 *	@regs: register block of this velocity
    170 *	@idx: Cam index
    171 *	@addr: 2 or 6 bytes of CAM data
    172 *
    173 *	Load an address or vlan tag into a CAM
    174 */
    175static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
    176{
    177	int i;
    178
    179	/* Select CAM mask */
    180	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    181
    182	idx &= (64 - 1);
    183
    184	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
    185
    186	for (i = 0; i < 6; i++)
    187		writeb(*addr++, &(regs->MARCAM[i]));
    188
    189	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
    190
    191	udelay(10);
    192
    193	writeb(0, &regs->CAMADDR);
    194
    195	/* Select mar */
    196	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    197}
    198
    199static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
    200			     const u8 *addr)
    201{
    202
    203	/* Select CAM mask */
    204	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    205
    206	idx &= (64 - 1);
    207
    208	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
    209	writew(*((u16 *) addr), &regs->MARCAM[0]);
    210
    211	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
    212
    213	udelay(10);
    214
    215	writeb(0, &regs->CAMADDR);
    216
    217	/* Select mar */
    218	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
    219}
    220
    221
    222/**
    223 *	mac_wol_reset	-	reset WOL after exiting low power
    224 *	@regs: register block of this velocity
    225 *
    226 *	Called after we drop out of wake on lan mode in order to
    227 *	reset the Wake on lan features. This function doesn't restore
    228 *	the rest of the logic from the result of sleep/wakeup
    229 */
    230static void mac_wol_reset(struct mac_regs __iomem *regs)
    231{
    232
    233	/* Turn off SWPTAG right after leaving power mode */
    234	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
    235	/* clear sticky bits */
    236	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
    237
    238	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
    239	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
    240	/* disable force PME-enable */
    241	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
    242	/* disable power-event config bit */
    243	writew(0xFFFF, &regs->WOLCRClr);
    244	/* clear power status */
    245	writew(0xFFFF, &regs->WOLSRClr);
    246}
    247
    248static const struct ethtool_ops velocity_ethtool_ops;
    249
    250/*
    251    Define module options
    252*/
    253
    254MODULE_AUTHOR("VIA Networking Technologies, Inc.");
    255MODULE_LICENSE("GPL");
    256MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
    257
    258#define VELOCITY_PARAM(N, D) \
    259	static int N[MAX_UNITS] = OPTION_DEFAULT;\
    260	module_param_array(N, int, NULL, 0); \
    261	MODULE_PARM_DESC(N, D);
    262
    263#define RX_DESC_MIN     64
    264#define RX_DESC_MAX     255
    265#define RX_DESC_DEF     64
    266VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
    267
    268#define TX_DESC_MIN     16
    269#define TX_DESC_MAX     256
    270#define TX_DESC_DEF     64
    271VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
    272
    273#define RX_THRESH_MIN   0
    274#define RX_THRESH_MAX   3
    275#define RX_THRESH_DEF   0
    276/* rx_thresh[] is used for controlling the receive fifo threshold.
    277   0: indicate the rxfifo threshold is 128 bytes.
    278   1: indicate the rxfifo threshold is 512 bytes.
    279   2: indicate the rxfifo threshold is 1024 bytes.
    280   3: indicate the rxfifo threshold is store & forward.
    281*/
    282VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
    283
    284#define DMA_LENGTH_MIN  0
    285#define DMA_LENGTH_MAX  7
    286#define DMA_LENGTH_DEF  6
    287
    288/* DMA_length[] is used for controlling the DMA length
    289   0: 8 DWORDs
    290   1: 16 DWORDs
    291   2: 32 DWORDs
    292   3: 64 DWORDs
    293   4: 128 DWORDs
    294   5: 256 DWORDs
    295   6: SF(flush till emply)
    296   7: SF(flush till emply)
    297*/
    298VELOCITY_PARAM(DMA_length, "DMA length");
    299
    300#define IP_ALIG_DEF     0
    301/* IP_byte_align[] is used for IP header DWORD byte aligned
    302   0: indicate the IP header won't be DWORD byte aligned.(Default) .
    303   1: indicate the IP header will be DWORD byte aligned.
    304      In some environment, the IP header should be DWORD byte aligned,
    305      or the packet will be droped when we receive it. (eg: IPVS)
    306*/
    307VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
    308
    309#define FLOW_CNTL_DEF   1
    310#define FLOW_CNTL_MIN   1
    311#define FLOW_CNTL_MAX   5
    312
    313/* flow_control[] is used for setting the flow control ability of NIC.
    314   1: hardware deafult - AUTO (default). Use Hardware default value in ANAR.
    315   2: enable TX flow control.
    316   3: enable RX flow control.
    317   4: enable RX/TX flow control.
    318   5: disable
    319*/
    320VELOCITY_PARAM(flow_control, "Enable flow control ability");
    321
    322#define MED_LNK_DEF 0
    323#define MED_LNK_MIN 0
    324#define MED_LNK_MAX 5
    325/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
    326   0: indicate autonegotiation for both speed and duplex mode
    327   1: indicate 100Mbps half duplex mode
    328   2: indicate 100Mbps full duplex mode
    329   3: indicate 10Mbps half duplex mode
    330   4: indicate 10Mbps full duplex mode
    331   5: indicate 1000Mbps full duplex mode
    332
    333   Note:
    334   if EEPROM have been set to the force mode, this option is ignored
    335   by driver.
    336*/
    337VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
    338
    339#define WOL_OPT_DEF     0
    340#define WOL_OPT_MIN     0
    341#define WOL_OPT_MAX     7
    342/* wol_opts[] is used for controlling wake on lan behavior.
    343   0: Wake up if recevied a magic packet. (Default)
    344   1: Wake up if link status is on/off.
    345   2: Wake up if recevied an arp packet.
    346   4: Wake up if recevied any unicast packet.
    347   Those value can be sumed up to support more than one option.
    348*/
    349VELOCITY_PARAM(wol_opts, "Wake On Lan options");
    350
    351static int rx_copybreak = 200;
    352module_param(rx_copybreak, int, 0644);
    353MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
    354
    355/*
    356 *	Internal board variants. At the moment we have only one
    357 */
    358static struct velocity_info_tbl chip_info_table[] = {
    359	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
    360	{ }
    361};
    362
    363/*
    364 *	Describe the PCI device identifiers that we support in this
    365 *	device driver. Used for hotplug autoloading.
    366 */
    367
    368static const struct pci_device_id velocity_pci_id_table[] = {
    369	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
    370	{ }
    371};
    372
    373MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
    374
    375/*
    376 *	Describe the OF device identifiers that we support in this
    377 *	device driver. Used for devicetree nodes.
    378 */
    379static const struct of_device_id velocity_of_ids[] = {
    380	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
    381	{ /* Sentinel */ },
    382};
    383MODULE_DEVICE_TABLE(of, velocity_of_ids);
    384
    385/**
    386 *	get_chip_name	- 	identifier to name
    387 *	@chip_id: chip identifier
    388 *
    389 *	Given a chip identifier return a suitable description. Returns
    390 *	a pointer a static string valid while the driver is loaded.
    391 */
    392static const char *get_chip_name(enum chip_type chip_id)
    393{
    394	int i;
    395	for (i = 0; chip_info_table[i].name != NULL; i++)
    396		if (chip_info_table[i].chip_id == chip_id)
    397			break;
    398	return chip_info_table[i].name;
    399}
    400
    401/**
    402 *	velocity_set_int_opt	-	parser for integer options
    403 *	@opt: pointer to option value
    404 *	@val: value the user requested (or -1 for default)
    405 *	@min: lowest value allowed
    406 *	@max: highest value allowed
    407 *	@def: default value
    408 *	@name: property name
    409 *
    410 *	Set an integer property in the module options. This function does
    411 *	all the verification and checking as well as reporting so that
    412 *	we don't duplicate code for each option.
    413 */
    414static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
    415				 char *name)
    416{
    417	if (val == -1)
    418		*opt = def;
    419	else if (val < min || val > max) {
    420		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
    421			  name, min, max);
    422		*opt = def;
    423	} else {
    424		pr_info("set value of parameter %s to %d\n", name, val);
    425		*opt = val;
    426	}
    427}
    428
    429/**
    430 *	velocity_set_bool_opt	-	parser for boolean options
    431 *	@opt: pointer to option value
    432 *	@val: value the user requested (or -1 for default)
    433 *	@def: default value (yes/no)
    434 *	@flag: numeric value to set for true.
    435 *	@name: property name
    436 *
    437 *	Set a boolean property in the module options. This function does
    438 *	all the verification and checking as well as reporting so that
    439 *	we don't duplicate code for each option.
    440 */
    441static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
    442				  char *name)
    443{
    444	(*opt) &= (~flag);
    445	if (val == -1)
    446		*opt |= (def ? flag : 0);
    447	else if (val < 0 || val > 1) {
    448		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
    449			  name, 0, 1);
    450		*opt |= (def ? flag : 0);
    451	} else {
    452		pr_info("set parameter %s to %s\n",
    453			name, val ? "TRUE" : "FALSE");
    454		*opt |= (val ? flag : 0);
    455	}
    456}
    457
    458/**
    459 *	velocity_get_options	-	set options on device
    460 *	@opts: option structure for the device
    461 *	@index: index of option to use in module options array
    462 *
    463 *	Turn the module and command options into a single structure
    464 *	for the current device
    465 */
    466static void velocity_get_options(struct velocity_opt *opts, int index)
    467{
    468
    469	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
    470			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
    471			     "rx_thresh");
    472	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
    473			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
    474			     "DMA_length");
    475	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
    476			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
    477			     "RxDescriptors");
    478	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
    479			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
    480			     "TxDescriptors");
    481
    482	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
    483			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
    484			     "flow_control");
    485	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
    486			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
    487			      "IP_byte_align");
    488	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
    489			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
    490			     "Media link mode");
    491	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
    492			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
    493			     "Wake On Lan options");
    494	opts->numrx = (opts->numrx & ~3);
    495}
    496
    497/**
    498 *	velocity_init_cam_filter	-	initialise CAM
    499 *	@vptr: velocity to program
    500 *
    501 *	Initialize the content addressable memory used for filters. Load
    502 *	appropriately according to the presence of VLAN
    503 */
    504static void velocity_init_cam_filter(struct velocity_info *vptr)
    505{
    506	struct mac_regs __iomem *regs = vptr->mac_regs;
    507	unsigned int vid, i = 0;
    508
    509	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
    510	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
    511	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
    512
    513	/* Disable all CAMs */
    514	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
    515	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
    516	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
    517	mac_set_cam_mask(regs, vptr->mCAMmask);
    518
    519	/* Enable VCAMs */
    520	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
    521		mac_set_vlan_cam(regs, i, (u8 *) &vid);
    522		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
    523		if (++i >= VCAM_SIZE)
    524			break;
    525	}
    526	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
    527}
    528
    529static int velocity_vlan_rx_add_vid(struct net_device *dev,
    530				    __be16 proto, u16 vid)
    531{
    532	struct velocity_info *vptr = netdev_priv(dev);
    533
    534	spin_lock_irq(&vptr->lock);
    535	set_bit(vid, vptr->active_vlans);
    536	velocity_init_cam_filter(vptr);
    537	spin_unlock_irq(&vptr->lock);
    538	return 0;
    539}
    540
    541static int velocity_vlan_rx_kill_vid(struct net_device *dev,
    542				     __be16 proto, u16 vid)
    543{
    544	struct velocity_info *vptr = netdev_priv(dev);
    545
    546	spin_lock_irq(&vptr->lock);
    547	clear_bit(vid, vptr->active_vlans);
    548	velocity_init_cam_filter(vptr);
    549	spin_unlock_irq(&vptr->lock);
    550	return 0;
    551}
    552
    553static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
    554{
    555	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
    556}
    557
    558/**
    559 *	velocity_rx_reset	-	handle a receive reset
    560 *	@vptr: velocity we are resetting
    561 *
    562 *	Reset the ownership and status for the receive ring side.
    563 *	Hand all the receive queue to the NIC.
    564 */
    565static void velocity_rx_reset(struct velocity_info *vptr)
    566{
    567
    568	struct mac_regs __iomem *regs = vptr->mac_regs;
    569	int i;
    570
    571	velocity_init_rx_ring_indexes(vptr);
    572
    573	/*
    574	 *	Init state, all RD entries belong to the NIC
    575	 */
    576	for (i = 0; i < vptr->options.numrx; ++i)
    577		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
    578
    579	writew(vptr->options.numrx, &regs->RBRDU);
    580	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
    581	writew(0, &regs->RDIdx);
    582	writew(vptr->options.numrx - 1, &regs->RDCSize);
    583}
    584
    585/**
    586 *	velocity_get_opt_media_mode	-	get media selection
    587 *	@vptr: velocity adapter
    588 *
    589 *	Get the media mode stored in EEPROM or module options and load
    590 *	mii_status accordingly. The requested link state information
    591 *	is also returned.
    592 */
    593static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
    594{
    595	u32 status = 0;
    596
    597	switch (vptr->options.spd_dpx) {
    598	case SPD_DPX_AUTO:
    599		status = VELOCITY_AUTONEG_ENABLE;
    600		break;
    601	case SPD_DPX_100_FULL:
    602		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
    603		break;
    604	case SPD_DPX_10_FULL:
    605		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
    606		break;
    607	case SPD_DPX_100_HALF:
    608		status = VELOCITY_SPEED_100;
    609		break;
    610	case SPD_DPX_10_HALF:
    611		status = VELOCITY_SPEED_10;
    612		break;
    613	case SPD_DPX_1000_FULL:
    614		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
    615		break;
    616	}
    617	vptr->mii_status = status;
    618	return status;
    619}
    620
    621/**
    622 *	safe_disable_mii_autopoll	-	autopoll off
    623 *	@regs: velocity registers
    624 *
    625 *	Turn off the autopoll and wait for it to disable on the chip
    626 */
    627static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
    628{
    629	u16 ww;
    630
    631	/*  turn off MAUTO */
    632	writeb(0, &regs->MIICR);
    633	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    634		udelay(1);
    635		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
    636			break;
    637	}
    638}
    639
    640/**
    641 *	enable_mii_autopoll	-	turn on autopolling
    642 *	@regs: velocity registers
    643 *
    644 *	Enable the MII link status autopoll feature on the Velocity
    645 *	hardware. Wait for it to enable.
    646 */
    647static void enable_mii_autopoll(struct mac_regs __iomem *regs)
    648{
    649	int ii;
    650
    651	writeb(0, &(regs->MIICR));
    652	writeb(MIIADR_SWMPL, &regs->MIIADR);
    653
    654	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
    655		udelay(1);
    656		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
    657			break;
    658	}
    659
    660	writeb(MIICR_MAUTO, &regs->MIICR);
    661
    662	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
    663		udelay(1);
    664		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
    665			break;
    666	}
    667
    668}
    669
    670/**
    671 *	velocity_mii_read	-	read MII data
    672 *	@regs: velocity registers
    673 *	@index: MII register index
    674 *	@data: buffer for received data
    675 *
    676 *	Perform a single read of an MII 16bit register. Returns zero
    677 *	on success or -ETIMEDOUT if the PHY did not respond.
    678 */
    679static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
    680{
    681	u16 ww;
    682
    683	/*
    684	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
    685	 */
    686	safe_disable_mii_autopoll(regs);
    687
    688	writeb(index, &regs->MIIADR);
    689
    690	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
    691
    692	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    693		if (!(readb(&regs->MIICR) & MIICR_RCMD))
    694			break;
    695	}
    696
    697	*data = readw(&regs->MIIDATA);
    698
    699	enable_mii_autopoll(regs);
    700	if (ww == W_MAX_TIMEOUT)
    701		return -ETIMEDOUT;
    702	return 0;
    703}
    704
    705/**
    706 *	mii_check_media_mode	-	check media state
    707 *	@regs: velocity registers
    708 *
    709 *	Check the current MII status and determine the link status
    710 *	accordingly
    711 */
    712static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
    713{
    714	u32 status = 0;
    715	u16 ANAR;
    716
    717	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
    718		status |= VELOCITY_LINK_FAIL;
    719
    720	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
    721		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
    722	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
    723		status |= (VELOCITY_SPEED_1000);
    724	else {
    725		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
    726		if (ANAR & ADVERTISE_100FULL)
    727			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
    728		else if (ANAR & ADVERTISE_100HALF)
    729			status |= VELOCITY_SPEED_100;
    730		else if (ANAR & ADVERTISE_10FULL)
    731			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
    732		else
    733			status |= (VELOCITY_SPEED_10);
    734	}
    735
    736	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
    737		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
    738		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
    739		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
    740			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
    741				status |= VELOCITY_AUTONEG_ENABLE;
    742		}
    743	}
    744
    745	return status;
    746}
    747
    748/**
    749 *	velocity_mii_write	-	write MII data
    750 *	@regs: velocity registers
    751 *	@mii_addr: MII register index
    752 *	@data: 16bit data for the MII register
    753 *
    754 *	Perform a single write to an MII 16bit register. Returns zero
    755 *	on success or -ETIMEDOUT if the PHY did not respond.
    756 */
    757static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
    758{
    759	u16 ww;
    760
    761	/*
    762	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
    763	 */
    764	safe_disable_mii_autopoll(regs);
    765
    766	/* MII reg offset */
    767	writeb(mii_addr, &regs->MIIADR);
    768	/* set MII data */
    769	writew(data, &regs->MIIDATA);
    770
    771	/* turn on MIICR_WCMD */
    772	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
    773
    774	/* W_MAX_TIMEOUT is the timeout period */
    775	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
    776		udelay(5);
    777		if (!(readb(&regs->MIICR) & MIICR_WCMD))
    778			break;
    779	}
    780	enable_mii_autopoll(regs);
    781
    782	if (ww == W_MAX_TIMEOUT)
    783		return -ETIMEDOUT;
    784	return 0;
    785}
    786
    787/**
    788 *	set_mii_flow_control	-	flow control setup
    789 *	@vptr: velocity interface
    790 *
    791 *	Set up the flow control on this interface according to
    792 *	the supplied user/eeprom options.
    793 */
    794static void set_mii_flow_control(struct velocity_info *vptr)
    795{
    796	/*Enable or Disable PAUSE in ANAR */
    797	switch (vptr->options.flow_cntl) {
    798	case FLOW_CNTL_TX:
    799		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
    800		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
    801		break;
    802
    803	case FLOW_CNTL_RX:
    804		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
    805		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
    806		break;
    807
    808	case FLOW_CNTL_TX_RX:
    809		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
    810		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
    811		break;
    812
    813	case FLOW_CNTL_DISABLE:
    814		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
    815		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
    816		break;
    817	default:
    818		break;
    819	}
    820}
    821
    822/**
    823 *	mii_set_auto_on		-	autonegotiate on
    824 *	@vptr: velocity
    825 *
    826 *	Enable autonegotation on this interface
    827 */
    828static void mii_set_auto_on(struct velocity_info *vptr)
    829{
    830	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
    831		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
    832	else
    833		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
    834}
    835
    836static u32 check_connection_type(struct mac_regs __iomem *regs)
    837{
    838	u32 status = 0;
    839	u8 PHYSR0;
    840	u16 ANAR;
    841	PHYSR0 = readb(&regs->PHYSR0);
    842
    843	/*
    844	   if (!(PHYSR0 & PHYSR0_LINKGD))
    845	   status|=VELOCITY_LINK_FAIL;
    846	 */
    847
    848	if (PHYSR0 & PHYSR0_FDPX)
    849		status |= VELOCITY_DUPLEX_FULL;
    850
    851	if (PHYSR0 & PHYSR0_SPDG)
    852		status |= VELOCITY_SPEED_1000;
    853	else if (PHYSR0 & PHYSR0_SPD10)
    854		status |= VELOCITY_SPEED_10;
    855	else
    856		status |= VELOCITY_SPEED_100;
    857
    858	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
    859		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
    860		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
    861		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
    862			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
    863				status |= VELOCITY_AUTONEG_ENABLE;
    864		}
    865	}
    866
    867	return status;
    868}
    869
    870/**
    871 *	velocity_set_media_mode		-	set media mode
    872 *	@vptr: velocity adapter
    873 *	@mii_status: old MII link state
    874 *
    875 *	Check the media link state and configure the flow control
    876 *	PHY and also velocity hardware setup accordingly. In particular
    877 *	we need to set up CD polling and frame bursting.
    878 */
    879static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
    880{
    881	struct mac_regs __iomem *regs = vptr->mac_regs;
    882
    883	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
    884
    885	/* Set mii link status */
    886	set_mii_flow_control(vptr);
    887
    888	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
    889		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
    890
    891	/*
    892	 *	If connection type is AUTO
    893	 */
    894	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
    895		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
    896		/* clear force MAC mode bit */
    897		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
    898		/* set duplex mode of MAC according to duplex mode of MII */
    899		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
    900		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
    901		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
    902
    903		/* enable AUTO-NEGO mode */
    904		mii_set_auto_on(vptr);
    905	} else {
    906		u16 CTRL1000;
    907		u16 ANAR;
    908		u8 CHIPGCR;
    909
    910		/*
    911		 * 1. if it's 3119, disable frame bursting in halfduplex mode
    912		 *    and enable it in fullduplex mode
    913		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
    914		 * 3. only enable CD heart beat counter in 10HD mode
    915		 */
    916
    917		/* set force MAC mode bit */
    918		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
    919
    920		CHIPGCR = readb(&regs->CHIPGCR);
    921
    922		if (mii_status & VELOCITY_SPEED_1000)
    923			CHIPGCR |= CHIPGCR_FCGMII;
    924		else
    925			CHIPGCR &= ~CHIPGCR_FCGMII;
    926
    927		if (mii_status & VELOCITY_DUPLEX_FULL) {
    928			CHIPGCR |= CHIPGCR_FCFDX;
    929			writeb(CHIPGCR, &regs->CHIPGCR);
    930			netdev_info(vptr->netdev,
    931				    "set Velocity to forced full mode\n");
    932			if (vptr->rev_id < REV_ID_VT3216_A0)
    933				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
    934		} else {
    935			CHIPGCR &= ~CHIPGCR_FCFDX;
    936			netdev_info(vptr->netdev,
    937				    "set Velocity to forced half mode\n");
    938			writeb(CHIPGCR, &regs->CHIPGCR);
    939			if (vptr->rev_id < REV_ID_VT3216_A0)
    940				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
    941		}
    942
    943		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
    944		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
    945		if ((mii_status & VELOCITY_SPEED_1000) &&
    946		    (mii_status & VELOCITY_DUPLEX_FULL)) {
    947			CTRL1000 |= ADVERTISE_1000FULL;
    948		}
    949		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
    950
    951		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
    952			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
    953		else
    954			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
    955
    956		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
    957		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
    958		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
    959		if (mii_status & VELOCITY_SPEED_100) {
    960			if (mii_status & VELOCITY_DUPLEX_FULL)
    961				ANAR |= ADVERTISE_100FULL;
    962			else
    963				ANAR |= ADVERTISE_100HALF;
    964		} else if (mii_status & VELOCITY_SPEED_10) {
    965			if (mii_status & VELOCITY_DUPLEX_FULL)
    966				ANAR |= ADVERTISE_10FULL;
    967			else
    968				ANAR |= ADVERTISE_10HALF;
    969		}
    970		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
    971		/* enable AUTO-NEGO mode */
    972		mii_set_auto_on(vptr);
    973		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
    974	}
    975	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
    976	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
    977	return VELOCITY_LINK_CHANGE;
    978}
    979
    980/**
    981 *	velocity_print_link_status	-	link status reporting
    982 *	@vptr: velocity to report on
    983 *
    984 *	Turn the link status of the velocity card into a kernel log
    985 *	description of the new link state, detailing speed and duplex
    986 *	status
    987 */
    988static void velocity_print_link_status(struct velocity_info *vptr)
    989{
    990	const char *link;
    991	const char *speed;
    992	const char *duplex;
    993
    994	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
    995		netdev_notice(vptr->netdev, "failed to detect cable link\n");
    996		return;
    997	}
    998
    999	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
   1000		link = "auto-negotiation";
   1001
   1002		if (vptr->mii_status & VELOCITY_SPEED_1000)
   1003			speed = "1000";
   1004		else if (vptr->mii_status & VELOCITY_SPEED_100)
   1005			speed = "100";
   1006		else
   1007			speed = "10";
   1008
   1009		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
   1010			duplex = "full";
   1011		else
   1012			duplex = "half";
   1013	} else {
   1014		link = "forced";
   1015
   1016		switch (vptr->options.spd_dpx) {
   1017		case SPD_DPX_1000_FULL:
   1018			speed = "1000";
   1019			duplex = "full";
   1020			break;
   1021		case SPD_DPX_100_HALF:
   1022			speed = "100";
   1023			duplex = "half";
   1024			break;
   1025		case SPD_DPX_100_FULL:
   1026			speed = "100";
   1027			duplex = "full";
   1028			break;
   1029		case SPD_DPX_10_HALF:
   1030			speed = "10";
   1031			duplex = "half";
   1032			break;
   1033		case SPD_DPX_10_FULL:
   1034			speed = "10";
   1035			duplex = "full";
   1036			break;
   1037		default:
   1038			speed = "unknown";
   1039			duplex = "unknown";
   1040			break;
   1041		}
   1042	}
   1043	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
   1044		      link, speed, duplex);
   1045}
   1046
   1047/**
   1048 *	enable_flow_control_ability	-	flow control
   1049 *	@vptr: veloity to configure
   1050 *
   1051 *	Set up flow control according to the flow control options
   1052 *	determined by the eeprom/configuration.
   1053 */
   1054static void enable_flow_control_ability(struct velocity_info *vptr)
   1055{
   1056
   1057	struct mac_regs __iomem *regs = vptr->mac_regs;
   1058
   1059	switch (vptr->options.flow_cntl) {
   1060
   1061	case FLOW_CNTL_DEFAULT:
   1062		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
   1063			writel(CR0_FDXRFCEN, &regs->CR0Set);
   1064		else
   1065			writel(CR0_FDXRFCEN, &regs->CR0Clr);
   1066
   1067		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
   1068			writel(CR0_FDXTFCEN, &regs->CR0Set);
   1069		else
   1070			writel(CR0_FDXTFCEN, &regs->CR0Clr);
   1071		break;
   1072
   1073	case FLOW_CNTL_TX:
   1074		writel(CR0_FDXTFCEN, &regs->CR0Set);
   1075		writel(CR0_FDXRFCEN, &regs->CR0Clr);
   1076		break;
   1077
   1078	case FLOW_CNTL_RX:
   1079		writel(CR0_FDXRFCEN, &regs->CR0Set);
   1080		writel(CR0_FDXTFCEN, &regs->CR0Clr);
   1081		break;
   1082
   1083	case FLOW_CNTL_TX_RX:
   1084		writel(CR0_FDXTFCEN, &regs->CR0Set);
   1085		writel(CR0_FDXRFCEN, &regs->CR0Set);
   1086		break;
   1087
   1088	case FLOW_CNTL_DISABLE:
   1089		writel(CR0_FDXRFCEN, &regs->CR0Clr);
   1090		writel(CR0_FDXTFCEN, &regs->CR0Clr);
   1091		break;
   1092
   1093	default:
   1094		break;
   1095	}
   1096
   1097}
   1098
   1099/**
   1100 *	velocity_soft_reset	-	soft reset
   1101 *	@vptr: velocity to reset
   1102 *
   1103 *	Kick off a soft reset of the velocity adapter and then poll
   1104 *	until the reset sequence has completed before returning.
   1105 */
   1106static int velocity_soft_reset(struct velocity_info *vptr)
   1107{
   1108	struct mac_regs __iomem *regs = vptr->mac_regs;
   1109	int i = 0;
   1110
   1111	writel(CR0_SFRST, &regs->CR0Set);
   1112
   1113	for (i = 0; i < W_MAX_TIMEOUT; i++) {
   1114		udelay(5);
   1115		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
   1116			break;
   1117	}
   1118
   1119	if (i == W_MAX_TIMEOUT) {
   1120		writel(CR0_FORSRST, &regs->CR0Set);
   1121		/* FIXME: PCI POSTING */
   1122		/* delay 2ms */
   1123		mdelay(2);
   1124	}
   1125	return 0;
   1126}
   1127
   1128/**
   1129 *	velocity_set_multi	-	filter list change callback
   1130 *	@dev: network device
   1131 *
   1132 *	Called by the network layer when the filter lists need to change
   1133 *	for a velocity adapter. Reload the CAMs with the new address
   1134 *	filter ruleset.
   1135 */
   1136static void velocity_set_multi(struct net_device *dev)
   1137{
   1138	struct velocity_info *vptr = netdev_priv(dev);
   1139	struct mac_regs __iomem *regs = vptr->mac_regs;
   1140	u8 rx_mode;
   1141	int i;
   1142	struct netdev_hw_addr *ha;
   1143
   1144	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
   1145		writel(0xffffffff, &regs->MARCAM[0]);
   1146		writel(0xffffffff, &regs->MARCAM[4]);
   1147		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
   1148	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
   1149		   (dev->flags & IFF_ALLMULTI)) {
   1150		writel(0xffffffff, &regs->MARCAM[0]);
   1151		writel(0xffffffff, &regs->MARCAM[4]);
   1152		rx_mode = (RCR_AM | RCR_AB);
   1153	} else {
   1154		int offset = MCAM_SIZE - vptr->multicast_limit;
   1155		mac_get_cam_mask(regs, vptr->mCAMmask);
   1156
   1157		i = 0;
   1158		netdev_for_each_mc_addr(ha, dev) {
   1159			mac_set_cam(regs, i + offset, ha->addr);
   1160			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
   1161			i++;
   1162		}
   1163
   1164		mac_set_cam_mask(regs, vptr->mCAMmask);
   1165		rx_mode = RCR_AM | RCR_AB | RCR_AP;
   1166	}
   1167	if (dev->mtu > 1500)
   1168		rx_mode |= RCR_AL;
   1169
   1170	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
   1171
   1172}
   1173
   1174/*
   1175 * MII access , media link mode setting functions
   1176 */
   1177
   1178/**
   1179 *	mii_init	-	set up MII
   1180 *	@vptr: velocity adapter
   1181 *	@mii_status:  links tatus
   1182 *
   1183 *	Set up the PHY for the current link state.
   1184 */
   1185static void mii_init(struct velocity_info *vptr, u32 mii_status)
   1186{
   1187	u16 BMCR;
   1188
   1189	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
   1190	case PHYID_ICPLUS_IP101A:
   1191		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
   1192						MII_ADVERTISE, vptr->mac_regs);
   1193		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
   1194			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
   1195								vptr->mac_regs);
   1196		else
   1197			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
   1198								vptr->mac_regs);
   1199		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
   1200		break;
   1201	case PHYID_CICADA_CS8201:
   1202		/*
   1203		 *	Reset to hardware default
   1204		 */
   1205		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
   1206		/*
   1207		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
   1208		 *	off it in NWay-forced half mode for NWay-forced v.s.
   1209		 *	legacy-forced issue.
   1210		 */
   1211		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
   1212			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
   1213		else
   1214			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
   1215		/*
   1216		 *	Turn on Link/Activity LED enable bit for CIS8201
   1217		 */
   1218		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
   1219		break;
   1220	case PHYID_VT3216_32BIT:
   1221	case PHYID_VT3216_64BIT:
   1222		/*
   1223		 *	Reset to hardware default
   1224		 */
   1225		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
   1226		/*
   1227		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
   1228		 *	off it in NWay-forced half mode for NWay-forced v.s.
   1229		 *	legacy-forced issue
   1230		 */
   1231		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
   1232			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
   1233		else
   1234			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
   1235		break;
   1236
   1237	case PHYID_MARVELL_1000:
   1238	case PHYID_MARVELL_1000S:
   1239		/*
   1240		 *	Assert CRS on Transmit
   1241		 */
   1242		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
   1243		/*
   1244		 *	Reset to hardware default
   1245		 */
   1246		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
   1247		break;
   1248	default:
   1249		;
   1250	}
   1251	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
   1252	if (BMCR & BMCR_ISOLATE) {
   1253		BMCR &= ~BMCR_ISOLATE;
   1254		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
   1255	}
   1256}
   1257
   1258/**
   1259 * setup_queue_timers	-	Setup interrupt timers
   1260 * @vptr: velocity adapter
   1261 *
   1262 * Setup interrupt frequency during suppression (timeout if the frame
   1263 * count isn't filled).
   1264 */
   1265static void setup_queue_timers(struct velocity_info *vptr)
   1266{
   1267	/* Only for newer revisions */
   1268	if (vptr->rev_id >= REV_ID_VT3216_A0) {
   1269		u8 txqueue_timer = 0;
   1270		u8 rxqueue_timer = 0;
   1271
   1272		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
   1273				VELOCITY_SPEED_100)) {
   1274			txqueue_timer = vptr->options.txqueue_timer;
   1275			rxqueue_timer = vptr->options.rxqueue_timer;
   1276		}
   1277
   1278		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
   1279		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
   1280	}
   1281}
   1282
   1283/**
   1284 * setup_adaptive_interrupts  -  Setup interrupt suppression
   1285 * @vptr: velocity adapter
   1286 *
   1287 * The velocity is able to suppress interrupt during high interrupt load.
   1288 * This function turns on that feature.
   1289 */
   1290static void setup_adaptive_interrupts(struct velocity_info *vptr)
   1291{
   1292	struct mac_regs __iomem *regs = vptr->mac_regs;
   1293	u16 tx_intsup = vptr->options.tx_intsup;
   1294	u16 rx_intsup = vptr->options.rx_intsup;
   1295
   1296	/* Setup default interrupt mask (will be changed below) */
   1297	vptr->int_mask = INT_MASK_DEF;
   1298
   1299	/* Set Tx Interrupt Suppression Threshold */
   1300	writeb(CAMCR_PS0, &regs->CAMCR);
   1301	if (tx_intsup != 0) {
   1302		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
   1303				ISR_PTX2I | ISR_PTX3I);
   1304		writew(tx_intsup, &regs->ISRCTL);
   1305	} else
   1306		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
   1307
   1308	/* Set Rx Interrupt Suppression Threshold */
   1309	writeb(CAMCR_PS1, &regs->CAMCR);
   1310	if (rx_intsup != 0) {
   1311		vptr->int_mask &= ~ISR_PRXI;
   1312		writew(rx_intsup, &regs->ISRCTL);
   1313	} else
   1314		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
   1315
   1316	/* Select page to interrupt hold timer */
   1317	writeb(0, &regs->CAMCR);
   1318}
   1319
   1320/**
   1321 *	velocity_init_registers	-	initialise MAC registers
   1322 *	@vptr: velocity to init
   1323 *	@type: type of initialisation (hot or cold)
   1324 *
   1325 *	Initialise the MAC on a reset or on first set up on the
   1326 *	hardware.
   1327 */
   1328static void velocity_init_registers(struct velocity_info *vptr,
   1329				    enum velocity_init_type type)
   1330{
   1331	struct mac_regs __iomem *regs = vptr->mac_regs;
   1332	struct net_device *netdev = vptr->netdev;
   1333	int i, mii_status;
   1334
   1335	mac_wol_reset(regs);
   1336
   1337	switch (type) {
   1338	case VELOCITY_INIT_RESET:
   1339	case VELOCITY_INIT_WOL:
   1340
   1341		netif_stop_queue(netdev);
   1342
   1343		/*
   1344		 *	Reset RX to prevent RX pointer not on the 4X location
   1345		 */
   1346		velocity_rx_reset(vptr);
   1347		mac_rx_queue_run(regs);
   1348		mac_rx_queue_wake(regs);
   1349
   1350		mii_status = velocity_get_opt_media_mode(vptr);
   1351		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
   1352			velocity_print_link_status(vptr);
   1353			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
   1354				netif_wake_queue(netdev);
   1355		}
   1356
   1357		enable_flow_control_ability(vptr);
   1358
   1359		mac_clear_isr(regs);
   1360		writel(CR0_STOP, &regs->CR0Clr);
   1361		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
   1362							&regs->CR0Set);
   1363
   1364		break;
   1365
   1366	case VELOCITY_INIT_COLD:
   1367	default:
   1368		/*
   1369		 *	Do reset
   1370		 */
   1371		velocity_soft_reset(vptr);
   1372		mdelay(5);
   1373
   1374		if (!vptr->no_eeprom) {
   1375			mac_eeprom_reload(regs);
   1376			for (i = 0; i < 6; i++)
   1377				writeb(netdev->dev_addr[i], regs->PAR + i);
   1378		}
   1379
   1380		/*
   1381		 *	clear Pre_ACPI bit.
   1382		 */
   1383		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
   1384		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
   1385		mac_set_dma_length(regs, vptr->options.DMA_length);
   1386
   1387		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
   1388		/*
   1389		 *	Back off algorithm use original IEEE standard
   1390		 */
   1391		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
   1392
   1393		/*
   1394		 *	Init CAM filter
   1395		 */
   1396		velocity_init_cam_filter(vptr);
   1397
   1398		/*
   1399		 *	Set packet filter: Receive directed and broadcast address
   1400		 */
   1401		velocity_set_multi(netdev);
   1402
   1403		/*
   1404		 *	Enable MII auto-polling
   1405		 */
   1406		enable_mii_autopoll(regs);
   1407
   1408		setup_adaptive_interrupts(vptr);
   1409
   1410		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
   1411		writew(vptr->options.numrx - 1, &regs->RDCSize);
   1412		mac_rx_queue_run(regs);
   1413		mac_rx_queue_wake(regs);
   1414
   1415		writew(vptr->options.numtx - 1, &regs->TDCSize);
   1416
   1417		for (i = 0; i < vptr->tx.numq; i++) {
   1418			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
   1419			mac_tx_queue_run(regs, i);
   1420		}
   1421
   1422		init_flow_control_register(vptr);
   1423
   1424		writel(CR0_STOP, &regs->CR0Clr);
   1425		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
   1426
   1427		mii_status = velocity_get_opt_media_mode(vptr);
   1428		netif_stop_queue(netdev);
   1429
   1430		mii_init(vptr, mii_status);
   1431
   1432		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
   1433			velocity_print_link_status(vptr);
   1434			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
   1435				netif_wake_queue(netdev);
   1436		}
   1437
   1438		enable_flow_control_ability(vptr);
   1439		mac_hw_mibs_init(regs);
   1440		mac_write_int_mask(vptr->int_mask, regs);
   1441		mac_clear_isr(regs);
   1442
   1443	}
   1444}
   1445
   1446static void velocity_give_many_rx_descs(struct velocity_info *vptr)
   1447{
   1448	struct mac_regs __iomem *regs = vptr->mac_regs;
   1449	int avail, dirty, unusable;
   1450
   1451	/*
   1452	 * RD number must be equal to 4X per hardware spec
   1453	 * (programming guide rev 1.20, p.13)
   1454	 */
   1455	if (vptr->rx.filled < 4)
   1456		return;
   1457
   1458	wmb();
   1459
   1460	unusable = vptr->rx.filled & 0x0003;
   1461	dirty = vptr->rx.dirty - unusable;
   1462	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
   1463		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
   1464		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
   1465	}
   1466
   1467	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
   1468	vptr->rx.filled = unusable;
   1469}
   1470
   1471/**
   1472 *	velocity_init_dma_rings	-	set up DMA rings
   1473 *	@vptr: Velocity to set up
   1474 *
   1475 *	Allocate PCI mapped DMA rings for the receive and transmit layer
   1476 *	to use.
   1477 */
   1478static int velocity_init_dma_rings(struct velocity_info *vptr)
   1479{
   1480	struct velocity_opt *opt = &vptr->options;
   1481	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
   1482	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
   1483	dma_addr_t pool_dma;
   1484	void *pool;
   1485	unsigned int i;
   1486
   1487	/*
   1488	 * Allocate all RD/TD rings a single pool.
   1489	 *
   1490	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
   1491	 * alignment
   1492	 */
   1493	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
   1494				    rx_ring_size, &pool_dma, GFP_ATOMIC);
   1495	if (!pool) {
   1496		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
   1497			vptr->netdev->name);
   1498		return -ENOMEM;
   1499	}
   1500
   1501	vptr->rx.ring = pool;
   1502	vptr->rx.pool_dma = pool_dma;
   1503
   1504	pool += rx_ring_size;
   1505	pool_dma += rx_ring_size;
   1506
   1507	for (i = 0; i < vptr->tx.numq; i++) {
   1508		vptr->tx.rings[i] = pool;
   1509		vptr->tx.pool_dma[i] = pool_dma;
   1510		pool += tx_ring_size;
   1511		pool_dma += tx_ring_size;
   1512	}
   1513
   1514	return 0;
   1515}
   1516
   1517static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
   1518{
   1519	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
   1520}
   1521
   1522/**
   1523 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
   1524 *	@vptr: velocity
   1525 *	@idx: ring index
   1526 *
   1527 *	Allocate a new full sized buffer for the reception of a frame and
   1528 *	map it into PCI space for the hardware to use. The hardware
   1529 *	requires *64* byte alignment of the buffer which makes life
   1530 *	less fun than would be ideal.
   1531 */
   1532static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
   1533{
   1534	struct rx_desc *rd = &(vptr->rx.ring[idx]);
   1535	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
   1536
   1537	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
   1538	if (rd_info->skb == NULL)
   1539		return -ENOMEM;
   1540
   1541	/*
   1542	 *	Do the gymnastics to get the buffer head for data at
   1543	 *	64byte alignment.
   1544	 */
   1545	skb_reserve(rd_info->skb,
   1546			64 - ((unsigned long) rd_info->skb->data & 63));
   1547	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
   1548					vptr->rx.buf_sz, DMA_FROM_DEVICE);
   1549
   1550	/*
   1551	 *	Fill in the descriptor to match
   1552	 */
   1553
   1554	*((u32 *) & (rd->rdesc0)) = 0;
   1555	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
   1556	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
   1557	rd->pa_high = 0;
   1558	return 0;
   1559}
   1560
   1561
   1562static int velocity_rx_refill(struct velocity_info *vptr)
   1563{
   1564	int dirty = vptr->rx.dirty, done = 0;
   1565
   1566	do {
   1567		struct rx_desc *rd = vptr->rx.ring + dirty;
   1568
   1569		/* Fine for an all zero Rx desc at init time as well */
   1570		if (rd->rdesc0.len & OWNED_BY_NIC)
   1571			break;
   1572
   1573		if (!vptr->rx.info[dirty].skb) {
   1574			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
   1575				break;
   1576		}
   1577		done++;
   1578		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
   1579	} while (dirty != vptr->rx.curr);
   1580
   1581	if (done) {
   1582		vptr->rx.dirty = dirty;
   1583		vptr->rx.filled += done;
   1584	}
   1585
   1586	return done;
   1587}
   1588
   1589/**
   1590 *	velocity_free_rd_ring	-	free receive ring
   1591 *	@vptr: velocity to clean up
   1592 *
   1593 *	Free the receive buffers for each ring slot and any
   1594 *	attached socket buffers that need to go away.
   1595 */
   1596static void velocity_free_rd_ring(struct velocity_info *vptr)
   1597{
   1598	int i;
   1599
   1600	if (vptr->rx.info == NULL)
   1601		return;
   1602
   1603	for (i = 0; i < vptr->options.numrx; i++) {
   1604		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
   1605		struct rx_desc *rd = vptr->rx.ring + i;
   1606
   1607		memset(rd, 0, sizeof(*rd));
   1608
   1609		if (!rd_info->skb)
   1610			continue;
   1611		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
   1612				 DMA_FROM_DEVICE);
   1613		rd_info->skb_dma = 0;
   1614
   1615		dev_kfree_skb(rd_info->skb);
   1616		rd_info->skb = NULL;
   1617	}
   1618
   1619	kfree(vptr->rx.info);
   1620	vptr->rx.info = NULL;
   1621}
   1622
   1623/**
   1624 *	velocity_init_rd_ring	-	set up receive ring
   1625 *	@vptr: velocity to configure
   1626 *
   1627 *	Allocate and set up the receive buffers for each ring slot and
   1628 *	assign them to the network adapter.
   1629 */
   1630static int velocity_init_rd_ring(struct velocity_info *vptr)
   1631{
   1632	int ret = -ENOMEM;
   1633
   1634	vptr->rx.info = kcalloc(vptr->options.numrx,
   1635				sizeof(struct velocity_rd_info), GFP_KERNEL);
   1636	if (!vptr->rx.info)
   1637		goto out;
   1638
   1639	velocity_init_rx_ring_indexes(vptr);
   1640
   1641	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
   1642		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
   1643		velocity_free_rd_ring(vptr);
   1644		goto out;
   1645	}
   1646
   1647	ret = 0;
   1648out:
   1649	return ret;
   1650}
   1651
   1652/**
   1653 *	velocity_init_td_ring	-	set up transmit ring
   1654 *	@vptr:	velocity
   1655 *
   1656 *	Set up the transmit ring and chain the ring pointers together.
   1657 *	Returns zero on success or a negative posix errno code for
   1658 *	failure.
   1659 */
   1660static int velocity_init_td_ring(struct velocity_info *vptr)
   1661{
   1662	int j;
   1663
   1664	/* Init the TD ring entries */
   1665	for (j = 0; j < vptr->tx.numq; j++) {
   1666
   1667		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
   1668					    sizeof(struct velocity_td_info),
   1669					    GFP_KERNEL);
   1670		if (!vptr->tx.infos[j])	{
   1671			while (--j >= 0)
   1672				kfree(vptr->tx.infos[j]);
   1673			return -ENOMEM;
   1674		}
   1675
   1676		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
   1677	}
   1678	return 0;
   1679}
   1680
   1681/**
   1682 *	velocity_free_dma_rings	-	free PCI ring pointers
   1683 *	@vptr: Velocity to free from
   1684 *
   1685 *	Clean up the PCI ring buffers allocated to this velocity.
   1686 */
   1687static void velocity_free_dma_rings(struct velocity_info *vptr)
   1688{
   1689	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
   1690		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
   1691
   1692	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
   1693}
   1694
   1695static int velocity_init_rings(struct velocity_info *vptr, int mtu)
   1696{
   1697	int ret;
   1698
   1699	velocity_set_rxbufsize(vptr, mtu);
   1700
   1701	ret = velocity_init_dma_rings(vptr);
   1702	if (ret < 0)
   1703		goto out;
   1704
   1705	ret = velocity_init_rd_ring(vptr);
   1706	if (ret < 0)
   1707		goto err_free_dma_rings_0;
   1708
   1709	ret = velocity_init_td_ring(vptr);
   1710	if (ret < 0)
   1711		goto err_free_rd_ring_1;
   1712out:
   1713	return ret;
   1714
   1715err_free_rd_ring_1:
   1716	velocity_free_rd_ring(vptr);
   1717err_free_dma_rings_0:
   1718	velocity_free_dma_rings(vptr);
   1719	goto out;
   1720}
   1721
   1722/**
   1723 *	velocity_free_tx_buf	-	free transmit buffer
   1724 *	@vptr: velocity
   1725 *	@tdinfo: buffer
   1726 *	@td: transmit descriptor to free
   1727 *
   1728 *	Release an transmit buffer. If the buffer was preallocated then
   1729 *	recycle it, if not then unmap the buffer.
   1730 */
   1731static void velocity_free_tx_buf(struct velocity_info *vptr,
   1732		struct velocity_td_info *tdinfo, struct tx_desc *td)
   1733{
   1734	struct sk_buff *skb = tdinfo->skb;
   1735	int i;
   1736
   1737	/*
   1738	 *	Don't unmap the pre-allocated tx_bufs
   1739	 */
   1740	for (i = 0; i < tdinfo->nskb_dma; i++) {
   1741		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
   1742
   1743		/* For scatter-gather */
   1744		if (skb_shinfo(skb)->nr_frags > 0)
   1745			pktlen = max_t(size_t, pktlen,
   1746				       td->td_buf[i].size & ~TD_QUEUE);
   1747
   1748		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
   1749				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
   1750	}
   1751	dev_consume_skb_irq(skb);
   1752	tdinfo->skb = NULL;
   1753}
   1754
   1755/*
   1756 *	FIXME: could we merge this with velocity_free_tx_buf ?
   1757 */
   1758static void velocity_free_td_ring_entry(struct velocity_info *vptr,
   1759							 int q, int n)
   1760{
   1761	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
   1762	int i;
   1763
   1764	if (td_info == NULL)
   1765		return;
   1766
   1767	if (td_info->skb) {
   1768		for (i = 0; i < td_info->nskb_dma; i++) {
   1769			if (td_info->skb_dma[i]) {
   1770				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
   1771					td_info->skb->len, DMA_TO_DEVICE);
   1772				td_info->skb_dma[i] = 0;
   1773			}
   1774		}
   1775		dev_kfree_skb(td_info->skb);
   1776		td_info->skb = NULL;
   1777	}
   1778}
   1779
   1780/**
   1781 *	velocity_free_td_ring	-	free td ring
   1782 *	@vptr: velocity
   1783 *
   1784 *	Free up the transmit ring for this particular velocity adapter.
   1785 *	We free the ring contents but not the ring itself.
   1786 */
   1787static void velocity_free_td_ring(struct velocity_info *vptr)
   1788{
   1789	int i, j;
   1790
   1791	for (j = 0; j < vptr->tx.numq; j++) {
   1792		if (vptr->tx.infos[j] == NULL)
   1793			continue;
   1794		for (i = 0; i < vptr->options.numtx; i++)
   1795			velocity_free_td_ring_entry(vptr, j, i);
   1796
   1797		kfree(vptr->tx.infos[j]);
   1798		vptr->tx.infos[j] = NULL;
   1799	}
   1800}
   1801
   1802static void velocity_free_rings(struct velocity_info *vptr)
   1803{
   1804	velocity_free_td_ring(vptr);
   1805	velocity_free_rd_ring(vptr);
   1806	velocity_free_dma_rings(vptr);
   1807}
   1808
   1809/**
   1810 *	velocity_error	-	handle error from controller
   1811 *	@vptr: velocity
   1812 *	@status: card status
   1813 *
   1814 *	Process an error report from the hardware and attempt to recover
   1815 *	the card itself. At the moment we cannot recover from some
   1816 *	theoretically impossible errors but this could be fixed using
   1817 *	the pci_device_failed logic to bounce the hardware
   1818 *
   1819 */
   1820static void velocity_error(struct velocity_info *vptr, int status)
   1821{
   1822
   1823	if (status & ISR_TXSTLI) {
   1824		struct mac_regs __iomem *regs = vptr->mac_regs;
   1825
   1826		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
   1827			   readw(&regs->TDIdx[0]));
   1828		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
   1829		writew(TRDCSR_RUN, &regs->TDCSRClr);
   1830		netif_stop_queue(vptr->netdev);
   1831
   1832		/* FIXME: port over the pci_device_failed code and use it
   1833		   here */
   1834	}
   1835
   1836	if (status & ISR_SRCI) {
   1837		struct mac_regs __iomem *regs = vptr->mac_regs;
   1838		int linked;
   1839
   1840		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
   1841			vptr->mii_status = check_connection_type(regs);
   1842
   1843			/*
   1844			 *	If it is a 3119, disable frame bursting in
   1845			 *	halfduplex mode and enable it in fullduplex
   1846			 *	 mode
   1847			 */
   1848			if (vptr->rev_id < REV_ID_VT3216_A0) {
   1849				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
   1850					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
   1851				else
   1852					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
   1853			}
   1854			/*
   1855			 *	Only enable CD heart beat counter in 10HD mode
   1856			 */
   1857			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
   1858				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
   1859			else
   1860				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
   1861
   1862			setup_queue_timers(vptr);
   1863		}
   1864		/*
   1865		 *	Get link status from PHYSR0
   1866		 */
   1867		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
   1868
   1869		if (linked) {
   1870			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
   1871			netif_carrier_on(vptr->netdev);
   1872		} else {
   1873			vptr->mii_status |= VELOCITY_LINK_FAIL;
   1874			netif_carrier_off(vptr->netdev);
   1875		}
   1876
   1877		velocity_print_link_status(vptr);
   1878		enable_flow_control_ability(vptr);
   1879
   1880		/*
   1881		 *	Re-enable auto-polling because SRCI will disable
   1882		 *	auto-polling
   1883		 */
   1884
   1885		enable_mii_autopoll(regs);
   1886
   1887		if (vptr->mii_status & VELOCITY_LINK_FAIL)
   1888			netif_stop_queue(vptr->netdev);
   1889		else
   1890			netif_wake_queue(vptr->netdev);
   1891
   1892	}
   1893	if (status & ISR_MIBFI)
   1894		velocity_update_hw_mibs(vptr);
   1895	if (status & ISR_LSTEI)
   1896		mac_rx_queue_wake(vptr->mac_regs);
   1897}
   1898
   1899/**
   1900 *	velocity_tx_srv		-	transmit interrupt service
   1901 *	@vptr: Velocity
   1902 *
   1903 *	Scan the queues looking for transmitted packets that
   1904 *	we can complete and clean up. Update any statistics as
   1905 *	necessary/
   1906 */
   1907static int velocity_tx_srv(struct velocity_info *vptr)
   1908{
   1909	struct tx_desc *td;
   1910	int qnum;
   1911	int full = 0;
   1912	int idx;
   1913	int works = 0;
   1914	struct velocity_td_info *tdinfo;
   1915	struct net_device_stats *stats = &vptr->netdev->stats;
   1916
   1917	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
   1918		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
   1919			idx = (idx + 1) % vptr->options.numtx) {
   1920
   1921			/*
   1922			 *	Get Tx Descriptor
   1923			 */
   1924			td = &(vptr->tx.rings[qnum][idx]);
   1925			tdinfo = &(vptr->tx.infos[qnum][idx]);
   1926
   1927			if (td->tdesc0.len & OWNED_BY_NIC)
   1928				break;
   1929
   1930			if ((works++ > 15))
   1931				break;
   1932
   1933			if (td->tdesc0.TSR & TSR0_TERR) {
   1934				stats->tx_errors++;
   1935				stats->tx_dropped++;
   1936				if (td->tdesc0.TSR & TSR0_CDH)
   1937					stats->tx_heartbeat_errors++;
   1938				if (td->tdesc0.TSR & TSR0_CRS)
   1939					stats->tx_carrier_errors++;
   1940				if (td->tdesc0.TSR & TSR0_ABT)
   1941					stats->tx_aborted_errors++;
   1942				if (td->tdesc0.TSR & TSR0_OWC)
   1943					stats->tx_window_errors++;
   1944			} else {
   1945				stats->tx_packets++;
   1946				stats->tx_bytes += tdinfo->skb->len;
   1947			}
   1948			velocity_free_tx_buf(vptr, tdinfo, td);
   1949			vptr->tx.used[qnum]--;
   1950		}
   1951		vptr->tx.tail[qnum] = idx;
   1952
   1953		if (AVAIL_TD(vptr, qnum) < 1)
   1954			full = 1;
   1955	}
   1956	/*
   1957	 *	Look to see if we should kick the transmit network
   1958	 *	layer for more work.
   1959	 */
   1960	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
   1961	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
   1962		netif_wake_queue(vptr->netdev);
   1963	}
   1964	return works;
   1965}
   1966
   1967/**
   1968 *	velocity_rx_csum	-	checksum process
   1969 *	@rd: receive packet descriptor
   1970 *	@skb: network layer packet buffer
   1971 *
   1972 *	Process the status bits for the received packet and determine
   1973 *	if the checksum was computed and verified by the hardware
   1974 */
   1975static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
   1976{
   1977	skb_checksum_none_assert(skb);
   1978
   1979	if (rd->rdesc1.CSM & CSM_IPKT) {
   1980		if (rd->rdesc1.CSM & CSM_IPOK) {
   1981			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
   1982					(rd->rdesc1.CSM & CSM_UDPKT)) {
   1983				if (!(rd->rdesc1.CSM & CSM_TUPOK))
   1984					return;
   1985			}
   1986			skb->ip_summed = CHECKSUM_UNNECESSARY;
   1987		}
   1988	}
   1989}
   1990
   1991/**
   1992 *	velocity_rx_copy	-	in place Rx copy for small packets
   1993 *	@rx_skb: network layer packet buffer candidate
   1994 *	@pkt_size: received data size
   1995 *	@vptr: velocity adapter
   1996 *
   1997 *	Replace the current skb that is scheduled for Rx processing by a
   1998 *	shorter, immediately allocated skb, if the received packet is small
   1999 *	enough. This function returns a negative value if the received
   2000 *	packet is too big or if memory is exhausted.
   2001 */
   2002static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
   2003			    struct velocity_info *vptr)
   2004{
   2005	int ret = -1;
   2006	if (pkt_size < rx_copybreak) {
   2007		struct sk_buff *new_skb;
   2008
   2009		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
   2010		if (new_skb) {
   2011			new_skb->ip_summed = rx_skb[0]->ip_summed;
   2012			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
   2013			*rx_skb = new_skb;
   2014			ret = 0;
   2015		}
   2016
   2017	}
   2018	return ret;
   2019}
   2020
   2021/**
   2022 *	velocity_iph_realign	-	IP header alignment
   2023 *	@vptr: velocity we are handling
   2024 *	@skb: network layer packet buffer
   2025 *	@pkt_size: received data size
   2026 *
   2027 *	Align IP header on a 2 bytes boundary. This behavior can be
   2028 *	configured by the user.
   2029 */
   2030static inline void velocity_iph_realign(struct velocity_info *vptr,
   2031					struct sk_buff *skb, int pkt_size)
   2032{
   2033	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
   2034		memmove(skb->data + 2, skb->data, pkt_size);
   2035		skb_reserve(skb, 2);
   2036	}
   2037}
   2038
   2039/**
   2040 *	velocity_receive_frame	-	received packet processor
   2041 *	@vptr: velocity we are handling
   2042 *	@idx: ring index
   2043 *
   2044 *	A packet has arrived. We process the packet and if appropriate
   2045 *	pass the frame up the network stack
   2046 */
   2047static int velocity_receive_frame(struct velocity_info *vptr, int idx)
   2048{
   2049	struct net_device_stats *stats = &vptr->netdev->stats;
   2050	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
   2051	struct rx_desc *rd = &(vptr->rx.ring[idx]);
   2052	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
   2053	struct sk_buff *skb;
   2054
   2055	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
   2056		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
   2057			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
   2058		stats->rx_length_errors++;
   2059		return -EINVAL;
   2060	}
   2061
   2062	if (rd->rdesc0.RSR & RSR_MAR)
   2063		stats->multicast++;
   2064
   2065	skb = rd_info->skb;
   2066
   2067	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
   2068				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
   2069
   2070	velocity_rx_csum(rd, skb);
   2071
   2072	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
   2073		velocity_iph_realign(vptr, skb, pkt_len);
   2074		rd_info->skb = NULL;
   2075		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
   2076				 DMA_FROM_DEVICE);
   2077	} else {
   2078		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
   2079					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
   2080	}
   2081
   2082	skb_put(skb, pkt_len - 4);
   2083	skb->protocol = eth_type_trans(skb, vptr->netdev);
   2084
   2085	if (rd->rdesc0.RSR & RSR_DETAG) {
   2086		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
   2087
   2088		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
   2089	}
   2090	netif_receive_skb(skb);
   2091
   2092	stats->rx_bytes += pkt_len;
   2093	stats->rx_packets++;
   2094
   2095	return 0;
   2096}
   2097
   2098/**
   2099 *	velocity_rx_srv		-	service RX interrupt
   2100 *	@vptr: velocity
   2101 *	@budget_left: remaining budget
   2102 *
   2103 *	Walk the receive ring of the velocity adapter and remove
   2104 *	any received packets from the receive queue. Hand the ring
   2105 *	slots back to the adapter for reuse.
   2106 */
   2107static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
   2108{
   2109	struct net_device_stats *stats = &vptr->netdev->stats;
   2110	int rd_curr = vptr->rx.curr;
   2111	int works = 0;
   2112
   2113	while (works < budget_left) {
   2114		struct rx_desc *rd = vptr->rx.ring + rd_curr;
   2115
   2116		if (!vptr->rx.info[rd_curr].skb)
   2117			break;
   2118
   2119		if (rd->rdesc0.len & OWNED_BY_NIC)
   2120			break;
   2121
   2122		rmb();
   2123
   2124		/*
   2125		 *	Don't drop CE or RL error frame although RXOK is off
   2126		 */
   2127		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
   2128			if (velocity_receive_frame(vptr, rd_curr) < 0)
   2129				stats->rx_dropped++;
   2130		} else {
   2131			if (rd->rdesc0.RSR & RSR_CRC)
   2132				stats->rx_crc_errors++;
   2133			if (rd->rdesc0.RSR & RSR_FAE)
   2134				stats->rx_frame_errors++;
   2135
   2136			stats->rx_dropped++;
   2137		}
   2138
   2139		rd->size |= RX_INTEN;
   2140
   2141		rd_curr++;
   2142		if (rd_curr >= vptr->options.numrx)
   2143			rd_curr = 0;
   2144		works++;
   2145	}
   2146
   2147	vptr->rx.curr = rd_curr;
   2148
   2149	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
   2150		velocity_give_many_rx_descs(vptr);
   2151
   2152	VAR_USED(stats);
   2153	return works;
   2154}
   2155
   2156static int velocity_poll(struct napi_struct *napi, int budget)
   2157{
   2158	struct velocity_info *vptr = container_of(napi,
   2159			struct velocity_info, napi);
   2160	unsigned int rx_done;
   2161	unsigned long flags;
   2162
   2163	/*
   2164	 * Do rx and tx twice for performance (taken from the VIA
   2165	 * out-of-tree driver).
   2166	 */
   2167	rx_done = velocity_rx_srv(vptr, budget);
   2168	spin_lock_irqsave(&vptr->lock, flags);
   2169	velocity_tx_srv(vptr);
   2170	/* If budget not fully consumed, exit the polling mode */
   2171	if (rx_done < budget) {
   2172		napi_complete_done(napi, rx_done);
   2173		mac_enable_int(vptr->mac_regs);
   2174	}
   2175	spin_unlock_irqrestore(&vptr->lock, flags);
   2176
   2177	return rx_done;
   2178}
   2179
   2180/**
   2181 *	velocity_intr		-	interrupt callback
   2182 *	@irq: interrupt number
   2183 *	@dev_instance: interrupting device
   2184 *
   2185 *	Called whenever an interrupt is generated by the velocity
   2186 *	adapter IRQ line. We may not be the source of the interrupt
   2187 *	and need to identify initially if we are, and if not exit as
   2188 *	efficiently as possible.
   2189 */
   2190static irqreturn_t velocity_intr(int irq, void *dev_instance)
   2191{
   2192	struct net_device *dev = dev_instance;
   2193	struct velocity_info *vptr = netdev_priv(dev);
   2194	u32 isr_status;
   2195
   2196	spin_lock(&vptr->lock);
   2197	isr_status = mac_read_isr(vptr->mac_regs);
   2198
   2199	/* Not us ? */
   2200	if (isr_status == 0) {
   2201		spin_unlock(&vptr->lock);
   2202		return IRQ_NONE;
   2203	}
   2204
   2205	/* Ack the interrupt */
   2206	mac_write_isr(vptr->mac_regs, isr_status);
   2207
   2208	if (likely(napi_schedule_prep(&vptr->napi))) {
   2209		mac_disable_int(vptr->mac_regs);
   2210		__napi_schedule(&vptr->napi);
   2211	}
   2212
   2213	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
   2214		velocity_error(vptr, isr_status);
   2215
   2216	spin_unlock(&vptr->lock);
   2217
   2218	return IRQ_HANDLED;
   2219}
   2220
   2221/**
   2222 *	velocity_open		-	interface activation callback
   2223 *	@dev: network layer device to open
   2224 *
   2225 *	Called when the network layer brings the interface up. Returns
   2226 *	a negative posix error code on failure, or zero on success.
   2227 *
   2228 *	All the ring allocation and set up is done on open for this
   2229 *	adapter to minimise memory usage when inactive
   2230 */
   2231static int velocity_open(struct net_device *dev)
   2232{
   2233	struct velocity_info *vptr = netdev_priv(dev);
   2234	int ret;
   2235
   2236	ret = velocity_init_rings(vptr, dev->mtu);
   2237	if (ret < 0)
   2238		goto out;
   2239
   2240	/* Ensure chip is running */
   2241	velocity_set_power_state(vptr, PCI_D0);
   2242
   2243	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
   2244
   2245	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
   2246			  dev->name, dev);
   2247	if (ret < 0) {
   2248		/* Power down the chip */
   2249		velocity_set_power_state(vptr, PCI_D3hot);
   2250		velocity_free_rings(vptr);
   2251		goto out;
   2252	}
   2253
   2254	velocity_give_many_rx_descs(vptr);
   2255
   2256	mac_enable_int(vptr->mac_regs);
   2257	netif_start_queue(dev);
   2258	napi_enable(&vptr->napi);
   2259	vptr->flags |= VELOCITY_FLAGS_OPENED;
   2260out:
   2261	return ret;
   2262}
   2263
   2264/**
   2265 *	velocity_shutdown	-	shut down the chip
   2266 *	@vptr: velocity to deactivate
   2267 *
   2268 *	Shuts down the internal operations of the velocity and
   2269 *	disables interrupts, autopolling, transmit and receive
   2270 */
   2271static void velocity_shutdown(struct velocity_info *vptr)
   2272{
   2273	struct mac_regs __iomem *regs = vptr->mac_regs;
   2274	mac_disable_int(regs);
   2275	writel(CR0_STOP, &regs->CR0Set);
   2276	writew(0xFFFF, &regs->TDCSRClr);
   2277	writeb(0xFF, &regs->RDCSRClr);
   2278	safe_disable_mii_autopoll(regs);
   2279	mac_clear_isr(regs);
   2280}
   2281
   2282/**
   2283 *	velocity_change_mtu	-	MTU change callback
   2284 *	@dev: network device
   2285 *	@new_mtu: desired MTU
   2286 *
   2287 *	Handle requests from the networking layer for MTU change on
   2288 *	this interface. It gets called on a change by the network layer.
   2289 *	Return zero for success or negative posix error code.
   2290 */
   2291static int velocity_change_mtu(struct net_device *dev, int new_mtu)
   2292{
   2293	struct velocity_info *vptr = netdev_priv(dev);
   2294	int ret = 0;
   2295
   2296	if (!netif_running(dev)) {
   2297		dev->mtu = new_mtu;
   2298		goto out_0;
   2299	}
   2300
   2301	if (dev->mtu != new_mtu) {
   2302		struct velocity_info *tmp_vptr;
   2303		unsigned long flags;
   2304		struct rx_info rx;
   2305		struct tx_info tx;
   2306
   2307		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
   2308		if (!tmp_vptr) {
   2309			ret = -ENOMEM;
   2310			goto out_0;
   2311		}
   2312
   2313		tmp_vptr->netdev = dev;
   2314		tmp_vptr->pdev = vptr->pdev;
   2315		tmp_vptr->dev = vptr->dev;
   2316		tmp_vptr->options = vptr->options;
   2317		tmp_vptr->tx.numq = vptr->tx.numq;
   2318
   2319		ret = velocity_init_rings(tmp_vptr, new_mtu);
   2320		if (ret < 0)
   2321			goto out_free_tmp_vptr_1;
   2322
   2323		napi_disable(&vptr->napi);
   2324
   2325		spin_lock_irqsave(&vptr->lock, flags);
   2326
   2327		netif_stop_queue(dev);
   2328		velocity_shutdown(vptr);
   2329
   2330		rx = vptr->rx;
   2331		tx = vptr->tx;
   2332
   2333		vptr->rx = tmp_vptr->rx;
   2334		vptr->tx = tmp_vptr->tx;
   2335
   2336		tmp_vptr->rx = rx;
   2337		tmp_vptr->tx = tx;
   2338
   2339		dev->mtu = new_mtu;
   2340
   2341		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
   2342
   2343		velocity_give_many_rx_descs(vptr);
   2344
   2345		napi_enable(&vptr->napi);
   2346
   2347		mac_enable_int(vptr->mac_regs);
   2348		netif_start_queue(dev);
   2349
   2350		spin_unlock_irqrestore(&vptr->lock, flags);
   2351
   2352		velocity_free_rings(tmp_vptr);
   2353
   2354out_free_tmp_vptr_1:
   2355		kfree(tmp_vptr);
   2356	}
   2357out_0:
   2358	return ret;
   2359}
   2360
   2361#ifdef CONFIG_NET_POLL_CONTROLLER
   2362/**
   2363 *  velocity_poll_controller		-	Velocity Poll controller function
   2364 *  @dev: network device
   2365 *
   2366 *
   2367 *  Used by NETCONSOLE and other diagnostic tools to allow network I/P
   2368 *  with interrupts disabled.
   2369 */
   2370static void velocity_poll_controller(struct net_device *dev)
   2371{
   2372	disable_irq(dev->irq);
   2373	velocity_intr(dev->irq, dev);
   2374	enable_irq(dev->irq);
   2375}
   2376#endif
   2377
   2378/**
   2379 *	velocity_mii_ioctl		-	MII ioctl handler
   2380 *	@dev: network device
   2381 *	@ifr: the ifreq block for the ioctl
   2382 *	@cmd: the command
   2383 *
   2384 *	Process MII requests made via ioctl from the network layer. These
   2385 *	are used by tools like kudzu to interrogate the link state of the
   2386 *	hardware
   2387 */
   2388static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
   2389{
   2390	struct velocity_info *vptr = netdev_priv(dev);
   2391	struct mac_regs __iomem *regs = vptr->mac_regs;
   2392	unsigned long flags;
   2393	struct mii_ioctl_data *miidata = if_mii(ifr);
   2394	int err;
   2395
   2396	switch (cmd) {
   2397	case SIOCGMIIPHY:
   2398		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
   2399		break;
   2400	case SIOCGMIIREG:
   2401		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
   2402			return -ETIMEDOUT;
   2403		break;
   2404	case SIOCSMIIREG:
   2405		spin_lock_irqsave(&vptr->lock, flags);
   2406		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
   2407		spin_unlock_irqrestore(&vptr->lock, flags);
   2408		check_connection_type(vptr->mac_regs);
   2409		if (err)
   2410			return err;
   2411		break;
   2412	default:
   2413		return -EOPNOTSUPP;
   2414	}
   2415	return 0;
   2416}
   2417
   2418/**
   2419 *	velocity_ioctl		-	ioctl entry point
   2420 *	@dev: network device
   2421 *	@rq: interface request ioctl
   2422 *	@cmd: command code
   2423 *
   2424 *	Called when the user issues an ioctl request to the network
   2425 *	device in question. The velocity interface supports MII.
   2426 */
   2427static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
   2428{
   2429	struct velocity_info *vptr = netdev_priv(dev);
   2430	int ret;
   2431
   2432	/* If we are asked for information and the device is power
   2433	   saving then we need to bring the device back up to talk to it */
   2434
   2435	if (!netif_running(dev))
   2436		velocity_set_power_state(vptr, PCI_D0);
   2437
   2438	switch (cmd) {
   2439	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
   2440	case SIOCGMIIREG:	/* Read MII PHY register. */
   2441	case SIOCSMIIREG:	/* Write to MII PHY register. */
   2442		ret = velocity_mii_ioctl(dev, rq, cmd);
   2443		break;
   2444
   2445	default:
   2446		ret = -EOPNOTSUPP;
   2447	}
   2448	if (!netif_running(dev))
   2449		velocity_set_power_state(vptr, PCI_D3hot);
   2450
   2451
   2452	return ret;
   2453}
   2454
   2455/**
   2456 *	velocity_get_stats	-	statistics callback
   2457 *	@dev: network device
   2458 *
   2459 *	Callback from the network layer to allow driver statistics
   2460 *	to be resynchronized with hardware collected state. In the
   2461 *	case of the velocity we need to pull the MIB counters from
   2462 *	the hardware into the counters before letting the network
   2463 *	layer display them.
   2464 */
   2465static struct net_device_stats *velocity_get_stats(struct net_device *dev)
   2466{
   2467	struct velocity_info *vptr = netdev_priv(dev);
   2468
   2469	/* If the hardware is down, don't touch MII */
   2470	if (!netif_running(dev))
   2471		return &dev->stats;
   2472
   2473	spin_lock_irq(&vptr->lock);
   2474	velocity_update_hw_mibs(vptr);
   2475	spin_unlock_irq(&vptr->lock);
   2476
   2477	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
   2478	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
   2479	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
   2480
   2481//  unsigned long   rx_dropped;     /* no space in linux buffers    */
   2482	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
   2483	/* detailed rx_errors: */
   2484//  unsigned long   rx_length_errors;
   2485//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
   2486	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
   2487//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
   2488//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
   2489//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
   2490
   2491	/* detailed tx_errors */
   2492//  unsigned long   tx_fifo_errors;
   2493
   2494	return &dev->stats;
   2495}
   2496
   2497/**
   2498 *	velocity_close		-	close adapter callback
   2499 *	@dev: network device
   2500 *
   2501 *	Callback from the network layer when the velocity is being
   2502 *	deactivated by the network layer
   2503 */
   2504static int velocity_close(struct net_device *dev)
   2505{
   2506	struct velocity_info *vptr = netdev_priv(dev);
   2507
   2508	napi_disable(&vptr->napi);
   2509	netif_stop_queue(dev);
   2510	velocity_shutdown(vptr);
   2511
   2512	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
   2513		velocity_get_ip(vptr);
   2514
   2515	free_irq(dev->irq, dev);
   2516
   2517	velocity_free_rings(vptr);
   2518
   2519	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
   2520	return 0;
   2521}
   2522
   2523/**
   2524 *	velocity_xmit		-	transmit packet callback
   2525 *	@skb: buffer to transmit
   2526 *	@dev: network device
   2527 *
   2528 *	Called by the network layer to request a packet is queued to
   2529 *	the velocity. Returns zero on success.
   2530 */
   2531static netdev_tx_t velocity_xmit(struct sk_buff *skb,
   2532				 struct net_device *dev)
   2533{
   2534	struct velocity_info *vptr = netdev_priv(dev);
   2535	int qnum = 0;
   2536	struct tx_desc *td_ptr;
   2537	struct velocity_td_info *tdinfo;
   2538	unsigned long flags;
   2539	int pktlen;
   2540	int index, prev;
   2541	int i = 0;
   2542
   2543	if (skb_padto(skb, ETH_ZLEN))
   2544		goto out;
   2545
   2546	/* The hardware can handle at most 7 memory segments, so merge
   2547	 * the skb if there are more */
   2548	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
   2549		dev_kfree_skb_any(skb);
   2550		return NETDEV_TX_OK;
   2551	}
   2552
   2553	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
   2554			max_t(unsigned int, skb->len, ETH_ZLEN) :
   2555				skb_headlen(skb);
   2556
   2557	spin_lock_irqsave(&vptr->lock, flags);
   2558
   2559	index = vptr->tx.curr[qnum];
   2560	td_ptr = &(vptr->tx.rings[qnum][index]);
   2561	tdinfo = &(vptr->tx.infos[qnum][index]);
   2562
   2563	td_ptr->tdesc1.TCR = TCR0_TIC;
   2564	td_ptr->td_buf[0].size &= ~TD_QUEUE;
   2565
   2566	/*
   2567	 *	Map the linear network buffer into PCI space and
   2568	 *	add it to the transmit ring.
   2569	 */
   2570	tdinfo->skb = skb;
   2571	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
   2572								DMA_TO_DEVICE);
   2573	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
   2574	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
   2575	td_ptr->td_buf[0].pa_high = 0;
   2576	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
   2577
   2578	/* Handle fragments */
   2579	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   2580		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   2581
   2582		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
   2583							  frag, 0,
   2584							  skb_frag_size(frag),
   2585							  DMA_TO_DEVICE);
   2586
   2587		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
   2588		td_ptr->td_buf[i + 1].pa_high = 0;
   2589		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
   2590	}
   2591	tdinfo->nskb_dma = i + 1;
   2592
   2593	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
   2594
   2595	if (skb_vlan_tag_present(skb)) {
   2596		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
   2597		td_ptr->tdesc1.TCR |= TCR0_VETAG;
   2598	}
   2599
   2600	/*
   2601	 *	Handle hardware checksum
   2602	 */
   2603	if (skb->ip_summed == CHECKSUM_PARTIAL) {
   2604		const struct iphdr *ip = ip_hdr(skb);
   2605		if (ip->protocol == IPPROTO_TCP)
   2606			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
   2607		else if (ip->protocol == IPPROTO_UDP)
   2608			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
   2609		td_ptr->tdesc1.TCR |= TCR0_IPCK;
   2610	}
   2611
   2612	prev = index - 1;
   2613	if (prev < 0)
   2614		prev = vptr->options.numtx - 1;
   2615	td_ptr->tdesc0.len |= OWNED_BY_NIC;
   2616	vptr->tx.used[qnum]++;
   2617	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
   2618
   2619	if (AVAIL_TD(vptr, qnum) < 1)
   2620		netif_stop_queue(dev);
   2621
   2622	td_ptr = &(vptr->tx.rings[qnum][prev]);
   2623	td_ptr->td_buf[0].size |= TD_QUEUE;
   2624	mac_tx_queue_wake(vptr->mac_regs, qnum);
   2625
   2626	spin_unlock_irqrestore(&vptr->lock, flags);
   2627out:
   2628	return NETDEV_TX_OK;
   2629}
   2630
   2631static const struct net_device_ops velocity_netdev_ops = {
   2632	.ndo_open		= velocity_open,
   2633	.ndo_stop		= velocity_close,
   2634	.ndo_start_xmit		= velocity_xmit,
   2635	.ndo_get_stats		= velocity_get_stats,
   2636	.ndo_validate_addr	= eth_validate_addr,
   2637	.ndo_set_mac_address	= eth_mac_addr,
   2638	.ndo_set_rx_mode	= velocity_set_multi,
   2639	.ndo_change_mtu		= velocity_change_mtu,
   2640	.ndo_eth_ioctl		= velocity_ioctl,
   2641	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
   2642	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
   2643#ifdef CONFIG_NET_POLL_CONTROLLER
   2644	.ndo_poll_controller = velocity_poll_controller,
   2645#endif
   2646};
   2647
   2648/**
   2649 *	velocity_init_info	-	init private data
   2650 *	@vptr: Velocity info
   2651 *	@info: Board type
   2652 *
   2653 *	Set up the initial velocity_info struct for the device that has been
   2654 *	discovered.
   2655 */
   2656static void velocity_init_info(struct velocity_info *vptr,
   2657				const struct velocity_info_tbl *info)
   2658{
   2659	vptr->chip_id = info->chip_id;
   2660	vptr->tx.numq = info->txqueue;
   2661	vptr->multicast_limit = MCAM_SIZE;
   2662	spin_lock_init(&vptr->lock);
   2663}
   2664
   2665/**
   2666 *	velocity_get_pci_info	-	retrieve PCI info for device
   2667 *	@vptr: velocity device
   2668 *
   2669 *	Retrieve the PCI configuration space data that interests us from
   2670 *	the kernel PCI layer
   2671 */
   2672static int velocity_get_pci_info(struct velocity_info *vptr)
   2673{
   2674	struct pci_dev *pdev = vptr->pdev;
   2675
   2676	pci_set_master(pdev);
   2677
   2678	vptr->ioaddr = pci_resource_start(pdev, 0);
   2679	vptr->memaddr = pci_resource_start(pdev, 1);
   2680
   2681	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
   2682		dev_err(&pdev->dev,
   2683			   "region #0 is not an I/O resource, aborting.\n");
   2684		return -EINVAL;
   2685	}
   2686
   2687	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
   2688		dev_err(&pdev->dev,
   2689			   "region #1 is an I/O resource, aborting.\n");
   2690		return -EINVAL;
   2691	}
   2692
   2693	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
   2694		dev_err(&pdev->dev, "region #1 is too small.\n");
   2695		return -EINVAL;
   2696	}
   2697
   2698	return 0;
   2699}
   2700
   2701/**
   2702 *	velocity_get_platform_info - retrieve platform info for device
   2703 *	@vptr: velocity device
   2704 *
   2705 *	Retrieve the Platform configuration data that interests us
   2706 */
   2707static int velocity_get_platform_info(struct velocity_info *vptr)
   2708{
   2709	struct resource res;
   2710	int ret;
   2711
   2712	if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
   2713		vptr->no_eeprom = 1;
   2714
   2715	ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
   2716	if (ret) {
   2717		dev_err(vptr->dev, "unable to find memory address\n");
   2718		return ret;
   2719	}
   2720
   2721	vptr->memaddr = res.start;
   2722
   2723	if (resource_size(&res) < VELOCITY_IO_SIZE) {
   2724		dev_err(vptr->dev, "memory region is too small.\n");
   2725		return -EINVAL;
   2726	}
   2727
   2728	return 0;
   2729}
   2730
   2731/**
   2732 *	velocity_print_info	-	per driver data
   2733 *	@vptr: velocity
   2734 *
   2735 *	Print per driver data as the kernel driver finds Velocity
   2736 *	hardware
   2737 */
   2738static void velocity_print_info(struct velocity_info *vptr)
   2739{
   2740	netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
   2741		    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
   2742}
   2743
   2744static u32 velocity_get_link(struct net_device *dev)
   2745{
   2746	struct velocity_info *vptr = netdev_priv(dev);
   2747	struct mac_regs __iomem *regs = vptr->mac_regs;
   2748	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
   2749}
   2750
   2751/**
   2752 *	velocity_probe - set up discovered velocity device
   2753 *	@dev: PCI device
   2754 *	@info: table of match
   2755 *	@irq: interrupt info
   2756 *	@bustype: bus that device is connected to
   2757 *
   2758 *	Configure a discovered adapter from scratch. Return a negative
   2759 *	errno error code on failure paths.
   2760 */
   2761static int velocity_probe(struct device *dev, int irq,
   2762			   const struct velocity_info_tbl *info,
   2763			   enum velocity_bus_type bustype)
   2764{
   2765	struct net_device *netdev;
   2766	int i;
   2767	struct velocity_info *vptr;
   2768	struct mac_regs __iomem *regs;
   2769	int ret = -ENOMEM;
   2770	u8 addr[ETH_ALEN];
   2771
   2772	/* FIXME: this driver, like almost all other ethernet drivers,
   2773	 * can support more than MAX_UNITS.
   2774	 */
   2775	if (velocity_nics >= MAX_UNITS) {
   2776		dev_notice(dev, "already found %d NICs.\n", velocity_nics);
   2777		return -ENODEV;
   2778	}
   2779
   2780	netdev = alloc_etherdev(sizeof(struct velocity_info));
   2781	if (!netdev)
   2782		goto out;
   2783
   2784	/* Chain it all together */
   2785
   2786	SET_NETDEV_DEV(netdev, dev);
   2787	vptr = netdev_priv(netdev);
   2788
   2789	pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
   2790	pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
   2791	pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
   2792
   2793	netdev->irq = irq;
   2794	vptr->netdev = netdev;
   2795	vptr->dev = dev;
   2796
   2797	velocity_init_info(vptr, info);
   2798
   2799	if (bustype == BUS_PCI) {
   2800		vptr->pdev = to_pci_dev(dev);
   2801
   2802		ret = velocity_get_pci_info(vptr);
   2803		if (ret < 0)
   2804			goto err_free_dev;
   2805	} else {
   2806		vptr->pdev = NULL;
   2807		ret = velocity_get_platform_info(vptr);
   2808		if (ret < 0)
   2809			goto err_free_dev;
   2810	}
   2811
   2812	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
   2813	if (regs == NULL) {
   2814		ret = -EIO;
   2815		goto err_free_dev;
   2816	}
   2817
   2818	vptr->mac_regs = regs;
   2819	vptr->rev_id = readb(&regs->rev_id);
   2820
   2821	mac_wol_reset(regs);
   2822
   2823	for (i = 0; i < 6; i++)
   2824		addr[i] = readb(&regs->PAR[i]);
   2825	eth_hw_addr_set(netdev, addr);
   2826
   2827
   2828	velocity_get_options(&vptr->options, velocity_nics);
   2829
   2830	/*
   2831	 *	Mask out the options cannot be set to the chip
   2832	 */
   2833
   2834	vptr->options.flags &= info->flags;
   2835
   2836	/*
   2837	 *	Enable the chip specified capbilities
   2838	 */
   2839
   2840	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
   2841
   2842	vptr->wol_opts = vptr->options.wol_opts;
   2843	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
   2844
   2845	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
   2846
   2847	netdev->netdev_ops = &velocity_netdev_ops;
   2848	netdev->ethtool_ops = &velocity_ethtool_ops;
   2849	netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT);
   2850
   2851	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
   2852			   NETIF_F_HW_VLAN_CTAG_TX;
   2853	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
   2854			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
   2855			NETIF_F_IP_CSUM;
   2856
   2857	/* MTU range: 64 - 9000 */
   2858	netdev->min_mtu = VELOCITY_MIN_MTU;
   2859	netdev->max_mtu = VELOCITY_MAX_MTU;
   2860
   2861	ret = register_netdev(netdev);
   2862	if (ret < 0)
   2863		goto err_iounmap;
   2864
   2865	if (!velocity_get_link(netdev)) {
   2866		netif_carrier_off(netdev);
   2867		vptr->mii_status |= VELOCITY_LINK_FAIL;
   2868	}
   2869
   2870	velocity_print_info(vptr);
   2871	dev_set_drvdata(vptr->dev, netdev);
   2872
   2873	/* and leave the chip powered down */
   2874
   2875	velocity_set_power_state(vptr, PCI_D3hot);
   2876	velocity_nics++;
   2877out:
   2878	return ret;
   2879
   2880err_iounmap:
   2881	netif_napi_del(&vptr->napi);
   2882	iounmap(regs);
   2883err_free_dev:
   2884	free_netdev(netdev);
   2885	goto out;
   2886}
   2887
   2888/**
   2889 *	velocity_remove	- device unplug
   2890 *	@dev: device being removed
   2891 *
   2892 *	Device unload callback. Called on an unplug or on module
   2893 *	unload for each active device that is present. Disconnects
   2894 *	the device from the network layer and frees all the resources
   2895 */
   2896static int velocity_remove(struct device *dev)
   2897{
   2898	struct net_device *netdev = dev_get_drvdata(dev);
   2899	struct velocity_info *vptr = netdev_priv(netdev);
   2900
   2901	unregister_netdev(netdev);
   2902	netif_napi_del(&vptr->napi);
   2903	iounmap(vptr->mac_regs);
   2904	free_netdev(netdev);
   2905	velocity_nics--;
   2906
   2907	return 0;
   2908}
   2909
   2910static int velocity_pci_probe(struct pci_dev *pdev,
   2911			       const struct pci_device_id *ent)
   2912{
   2913	const struct velocity_info_tbl *info =
   2914					&chip_info_table[ent->driver_data];
   2915	int ret;
   2916
   2917	ret = pci_enable_device(pdev);
   2918	if (ret < 0)
   2919		return ret;
   2920
   2921	ret = pci_request_regions(pdev, VELOCITY_NAME);
   2922	if (ret < 0) {
   2923		dev_err(&pdev->dev, "No PCI resources.\n");
   2924		goto fail1;
   2925	}
   2926
   2927	ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
   2928	if (ret == 0)
   2929		return 0;
   2930
   2931	pci_release_regions(pdev);
   2932fail1:
   2933	pci_disable_device(pdev);
   2934	return ret;
   2935}
   2936
   2937static void velocity_pci_remove(struct pci_dev *pdev)
   2938{
   2939	velocity_remove(&pdev->dev);
   2940
   2941	pci_release_regions(pdev);
   2942	pci_disable_device(pdev);
   2943}
   2944
   2945static int velocity_platform_probe(struct platform_device *pdev)
   2946{
   2947	const struct velocity_info_tbl *info;
   2948	int irq;
   2949
   2950	info = of_device_get_match_data(&pdev->dev);
   2951	if (!info)
   2952		return -EINVAL;
   2953
   2954	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
   2955	if (!irq)
   2956		return -EINVAL;
   2957
   2958	return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
   2959}
   2960
   2961static int velocity_platform_remove(struct platform_device *pdev)
   2962{
   2963	velocity_remove(&pdev->dev);
   2964
   2965	return 0;
   2966}
   2967
   2968#ifdef CONFIG_PM_SLEEP
   2969/**
   2970 *	wol_calc_crc		-	WOL CRC
   2971 *	@size: size of the wake mask
   2972 *	@pattern: data pattern
   2973 *	@mask_pattern: mask
   2974 *
   2975 *	Compute the wake on lan crc hashes for the packet header
   2976 *	we are interested in.
   2977 */
   2978static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
   2979{
   2980	u16 crc = 0xFFFF;
   2981	u8 mask;
   2982	int i, j;
   2983
   2984	for (i = 0; i < size; i++) {
   2985		mask = mask_pattern[i];
   2986
   2987		/* Skip this loop if the mask equals to zero */
   2988		if (mask == 0x00)
   2989			continue;
   2990
   2991		for (j = 0; j < 8; j++) {
   2992			if ((mask & 0x01) == 0) {
   2993				mask >>= 1;
   2994				continue;
   2995			}
   2996			mask >>= 1;
   2997			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
   2998		}
   2999	}
   3000	/*	Finally, invert the result once to get the correct data */
   3001	crc = ~crc;
   3002	return bitrev32(crc) >> 16;
   3003}
   3004
   3005/**
   3006 *	velocity_set_wol	-	set up for wake on lan
   3007 *	@vptr: velocity to set WOL status on
   3008 *
   3009 *	Set a card up for wake on lan either by unicast or by
   3010 *	ARP packet.
   3011 *
   3012 *	FIXME: check static buffer is safe here
   3013 */
   3014static int velocity_set_wol(struct velocity_info *vptr)
   3015{
   3016	struct mac_regs __iomem *regs = vptr->mac_regs;
   3017	enum speed_opt spd_dpx = vptr->options.spd_dpx;
   3018	static u8 buf[256];
   3019	int i;
   3020
   3021	static u32 mask_pattern[2][4] = {
   3022		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
   3023		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
   3024	};
   3025
   3026	writew(0xFFFF, &regs->WOLCRClr);
   3027	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
   3028	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
   3029
   3030	/*
   3031	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
   3032	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
   3033	 */
   3034
   3035	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
   3036		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
   3037
   3038	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
   3039		struct arp_packet *arp = (struct arp_packet *) buf;
   3040		u16 crc;
   3041		memset(buf, 0, sizeof(struct arp_packet) + 7);
   3042
   3043		for (i = 0; i < 4; i++)
   3044			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
   3045
   3046		arp->type = htons(ETH_P_ARP);
   3047		arp->ar_op = htons(1);
   3048
   3049		memcpy(arp->ar_tip, vptr->ip_addr, 4);
   3050
   3051		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
   3052				(u8 *) & mask_pattern[0][0]);
   3053
   3054		writew(crc, &regs->PatternCRC[0]);
   3055		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
   3056	}
   3057
   3058	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
   3059	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
   3060
   3061	writew(0x0FFF, &regs->WOLSRClr);
   3062
   3063	if (spd_dpx == SPD_DPX_1000_FULL)
   3064		goto mac_done;
   3065
   3066	if (spd_dpx != SPD_DPX_AUTO)
   3067		goto advertise_done;
   3068
   3069	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
   3070		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
   3071			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
   3072
   3073		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
   3074	}
   3075
   3076	if (vptr->mii_status & VELOCITY_SPEED_1000)
   3077		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
   3078
   3079advertise_done:
   3080	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
   3081
   3082	{
   3083		u8 GCR;
   3084		GCR = readb(&regs->CHIPGCR);
   3085		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
   3086		writeb(GCR, &regs->CHIPGCR);
   3087	}
   3088
   3089mac_done:
   3090	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
   3091	/* Turn on SWPTAG just before entering power mode */
   3092	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
   3093	/* Go to bed ..... */
   3094	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
   3095
   3096	return 0;
   3097}
   3098
   3099/**
   3100 *	velocity_save_context	-	save registers
   3101 *	@vptr: velocity
   3102 *	@context: buffer for stored context
   3103 *
   3104 *	Retrieve the current configuration from the velocity hardware
   3105 *	and stash it in the context structure, for use by the context
   3106 *	restore functions. This allows us to save things we need across
   3107 *	power down states
   3108 */
   3109static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
   3110{
   3111	struct mac_regs __iomem *regs = vptr->mac_regs;
   3112	u16 i;
   3113	u8 __iomem *ptr = (u8 __iomem *)regs;
   3114
   3115	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
   3116		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
   3117
   3118	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
   3119		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
   3120
   3121	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
   3122		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
   3123
   3124}
   3125
   3126static int velocity_suspend(struct device *dev)
   3127{
   3128	struct net_device *netdev = dev_get_drvdata(dev);
   3129	struct velocity_info *vptr = netdev_priv(netdev);
   3130	unsigned long flags;
   3131
   3132	if (!netif_running(vptr->netdev))
   3133		return 0;
   3134
   3135	netif_device_detach(vptr->netdev);
   3136
   3137	spin_lock_irqsave(&vptr->lock, flags);
   3138	if (vptr->pdev)
   3139		pci_save_state(vptr->pdev);
   3140
   3141	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
   3142		velocity_get_ip(vptr);
   3143		velocity_save_context(vptr, &vptr->context);
   3144		velocity_shutdown(vptr);
   3145		velocity_set_wol(vptr);
   3146		if (vptr->pdev)
   3147			pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
   3148		velocity_set_power_state(vptr, PCI_D3hot);
   3149	} else {
   3150		velocity_save_context(vptr, &vptr->context);
   3151		velocity_shutdown(vptr);
   3152		if (vptr->pdev)
   3153			pci_disable_device(vptr->pdev);
   3154		velocity_set_power_state(vptr, PCI_D3hot);
   3155	}
   3156
   3157	spin_unlock_irqrestore(&vptr->lock, flags);
   3158	return 0;
   3159}
   3160
   3161/**
   3162 *	velocity_restore_context	-	restore registers
   3163 *	@vptr: velocity
   3164 *	@context: buffer for stored context
   3165 *
   3166 *	Reload the register configuration from the velocity context
   3167 *	created by velocity_save_context.
   3168 */
   3169static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
   3170{
   3171	struct mac_regs __iomem *regs = vptr->mac_regs;
   3172	int i;
   3173	u8 __iomem *ptr = (u8 __iomem *)regs;
   3174
   3175	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
   3176		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
   3177
   3178	/* Just skip cr0 */
   3179	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
   3180		/* Clear */
   3181		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
   3182		/* Set */
   3183		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
   3184	}
   3185
   3186	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
   3187		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
   3188
   3189	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
   3190		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
   3191
   3192	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
   3193		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
   3194}
   3195
   3196static int velocity_resume(struct device *dev)
   3197{
   3198	struct net_device *netdev = dev_get_drvdata(dev);
   3199	struct velocity_info *vptr = netdev_priv(netdev);
   3200	unsigned long flags;
   3201	int i;
   3202
   3203	if (!netif_running(vptr->netdev))
   3204		return 0;
   3205
   3206	velocity_set_power_state(vptr, PCI_D0);
   3207
   3208	if (vptr->pdev) {
   3209		pci_enable_wake(vptr->pdev, PCI_D0, 0);
   3210		pci_restore_state(vptr->pdev);
   3211	}
   3212
   3213	mac_wol_reset(vptr->mac_regs);
   3214
   3215	spin_lock_irqsave(&vptr->lock, flags);
   3216	velocity_restore_context(vptr, &vptr->context);
   3217	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
   3218	mac_disable_int(vptr->mac_regs);
   3219
   3220	velocity_tx_srv(vptr);
   3221
   3222	for (i = 0; i < vptr->tx.numq; i++) {
   3223		if (vptr->tx.used[i])
   3224			mac_tx_queue_wake(vptr->mac_regs, i);
   3225	}
   3226
   3227	mac_enable_int(vptr->mac_regs);
   3228	spin_unlock_irqrestore(&vptr->lock, flags);
   3229	netif_device_attach(vptr->netdev);
   3230
   3231	return 0;
   3232}
   3233#endif	/* CONFIG_PM_SLEEP */
   3234
   3235static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
   3236
   3237/*
   3238 *	Definition for our device driver. The PCI layer interface
   3239 *	uses this to handle all our card discover and plugging
   3240 */
   3241static struct pci_driver velocity_pci_driver = {
   3242	.name		= VELOCITY_NAME,
   3243	.id_table	= velocity_pci_id_table,
   3244	.probe		= velocity_pci_probe,
   3245	.remove		= velocity_pci_remove,
   3246	.driver = {
   3247		.pm = &velocity_pm_ops,
   3248	},
   3249};
   3250
   3251static struct platform_driver velocity_platform_driver = {
   3252	.probe		= velocity_platform_probe,
   3253	.remove		= velocity_platform_remove,
   3254	.driver = {
   3255		.name = "via-velocity",
   3256		.of_match_table = velocity_of_ids,
   3257		.pm = &velocity_pm_ops,
   3258	},
   3259};
   3260
   3261/**
   3262 *	velocity_ethtool_up	-	pre hook for ethtool
   3263 *	@dev: network device
   3264 *
   3265 *	Called before an ethtool operation. We need to make sure the
   3266 *	chip is out of D3 state before we poke at it. In case of ethtool
   3267 *	ops nesting, only wake the device up in the outermost block.
   3268 */
   3269static int velocity_ethtool_up(struct net_device *dev)
   3270{
   3271	struct velocity_info *vptr = netdev_priv(dev);
   3272
   3273	if (vptr->ethtool_ops_nesting == U32_MAX)
   3274		return -EBUSY;
   3275	if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
   3276		velocity_set_power_state(vptr, PCI_D0);
   3277	return 0;
   3278}
   3279
   3280/**
   3281 *	velocity_ethtool_down	-	post hook for ethtool
   3282 *	@dev: network device
   3283 *
   3284 *	Called after an ethtool operation. Restore the chip back to D3
   3285 *	state if it isn't running. In case of ethtool ops nesting, only
   3286 *	put the device to sleep in the outermost block.
   3287 */
   3288static void velocity_ethtool_down(struct net_device *dev)
   3289{
   3290	struct velocity_info *vptr = netdev_priv(dev);
   3291
   3292	if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
   3293		velocity_set_power_state(vptr, PCI_D3hot);
   3294}
   3295
   3296static int velocity_get_link_ksettings(struct net_device *dev,
   3297				       struct ethtool_link_ksettings *cmd)
   3298{
   3299	struct velocity_info *vptr = netdev_priv(dev);
   3300	struct mac_regs __iomem *regs = vptr->mac_regs;
   3301	u32 status;
   3302	u32 supported, advertising;
   3303
   3304	status = check_connection_type(vptr->mac_regs);
   3305
   3306	supported = SUPPORTED_TP |
   3307			SUPPORTED_Autoneg |
   3308			SUPPORTED_10baseT_Half |
   3309			SUPPORTED_10baseT_Full |
   3310			SUPPORTED_100baseT_Half |
   3311			SUPPORTED_100baseT_Full |
   3312			SUPPORTED_1000baseT_Half |
   3313			SUPPORTED_1000baseT_Full;
   3314
   3315	advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
   3316	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
   3317		advertising |=
   3318			ADVERTISED_10baseT_Half |
   3319			ADVERTISED_10baseT_Full |
   3320			ADVERTISED_100baseT_Half |
   3321			ADVERTISED_100baseT_Full |
   3322			ADVERTISED_1000baseT_Half |
   3323			ADVERTISED_1000baseT_Full;
   3324	} else {
   3325		switch (vptr->options.spd_dpx) {
   3326		case SPD_DPX_1000_FULL:
   3327			advertising |= ADVERTISED_1000baseT_Full;
   3328			break;
   3329		case SPD_DPX_100_HALF:
   3330			advertising |= ADVERTISED_100baseT_Half;
   3331			break;
   3332		case SPD_DPX_100_FULL:
   3333			advertising |= ADVERTISED_100baseT_Full;
   3334			break;
   3335		case SPD_DPX_10_HALF:
   3336			advertising |= ADVERTISED_10baseT_Half;
   3337			break;
   3338		case SPD_DPX_10_FULL:
   3339			advertising |= ADVERTISED_10baseT_Full;
   3340			break;
   3341		default:
   3342			break;
   3343		}
   3344	}
   3345
   3346	if (status & VELOCITY_SPEED_1000)
   3347		cmd->base.speed = SPEED_1000;
   3348	else if (status & VELOCITY_SPEED_100)
   3349		cmd->base.speed = SPEED_100;
   3350	else
   3351		cmd->base.speed = SPEED_10;
   3352
   3353	cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
   3354		AUTONEG_ENABLE : AUTONEG_DISABLE;
   3355	cmd->base.port = PORT_TP;
   3356	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
   3357
   3358	if (status & VELOCITY_DUPLEX_FULL)
   3359		cmd->base.duplex = DUPLEX_FULL;
   3360	else
   3361		cmd->base.duplex = DUPLEX_HALF;
   3362
   3363	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
   3364						supported);
   3365	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
   3366						advertising);
   3367
   3368	return 0;
   3369}
   3370
   3371static int velocity_set_link_ksettings(struct net_device *dev,
   3372				       const struct ethtool_link_ksettings *cmd)
   3373{
   3374	struct velocity_info *vptr = netdev_priv(dev);
   3375	u32 speed = cmd->base.speed;
   3376	u32 curr_status;
   3377	u32 new_status = 0;
   3378	int ret = 0;
   3379
   3380	curr_status = check_connection_type(vptr->mac_regs);
   3381	curr_status &= (~VELOCITY_LINK_FAIL);
   3382
   3383	new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
   3384	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
   3385	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
   3386	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
   3387	new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
   3388		       VELOCITY_DUPLEX_FULL : 0);
   3389
   3390	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
   3391	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
   3392		ret = -EINVAL;
   3393	} else {
   3394		enum speed_opt spd_dpx;
   3395
   3396		if (new_status & VELOCITY_AUTONEG_ENABLE)
   3397			spd_dpx = SPD_DPX_AUTO;
   3398		else if ((new_status & VELOCITY_SPEED_1000) &&
   3399			 (new_status & VELOCITY_DUPLEX_FULL)) {
   3400			spd_dpx = SPD_DPX_1000_FULL;
   3401		} else if (new_status & VELOCITY_SPEED_100)
   3402			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
   3403				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
   3404		else if (new_status & VELOCITY_SPEED_10)
   3405			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
   3406				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
   3407		else
   3408			return -EOPNOTSUPP;
   3409
   3410		vptr->options.spd_dpx = spd_dpx;
   3411
   3412		velocity_set_media_mode(vptr, new_status);
   3413	}
   3414
   3415	return ret;
   3416}
   3417
   3418static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
   3419{
   3420	struct velocity_info *vptr = netdev_priv(dev);
   3421
   3422	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
   3423	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
   3424	if (vptr->pdev)
   3425		strlcpy(info->bus_info, pci_name(vptr->pdev),
   3426						sizeof(info->bus_info));
   3427	else
   3428		strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
   3429}
   3430
   3431static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
   3432{
   3433	struct velocity_info *vptr = netdev_priv(dev);
   3434	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
   3435	wol->wolopts |= WAKE_MAGIC;
   3436	/*
   3437	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
   3438		   wol.wolopts|=WAKE_PHY;
   3439			 */
   3440	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
   3441		wol->wolopts |= WAKE_UCAST;
   3442	if (vptr->wol_opts & VELOCITY_WOL_ARP)
   3443		wol->wolopts |= WAKE_ARP;
   3444	memcpy(&wol->sopass, vptr->wol_passwd, 6);
   3445}
   3446
   3447static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
   3448{
   3449	struct velocity_info *vptr = netdev_priv(dev);
   3450
   3451	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
   3452		return -EFAULT;
   3453	vptr->wol_opts = VELOCITY_WOL_MAGIC;
   3454
   3455	/*
   3456	   if (wol.wolopts & WAKE_PHY) {
   3457	   vptr->wol_opts|=VELOCITY_WOL_PHY;
   3458	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
   3459	   }
   3460	 */
   3461
   3462	if (wol->wolopts & WAKE_MAGIC) {
   3463		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
   3464		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
   3465	}
   3466	if (wol->wolopts & WAKE_UCAST) {
   3467		vptr->wol_opts |= VELOCITY_WOL_UCAST;
   3468		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
   3469	}
   3470	if (wol->wolopts & WAKE_ARP) {
   3471		vptr->wol_opts |= VELOCITY_WOL_ARP;
   3472		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
   3473	}
   3474	memcpy(vptr->wol_passwd, wol->sopass, 6);
   3475	return 0;
   3476}
   3477
   3478static int get_pending_timer_val(int val)
   3479{
   3480	int mult_bits = val >> 6;
   3481	int mult = 1;
   3482
   3483	switch (mult_bits)
   3484	{
   3485	case 1:
   3486		mult = 4; break;
   3487	case 2:
   3488		mult = 16; break;
   3489	case 3:
   3490		mult = 64; break;
   3491	case 0:
   3492	default:
   3493		break;
   3494	}
   3495
   3496	return (val & 0x3f) * mult;
   3497}
   3498
   3499static void set_pending_timer_val(int *val, u32 us)
   3500{
   3501	u8 mult = 0;
   3502	u8 shift = 0;
   3503
   3504	if (us >= 0x3f) {
   3505		mult = 1; /* mult with 4 */
   3506		shift = 2;
   3507	}
   3508	if (us >= 0x3f * 4) {
   3509		mult = 2; /* mult with 16 */
   3510		shift = 4;
   3511	}
   3512	if (us >= 0x3f * 16) {
   3513		mult = 3; /* mult with 64 */
   3514		shift = 6;
   3515	}
   3516
   3517	*val = (mult << 6) | ((us >> shift) & 0x3f);
   3518}
   3519
   3520
   3521static int velocity_get_coalesce(struct net_device *dev,
   3522				 struct ethtool_coalesce *ecmd,
   3523				 struct kernel_ethtool_coalesce *kernel_coal,
   3524				 struct netlink_ext_ack *extack)
   3525{
   3526	struct velocity_info *vptr = netdev_priv(dev);
   3527
   3528	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
   3529	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
   3530
   3531	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
   3532	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
   3533
   3534	return 0;
   3535}
   3536
   3537static int velocity_set_coalesce(struct net_device *dev,
   3538				 struct ethtool_coalesce *ecmd,
   3539				 struct kernel_ethtool_coalesce *kernel_coal,
   3540				 struct netlink_ext_ack *extack)
   3541{
   3542	struct velocity_info *vptr = netdev_priv(dev);
   3543	int max_us = 0x3f * 64;
   3544	unsigned long flags;
   3545
   3546	/* 6 bits of  */
   3547	if (ecmd->tx_coalesce_usecs > max_us)
   3548		return -EINVAL;
   3549	if (ecmd->rx_coalesce_usecs > max_us)
   3550		return -EINVAL;
   3551
   3552	if (ecmd->tx_max_coalesced_frames > 0xff)
   3553		return -EINVAL;
   3554	if (ecmd->rx_max_coalesced_frames > 0xff)
   3555		return -EINVAL;
   3556
   3557	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
   3558	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
   3559
   3560	set_pending_timer_val(&vptr->options.rxqueue_timer,
   3561			ecmd->rx_coalesce_usecs);
   3562	set_pending_timer_val(&vptr->options.txqueue_timer,
   3563			ecmd->tx_coalesce_usecs);
   3564
   3565	/* Setup the interrupt suppression and queue timers */
   3566	spin_lock_irqsave(&vptr->lock, flags);
   3567	mac_disable_int(vptr->mac_regs);
   3568	setup_adaptive_interrupts(vptr);
   3569	setup_queue_timers(vptr);
   3570
   3571	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
   3572	mac_clear_isr(vptr->mac_regs);
   3573	mac_enable_int(vptr->mac_regs);
   3574	spin_unlock_irqrestore(&vptr->lock, flags);
   3575
   3576	return 0;
   3577}
   3578
   3579static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
   3580	"rx_all",
   3581	"rx_ok",
   3582	"tx_ok",
   3583	"rx_error",
   3584	"rx_runt_ok",
   3585	"rx_runt_err",
   3586	"rx_64",
   3587	"tx_64",
   3588	"rx_65_to_127",
   3589	"tx_65_to_127",
   3590	"rx_128_to_255",
   3591	"tx_128_to_255",
   3592	"rx_256_to_511",
   3593	"tx_256_to_511",
   3594	"rx_512_to_1023",
   3595	"tx_512_to_1023",
   3596	"rx_1024_to_1518",
   3597	"tx_1024_to_1518",
   3598	"tx_ether_collisions",
   3599	"rx_crc_errors",
   3600	"rx_jumbo",
   3601	"tx_jumbo",
   3602	"rx_mac_control_frames",
   3603	"tx_mac_control_frames",
   3604	"rx_frame_alignment_errors",
   3605	"rx_long_ok",
   3606	"rx_long_err",
   3607	"tx_sqe_errors",
   3608	"rx_no_buf",
   3609	"rx_symbol_errors",
   3610	"in_range_length_errors",
   3611	"late_collisions"
   3612};
   3613
   3614static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
   3615{
   3616	switch (sset) {
   3617	case ETH_SS_STATS:
   3618		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
   3619		break;
   3620	}
   3621}
   3622
   3623static int velocity_get_sset_count(struct net_device *dev, int sset)
   3624{
   3625	switch (sset) {
   3626	case ETH_SS_STATS:
   3627		return ARRAY_SIZE(velocity_gstrings);
   3628	default:
   3629		return -EOPNOTSUPP;
   3630	}
   3631}
   3632
   3633static void velocity_get_ethtool_stats(struct net_device *dev,
   3634				       struct ethtool_stats *stats, u64 *data)
   3635{
   3636	if (netif_running(dev)) {
   3637		struct velocity_info *vptr = netdev_priv(dev);
   3638		u32 *p = vptr->mib_counter;
   3639		int i;
   3640
   3641		spin_lock_irq(&vptr->lock);
   3642		velocity_update_hw_mibs(vptr);
   3643		spin_unlock_irq(&vptr->lock);
   3644
   3645		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
   3646			*data++ = *p++;
   3647	}
   3648}
   3649
   3650static const struct ethtool_ops velocity_ethtool_ops = {
   3651	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
   3652				     ETHTOOL_COALESCE_MAX_FRAMES,
   3653	.get_drvinfo		= velocity_get_drvinfo,
   3654	.get_wol		= velocity_ethtool_get_wol,
   3655	.set_wol		= velocity_ethtool_set_wol,
   3656	.get_link		= velocity_get_link,
   3657	.get_strings		= velocity_get_strings,
   3658	.get_sset_count		= velocity_get_sset_count,
   3659	.get_ethtool_stats	= velocity_get_ethtool_stats,
   3660	.get_coalesce		= velocity_get_coalesce,
   3661	.set_coalesce		= velocity_set_coalesce,
   3662	.begin			= velocity_ethtool_up,
   3663	.complete		= velocity_ethtool_down,
   3664	.get_link_ksettings	= velocity_get_link_ksettings,
   3665	.set_link_ksettings	= velocity_set_link_ksettings,
   3666};
   3667
   3668#if defined(CONFIG_PM) && defined(CONFIG_INET)
   3669static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
   3670{
   3671	struct in_ifaddr *ifa = ptr;
   3672	struct net_device *dev = ifa->ifa_dev->dev;
   3673
   3674	if (dev_net(dev) == &init_net &&
   3675	    dev->netdev_ops == &velocity_netdev_ops)
   3676		velocity_get_ip(netdev_priv(dev));
   3677
   3678	return NOTIFY_DONE;
   3679}
   3680
   3681static struct notifier_block velocity_inetaddr_notifier = {
   3682	.notifier_call	= velocity_netdev_event,
   3683};
   3684
   3685static void velocity_register_notifier(void)
   3686{
   3687	register_inetaddr_notifier(&velocity_inetaddr_notifier);
   3688}
   3689
   3690static void velocity_unregister_notifier(void)
   3691{
   3692	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
   3693}
   3694
   3695#else
   3696
   3697#define velocity_register_notifier()	do {} while (0)
   3698#define velocity_unregister_notifier()	do {} while (0)
   3699
   3700#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
   3701
   3702/**
   3703 *	velocity_init_module	-	load time function
   3704 *
   3705 *	Called when the velocity module is loaded. The PCI driver
   3706 *	is registered with the PCI layer, and in turn will call
   3707 *	the probe functions for each velocity adapter installed
   3708 *	in the system.
   3709 */
   3710static int __init velocity_init_module(void)
   3711{
   3712	int ret_pci, ret_platform;
   3713
   3714	velocity_register_notifier();
   3715
   3716	ret_pci = pci_register_driver(&velocity_pci_driver);
   3717	ret_platform = platform_driver_register(&velocity_platform_driver);
   3718
   3719	/* if both_registers failed, remove the notifier */
   3720	if ((ret_pci < 0) && (ret_platform < 0)) {
   3721		velocity_unregister_notifier();
   3722		return ret_pci;
   3723	}
   3724
   3725	return 0;
   3726}
   3727
   3728/**
   3729 *	velocity_cleanup_module		-	module unload
   3730 *
   3731 *	When the velocity hardware is unloaded this function is called.
   3732 *	It will clean up the notifiers and the unregister the PCI
   3733 *	driver interface for this hardware. This in turn cleans up
   3734 *	all discovered interfaces before returning from the function
   3735 */
   3736static void __exit velocity_cleanup_module(void)
   3737{
   3738	velocity_unregister_notifier();
   3739
   3740	pci_unregister_driver(&velocity_pci_driver);
   3741	platform_driver_unregister(&velocity_platform_driver);
   3742}
   3743
   3744module_init(velocity_init_module);
   3745module_exit(velocity_cleanup_module);