tc35815.c (64317B)
1/* 2 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux. 3 * 4 * Based on skelton.c by Donald Becker. 5 * 6 * This driver is a replacement of older and less maintained version. 7 * This is a header of the older version: 8 * -----<snip>----- 9 * Copyright 2001 MontaVista Software Inc. 10 * Author: MontaVista Software, Inc. 11 * ahennessy@mvista.com 12 * Copyright (C) 2000-2001 Toshiba Corporation 13 * static const char *version = 14 * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n"; 15 * -----<snip>----- 16 * 17 * This file is subject to the terms and conditions of the GNU General Public 18 * License. See the file "COPYING" in the main directory of this archive 19 * for more details. 20 * 21 * (C) Copyright TOSHIBA CORPORATION 2004-2005 22 * All Rights Reserved. 23 */ 24 25#define DRV_VERSION "1.39" 26static const char version[] = "tc35815.c:v" DRV_VERSION "\n"; 27#define MODNAME "tc35815" 28 29#include <linux/module.h> 30#include <linux/kernel.h> 31#include <linux/types.h> 32#include <linux/fcntl.h> 33#include <linux/interrupt.h> 34#include <linux/ioport.h> 35#include <linux/in.h> 36#include <linux/if_vlan.h> 37#include <linux/slab.h> 38#include <linux/string.h> 39#include <linux/spinlock.h> 40#include <linux/errno.h> 41#include <linux/netdevice.h> 42#include <linux/etherdevice.h> 43#include <linux/skbuff.h> 44#include <linux/delay.h> 45#include <linux/pci.h> 46#include <linux/phy.h> 47#include <linux/workqueue.h> 48#include <linux/platform_device.h> 49#include <linux/prefetch.h> 50#include <asm/io.h> 51#include <asm/byteorder.h> 52 53enum tc35815_chiptype { 54 TC35815CF = 0, 55 TC35815_NWU, 56 TC35815_TX4939, 57}; 58 59/* indexed by tc35815_chiptype, above */ 60static const struct { 61 const char *name; 62} chip_info[] = { 63 { "TOSHIBA TC35815CF 10/100BaseTX" }, 64 { "TOSHIBA TC35815 with Wake on LAN" }, 65 { "TOSHIBA TC35815/TX4939" }, 66}; 67 68static const struct pci_device_id tc35815_pci_tbl[] = { 69 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, 70 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, 71 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 72 {0,} 73}; 74MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl); 75 76/* see MODULE_PARM_DESC */ 77static struct tc35815_options { 78 int speed; 79 int duplex; 80} options; 81 82/* 83 * Registers 84 */ 85struct tc35815_regs { 86 __u32 DMA_Ctl; /* 0x00 */ 87 __u32 TxFrmPtr; 88 __u32 TxThrsh; 89 __u32 TxPollCtr; 90 __u32 BLFrmPtr; 91 __u32 RxFragSize; 92 __u32 Int_En; 93 __u32 FDA_Bas; 94 __u32 FDA_Lim; /* 0x20 */ 95 __u32 Int_Src; 96 __u32 unused0[2]; 97 __u32 PauseCnt; 98 __u32 RemPauCnt; 99 __u32 TxCtlFrmStat; 100 __u32 unused1; 101 __u32 MAC_Ctl; /* 0x40 */ 102 __u32 CAM_Ctl; 103 __u32 Tx_Ctl; 104 __u32 Tx_Stat; 105 __u32 Rx_Ctl; 106 __u32 Rx_Stat; 107 __u32 MD_Data; 108 __u32 MD_CA; 109 __u32 CAM_Adr; /* 0x60 */ 110 __u32 CAM_Data; 111 __u32 CAM_Ena; 112 __u32 PROM_Ctl; 113 __u32 PROM_Data; 114 __u32 Algn_Cnt; 115 __u32 CRC_Cnt; 116 __u32 Miss_Cnt; 117}; 118 119/* 120 * Bit assignments 121 */ 122/* DMA_Ctl bit assign ------------------------------------------------------- */ 123#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ 124#define DMA_RxAlign_1 0x00400000 125#define DMA_RxAlign_2 0x00800000 126#define DMA_RxAlign_3 0x00c00000 127#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ 128#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */ 129#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ 130#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ 131#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ 132#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ 133#define DMA_TestMode 0x00002000 /* 1:Test Mode */ 134#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ 135#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ 136 137/* RxFragSize bit assign ---------------------------------------------------- */ 138#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ 139#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ 140 141/* MAC_Ctl bit assign ------------------------------------------------------- */ 142#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ 143#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ 144#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ 145#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ 146#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ 147#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ 148#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ 149#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ 150#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ 151#define MAC_Reset 0x00000004 /* 1:Software Reset */ 152#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ 153#define MAC_HaltReq 0x00000001 /* 1:Halt request */ 154 155/* PROM_Ctl bit assign ------------------------------------------------------ */ 156#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ 157#define PROM_Read 0x00004000 /*10:Read operation */ 158#define PROM_Write 0x00002000 /*01:Write operation */ 159#define PROM_Erase 0x00006000 /*11:Erase operation */ 160 /*00:Enable or Disable Writting, */ 161 /* as specified in PROM_Addr. */ 162#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ 163 /*00xxxx: disable */ 164 165/* CAM_Ctl bit assign ------------------------------------------------------- */ 166#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ 167#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ 168 /* accept other */ 169#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ 170#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ 171#define CAM_StationAcc 0x00000001 /* 1:unicast accept */ 172 173/* CAM_Ena bit assign ------------------------------------------------------- */ 174#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ 175#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ 176#define CAM_Ena_Bit(index) (1 << (index)) 177#define CAM_ENTRY_DESTINATION 0 178#define CAM_ENTRY_SOURCE 1 179#define CAM_ENTRY_MACCTL 20 180 181/* Tx_Ctl bit assign -------------------------------------------------------- */ 182#define Tx_En 0x00000001 /* 1:Transmit enable */ 183#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ 184#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ 185#define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ 186#define Tx_FBack 0x00000010 /* 1:Fast Back-off */ 187#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ 188#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ 189#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ 190#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ 191#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ 192#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ 193#define Tx_EnComp 0x00004000 /* 1:Enable Completion */ 194 195/* Tx_Stat bit assign ------------------------------------------------------- */ 196#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ 197#define Tx_ExColl 0x00000010 /* Excessive Collision */ 198#define Tx_TXDefer 0x00000020 /* Transmit Defered */ 199#define Tx_Paused 0x00000040 /* Transmit Paused */ 200#define Tx_IntTx 0x00000080 /* Interrupt on Tx */ 201#define Tx_Under 0x00000100 /* Underrun */ 202#define Tx_Defer 0x00000200 /* Deferral */ 203#define Tx_NCarr 0x00000400 /* No Carrier */ 204#define Tx_10Stat 0x00000800 /* 10Mbps Status */ 205#define Tx_LateColl 0x00001000 /* Late Collision */ 206#define Tx_TxPar 0x00002000 /* Tx Parity Error */ 207#define Tx_Comp 0x00004000 /* Completion */ 208#define Tx_Halted 0x00008000 /* Tx Halted */ 209#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ 210 211/* Rx_Ctl bit assign -------------------------------------------------------- */ 212#define Rx_EnGood 0x00004000 /* 1:Enable Good */ 213#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ 214#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ 215#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ 216#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ 217#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ 218#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ 219#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ 220#define Rx_ShortEn 0x00000008 /* 1:Short Enable */ 221#define Rx_LongEn 0x00000004 /* 1:Long Enable */ 222#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ 223#define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ 224 225/* Rx_Stat bit assign ------------------------------------------------------- */ 226#define Rx_Halted 0x00008000 /* Rx Halted */ 227#define Rx_Good 0x00004000 /* Rx Good */ 228#define Rx_RxPar 0x00002000 /* Rx Parity Error */ 229#define Rx_TypePkt 0x00001000 /* Rx Type Packet */ 230#define Rx_LongErr 0x00000800 /* Rx Long Error */ 231#define Rx_Over 0x00000400 /* Rx Overflow */ 232#define Rx_CRCErr 0x00000200 /* Rx CRC Error */ 233#define Rx_Align 0x00000100 /* Rx Alignment Error */ 234#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ 235#define Rx_IntRx 0x00000040 /* Rx Interrupt */ 236#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ 237#define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */ 238 239#define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */ 240 241/* Int_En bit assign -------------------------------------------------------- */ 242#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ 243#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */ 244#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ 245#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ 246#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ 247#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ 248#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ 249#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ 250#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ 251#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ 252#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ 253#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ 254 /* Exhausted Enable */ 255 256/* Int_Src bit assign ------------------------------------------------------- */ 257#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ 258#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ 259#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ 260#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ 261#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ 262#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ 263#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ 264#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ 265#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ 266#define Int_SWInt 0x00000020 /* 1:Software request & Clear */ 267#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ 268#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ 269#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ 270#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ 271#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ 272 273/* MD_CA bit assign --------------------------------------------------------- */ 274#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */ 275#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ 276#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ 277 278 279/* 280 * Descriptors 281 */ 282 283/* Frame descriptor */ 284struct FDesc { 285 volatile __u32 FDNext; 286 volatile __u32 FDSystem; 287 volatile __u32 FDStat; 288 volatile __u32 FDCtl; 289}; 290 291/* Buffer descriptor */ 292struct BDesc { 293 volatile __u32 BuffData; 294 volatile __u32 BDCtl; 295}; 296 297#define FD_ALIGN 16 298 299/* Frame Descriptor bit assign ---------------------------------------------- */ 300#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ 301#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ 302#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ 303#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ 304#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ 305#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ 306#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ 307#define FD_FrmOpt_Packing 0x04000000 /* Rx only */ 308#define FD_CownsFD 0x80000000 /* FD Controller owner bit */ 309#define FD_Next_EOL 0x00000001 /* FD EOL indicator */ 310#define FD_BDCnt_SHIFT 16 311 312/* Buffer Descriptor bit assign --------------------------------------------- */ 313#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ 314#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ 315#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ 316#define BD_CownsBD 0x80000000 /* BD Controller owner bit */ 317#define BD_RxBDID_SHIFT 16 318#define BD_RxBDSeqN_SHIFT 24 319 320 321/* Some useful constants. */ 322 323#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \ 324 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ 325 Tx_En) /* maybe 0x7b01 */ 326/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */ 327#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 328 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 329#define INT_EN_CMD (Int_NRAbtEn | \ 330 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \ 331 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \ 332 Int_STargAbtEn | \ 333 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ 334#define DMA_CTL_CMD DMA_BURST_SIZE 335#define HAVE_DMA_RXALIGN(lp) likely((lp)->chiptype != TC35815CF) 336 337/* Tuning parameters */ 338#define DMA_BURST_SIZE 32 339#define TX_THRESHOLD 1024 340/* used threshold with packet max byte for low pci transfer ability.*/ 341#define TX_THRESHOLD_MAX 1536 342/* setting threshold max value when overrun error occurred this count. */ 343#define TX_THRESHOLD_KEEP_LIMIT 10 344 345/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 346#define FD_PAGE_NUM 4 347#define RX_BUF_NUM 128 /* < 256 */ 348#define RX_FD_NUM 256 /* >= 32 */ 349#define TX_FD_NUM 128 350#if RX_CTL_CMD & Rx_LongEn 351#define RX_BUF_SIZE PAGE_SIZE 352#elif RX_CTL_CMD & Rx_StripCRC 353#define RX_BUF_SIZE \ 354 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN) 355#else 356#define RX_BUF_SIZE \ 357 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) 358#endif 359#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 360#define NAPI_WEIGHT 16 361 362struct TxFD { 363 struct FDesc fd; 364 struct BDesc bd; 365 struct BDesc unused; 366}; 367 368struct RxFD { 369 struct FDesc fd; 370 struct BDesc bd[]; /* variable length */ 371}; 372 373struct FrFD { 374 struct FDesc fd; 375 struct BDesc bd[RX_BUF_NUM]; 376}; 377 378 379#define tc_readl(addr) ioread32(addr) 380#define tc_writel(d, addr) iowrite32(d, addr) 381 382#define TC35815_TX_TIMEOUT msecs_to_jiffies(400) 383 384/* Information that need to be kept for each controller. */ 385struct tc35815_local { 386 struct pci_dev *pci_dev; 387 388 struct net_device *dev; 389 struct napi_struct napi; 390 391 /* statistics */ 392 struct { 393 int max_tx_qlen; 394 int tx_ints; 395 int rx_ints; 396 int tx_underrun; 397 } lstats; 398 399 /* Tx control lock. This protects the transmit buffer ring 400 * state along with the "tx full" state of the driver. This 401 * means all netif_queue flow control actions are protected 402 * by this lock as well. 403 */ 404 spinlock_t lock; 405 spinlock_t rx_lock; 406 407 struct mii_bus *mii_bus; 408 int duplex; 409 int speed; 410 int link; 411 struct work_struct restart_work; 412 413 /* 414 * Transmitting: Batch Mode. 415 * 1 BD in 1 TxFD. 416 * Receiving: Non-Packing Mode. 417 * 1 circular FD for Free Buffer List. 418 * RX_BUF_NUM BD in Free Buffer FD. 419 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 420 */ 421 void *fd_buf; /* for TxFD, RxFD, FrFD */ 422 dma_addr_t fd_buf_dma; 423 struct TxFD *tfd_base; 424 unsigned int tfd_start; 425 unsigned int tfd_end; 426 struct RxFD *rfd_base; 427 struct RxFD *rfd_limit; 428 struct RxFD *rfd_cur; 429 struct FrFD *fbl_ptr; 430 unsigned int fbl_count; 431 struct { 432 struct sk_buff *skb; 433 dma_addr_t skb_dma; 434 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 435 u32 msg_enable; 436 enum tc35815_chiptype chiptype; 437}; 438 439static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) 440{ 441 return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf); 442} 443#ifdef DEBUG 444static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) 445{ 446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); 447} 448#endif 449static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, 450 struct pci_dev *hwdev, 451 dma_addr_t *dma_handle) 452{ 453 struct sk_buff *skb; 454 skb = netdev_alloc_skb(dev, RX_BUF_SIZE); 455 if (!skb) 456 return NULL; 457 *dma_handle = dma_map_single(&hwdev->dev, skb->data, RX_BUF_SIZE, 458 DMA_FROM_DEVICE); 459 if (dma_mapping_error(&hwdev->dev, *dma_handle)) { 460 dev_kfree_skb_any(skb); 461 return NULL; 462 } 463 skb_reserve(skb, 2); /* make IP header 4byte aligned */ 464 return skb; 465} 466 467static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle) 468{ 469 dma_unmap_single(&hwdev->dev, dma_handle, RX_BUF_SIZE, 470 DMA_FROM_DEVICE); 471 dev_kfree_skb_any(skb); 472} 473 474/* Index to functions, as function prototypes. */ 475 476static int tc35815_open(struct net_device *dev); 477static netdev_tx_t tc35815_send_packet(struct sk_buff *skb, 478 struct net_device *dev); 479static irqreturn_t tc35815_interrupt(int irq, void *dev_id); 480static int tc35815_rx(struct net_device *dev, int limit); 481static int tc35815_poll(struct napi_struct *napi, int budget); 482static void tc35815_txdone(struct net_device *dev); 483static int tc35815_close(struct net_device *dev); 484static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 485static void tc35815_set_multicast_list(struct net_device *dev); 486static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue); 487#ifdef CONFIG_NET_POLL_CONTROLLER 488static void tc35815_poll_controller(struct net_device *dev); 489#endif 490static const struct ethtool_ops tc35815_ethtool_ops; 491 492/* Example routines you must write ;->. */ 493static void tc35815_chip_reset(struct net_device *dev); 494static void tc35815_chip_init(struct net_device *dev); 495 496#ifdef DEBUG 497static void panic_queues(struct net_device *dev); 498#endif 499 500static void tc35815_restart_work(struct work_struct *work); 501 502static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 503{ 504 struct net_device *dev = bus->priv; 505 struct tc35815_regs __iomem *tr = 506 (struct tc35815_regs __iomem *)dev->base_addr; 507 unsigned long timeout = jiffies + HZ; 508 509 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); 510 udelay(12); /* it takes 32 x 400ns at least */ 511 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { 512 if (time_after(jiffies, timeout)) 513 return -EIO; 514 cpu_relax(); 515 } 516 return tc_readl(&tr->MD_Data) & 0xffff; 517} 518 519static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) 520{ 521 struct net_device *dev = bus->priv; 522 struct tc35815_regs __iomem *tr = 523 (struct tc35815_regs __iomem *)dev->base_addr; 524 unsigned long timeout = jiffies + HZ; 525 526 tc_writel(val, &tr->MD_Data); 527 tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f), 528 &tr->MD_CA); 529 udelay(12); /* it takes 32 x 400ns at least */ 530 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { 531 if (time_after(jiffies, timeout)) 532 return -EIO; 533 cpu_relax(); 534 } 535 return 0; 536} 537 538static void tc_handle_link_change(struct net_device *dev) 539{ 540 struct tc35815_local *lp = netdev_priv(dev); 541 struct phy_device *phydev = dev->phydev; 542 unsigned long flags; 543 int status_change = 0; 544 545 spin_lock_irqsave(&lp->lock, flags); 546 if (phydev->link && 547 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { 548 struct tc35815_regs __iomem *tr = 549 (struct tc35815_regs __iomem *)dev->base_addr; 550 u32 reg; 551 552 reg = tc_readl(&tr->MAC_Ctl); 553 reg |= MAC_HaltReq; 554 tc_writel(reg, &tr->MAC_Ctl); 555 if (phydev->duplex == DUPLEX_FULL) 556 reg |= MAC_FullDup; 557 else 558 reg &= ~MAC_FullDup; 559 tc_writel(reg, &tr->MAC_Ctl); 560 reg &= ~MAC_HaltReq; 561 tc_writel(reg, &tr->MAC_Ctl); 562 563 /* 564 * TX4939 PCFG.SPEEDn bit will be changed on 565 * NETDEV_CHANGE event. 566 */ 567 /* 568 * WORKAROUND: enable LostCrS only if half duplex 569 * operation. 570 * (TX4939 does not have EnLCarr) 571 */ 572 if (phydev->duplex == DUPLEX_HALF && 573 lp->chiptype != TC35815_TX4939) 574 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, 575 &tr->Tx_Ctl); 576 577 lp->speed = phydev->speed; 578 lp->duplex = phydev->duplex; 579 status_change = 1; 580 } 581 582 if (phydev->link != lp->link) { 583 if (phydev->link) { 584 /* delayed promiscuous enabling */ 585 if (dev->flags & IFF_PROMISC) 586 tc35815_set_multicast_list(dev); 587 } else { 588 lp->speed = 0; 589 lp->duplex = -1; 590 } 591 lp->link = phydev->link; 592 593 status_change = 1; 594 } 595 spin_unlock_irqrestore(&lp->lock, flags); 596 597 if (status_change && netif_msg_link(lp)) { 598 phy_print_status(phydev); 599 pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n", 600 dev->name, 601 phy_read(phydev, MII_BMCR), 602 phy_read(phydev, MII_BMSR), 603 phy_read(phydev, MII_LPA)); 604 } 605} 606 607static int tc_mii_probe(struct net_device *dev) 608{ 609 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 610 struct tc35815_local *lp = netdev_priv(dev); 611 struct phy_device *phydev; 612 613 phydev = phy_find_first(lp->mii_bus); 614 if (!phydev) { 615 printk(KERN_ERR "%s: no PHY found\n", dev->name); 616 return -ENODEV; 617 } 618 619 /* attach the mac to the phy */ 620 phydev = phy_connect(dev, phydev_name(phydev), 621 &tc_handle_link_change, 622 lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); 623 if (IS_ERR(phydev)) { 624 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 625 return PTR_ERR(phydev); 626 } 627 628 phy_attached_info(phydev); 629 630 /* mask with MAC supported features */ 631 phy_set_max_speed(phydev, SPEED_100); 632 if (options.speed == 10) { 633 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); 634 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); 635 } else if (options.speed == 100) { 636 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); 637 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); 638 } 639 if (options.duplex == 1) { 640 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); 641 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); 642 } else if (options.duplex == 2) { 643 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); 644 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); 645 } 646 linkmode_andnot(phydev->supported, phydev->supported, mask); 647 linkmode_copy(phydev->advertising, phydev->supported); 648 649 lp->link = 0; 650 lp->speed = 0; 651 lp->duplex = -1; 652 653 return 0; 654} 655 656static int tc_mii_init(struct net_device *dev) 657{ 658 struct tc35815_local *lp = netdev_priv(dev); 659 int err; 660 661 lp->mii_bus = mdiobus_alloc(); 662 if (lp->mii_bus == NULL) { 663 err = -ENOMEM; 664 goto err_out; 665 } 666 667 lp->mii_bus->name = "tc35815_mii_bus"; 668 lp->mii_bus->read = tc_mdio_read; 669 lp->mii_bus->write = tc_mdio_write; 670 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 671 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); 672 lp->mii_bus->priv = dev; 673 lp->mii_bus->parent = &lp->pci_dev->dev; 674 err = mdiobus_register(lp->mii_bus); 675 if (err) 676 goto err_out_free_mii_bus; 677 err = tc_mii_probe(dev); 678 if (err) 679 goto err_out_unregister_bus; 680 return 0; 681 682err_out_unregister_bus: 683 mdiobus_unregister(lp->mii_bus); 684err_out_free_mii_bus: 685 mdiobus_free(lp->mii_bus); 686err_out: 687 return err; 688} 689 690#ifdef CONFIG_CPU_TX49XX 691/* 692 * Find a platform_device providing a MAC address. The platform code 693 * should provide a "tc35815-mac" device with a MAC address in its 694 * platform_data. 695 */ 696static int tc35815_mac_match(struct device *dev, const void *data) 697{ 698 struct platform_device *plat_dev = to_platform_device(dev); 699 const struct pci_dev *pci_dev = data; 700 unsigned int id = pci_dev->irq; 701 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; 702} 703 704static int tc35815_read_plat_dev_addr(struct net_device *dev) 705{ 706 struct tc35815_local *lp = netdev_priv(dev); 707 struct device *pd = bus_find_device(&platform_bus_type, NULL, 708 lp->pci_dev, tc35815_mac_match); 709 if (pd) { 710 if (pd->platform_data) 711 eth_hw_addr_set(dev, pd->platform_data); 712 put_device(pd); 713 return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; 714 } 715 return -ENODEV; 716} 717#else 718static int tc35815_read_plat_dev_addr(struct net_device *dev) 719{ 720 return -ENODEV; 721} 722#endif 723 724static int tc35815_init_dev_addr(struct net_device *dev) 725{ 726 struct tc35815_regs __iomem *tr = 727 (struct tc35815_regs __iomem *)dev->base_addr; 728 u8 addr[ETH_ALEN]; 729 int i; 730 731 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) 732 ; 733 for (i = 0; i < 6; i += 2) { 734 unsigned short data; 735 tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); 736 while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) 737 ; 738 data = tc_readl(&tr->PROM_Data); 739 addr[i] = data & 0xff; 740 addr[i+1] = data >> 8; 741 } 742 eth_hw_addr_set(dev, addr); 743 if (!is_valid_ether_addr(dev->dev_addr)) 744 return tc35815_read_plat_dev_addr(dev); 745 return 0; 746} 747 748static const struct net_device_ops tc35815_netdev_ops = { 749 .ndo_open = tc35815_open, 750 .ndo_stop = tc35815_close, 751 .ndo_start_xmit = tc35815_send_packet, 752 .ndo_get_stats = tc35815_get_stats, 753 .ndo_set_rx_mode = tc35815_set_multicast_list, 754 .ndo_tx_timeout = tc35815_tx_timeout, 755 .ndo_eth_ioctl = phy_do_ioctl_running, 756 .ndo_validate_addr = eth_validate_addr, 757 .ndo_set_mac_address = eth_mac_addr, 758#ifdef CONFIG_NET_POLL_CONTROLLER 759 .ndo_poll_controller = tc35815_poll_controller, 760#endif 761}; 762 763static int tc35815_init_one(struct pci_dev *pdev, 764 const struct pci_device_id *ent) 765{ 766 void __iomem *ioaddr = NULL; 767 struct net_device *dev; 768 struct tc35815_local *lp; 769 int rc; 770 771 static int printed_version; 772 if (!printed_version++) { 773 printk(version); 774 dev_printk(KERN_DEBUG, &pdev->dev, 775 "speed:%d duplex:%d\n", 776 options.speed, options.duplex); 777 } 778 779 if (!pdev->irq) { 780 dev_warn(&pdev->dev, "no IRQ assigned.\n"); 781 return -ENODEV; 782 } 783 784 /* dev zeroed in alloc_etherdev */ 785 dev = alloc_etherdev(sizeof(*lp)); 786 if (dev == NULL) 787 return -ENOMEM; 788 789 SET_NETDEV_DEV(dev, &pdev->dev); 790 lp = netdev_priv(dev); 791 lp->dev = dev; 792 793 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 794 rc = pcim_enable_device(pdev); 795 if (rc) 796 goto err_out; 797 rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME); 798 if (rc) 799 goto err_out; 800 pci_set_master(pdev); 801 ioaddr = pcim_iomap_table(pdev)[1]; 802 803 /* Initialize the device structure. */ 804 dev->netdev_ops = &tc35815_netdev_ops; 805 dev->ethtool_ops = &tc35815_ethtool_ops; 806 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 807 netif_napi_add_weight(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 808 809 dev->irq = pdev->irq; 810 dev->base_addr = (unsigned long)ioaddr; 811 812 INIT_WORK(&lp->restart_work, tc35815_restart_work); 813 spin_lock_init(&lp->lock); 814 spin_lock_init(&lp->rx_lock); 815 lp->pci_dev = pdev; 816 lp->chiptype = ent->driver_data; 817 818 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; 819 pci_set_drvdata(pdev, dev); 820 821 /* Soft reset the chip. */ 822 tc35815_chip_reset(dev); 823 824 /* Retrieve the ethernet address. */ 825 if (tc35815_init_dev_addr(dev)) { 826 dev_warn(&pdev->dev, "not valid ether addr\n"); 827 eth_hw_addr_random(dev); 828 } 829 830 rc = register_netdev(dev); 831 if (rc) 832 goto err_out; 833 834 printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", 835 dev->name, 836 chip_info[ent->driver_data].name, 837 dev->base_addr, 838 dev->dev_addr, 839 dev->irq); 840 841 rc = tc_mii_init(dev); 842 if (rc) 843 goto err_out_unregister; 844 845 return 0; 846 847err_out_unregister: 848 unregister_netdev(dev); 849err_out: 850 free_netdev(dev); 851 return rc; 852} 853 854 855static void tc35815_remove_one(struct pci_dev *pdev) 856{ 857 struct net_device *dev = pci_get_drvdata(pdev); 858 struct tc35815_local *lp = netdev_priv(dev); 859 860 phy_disconnect(dev->phydev); 861 mdiobus_unregister(lp->mii_bus); 862 mdiobus_free(lp->mii_bus); 863 unregister_netdev(dev); 864 free_netdev(dev); 865} 866 867static int 868tc35815_init_queues(struct net_device *dev) 869{ 870 struct tc35815_local *lp = netdev_priv(dev); 871 int i; 872 unsigned long fd_addr; 873 874 if (!lp->fd_buf) { 875 BUG_ON(sizeof(struct FDesc) + 876 sizeof(struct BDesc) * RX_BUF_NUM + 877 sizeof(struct FDesc) * RX_FD_NUM + 878 sizeof(struct TxFD) * TX_FD_NUM > 879 PAGE_SIZE * FD_PAGE_NUM); 880 881 lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev, 882 PAGE_SIZE * FD_PAGE_NUM, 883 &lp->fd_buf_dma, GFP_ATOMIC); 884 if (!lp->fd_buf) 885 return -ENOMEM; 886 for (i = 0; i < RX_BUF_NUM; i++) { 887 lp->rx_skbs[i].skb = 888 alloc_rxbuf_skb(dev, lp->pci_dev, 889 &lp->rx_skbs[i].skb_dma); 890 if (!lp->rx_skbs[i].skb) { 891 while (--i >= 0) { 892 free_rxbuf_skb(lp->pci_dev, 893 lp->rx_skbs[i].skb, 894 lp->rx_skbs[i].skb_dma); 895 lp->rx_skbs[i].skb = NULL; 896 } 897 dma_free_coherent(&lp->pci_dev->dev, 898 PAGE_SIZE * FD_PAGE_NUM, 899 lp->fd_buf, lp->fd_buf_dma); 900 lp->fd_buf = NULL; 901 return -ENOMEM; 902 } 903 } 904 printk(KERN_DEBUG "%s: FD buf %p DataBuf", 905 dev->name, lp->fd_buf); 906 printk("\n"); 907 } else { 908 for (i = 0; i < FD_PAGE_NUM; i++) 909 clear_page((void *)((unsigned long)lp->fd_buf + 910 i * PAGE_SIZE)); 911 } 912 fd_addr = (unsigned long)lp->fd_buf; 913 914 /* Free Descriptors (for Receive) */ 915 lp->rfd_base = (struct RxFD *)fd_addr; 916 fd_addr += sizeof(struct RxFD) * RX_FD_NUM; 917 for (i = 0; i < RX_FD_NUM; i++) 918 lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); 919 lp->rfd_cur = lp->rfd_base; 920 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); 921 922 /* Transmit Descriptors */ 923 lp->tfd_base = (struct TxFD *)fd_addr; 924 fd_addr += sizeof(struct TxFD) * TX_FD_NUM; 925 for (i = 0; i < TX_FD_NUM; i++) { 926 lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1])); 927 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 928 lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0); 929 } 930 lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0])); 931 lp->tfd_start = 0; 932 lp->tfd_end = 0; 933 934 /* Buffer List (for Receive) */ 935 lp->fbl_ptr = (struct FrFD *)fd_addr; 936 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); 937 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); 938 /* 939 * move all allocated skbs to head of rx_skbs[] array. 940 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in 941 * tc35815_rx() had failed. 942 */ 943 lp->fbl_count = 0; 944 for (i = 0; i < RX_BUF_NUM; i++) { 945 if (lp->rx_skbs[i].skb) { 946 if (i != lp->fbl_count) { 947 lp->rx_skbs[lp->fbl_count].skb = 948 lp->rx_skbs[i].skb; 949 lp->rx_skbs[lp->fbl_count].skb_dma = 950 lp->rx_skbs[i].skb_dma; 951 } 952 lp->fbl_count++; 953 } 954 } 955 for (i = 0; i < RX_BUF_NUM; i++) { 956 if (i >= lp->fbl_count) { 957 lp->fbl_ptr->bd[i].BuffData = 0; 958 lp->fbl_ptr->bd[i].BDCtl = 0; 959 continue; 960 } 961 lp->fbl_ptr->bd[i].BuffData = 962 cpu_to_le32(lp->rx_skbs[i].skb_dma); 963 /* BDID is index of FrFD.bd[] */ 964 lp->fbl_ptr->bd[i].BDCtl = 965 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | 966 RX_BUF_SIZE); 967 } 968 969 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", 970 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); 971 return 0; 972} 973 974static void 975tc35815_clear_queues(struct net_device *dev) 976{ 977 struct tc35815_local *lp = netdev_priv(dev); 978 int i; 979 980 for (i = 0; i < TX_FD_NUM; i++) { 981 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); 982 struct sk_buff *skb = 983 fdsystem != 0xffffffff ? 984 lp->tx_skbs[fdsystem].skb : NULL; 985#ifdef DEBUG 986 if (lp->tx_skbs[i].skb != skb) { 987 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); 988 panic_queues(dev); 989 } 990#else 991 BUG_ON(lp->tx_skbs[i].skb != skb); 992#endif 993 if (skb) { 994 dma_unmap_single(&lp->pci_dev->dev, 995 lp->tx_skbs[i].skb_dma, skb->len, 996 DMA_TO_DEVICE); 997 lp->tx_skbs[i].skb = NULL; 998 lp->tx_skbs[i].skb_dma = 0; 999 dev_kfree_skb_any(skb); 1000 } 1001 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 1002 } 1003 1004 tc35815_init_queues(dev); 1005} 1006 1007static void 1008tc35815_free_queues(struct net_device *dev) 1009{ 1010 struct tc35815_local *lp = netdev_priv(dev); 1011 int i; 1012 1013 if (lp->tfd_base) { 1014 for (i = 0; i < TX_FD_NUM; i++) { 1015 u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); 1016 struct sk_buff *skb = 1017 fdsystem != 0xffffffff ? 1018 lp->tx_skbs[fdsystem].skb : NULL; 1019#ifdef DEBUG 1020 if (lp->tx_skbs[i].skb != skb) { 1021 printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); 1022 panic_queues(dev); 1023 } 1024#else 1025 BUG_ON(lp->tx_skbs[i].skb != skb); 1026#endif 1027 if (skb) { 1028 dma_unmap_single(&lp->pci_dev->dev, 1029 lp->tx_skbs[i].skb_dma, 1030 skb->len, DMA_TO_DEVICE); 1031 dev_kfree_skb(skb); 1032 lp->tx_skbs[i].skb = NULL; 1033 lp->tx_skbs[i].skb_dma = 0; 1034 } 1035 lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); 1036 } 1037 } 1038 1039 lp->rfd_base = NULL; 1040 lp->rfd_limit = NULL; 1041 lp->rfd_cur = NULL; 1042 lp->fbl_ptr = NULL; 1043 1044 for (i = 0; i < RX_BUF_NUM; i++) { 1045 if (lp->rx_skbs[i].skb) { 1046 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, 1047 lp->rx_skbs[i].skb_dma); 1048 lp->rx_skbs[i].skb = NULL; 1049 } 1050 } 1051 if (lp->fd_buf) { 1052 dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM, 1053 lp->fd_buf, lp->fd_buf_dma); 1054 lp->fd_buf = NULL; 1055 } 1056} 1057 1058static void 1059dump_txfd(struct TxFD *fd) 1060{ 1061 printk("TxFD(%p): %08x %08x %08x %08x\n", fd, 1062 le32_to_cpu(fd->fd.FDNext), 1063 le32_to_cpu(fd->fd.FDSystem), 1064 le32_to_cpu(fd->fd.FDStat), 1065 le32_to_cpu(fd->fd.FDCtl)); 1066 printk("BD: "); 1067 printk(" %08x %08x", 1068 le32_to_cpu(fd->bd.BuffData), 1069 le32_to_cpu(fd->bd.BDCtl)); 1070 printk("\n"); 1071} 1072 1073static int 1074dump_rxfd(struct RxFD *fd) 1075{ 1076 int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; 1077 if (bd_count > 8) 1078 bd_count = 8; 1079 printk("RxFD(%p): %08x %08x %08x %08x\n", fd, 1080 le32_to_cpu(fd->fd.FDNext), 1081 le32_to_cpu(fd->fd.FDSystem), 1082 le32_to_cpu(fd->fd.FDStat), 1083 le32_to_cpu(fd->fd.FDCtl)); 1084 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) 1085 return 0; 1086 printk("BD: "); 1087 for (i = 0; i < bd_count; i++) 1088 printk(" %08x %08x", 1089 le32_to_cpu(fd->bd[i].BuffData), 1090 le32_to_cpu(fd->bd[i].BDCtl)); 1091 printk("\n"); 1092 return bd_count; 1093} 1094 1095#ifdef DEBUG 1096static void 1097dump_frfd(struct FrFD *fd) 1098{ 1099 int i; 1100 printk("FrFD(%p): %08x %08x %08x %08x\n", fd, 1101 le32_to_cpu(fd->fd.FDNext), 1102 le32_to_cpu(fd->fd.FDSystem), 1103 le32_to_cpu(fd->fd.FDStat), 1104 le32_to_cpu(fd->fd.FDCtl)); 1105 printk("BD: "); 1106 for (i = 0; i < RX_BUF_NUM; i++) 1107 printk(" %08x %08x", 1108 le32_to_cpu(fd->bd[i].BuffData), 1109 le32_to_cpu(fd->bd[i].BDCtl)); 1110 printk("\n"); 1111} 1112 1113static void 1114panic_queues(struct net_device *dev) 1115{ 1116 struct tc35815_local *lp = netdev_priv(dev); 1117 int i; 1118 1119 printk("TxFD base %p, start %u, end %u\n", 1120 lp->tfd_base, lp->tfd_start, lp->tfd_end); 1121 printk("RxFD base %p limit %p cur %p\n", 1122 lp->rfd_base, lp->rfd_limit, lp->rfd_cur); 1123 printk("FrFD %p\n", lp->fbl_ptr); 1124 for (i = 0; i < TX_FD_NUM; i++) 1125 dump_txfd(&lp->tfd_base[i]); 1126 for (i = 0; i < RX_FD_NUM; i++) { 1127 int bd_count = dump_rxfd(&lp->rfd_base[i]); 1128 i += (bd_count + 1) / 2; /* skip BDs */ 1129 } 1130 dump_frfd(lp->fbl_ptr); 1131 panic("%s: Illegal queue state.", dev->name); 1132} 1133#endif 1134 1135static void print_eth(const u8 *add) 1136{ 1137 printk(KERN_DEBUG "print_eth(%p)\n", add); 1138 printk(KERN_DEBUG " %pM => %pM : %02x%02x\n", 1139 add + 6, add, add[12], add[13]); 1140} 1141 1142static int tc35815_tx_full(struct net_device *dev) 1143{ 1144 struct tc35815_local *lp = netdev_priv(dev); 1145 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end; 1146} 1147 1148static void tc35815_restart(struct net_device *dev) 1149{ 1150 struct tc35815_local *lp = netdev_priv(dev); 1151 int ret; 1152 1153 if (dev->phydev) { 1154 ret = phy_init_hw(dev->phydev); 1155 if (ret) 1156 printk(KERN_ERR "%s: PHY init failed.\n", dev->name); 1157 } 1158 1159 spin_lock_bh(&lp->rx_lock); 1160 spin_lock_irq(&lp->lock); 1161 tc35815_chip_reset(dev); 1162 tc35815_clear_queues(dev); 1163 tc35815_chip_init(dev); 1164 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ 1165 tc35815_set_multicast_list(dev); 1166 spin_unlock_irq(&lp->lock); 1167 spin_unlock_bh(&lp->rx_lock); 1168 1169 netif_wake_queue(dev); 1170} 1171 1172static void tc35815_restart_work(struct work_struct *work) 1173{ 1174 struct tc35815_local *lp = 1175 container_of(work, struct tc35815_local, restart_work); 1176 struct net_device *dev = lp->dev; 1177 1178 tc35815_restart(dev); 1179} 1180 1181static void tc35815_schedule_restart(struct net_device *dev) 1182{ 1183 struct tc35815_local *lp = netdev_priv(dev); 1184 struct tc35815_regs __iomem *tr = 1185 (struct tc35815_regs __iomem *)dev->base_addr; 1186 unsigned long flags; 1187 1188 /* disable interrupts */ 1189 spin_lock_irqsave(&lp->lock, flags); 1190 tc_writel(0, &tr->Int_En); 1191 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); 1192 schedule_work(&lp->restart_work); 1193 spin_unlock_irqrestore(&lp->lock, flags); 1194} 1195 1196static void tc35815_tx_timeout(struct net_device *dev, unsigned int txqueue) 1197{ 1198 struct tc35815_regs __iomem *tr = 1199 (struct tc35815_regs __iomem *)dev->base_addr; 1200 1201 printk(KERN_WARNING "%s: transmit timed out, status %#x\n", 1202 dev->name, tc_readl(&tr->Tx_Stat)); 1203 1204 /* Try to restart the adaptor. */ 1205 tc35815_schedule_restart(dev); 1206 dev->stats.tx_errors++; 1207} 1208 1209/* 1210 * Open/initialize the controller. This is called (in the current kernel) 1211 * sometime after booting when the 'ifconfig' program is run. 1212 * 1213 * This routine should set everything up anew at each open, even 1214 * registers that "should" only need to be set once at boot, so that 1215 * there is non-reboot way to recover if something goes wrong. 1216 */ 1217static int 1218tc35815_open(struct net_device *dev) 1219{ 1220 struct tc35815_local *lp = netdev_priv(dev); 1221 1222 /* 1223 * This is used if the interrupt line can turned off (shared). 1224 * See 3c503.c for an example of selecting the IRQ at config-time. 1225 */ 1226 if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED, 1227 dev->name, dev)) 1228 return -EAGAIN; 1229 1230 tc35815_chip_reset(dev); 1231 1232 if (tc35815_init_queues(dev) != 0) { 1233 free_irq(dev->irq, dev); 1234 return -EAGAIN; 1235 } 1236 1237 napi_enable(&lp->napi); 1238 1239 /* Reset the hardware here. Don't forget to set the station address. */ 1240 spin_lock_irq(&lp->lock); 1241 tc35815_chip_init(dev); 1242 spin_unlock_irq(&lp->lock); 1243 1244 netif_carrier_off(dev); 1245 /* schedule a link state check */ 1246 phy_start(dev->phydev); 1247 1248 /* We are now ready to accept transmit requeusts from 1249 * the queueing layer of the networking. 1250 */ 1251 netif_start_queue(dev); 1252 1253 return 0; 1254} 1255 1256/* This will only be invoked if your driver is _not_ in XOFF state. 1257 * What this means is that you need not check it, and that this 1258 * invariant will hold if you make sure that the netif_*_queue() 1259 * calls are done at the proper times. 1260 */ 1261static netdev_tx_t 1262tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) 1263{ 1264 struct tc35815_local *lp = netdev_priv(dev); 1265 struct TxFD *txfd; 1266 unsigned long flags; 1267 1268 /* If some error occurs while trying to transmit this 1269 * packet, you should return '1' from this function. 1270 * In such a case you _may not_ do anything to the 1271 * SKB, it is still owned by the network queueing 1272 * layer when an error is returned. This means you 1273 * may not modify any SKB fields, you may not free 1274 * the SKB, etc. 1275 */ 1276 1277 /* This is the most common case for modern hardware. 1278 * The spinlock protects this code from the TX complete 1279 * hardware interrupt handler. Queue flow control is 1280 * thus managed under this lock as well. 1281 */ 1282 spin_lock_irqsave(&lp->lock, flags); 1283 1284 /* failsafe... (handle txdone now if half of FDs are used) */ 1285 if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM > 1286 TX_FD_NUM / 2) 1287 tc35815_txdone(dev); 1288 1289 if (netif_msg_pktdata(lp)) 1290 print_eth(skb->data); 1291#ifdef DEBUG 1292 if (lp->tx_skbs[lp->tfd_start].skb) { 1293 printk("%s: tx_skbs conflict.\n", dev->name); 1294 panic_queues(dev); 1295 } 1296#else 1297 BUG_ON(lp->tx_skbs[lp->tfd_start].skb); 1298#endif 1299 lp->tx_skbs[lp->tfd_start].skb = skb; 1300 lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev, 1301 skb->data, 1302 skb->len, 1303 DMA_TO_DEVICE); 1304 1305 /*add to ring */ 1306 txfd = &lp->tfd_base[lp->tfd_start]; 1307 txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma); 1308 txfd->bd.BDCtl = cpu_to_le32(skb->len); 1309 txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start); 1310 txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT)); 1311 1312 if (lp->tfd_start == lp->tfd_end) { 1313 struct tc35815_regs __iomem *tr = 1314 (struct tc35815_regs __iomem *)dev->base_addr; 1315 /* Start DMA Transmitter. */ 1316 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1317 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1318 if (netif_msg_tx_queued(lp)) { 1319 printk("%s: starting TxFD.\n", dev->name); 1320 dump_txfd(txfd); 1321 } 1322 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); 1323 } else { 1324 txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL); 1325 if (netif_msg_tx_queued(lp)) { 1326 printk("%s: queueing TxFD.\n", dev->name); 1327 dump_txfd(txfd); 1328 } 1329 } 1330 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; 1331 1332 /* If we just used up the very last entry in the 1333 * TX ring on this device, tell the queueing 1334 * layer to send no more. 1335 */ 1336 if (tc35815_tx_full(dev)) { 1337 if (netif_msg_tx_queued(lp)) 1338 printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name); 1339 netif_stop_queue(dev); 1340 } 1341 1342 /* When the TX completion hw interrupt arrives, this 1343 * is when the transmit statistics are updated. 1344 */ 1345 1346 spin_unlock_irqrestore(&lp->lock, flags); 1347 return NETDEV_TX_OK; 1348} 1349 1350#define FATAL_ERROR_INT \ 1351 (Int_IntPCI | Int_DmParErr | Int_IntNRAbt) 1352static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) 1353{ 1354 static int count; 1355 printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):", 1356 dev->name, status); 1357 if (status & Int_IntPCI) 1358 printk(" IntPCI"); 1359 if (status & Int_DmParErr) 1360 printk(" DmParErr"); 1361 if (status & Int_IntNRAbt) 1362 printk(" IntNRAbt"); 1363 printk("\n"); 1364 if (count++ > 100) 1365 panic("%s: Too many fatal errors.", dev->name); 1366 printk(KERN_WARNING "%s: Resetting ...\n", dev->name); 1367 /* Try to restart the adaptor. */ 1368 tc35815_schedule_restart(dev); 1369} 1370 1371static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) 1372{ 1373 struct tc35815_local *lp = netdev_priv(dev); 1374 int ret = -1; 1375 1376 /* Fatal errors... */ 1377 if (status & FATAL_ERROR_INT) { 1378 tc35815_fatal_error_interrupt(dev, status); 1379 return 0; 1380 } 1381 /* recoverable errors */ 1382 if (status & Int_IntFDAEx) { 1383 if (netif_msg_rx_err(lp)) 1384 dev_warn(&dev->dev, 1385 "Free Descriptor Area Exhausted (%#x).\n", 1386 status); 1387 dev->stats.rx_dropped++; 1388 ret = 0; 1389 } 1390 if (status & Int_IntBLEx) { 1391 if (netif_msg_rx_err(lp)) 1392 dev_warn(&dev->dev, 1393 "Buffer List Exhausted (%#x).\n", 1394 status); 1395 dev->stats.rx_dropped++; 1396 ret = 0; 1397 } 1398 if (status & Int_IntExBD) { 1399 if (netif_msg_rx_err(lp)) 1400 dev_warn(&dev->dev, 1401 "Excessive Buffer Descriptors (%#x).\n", 1402 status); 1403 dev->stats.rx_length_errors++; 1404 ret = 0; 1405 } 1406 1407 /* normal notification */ 1408 if (status & Int_IntMacRx) { 1409 /* Got a packet(s). */ 1410 ret = tc35815_rx(dev, limit); 1411 lp->lstats.rx_ints++; 1412 } 1413 if (status & Int_IntMacTx) { 1414 /* Transmit complete. */ 1415 lp->lstats.tx_ints++; 1416 spin_lock_irq(&lp->lock); 1417 tc35815_txdone(dev); 1418 spin_unlock_irq(&lp->lock); 1419 if (ret < 0) 1420 ret = 0; 1421 } 1422 return ret; 1423} 1424 1425/* 1426 * The typical workload of the driver: 1427 * Handle the network interface interrupts. 1428 */ 1429static irqreturn_t tc35815_interrupt(int irq, void *dev_id) 1430{ 1431 struct net_device *dev = dev_id; 1432 struct tc35815_local *lp = netdev_priv(dev); 1433 struct tc35815_regs __iomem *tr = 1434 (struct tc35815_regs __iomem *)dev->base_addr; 1435 u32 dmactl = tc_readl(&tr->DMA_Ctl); 1436 1437 if (!(dmactl & DMA_IntMask)) { 1438 /* disable interrupts */ 1439 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); 1440 if (napi_schedule_prep(&lp->napi)) 1441 __napi_schedule(&lp->napi); 1442 else { 1443 printk(KERN_ERR "%s: interrupt taken in poll\n", 1444 dev->name); 1445 BUG(); 1446 } 1447 (void)tc_readl(&tr->Int_Src); /* flush */ 1448 return IRQ_HANDLED; 1449 } 1450 return IRQ_NONE; 1451} 1452 1453#ifdef CONFIG_NET_POLL_CONTROLLER 1454static void tc35815_poll_controller(struct net_device *dev) 1455{ 1456 disable_irq(dev->irq); 1457 tc35815_interrupt(dev->irq, dev); 1458 enable_irq(dev->irq); 1459} 1460#endif 1461 1462/* We have a good packet(s), get it/them out of the buffers. */ 1463static int 1464tc35815_rx(struct net_device *dev, int limit) 1465{ 1466 struct tc35815_local *lp = netdev_priv(dev); 1467 unsigned int fdctl; 1468 int i; 1469 int received = 0; 1470 1471 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { 1472 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); 1473 int pkt_len = fdctl & FD_FDLength_MASK; 1474 int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; 1475#ifdef DEBUG 1476 struct RxFD *next_rfd; 1477#endif 1478#if (RX_CTL_CMD & Rx_StripCRC) == 0 1479 pkt_len -= ETH_FCS_LEN; 1480#endif 1481 1482 if (netif_msg_rx_status(lp)) 1483 dump_rxfd(lp->rfd_cur); 1484 if (status & Rx_Good) { 1485 struct sk_buff *skb; 1486 unsigned char *data; 1487 int cur_bd; 1488 1489 if (--limit < 0) 1490 break; 1491 BUG_ON(bd_count > 1); 1492 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) 1493 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1494#ifdef DEBUG 1495 if (cur_bd >= RX_BUF_NUM) { 1496 printk("%s: invalid BDID.\n", dev->name); 1497 panic_queues(dev); 1498 } 1499 BUG_ON(lp->rx_skbs[cur_bd].skb_dma != 1500 (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); 1501 if (!lp->rx_skbs[cur_bd].skb) { 1502 printk("%s: NULL skb.\n", dev->name); 1503 panic_queues(dev); 1504 } 1505#else 1506 BUG_ON(cur_bd >= RX_BUF_NUM); 1507#endif 1508 skb = lp->rx_skbs[cur_bd].skb; 1509 prefetch(skb->data); 1510 lp->rx_skbs[cur_bd].skb = NULL; 1511 dma_unmap_single(&lp->pci_dev->dev, 1512 lp->rx_skbs[cur_bd].skb_dma, 1513 RX_BUF_SIZE, DMA_FROM_DEVICE); 1514 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0) 1515 memmove(skb->data, skb->data - NET_IP_ALIGN, 1516 pkt_len); 1517 data = skb_put(skb, pkt_len); 1518 if (netif_msg_pktdata(lp)) 1519 print_eth(data); 1520 skb->protocol = eth_type_trans(skb, dev); 1521 netif_receive_skb(skb); 1522 received++; 1523 dev->stats.rx_packets++; 1524 dev->stats.rx_bytes += pkt_len; 1525 } else { 1526 dev->stats.rx_errors++; 1527 if (netif_msg_rx_err(lp)) 1528 dev_info(&dev->dev, "Rx error (status %x)\n", 1529 status & Rx_Stat_Mask); 1530 /* WORKAROUND: LongErr and CRCErr means Overflow. */ 1531 if ((status & Rx_LongErr) && (status & Rx_CRCErr)) { 1532 status &= ~(Rx_LongErr|Rx_CRCErr); 1533 status |= Rx_Over; 1534 } 1535 if (status & Rx_LongErr) 1536 dev->stats.rx_length_errors++; 1537 if (status & Rx_Over) 1538 dev->stats.rx_fifo_errors++; 1539 if (status & Rx_CRCErr) 1540 dev->stats.rx_crc_errors++; 1541 if (status & Rx_Align) 1542 dev->stats.rx_frame_errors++; 1543 } 1544 1545 if (bd_count > 0) { 1546 /* put Free Buffer back to controller */ 1547 int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); 1548 unsigned char id = 1549 (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1550#ifdef DEBUG 1551 if (id >= RX_BUF_NUM) { 1552 printk("%s: invalid BDID.\n", dev->name); 1553 panic_queues(dev); 1554 } 1555#else 1556 BUG_ON(id >= RX_BUF_NUM); 1557#endif 1558 /* free old buffers */ 1559 lp->fbl_count--; 1560 while (lp->fbl_count < RX_BUF_NUM) 1561 { 1562 unsigned char curid = 1563 (id + 1 + lp->fbl_count) % RX_BUF_NUM; 1564 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; 1565#ifdef DEBUG 1566 bdctl = le32_to_cpu(bd->BDCtl); 1567 if (bdctl & BD_CownsBD) { 1568 printk("%s: Freeing invalid BD.\n", 1569 dev->name); 1570 panic_queues(dev); 1571 } 1572#endif 1573 /* pass BD to controller */ 1574 if (!lp->rx_skbs[curid].skb) { 1575 lp->rx_skbs[curid].skb = 1576 alloc_rxbuf_skb(dev, 1577 lp->pci_dev, 1578 &lp->rx_skbs[curid].skb_dma); 1579 if (!lp->rx_skbs[curid].skb) 1580 break; /* try on next reception */ 1581 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); 1582 } 1583 /* Note: BDLength was modified by chip. */ 1584 bd->BDCtl = cpu_to_le32(BD_CownsBD | 1585 (curid << BD_RxBDID_SHIFT) | 1586 RX_BUF_SIZE); 1587 lp->fbl_count++; 1588 } 1589 } 1590 1591 /* put RxFD back to controller */ 1592#ifdef DEBUG 1593 next_rfd = fd_bus_to_virt(lp, 1594 le32_to_cpu(lp->rfd_cur->fd.FDNext)); 1595 if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) { 1596 printk("%s: RxFD FDNext invalid.\n", dev->name); 1597 panic_queues(dev); 1598 } 1599#endif 1600 for (i = 0; i < (bd_count + 1) / 2 + 1; i++) { 1601 /* pass FD to controller */ 1602#ifdef DEBUG 1603 lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); 1604#else 1605 lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); 1606#endif 1607 lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD); 1608 lp->rfd_cur++; 1609 } 1610 if (lp->rfd_cur > lp->rfd_limit) 1611 lp->rfd_cur = lp->rfd_base; 1612#ifdef DEBUG 1613 if (lp->rfd_cur != next_rfd) 1614 printk("rfd_cur = %p, next_rfd %p\n", 1615 lp->rfd_cur, next_rfd); 1616#endif 1617 } 1618 1619 return received; 1620} 1621 1622static int tc35815_poll(struct napi_struct *napi, int budget) 1623{ 1624 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); 1625 struct net_device *dev = lp->dev; 1626 struct tc35815_regs __iomem *tr = 1627 (struct tc35815_regs __iomem *)dev->base_addr; 1628 int received = 0, handled; 1629 u32 status; 1630 1631 if (budget <= 0) 1632 return received; 1633 1634 spin_lock(&lp->rx_lock); 1635 status = tc_readl(&tr->Int_Src); 1636 do { 1637 /* BLEx, FDAEx will be cleared later */ 1638 tc_writel(status & ~(Int_BLEx | Int_FDAEx), 1639 &tr->Int_Src); /* write to clear */ 1640 1641 handled = tc35815_do_interrupt(dev, status, budget - received); 1642 if (status & (Int_BLEx | Int_FDAEx)) 1643 tc_writel(status & (Int_BLEx | Int_FDAEx), 1644 &tr->Int_Src); 1645 if (handled >= 0) { 1646 received += handled; 1647 if (received >= budget) 1648 break; 1649 } 1650 status = tc_readl(&tr->Int_Src); 1651 } while (status); 1652 spin_unlock(&lp->rx_lock); 1653 1654 if (received < budget) { 1655 napi_complete_done(napi, received); 1656 /* enable interrupts */ 1657 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); 1658 } 1659 return received; 1660} 1661 1662#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1663 1664static void 1665tc35815_check_tx_stat(struct net_device *dev, int status) 1666{ 1667 struct tc35815_local *lp = netdev_priv(dev); 1668 const char *msg = NULL; 1669 1670 /* count collisions */ 1671 if (status & Tx_ExColl) 1672 dev->stats.collisions += 16; 1673 if (status & Tx_TxColl_MASK) 1674 dev->stats.collisions += status & Tx_TxColl_MASK; 1675 1676 /* TX4939 does not have NCarr */ 1677 if (lp->chiptype == TC35815_TX4939) 1678 status &= ~Tx_NCarr; 1679 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1680 if (!lp->link || lp->duplex == DUPLEX_FULL) 1681 status &= ~Tx_NCarr; 1682 1683 if (!(status & TX_STA_ERR)) { 1684 /* no error. */ 1685 dev->stats.tx_packets++; 1686 return; 1687 } 1688 1689 dev->stats.tx_errors++; 1690 if (status & Tx_ExColl) { 1691 dev->stats.tx_aborted_errors++; 1692 msg = "Excessive Collision."; 1693 } 1694 if (status & Tx_Under) { 1695 dev->stats.tx_fifo_errors++; 1696 msg = "Tx FIFO Underrun."; 1697 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { 1698 lp->lstats.tx_underrun++; 1699 if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) { 1700 struct tc35815_regs __iomem *tr = 1701 (struct tc35815_regs __iomem *)dev->base_addr; 1702 tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); 1703 msg = "Tx FIFO Underrun.Change Tx threshold to max."; 1704 } 1705 } 1706 } 1707 if (status & Tx_Defer) { 1708 dev->stats.tx_fifo_errors++; 1709 msg = "Excessive Deferral."; 1710 } 1711 if (status & Tx_NCarr) { 1712 dev->stats.tx_carrier_errors++; 1713 msg = "Lost Carrier Sense."; 1714 } 1715 if (status & Tx_LateColl) { 1716 dev->stats.tx_aborted_errors++; 1717 msg = "Late Collision."; 1718 } 1719 if (status & Tx_TxPar) { 1720 dev->stats.tx_fifo_errors++; 1721 msg = "Transmit Parity Error."; 1722 } 1723 if (status & Tx_SQErr) { 1724 dev->stats.tx_heartbeat_errors++; 1725 msg = "Signal Quality Error."; 1726 } 1727 if (msg && netif_msg_tx_err(lp)) 1728 printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status); 1729} 1730 1731/* This handles TX complete events posted by the device 1732 * via interrupts. 1733 */ 1734static void 1735tc35815_txdone(struct net_device *dev) 1736{ 1737 struct tc35815_local *lp = netdev_priv(dev); 1738 struct TxFD *txfd; 1739 unsigned int fdctl; 1740 1741 txfd = &lp->tfd_base[lp->tfd_end]; 1742 while (lp->tfd_start != lp->tfd_end && 1743 !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) { 1744 int status = le32_to_cpu(txfd->fd.FDStat); 1745 struct sk_buff *skb; 1746 unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext); 1747 u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem); 1748 1749 if (netif_msg_tx_done(lp)) { 1750 printk("%s: complete TxFD.\n", dev->name); 1751 dump_txfd(txfd); 1752 } 1753 tc35815_check_tx_stat(dev, status); 1754 1755 skb = fdsystem != 0xffffffff ? 1756 lp->tx_skbs[fdsystem].skb : NULL; 1757#ifdef DEBUG 1758 if (lp->tx_skbs[lp->tfd_end].skb != skb) { 1759 printk("%s: tx_skbs mismatch.\n", dev->name); 1760 panic_queues(dev); 1761 } 1762#else 1763 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); 1764#endif 1765 if (skb) { 1766 dev->stats.tx_bytes += skb->len; 1767 dma_unmap_single(&lp->pci_dev->dev, 1768 lp->tx_skbs[lp->tfd_end].skb_dma, 1769 skb->len, DMA_TO_DEVICE); 1770 lp->tx_skbs[lp->tfd_end].skb = NULL; 1771 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 1772 dev_kfree_skb_any(skb); 1773 } 1774 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); 1775 1776 lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM; 1777 txfd = &lp->tfd_base[lp->tfd_end]; 1778#ifdef DEBUG 1779 if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) { 1780 printk("%s: TxFD FDNext invalid.\n", dev->name); 1781 panic_queues(dev); 1782 } 1783#endif 1784 if (fdnext & FD_Next_EOL) { 1785 /* DMA Transmitter has been stopping... */ 1786 if (lp->tfd_end != lp->tfd_start) { 1787 struct tc35815_regs __iomem *tr = 1788 (struct tc35815_regs __iomem *)dev->base_addr; 1789 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; 1790 struct TxFD *txhead = &lp->tfd_base[head]; 1791 int qlen = (lp->tfd_start + TX_FD_NUM 1792 - lp->tfd_end) % TX_FD_NUM; 1793 1794#ifdef DEBUG 1795 if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) { 1796 printk("%s: TxFD FDCtl invalid.\n", dev->name); 1797 panic_queues(dev); 1798 } 1799#endif 1800 /* log max queue length */ 1801 if (lp->lstats.max_tx_qlen < qlen) 1802 lp->lstats.max_tx_qlen = qlen; 1803 1804 1805 /* start DMA Transmitter again */ 1806 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1807 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1808 if (netif_msg_tx_queued(lp)) { 1809 printk("%s: start TxFD on queue.\n", 1810 dev->name); 1811 dump_txfd(txfd); 1812 } 1813 tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); 1814 } 1815 break; 1816 } 1817 } 1818 1819 /* If we had stopped the queue due to a "tx full" 1820 * condition, and space has now been made available, 1821 * wake up the queue. 1822 */ 1823 if (netif_queue_stopped(dev) && !tc35815_tx_full(dev)) 1824 netif_wake_queue(dev); 1825} 1826 1827/* The inverse routine to tc35815_open(). */ 1828static int 1829tc35815_close(struct net_device *dev) 1830{ 1831 struct tc35815_local *lp = netdev_priv(dev); 1832 1833 netif_stop_queue(dev); 1834 napi_disable(&lp->napi); 1835 if (dev->phydev) 1836 phy_stop(dev->phydev); 1837 cancel_work_sync(&lp->restart_work); 1838 1839 /* Flush the Tx and disable Rx here. */ 1840 tc35815_chip_reset(dev); 1841 free_irq(dev->irq, dev); 1842 1843 tc35815_free_queues(dev); 1844 1845 return 0; 1846 1847} 1848 1849/* 1850 * Get the current statistics. 1851 * This may be called with the card open or closed. 1852 */ 1853static struct net_device_stats *tc35815_get_stats(struct net_device *dev) 1854{ 1855 struct tc35815_regs __iomem *tr = 1856 (struct tc35815_regs __iomem *)dev->base_addr; 1857 if (netif_running(dev)) 1858 /* Update the statistics from the device registers. */ 1859 dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt); 1860 1861 return &dev->stats; 1862} 1863 1864static void tc35815_set_cam_entry(struct net_device *dev, int index, 1865 const unsigned char *addr) 1866{ 1867 struct tc35815_local *lp = netdev_priv(dev); 1868 struct tc35815_regs __iomem *tr = 1869 (struct tc35815_regs __iomem *)dev->base_addr; 1870 int cam_index = index * 6; 1871 u32 cam_data; 1872 u32 saved_addr; 1873 1874 saved_addr = tc_readl(&tr->CAM_Adr); 1875 1876 if (netif_msg_hw(lp)) 1877 printk(KERN_DEBUG "%s: CAM %d: %pM\n", 1878 dev->name, index, addr); 1879 if (index & 1) { 1880 /* read modify write */ 1881 tc_writel(cam_index - 2, &tr->CAM_Adr); 1882 cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; 1883 cam_data |= addr[0] << 8 | addr[1]; 1884 tc_writel(cam_data, &tr->CAM_Data); 1885 /* write whole word */ 1886 tc_writel(cam_index + 2, &tr->CAM_Adr); 1887 cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; 1888 tc_writel(cam_data, &tr->CAM_Data); 1889 } else { 1890 /* write whole word */ 1891 tc_writel(cam_index, &tr->CAM_Adr); 1892 cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; 1893 tc_writel(cam_data, &tr->CAM_Data); 1894 /* read modify write */ 1895 tc_writel(cam_index + 4, &tr->CAM_Adr); 1896 cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; 1897 cam_data |= addr[4] << 24 | (addr[5] << 16); 1898 tc_writel(cam_data, &tr->CAM_Data); 1899 } 1900 1901 tc_writel(saved_addr, &tr->CAM_Adr); 1902} 1903 1904 1905/* 1906 * Set or clear the multicast filter for this adaptor. 1907 * num_addrs == -1 Promiscuous mode, receive all packets 1908 * num_addrs == 0 Normal mode, clear multicast list 1909 * num_addrs > 0 Multicast mode, receive normal and MC packets, 1910 * and do best-effort filtering. 1911 */ 1912static void 1913tc35815_set_multicast_list(struct net_device *dev) 1914{ 1915 struct tc35815_regs __iomem *tr = 1916 (struct tc35815_regs __iomem *)dev->base_addr; 1917 1918 if (dev->flags & IFF_PROMISC) { 1919 /* With some (all?) 100MHalf HUB, controller will hang 1920 * if we enabled promiscuous mode before linkup... 1921 */ 1922 struct tc35815_local *lp = netdev_priv(dev); 1923 1924 if (!lp->link) 1925 return; 1926 /* Enable promiscuous mode */ 1927 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 1928 } else if ((dev->flags & IFF_ALLMULTI) || 1929 netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) { 1930 /* CAM 0, 1, 20 are reserved. */ 1931 /* Disable promiscuous mode, use normal mode. */ 1932 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 1933 } else if (!netdev_mc_empty(dev)) { 1934 struct netdev_hw_addr *ha; 1935 int i; 1936 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 1937 1938 tc_writel(0, &tr->CAM_Ctl); 1939 /* Walk the address list, and load the filter */ 1940 i = 0; 1941 netdev_for_each_mc_addr(ha, dev) { 1942 /* entry 0,1 is reserved. */ 1943 tc35815_set_cam_entry(dev, i + 2, ha->addr); 1944 ena_bits |= CAM_Ena_Bit(i + 2); 1945 i++; 1946 } 1947 tc_writel(ena_bits, &tr->CAM_Ena); 1948 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 1949 } else { 1950 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 1951 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 1952 } 1953} 1954 1955static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1956{ 1957 struct tc35815_local *lp = netdev_priv(dev); 1958 1959 strlcpy(info->driver, MODNAME, sizeof(info->driver)); 1960 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1961 strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); 1962} 1963 1964static u32 tc35815_get_msglevel(struct net_device *dev) 1965{ 1966 struct tc35815_local *lp = netdev_priv(dev); 1967 return lp->msg_enable; 1968} 1969 1970static void tc35815_set_msglevel(struct net_device *dev, u32 datum) 1971{ 1972 struct tc35815_local *lp = netdev_priv(dev); 1973 lp->msg_enable = datum; 1974} 1975 1976static int tc35815_get_sset_count(struct net_device *dev, int sset) 1977{ 1978 struct tc35815_local *lp = netdev_priv(dev); 1979 1980 switch (sset) { 1981 case ETH_SS_STATS: 1982 return sizeof(lp->lstats) / sizeof(int); 1983 default: 1984 return -EOPNOTSUPP; 1985 } 1986} 1987 1988static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) 1989{ 1990 struct tc35815_local *lp = netdev_priv(dev); 1991 data[0] = lp->lstats.max_tx_qlen; 1992 data[1] = lp->lstats.tx_ints; 1993 data[2] = lp->lstats.rx_ints; 1994 data[3] = lp->lstats.tx_underrun; 1995} 1996 1997static struct { 1998 const char str[ETH_GSTRING_LEN]; 1999} ethtool_stats_keys[] = { 2000 { "max_tx_qlen" }, 2001 { "tx_ints" }, 2002 { "rx_ints" }, 2003 { "tx_underrun" }, 2004}; 2005 2006static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2007{ 2008 memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); 2009} 2010 2011static const struct ethtool_ops tc35815_ethtool_ops = { 2012 .get_drvinfo = tc35815_get_drvinfo, 2013 .get_link = ethtool_op_get_link, 2014 .get_msglevel = tc35815_get_msglevel, 2015 .set_msglevel = tc35815_set_msglevel, 2016 .get_strings = tc35815_get_strings, 2017 .get_sset_count = tc35815_get_sset_count, 2018 .get_ethtool_stats = tc35815_get_ethtool_stats, 2019 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2020 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2021}; 2022 2023static void tc35815_chip_reset(struct net_device *dev) 2024{ 2025 struct tc35815_regs __iomem *tr = 2026 (struct tc35815_regs __iomem *)dev->base_addr; 2027 int i; 2028 /* reset the controller */ 2029 tc_writel(MAC_Reset, &tr->MAC_Ctl); 2030 udelay(4); /* 3200ns */ 2031 i = 0; 2032 while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { 2033 if (i++ > 100) { 2034 printk(KERN_ERR "%s: MAC reset failed.\n", dev->name); 2035 break; 2036 } 2037 mdelay(1); 2038 } 2039 tc_writel(0, &tr->MAC_Ctl); 2040 2041 /* initialize registers to default value */ 2042 tc_writel(0, &tr->DMA_Ctl); 2043 tc_writel(0, &tr->TxThrsh); 2044 tc_writel(0, &tr->TxPollCtr); 2045 tc_writel(0, &tr->RxFragSize); 2046 tc_writel(0, &tr->Int_En); 2047 tc_writel(0, &tr->FDA_Bas); 2048 tc_writel(0, &tr->FDA_Lim); 2049 tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ 2050 tc_writel(0, &tr->CAM_Ctl); 2051 tc_writel(0, &tr->Tx_Ctl); 2052 tc_writel(0, &tr->Rx_Ctl); 2053 tc_writel(0, &tr->CAM_Ena); 2054 (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ 2055 2056 /* initialize internal SRAM */ 2057 tc_writel(DMA_TestMode, &tr->DMA_Ctl); 2058 for (i = 0; i < 0x1000; i += 4) { 2059 tc_writel(i, &tr->CAM_Adr); 2060 tc_writel(0, &tr->CAM_Data); 2061 } 2062 tc_writel(0, &tr->DMA_Ctl); 2063} 2064 2065static void tc35815_chip_init(struct net_device *dev) 2066{ 2067 struct tc35815_local *lp = netdev_priv(dev); 2068 struct tc35815_regs __iomem *tr = 2069 (struct tc35815_regs __iomem *)dev->base_addr; 2070 unsigned long txctl = TX_CTL_CMD; 2071 2072 /* load station address to CAM */ 2073 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); 2074 2075 /* Enable CAM (broadcast and unicast) */ 2076 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 2077 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2078 2079 /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ 2080 if (HAVE_DMA_RXALIGN(lp)) 2081 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); 2082 else 2083 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); 2084 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ 2085 tc_writel(TX_THRESHOLD, &tr->TxThrsh); 2086 tc_writel(INT_EN_CMD, &tr->Int_En); 2087 2088 /* set queues */ 2089 tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); 2090 tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base, 2091 &tr->FDA_Lim); 2092 /* 2093 * Activation method: 2094 * First, enable the MAC Transmitter and the DMA Receive circuits. 2095 * Then enable the DMA Transmitter and the MAC Receive circuits. 2096 */ 2097 tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ 2098 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ 2099 2100 /* start MAC transmitter */ 2101 /* TX4939 does not have EnLCarr */ 2102 if (lp->chiptype == TC35815_TX4939) 2103 txctl &= ~Tx_EnLCarr; 2104 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2105 if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL) 2106 txctl &= ~Tx_EnLCarr; 2107 tc_writel(txctl, &tr->Tx_Ctl); 2108} 2109 2110#ifdef CONFIG_PM 2111static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) 2112{ 2113 struct net_device *dev = pci_get_drvdata(pdev); 2114 struct tc35815_local *lp = netdev_priv(dev); 2115 unsigned long flags; 2116 2117 pci_save_state(pdev); 2118 if (!netif_running(dev)) 2119 return 0; 2120 netif_device_detach(dev); 2121 if (dev->phydev) 2122 phy_stop(dev->phydev); 2123 spin_lock_irqsave(&lp->lock, flags); 2124 tc35815_chip_reset(dev); 2125 spin_unlock_irqrestore(&lp->lock, flags); 2126 pci_set_power_state(pdev, PCI_D3hot); 2127 return 0; 2128} 2129 2130static int tc35815_resume(struct pci_dev *pdev) 2131{ 2132 struct net_device *dev = pci_get_drvdata(pdev); 2133 2134 pci_restore_state(pdev); 2135 if (!netif_running(dev)) 2136 return 0; 2137 pci_set_power_state(pdev, PCI_D0); 2138 tc35815_restart(dev); 2139 netif_carrier_off(dev); 2140 if (dev->phydev) 2141 phy_start(dev->phydev); 2142 netif_device_attach(dev); 2143 return 0; 2144} 2145#endif /* CONFIG_PM */ 2146 2147static struct pci_driver tc35815_pci_driver = { 2148 .name = MODNAME, 2149 .id_table = tc35815_pci_tbl, 2150 .probe = tc35815_init_one, 2151 .remove = tc35815_remove_one, 2152#ifdef CONFIG_PM 2153 .suspend = tc35815_suspend, 2154 .resume = tc35815_resume, 2155#endif 2156}; 2157 2158module_param_named(speed, options.speed, int, 0); 2159MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); 2160module_param_named(duplex, options.duplex, int, 0); 2161MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2162 2163module_pci_driver(tc35815_pci_driver); 2164MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); 2165MODULE_LICENSE("GPL");