cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bmac.c (41960B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Network device driver for the BMAC ethernet controller on
      4 * Apple Powermacs.  Assumes it's under a DBDMA controller.
      5 *
      6 * Copyright (C) 1998 Randy Gobbel.
      7 *
      8 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
      9 * dynamic procfs inode.
     10 */
     11#include <linux/interrupt.h>
     12#include <linux/module.h>
     13#include <linux/kernel.h>
     14#include <linux/netdevice.h>
     15#include <linux/etherdevice.h>
     16#include <linux/delay.h>
     17#include <linux/string.h>
     18#include <linux/timer.h>
     19#include <linux/proc_fs.h>
     20#include <linux/init.h>
     21#include <linux/spinlock.h>
     22#include <linux/crc32.h>
     23#include <linux/crc32poly.h>
     24#include <linux/bitrev.h>
     25#include <linux/ethtool.h>
     26#include <linux/slab.h>
     27#include <linux/pgtable.h>
     28#include <asm/dbdma.h>
     29#include <asm/io.h>
     30#include <asm/page.h>
     31#include <asm/machdep.h>
     32#include <asm/pmac_feature.h>
     33#include <asm/macio.h>
     34#include <asm/irq.h>
     35
     36#include "bmac.h"
     37
     38#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
     39#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
     40
     41/* switch to use multicast code lifted from sunhme driver */
     42#define SUNHME_MULTICAST
     43
     44#define N_RX_RING	64
     45#define N_TX_RING	32
     46#define MAX_TX_ACTIVE	1
     47#define ETHERCRC	4
     48#define ETHERMINPACKET	64
     49#define ETHERMTU	1500
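       /* An rx buffer must hold a maximum-size frame: 14-byte Ethernet
        * header + MTU payload + 4-byte FCS (left in place by RxCRCNoStrip)
        * + 2 bytes of padding so skb_reserve(skb, 2) can align the IP
        * header on a 32-bit boundary. */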
     50#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
     51#define TX_TIMEOUT	HZ	/* 1 second */
     52
     53/* Bits in transmit DMA status */
     54#define TX_DMA_ERR	0x80
     55
     56#define XXDEBUG(args)	/* debug logging stub; compiles to nothing unless expanded to printk args */
     57
     58struct bmac_data {
     59	/* volatile struct bmac *bmac; */
     60	struct sk_buff_head *queue;
     61	volatile struct dbdma_regs __iomem *tx_dma;
     62	int tx_dma_intr;
     63	volatile struct dbdma_regs __iomem *rx_dma;
     64	int rx_dma_intr;
     65	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
     66	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
     67	struct macio_dev *mdev;
     68	int is_bmac_plus;
     69	struct sk_buff *rx_bufs[N_RX_RING];
     70	int rx_fill;
     71	int rx_empty;
     72	struct sk_buff *tx_bufs[N_TX_RING];
     73	int tx_fill;
     74	int tx_empty;
     75	unsigned char tx_fullup;
     76	struct timer_list tx_timeout;
     77	int timeout_active;
     78	int sleeping;
     79	int opened;
     80	unsigned short hash_use_count[64];
     81	unsigned short hash_table_mask[4];
     82	spinlock_t lock;
     83};
     84
     85#if 0 /* Move that to ethtool */
     86
     87typedef struct bmac_reg_entry {
     88	char *name;
     89	unsigned short reg_offset;
     90} bmac_reg_entry_t;
     91
     92#define N_REG_ENTRIES 31
     93
     94static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
     95	{"MEMADD", MEMADD},
     96	{"MEMDATAHI", MEMDATAHI},
     97	{"MEMDATALO", MEMDATALO},
     98	{"TXPNTR", TXPNTR},
     99	{"RXPNTR", RXPNTR},
    100	{"IPG1", IPG1},
    101	{"IPG2", IPG2},
    102	{"ALIMIT", ALIMIT},
    103	{"SLOT", SLOT},
    104	{"PALEN", PALEN},
    105	{"PAPAT", PAPAT},
    106	{"TXSFD", TXSFD},
    107	{"JAM", JAM},
    108	{"TXCFG", TXCFG},
    109	{"TXMAX", TXMAX},
    110	{"TXMIN", TXMIN},
    111	{"PAREG", PAREG},
    112	{"DCNT", DCNT},
    113	{"NCCNT", NCCNT},
    114	{"NTCNT", NTCNT},
    115	{"EXCNT", EXCNT},
    116	{"LTCNT", LTCNT},
    117	{"TXSM", TXSM},
    118	{"RXCFG", RXCFG},
    119	{"RXMAX", RXMAX},
    120	{"RXMIN", RXMIN},
    121	{"FRCNT", FRCNT},
    122	{"AECNT", AECNT},
    123	{"FECNT", FECNT},
    124	{"RXSM", RXSM},
    125	{"RXCV", RXCV}
    126};
    127
    128#endif
    129
    130static unsigned char *bmac_emergency_rxbuf;
    131
    132/*
    133 * Number of bytes of private data per BMAC: allow enough for
    134 * the rx and tx dma commands plus a branch dma command each,
    135 * and another 16 bytes to allow us to align the dma command
    136 * buffers on a 16 byte boundary.
    137 */
    138#define PRIV_BYTES	(sizeof(struct bmac_data) \
    139	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
    140	+ sizeof(struct sk_buff_head))
    141
    142static int bmac_open(struct net_device *dev);
    143static int bmac_close(struct net_device *dev);
    144static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
    145static void bmac_set_multicast(struct net_device *dev);
    146static void bmac_reset_and_enable(struct net_device *dev);
    147static void bmac_start_chip(struct net_device *dev);
    148static void bmac_init_chip(struct net_device *dev);
    149static void bmac_init_registers(struct net_device *dev);
    150static void bmac_enable_and_reset_chip(struct net_device *dev);
    151static int bmac_set_address(struct net_device *dev, void *addr);
    152static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
    153static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
    154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
    155static void bmac_set_timeout(struct net_device *dev);
    156static void bmac_tx_timeout(struct timer_list *t);
    157static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
    158static void bmac_start(struct net_device *dev);
    159
    160#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
    161#define	DBDMA_CLEAR(x)	( (x) << 16)
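       /* DBDMA control-register writes carry a bit mask in the upper 16
        * bits and bit values in the lower 16: SET places x in both halves
        * (set those bits), CLEAR places x only in the mask half (clear
        * them).  dbdma_st32/dbdma_ld32 below use stwbrx/lwbrx so the
        * big-endian CPU accesses the little-endian DBDMA registers with
        * byte-reversed loads and stores. */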
    162
    163static inline void
    164dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
    165{
    166	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
    167}
    168
    169static inline unsigned long
    170dbdma_ld32(volatile __u32 __iomem *a)
    171{
    172	__u32 swap;
    173	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
    174	return swap;
    175}
    176
    177static void
    178dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
    179{
    180	dbdma_st32(&dmap->control,
    181		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
    182	eieio();
    183}
    184
    185static void
    186dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
    187{
    188	dbdma_st32(&dmap->control,
    189		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
    190	eieio();
    191	while (dbdma_ld32(&dmap->status) & RUN)
    192		eieio();
    193}
    194
    195static void
    196dbdma_setcmd(volatile struct dbdma_cmd *cp,
    197	     unsigned short cmd, unsigned count, unsigned long addr,
    198	     unsigned long cmd_dep)
    199{
    200	out_le16(&cp->command, cmd);
    201	out_le16(&cp->req_count, count);
    202	out_le32(&cp->phy_addr, addr);
    203	out_le32(&cp->cmd_dep, cmd_dep);
    204	out_le16(&cp->xfer_status, 0);
    205	out_le16(&cp->res_count, 0);
    206}
    207
    208static inline
    209void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
    210{
    211	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
    212}
    213
    214
    215static inline
    216unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
    217{
    218	return in_le16((void __iomem *)dev->base_addr + reg_offset);
    219}
    220
    221static void
    222bmac_enable_and_reset_chip(struct net_device *dev)
    223{
    224	struct bmac_data *bp = netdev_priv(dev);
    225	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    226	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
    227
    228	if (rd)
    229		dbdma_reset(rd);
    230	if (td)
    231		dbdma_reset(td);
    232
    233	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
    234}
    235
    236#define MIFDELAY	udelay(10)
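       /* The MIF routines below bit-bang the MII management interface
        * through MIFCSR.  Judging from the accesses: bit 0 toggles the
        * management clock, bit 1 drives the data line, bit 2 enables the
        * output driver and bit 3 samples data in (bit roles inferred
        * from the code, not from chip documentation). */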
    237
    238static unsigned int
    239bmac_mif_readbits(struct net_device *dev, int nb)
    240{
    241	unsigned int val = 0;
    242
    243	while (--nb >= 0) {
    244		bmwrite(dev, MIFCSR, 0);
    245		MIFDELAY;
    246		if (bmread(dev, MIFCSR) & 8)
    247			val |= 1 << nb;
    248		bmwrite(dev, MIFCSR, 1);
    249		MIFDELAY;
    250	}
    251	bmwrite(dev, MIFCSR, 0);
    252	MIFDELAY;
    253	bmwrite(dev, MIFCSR, 1);
    254	MIFDELAY;
    255	return val;
    256}
    257
    258static void
    259bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
    260{
    261	int b;
    262
    263	while (--nb >= 0) {
    264		b = (val & (1 << nb))? 6: 4;
    265		bmwrite(dev, MIFCSR, b);
    266		MIFDELAY;
    267		bmwrite(dev, MIFCSR, b|1);
    268		MIFDELAY;
    269	}
    270}
    271
    272static unsigned int
    273bmac_mif_read(struct net_device *dev, unsigned int addr)
    274{
    275	unsigned int val;
    276
    277	bmwrite(dev, MIFCSR, 4);
    278	MIFDELAY;
    279	bmac_mif_writebits(dev, ~0U, 32);
    280	bmac_mif_writebits(dev, 6, 4);
    281	bmac_mif_writebits(dev, addr, 10);
    282	bmwrite(dev, MIFCSR, 2);
    283	MIFDELAY;
    284	bmwrite(dev, MIFCSR, 1);
    285	MIFDELAY;
    286	val = bmac_mif_readbits(dev, 17);
    287	bmwrite(dev, MIFCSR, 4);
    288	MIFDELAY;
    289	return val;
    290}
    291
    292static void
    293bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
    294{
    295	bmwrite(dev, MIFCSR, 4);
    296	MIFDELAY;
    297	bmac_mif_writebits(dev, ~0U, 32);
    298	bmac_mif_writebits(dev, 5, 4);
    299	bmac_mif_writebits(dev, addr, 10);
    300	bmac_mif_writebits(dev, 2, 2);
    301	bmac_mif_writebits(dev, val, 16);
    302	bmac_mif_writebits(dev, 3, 2);
    303}
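       /* Together these implement IEEE 802.3 clause 22 MDIO frames: a
        * preamble of 32 ones, a 4-bit start/opcode field (0110 = read,
        * 0101 = write), 10 address bits (5-bit PHY + 5-bit register),
        * a turnaround, then 16 data bits; reads clock in 17 bits to
        * cover the turnaround cycle. */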
    304
    305static void
    306bmac_init_registers(struct net_device *dev)
    307{
    308	struct bmac_data *bp = netdev_priv(dev);
    309	volatile unsigned short regValue;
    310	const unsigned short *pWord16;
    311	int i;
    312
    313	/* XXDEBUG(("bmac: enter init_registers\n")); */
    314
    315	bmwrite(dev, RXRST, RxResetValue);
    316	bmwrite(dev, TXRST, TxResetBit);
    317
    318	i = 100;
    319	do {
    320		--i;
    321		udelay(10000);
    322		regValue = bmread(dev, TXRST); /* wait for the reset bit to clear (acknowledge) */
    323	} while ((regValue & TxResetBit) && i > 0);
    324
    325	if (!bp->is_bmac_plus) {
    326		regValue = bmread(dev, XCVRIF);
    327		regValue |= ClkBit | SerialMode | COLActiveLow;
    328		bmwrite(dev, XCVRIF, regValue);
    329		udelay(10000);
    330	}
    331
    332	bmwrite(dev, RSEED, (unsigned short)0x1968);
    333
    334	regValue = bmread(dev, XIFC);
    335	regValue |= TxOutputEnable;
    336	bmwrite(dev, XIFC, regValue);
    337
    338	bmread(dev, PAREG);
    339
    340	/* set collision counters to 0 */
    341	bmwrite(dev, NCCNT, 0);
    342	bmwrite(dev, NTCNT, 0);
    343	bmwrite(dev, EXCNT, 0);
    344	bmwrite(dev, LTCNT, 0);
    345
    346	/* set rx counters to 0 */
    347	bmwrite(dev, FRCNT, 0);
    348	bmwrite(dev, LECNT, 0);
    349	bmwrite(dev, AECNT, 0);
    350	bmwrite(dev, FECNT, 0);
    351	bmwrite(dev, RXCV, 0);
    352
    353	/* set tx fifo information */
    354	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */
    355
    356	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
    357	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
    358
    359	/* set rx fifo information */
    360	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
    361	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
    362
    363	//bmwrite(dev, TXCFG, TxMACEnable);	       	/* TxNeverGiveUp maybe later */
    364	bmread(dev, STATUS);		/* read it just to clear it */
    365
    366	/* zero out the chip Hash Filter registers */
    367	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
    368	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); 	/* bits 15 - 0 */
    369	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); 	/* bits 31 - 16 */
    370	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); 	/* bits 47 - 32 */
    371	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); 	/* bits 63 - 48 */
    372
    373	pWord16 = (const unsigned short *)dev->dev_addr;
    374	bmwrite(dev, MADD0, *pWord16++);
    375	bmwrite(dev, MADD1, *pWord16++);
    376	bmwrite(dev, MADD2, *pWord16);
    377
    378	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
    379
    380	bmwrite(dev, INTDISABLE, EnableNormal);
    381}
    382
    383#if 0
    384static void
    385bmac_disable_interrupts(struct net_device *dev)
    386{
    387	bmwrite(dev, INTDISABLE, DisableAll);
    388}
    389
    390static void
    391bmac_enable_interrupts(struct net_device *dev)
    392{
    393	bmwrite(dev, INTDISABLE, EnableNormal);
    394}
    395#endif
    396
    397
    398static void
    399bmac_start_chip(struct net_device *dev)
    400{
    401	struct bmac_data *bp = netdev_priv(dev);
    402	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    403	unsigned short	oldConfig;
    404
    405	/* enable rx dma channel */
    406	dbdma_continue(rd);
    407
    408	oldConfig = bmread(dev, TXCFG);
    409	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
    410
    411	/* turn on rx plus any other bits already on (promiscuous possibly) */
    412	oldConfig = bmread(dev, RXCFG);
    413	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
    414	udelay(20000);
    415}
    416
    417static void
    418bmac_init_phy(struct net_device *dev)
    419{
    420	unsigned int addr;
    421	struct bmac_data *bp = netdev_priv(dev);
    422
    423	printk(KERN_DEBUG "phy registers:");
    424	for (addr = 0; addr < 32; ++addr) {
    425		if ((addr & 7) == 0)
    426			printk(KERN_DEBUG);
    427		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
    428	}
    429	printk(KERN_CONT "\n");
    430
    431	if (bp->is_bmac_plus) {
    432		unsigned int capable, ctrl;
    433
    434		ctrl = bmac_mif_read(dev, 0);
    435		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
    436		if (bmac_mif_read(dev, 4) != capable ||
    437		    (ctrl & 0x1000) == 0) {
    438			bmac_mif_write(dev, 4, capable);
    439			bmac_mif_write(dev, 0, 0x1200);
    440		} else
    441			bmac_mif_write(dev, 0, 0x1000);
    442	}
    443}
    444
    445static void bmac_init_chip(struct net_device *dev)
    446{
    447	bmac_init_phy(dev);
    448	bmac_init_registers(dev);
    449}
    450
    451#ifdef CONFIG_PM
    452static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
    453{
    454	struct net_device* dev = macio_get_drvdata(mdev);
    455	struct bmac_data *bp = netdev_priv(dev);
    456	unsigned long flags;
    457	unsigned short config;
    458	int i;
    459
    460	netif_device_detach(dev);
    461	/* probably should wait for dma to finish & turn off the chip */
    462	spin_lock_irqsave(&bp->lock, flags);
    463	if (bp->timeout_active) {
    464		del_timer(&bp->tx_timeout);
    465		bp->timeout_active = 0;
    466	}
    467	disable_irq(dev->irq);
    468	disable_irq(bp->tx_dma_intr);
    469	disable_irq(bp->rx_dma_intr);
    470	bp->sleeping = 1;
    471	spin_unlock_irqrestore(&bp->lock, flags);
    472	if (bp->opened) {
    473		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    474		volatile struct dbdma_regs __iomem *td = bp->tx_dma;
    475
    476		config = bmread(dev, RXCFG);
    477		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
    478		config = bmread(dev, TXCFG);
    479		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
    480		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
    481		/* disable rx and tx dma */
    482		rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
    483		td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
    484		/* free some skb's */
    485		for (i=0; i<N_RX_RING; i++) {
    486			if (bp->rx_bufs[i] != NULL) {
    487				dev_kfree_skb(bp->rx_bufs[i]);
    488				bp->rx_bufs[i] = NULL;
    489			}
    490		}
    491		for (i = 0; i<N_TX_RING; i++) {
    492			if (bp->tx_bufs[i] != NULL) {
    493		       		dev_kfree_skb(bp->tx_bufs[i]);
    494	       			bp->tx_bufs[i] = NULL;
    495		       	}
    496		}
    497	}
    498	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
    499	return 0;
    500}
    501
    502static int bmac_resume(struct macio_dev *mdev)
    503{
    504	struct net_device* dev = macio_get_drvdata(mdev);
    505	struct bmac_data *bp = netdev_priv(dev);
    506
    507	/* see if this is enough */
    508	if (bp->opened)
    509		bmac_reset_and_enable(dev);
    510
    511	enable_irq(dev->irq);
    512	enable_irq(bp->tx_dma_intr);
    513	enable_irq(bp->rx_dma_intr);
    514	netif_device_attach(dev);
    515
    516	return 0;
    517}
    518#endif /* CONFIG_PM */
    519
    520static int bmac_set_address(struct net_device *dev, void *addr)
    521{
    522	struct bmac_data *bp = netdev_priv(dev);
    523	const unsigned short *pWord16;
    524	unsigned long flags;
    525
    526	XXDEBUG(("bmac: enter set_address\n"));
    527	spin_lock_irqsave(&bp->lock, flags);
    528
    529	eth_hw_addr_set(dev, addr);
    530
    531	/* load up the hardware address */
    532	pWord16  = (const unsigned short *)dev->dev_addr;
    533	bmwrite(dev, MADD0, *pWord16++);
    534	bmwrite(dev, MADD1, *pWord16++);
    535	bmwrite(dev, MADD2, *pWord16);
    536
    537	spin_unlock_irqrestore(&bp->lock, flags);
    538	XXDEBUG(("bmac: exit set_address\n"));
    539	return 0;
    540}
    541
    542static inline void bmac_set_timeout(struct net_device *dev)
    543{
    544	struct bmac_data *bp = netdev_priv(dev);
    545	unsigned long flags;
    546
    547	spin_lock_irqsave(&bp->lock, flags);
    548	if (bp->timeout_active)
    549		del_timer(&bp->tx_timeout);
    550	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    551	add_timer(&bp->tx_timeout);
    552	bp->timeout_active = 1;
    553	spin_unlock_irqrestore(&bp->lock, flags);
    554}
    555
    556static void
    557bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
    558{
    559	void *vaddr;
    560	unsigned long baddr;
    561	unsigned long len;
    562
    563	len = skb->len;
    564	vaddr = skb->data;
    565	baddr = virt_to_bus(vaddr);
    566
    567	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
    568}
    569
    570static void
    571bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
    572{
    573	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
    574
    575	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
    576		     virt_to_bus(addr), 0);
    577}
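       /* If no skb could be allocated for a ring slot, the command is
        * pointed at bmac_emergency_rxbuf instead, so the rx DMA ring
        * keeps running and that frame is simply dropped. */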
    578
    579static void
    580bmac_init_tx_ring(struct bmac_data *bp)
    581{
    582	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
    583
    584	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
    585
    586	bp->tx_empty = 0;
    587	bp->tx_fill = 0;
    588	bp->tx_fullup = 0;
    589
    590	/* put a branch at the end of the tx command list */
    591	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
    592		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
    593
    594	/* reset tx dma */
    595	dbdma_reset(td);
    596	out_le32(&td->wait_sel, 0x00200020);
    597	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
    598}
    599
    600static int
    601bmac_init_rx_ring(struct net_device *dev)
    602{
    603	struct bmac_data *bp = netdev_priv(dev);
    604	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    605	int i;
    606	struct sk_buff *skb;
    607
    608	/* initialize list of sk_buffs for receiving and set up recv dma */
    609	memset((char *)bp->rx_cmds, 0,
    610	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
    611	for (i = 0; i < N_RX_RING; i++) {
    612		if ((skb = bp->rx_bufs[i]) == NULL) {
    613			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
    614			if (skb != NULL)
    615				skb_reserve(skb, 2);
    616		}
    617		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
    618	}
    619
    620	bp->rx_empty = 0;
    621	bp->rx_fill = i;
    622
    623	/* Put a branch back to the beginning of the receive command list */
    624	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
    625		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
    626
    627	/* start rx dma */
    628	dbdma_reset(rd);
    629	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
    630
    631	return 1;
    632}
    633
    634
    635static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
    636{
    637	struct bmac_data *bp = netdev_priv(dev);
    638	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
    639	int i;
    640
    641	/* see if there's a free slot in the tx ring */
    642	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
    643	/* 	     bp->tx_empty, bp->tx_fill)); */
    644	i = bp->tx_fill + 1;
    645	if (i >= N_TX_RING)
    646		i = 0;
    647	if (i == bp->tx_empty) {
    648		netif_stop_queue(dev);
    649		bp->tx_fullup = 1;
    650		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
    651		return -1;		/* can't take it at the moment */
    652	}
    653
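       	/* Write a STOP into the next slot before filling the current
       	 * one: the DBDMA engine can never run past the last complete
       	 * command, and the branch at the end of the ring keeps the
       	 * list circular.  One slot is kept empty to distinguish a
       	 * full ring from an empty one. */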
    654	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
    655
    656	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
    657
    658	bp->tx_bufs[bp->tx_fill] = skb;
    659	bp->tx_fill = i;
    660
    661	dev->stats.tx_bytes += skb->len;
    662
    663	dbdma_continue(td);
    664
    665	return 0;
    666}
    667
    668static int rxintcount;
    669
    670static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
    671{
    672	struct net_device *dev = (struct net_device *) dev_id;
    673	struct bmac_data *bp = netdev_priv(dev);
    674	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
    675	volatile struct dbdma_cmd *cp;
    676	int i, nb, stat;
    677	struct sk_buff *skb;
    678	unsigned int residual;
    679	int last;
    680	unsigned long flags;
    681
    682	spin_lock_irqsave(&bp->lock, flags);
    683
    684	if (++rxintcount < 10) {
    685		XXDEBUG(("bmac_rxdma_intr\n"));
    686	}
    687
    688	last = -1;
    689	i = bp->rx_empty;
    690
    691	while (1) {
    692		cp = &bp->rx_cmds[i];
    693		stat = le16_to_cpu(cp->xfer_status);
    694		residual = le16_to_cpu(cp->res_count);
    695		if ((stat & ACTIVE) == 0)
    696			break;
    697		nb = RX_BUFLEN - residual - 2;
    698		if (nb < (ETHERMINPACKET - ETHERCRC)) {
    699			skb = NULL;
    700			dev->stats.rx_length_errors++;
    701			dev->stats.rx_errors++;
    702		} else {
    703			skb = bp->rx_bufs[i];
    704			bp->rx_bufs[i] = NULL;
    705		}
    706		if (skb != NULL) {
    707			nb -= ETHERCRC;
    708			skb_put(skb, nb);
    709			skb->protocol = eth_type_trans(skb, dev);
    710			netif_rx(skb);
    711			++dev->stats.rx_packets;
    712			dev->stats.rx_bytes += nb;
    713		} else {
    714			++dev->stats.rx_dropped;
    715		}
    716		if ((skb = bp->rx_bufs[i]) == NULL) {
    717			bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
    718			if (skb != NULL)
    719				skb_reserve(bp->rx_bufs[i], 2);
    720		}
    721		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
    722		cp->res_count = cpu_to_le16(0);
    723		cp->xfer_status = cpu_to_le16(0);
    724		last = i;
    725		if (++i >= N_RX_RING) i = 0;
    726	}
    727
    728	if (last != -1) {
    729		bp->rx_fill = last;
    730		bp->rx_empty = i;
    731	}
    732
    733	dbdma_continue(rd);
    734	spin_unlock_irqrestore(&bp->lock, flags);
    735
    736	if (rxintcount < 10) {
    737		XXDEBUG(("bmac_rxdma_intr done\n"));
    738	}
    739	return IRQ_HANDLED;
    740}
    741
    742static int txintcount;
    743
    744static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
    745{
    746	struct net_device *dev = (struct net_device *) dev_id;
    747	struct bmac_data *bp = netdev_priv(dev);
    748	volatile struct dbdma_cmd *cp;
    749	int stat;
    750	unsigned long flags;
    751
    752	spin_lock_irqsave(&bp->lock, flags);
    753
    754	if (txintcount++ < 10) {
    755		XXDEBUG(("bmac_txdma_intr\n"));
    756	}
    757
    758	/*     del_timer(&bp->tx_timeout); */
    759	/*     bp->timeout_active = 0; */
    760
    761	while (1) {
    762		cp = &bp->tx_cmds[bp->tx_empty];
    763		stat = le16_to_cpu(cp->xfer_status);
    764		if (txintcount < 10) {
    765			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
    766		}
    767		if (!(stat & ACTIVE)) {
    768			/*
    769			 * status field might not have been filled by DBDMA
    770			 */
    771			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
    772				break;
    773		}
    774
    775		if (bp->tx_bufs[bp->tx_empty]) {
    776			++dev->stats.tx_packets;
    777			dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
    778		}
    779		bp->tx_bufs[bp->tx_empty] = NULL;
    780		bp->tx_fullup = 0;
    781		netif_wake_queue(dev);
    782		if (++bp->tx_empty >= N_TX_RING)
    783			bp->tx_empty = 0;
    784		if (bp->tx_empty == bp->tx_fill)
    785			break;
    786	}
    787
    788	spin_unlock_irqrestore(&bp->lock, flags);
    789
    790	if (txintcount < 10) {
    791		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
    792	}
    793
    794	bmac_start(dev);
    795	return IRQ_HANDLED;
    796}
    797
    798#ifndef SUNHME_MULTICAST
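       /* Note: SUNHME_MULTICAST is defined above, so this hand-rolled
        * hash/filter implementation is compiled out; the sunhme-derived
        * bmac_set_multicast() further down is the one actually built. */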
    799/* Real fast bit-reversal algorithm, 6-bit values */
    800static int reverse6[64] = {
    801	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
    802	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
    803	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
    804	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
    805	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
    806	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
    807	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
    808	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
    809};
    810
    811static unsigned int
    812crc416(unsigned int curval, unsigned short nxtval)
    813{
    814	unsigned int counter, cur = curval, next = nxtval;
    815	int high_crc_set, low_data_set;
    816
    817	/* Swap bytes */
    818	next = ((next & 0x00FF) << 8) | (next >> 8);
    819
    820	/* Compute bit-by-bit */
    821	for (counter = 0; counter < 16; ++counter) {
    822		/* is high CRC bit set? */
    823		if ((cur & 0x80000000) == 0) high_crc_set = 0;
    824		else high_crc_set = 1;
    825
    826		cur = cur << 1;
    827
    828		if ((next & 0x0001) == 0) low_data_set = 0;
    829		else low_data_set = 1;
    830
    831		next = next >> 1;
    832
    833		/* do the XOR */
    834		if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
    835	}
    836	return cur;
    837}
    838
    839static unsigned int
    840bmac_crc(unsigned short *address)
    841{
    842	unsigned int newcrc;
    843
    844	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
    845	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
    846	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
    847	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */
    848
    849	return(newcrc);
    850}
    851
    852/*
    853 * Add requested mcast addr to BMac's hash table filter.
    854 *
    855 */
    856
    857static void
    858bmac_addhash(struct bmac_data *bp, unsigned char *addr)
    859{
    860	unsigned int	 crc;
    861	unsigned short	 mask;
    862
    863	if (!(*addr)) return;
    864	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
    865	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
    866	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
    867	mask = crc % 16;
    868	mask = (unsigned char)1 << mask;
    869	bp->hash_table_mask[crc/16] |= mask;
    870}
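       /* The filter hashes each address with CRC-32 and uses the top six
        * bits (bit-reversed via reverse6[]) to pick one of the 64 filter
        * bits.  hash_use_count reference-counts each bit so removing one
        * address does not clear a bit still needed by another address
        * that hashes to the same slot. */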
    871
    872static void
    873bmac_removehash(struct bmac_data *bp, unsigned char *addr)
    874{
    875	unsigned int crc;
    876	unsigned short mask;
    877
    878	/* Now, delete the address from the filter copy, as indicated */
    879	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
    880	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
    881	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
    882	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
    883	mask = crc % 16;
    884	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
    885	bp->hash_table_mask[crc/16] &= mask;
    886}
    887
    888/*
    889 * Sync the adapter with the software copy of the multicast mask
    890 *  (logical address filter).
    891 */
    892
    893static void
    894bmac_rx_off(struct net_device *dev)
    895{
    896	unsigned short rx_cfg;
    897
    898	rx_cfg = bmread(dev, RXCFG);
    899	rx_cfg &= ~RxMACEnable;
    900	bmwrite(dev, RXCFG, rx_cfg);
    901	do {
    902		rx_cfg = bmread(dev, RXCFG);
    903	}  while (rx_cfg & RxMACEnable);
    904}
    905
    906static unsigned short
    907bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
    908{
    909	unsigned short rx_cfg;
    910
    911	rx_cfg = bmread(dev, RXCFG);
    912	rx_cfg |= RxMACEnable;
    913	if (hash_enable) rx_cfg |= RxHashFilterEnable;
    914	else rx_cfg &= ~RxHashFilterEnable;
    915	if (promisc_enable) rx_cfg |= RxPromiscEnable;
    916	else rx_cfg &= ~RxPromiscEnable;
    917	bmwrite(dev, RXRST, RxResetValue);
    918	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
    919	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
    920	bmwrite(dev, RXCFG, rx_cfg );
    921	return rx_cfg;
    922}
    923
    924static void
    925bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
    926{
    927	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
    928	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
    929	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
    930	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
    931}
    932
    933#if 0
    934static void
    935bmac_add_multi(struct net_device *dev,
    936	       struct bmac_data *bp, unsigned char *addr)
    937{
    938	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
    939	bmac_addhash(bp, addr);
    940	bmac_rx_off(dev);
    941	bmac_update_hash_table_mask(dev, bp);
    942	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
    943	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
    944}
    945
    946static void
    947bmac_remove_multi(struct net_device *dev,
    948		  struct bmac_data *bp, unsigned char *addr)
    949{
    950	bmac_removehash(bp, addr);
    951	bmac_rx_off(dev);
    952	bmac_update_hash_table_mask(dev, bp);
    953	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
    954}
    955#endif
    956
    957/* Set or clear the multicast filter for this adaptor.
    958    num_addrs == -1	Promiscuous mode, receive all packets
    959    num_addrs == 0	Normal mode, clear multicast list
    960    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
    961			best-effort filtering.
    962 */
    963static void bmac_set_multicast(struct net_device *dev)
    964{
    965	struct netdev_hw_addr *ha;
    966	struct bmac_data *bp = netdev_priv(dev);
    967	int num_addrs = netdev_mc_count(dev);
    968	unsigned short rx_cfg;
    969	int i;
    970
    971	if (bp->sleeping)
    972		return;
    973
    974	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
    975
    976	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
    977		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
    978		bmac_update_hash_table_mask(dev, bp);
    979		rx_cfg = bmac_rx_on(dev, 1, 0);
    980		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
    981	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
    982		rx_cfg = bmread(dev, RXCFG);
    983		rx_cfg |= RxPromiscEnable;
    984		bmwrite(dev, RXCFG, rx_cfg);
    985		rx_cfg = bmac_rx_on(dev, 0, 1);
    986		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
    987	} else {
    988		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
    989		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
    990		if (num_addrs == 0) {
    991			rx_cfg = bmac_rx_on(dev, 0, 0);
    992			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
    993		} else {
    994			netdev_for_each_mc_addr(ha, dev)
    995				bmac_addhash(bp, ha->addr);
    996			bmac_update_hash_table_mask(dev, bp);
    997			rx_cfg = bmac_rx_on(dev, 1, 0);
    998			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
    999		}
   1000	}
   1001	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
   1002}
   1003#else /* ifdef SUNHME_MULTICAST */
   1004
   1005/* The version of set_multicast below was lifted from sunhme.c */
   1006
   1007static void bmac_set_multicast(struct net_device *dev)
   1008{
   1009	struct netdev_hw_addr *ha;
   1010	unsigned short rx_cfg;
   1011	u32 crc;
   1012
   1013	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
   1014		bmwrite(dev, BHASH0, 0xffff);
   1015		bmwrite(dev, BHASH1, 0xffff);
   1016		bmwrite(dev, BHASH2, 0xffff);
   1017		bmwrite(dev, BHASH3, 0xffff);
   1018	} else if (dev->flags & IFF_PROMISC) {
   1019		rx_cfg = bmread(dev, RXCFG);
   1020		rx_cfg |= RxPromiscEnable;
   1021		bmwrite(dev, RXCFG, rx_cfg);
   1022	} else {
   1023		u16 hash_table[4] = { 0 };
   1024
   1025		rx_cfg = bmread(dev, RXCFG);
   1026		rx_cfg &= ~RxPromiscEnable;
   1027		bmwrite(dev, RXCFG, rx_cfg);
   1028
   1029		netdev_for_each_mc_addr(ha, dev) {
   1030			crc = ether_crc_le(6, ha->addr);
   1031			crc >>= 26;
   1032			hash_table[crc >> 4] |= 1 << (crc & 0xf);
   1033		}
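       		/* ether_crc_le() >> 26 leaves the top six CRC bits:
       		 * bits 5-4 select one of the four 16-bit BHASH
       		 * registers, bits 3-0 the bit within it, matching the
       		 * chip's 64-bit hash filter layout. */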
   1034		bmwrite(dev, BHASH0, hash_table[0]);
   1035		bmwrite(dev, BHASH1, hash_table[1]);
   1036		bmwrite(dev, BHASH2, hash_table[2]);
   1037		bmwrite(dev, BHASH3, hash_table[3]);
   1038	}
   1039}
   1040#endif /* SUNHME_MULTICAST */
   1041
   1042static int miscintcount;
   1043
   1044static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
   1045{
   1046	struct net_device *dev = (struct net_device *) dev_id;
   1047	unsigned int status = bmread(dev, STATUS);
   1048	if (miscintcount++ < 10) {
   1049		XXDEBUG(("bmac_misc_intr\n"));
   1050	}
   1051	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
   1052	/*     bmac_txdma_intr_inner(irq, dev_id); */
   1053	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
   1054	if (status & RxErrorMask) dev->stats.rx_errors++;
   1055	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
   1056	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
   1057	if (status & RxOverFlow) dev->stats.rx_over_errors++;
   1058	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
   1059
   1060	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
   1061	if (status & TxErrorMask) dev->stats.tx_errors++;
   1062	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
   1063	if (status & TxNormalCollExp) dev->stats.collisions++;
   1064	return IRQ_HANDLED;
   1065}
   1066
   1067/*
   1068 * Procedure for reading EEPROM
   1069 */
   1070#define SROMAddressLength	5
   1071#define DataInOn		0x0008
   1072#define DataInOff		0x0000
   1073#define Clk			0x0002
   1074#define ChipSelect		0x0001
   1075#define SDIShiftCount		3
   1076#define SD0ShiftCount		2
   1077#define	DelayValue		1000	/* number of microseconds */
   1078#define SROMStartOffset		10	/* this is in words */
   1079#define SROMReadCount		3	/* number of words to read from SROM */
   1080#define SROMAddressBits		6
   1081#define EnetAddressOffset	20
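       /* The station address lives in a serial EEPROM bit-banged through
        * SROMCSR: per the defines above, bit 0 is chip select, bit 1 the
        * clock, bit 3 shifts data in and bit 2 shifts data out.  Each
        * read sends a 3-bit read command (110) and a 6-bit word address,
        * then clocks out 16 data bits. */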
   1082
   1083static unsigned char
   1084bmac_clock_out_bit(struct net_device *dev)
   1085{
   1086	unsigned short         data;
   1087	unsigned short         val;
   1088
   1089	bmwrite(dev, SROMCSR, ChipSelect | Clk);
   1090	udelay(DelayValue);
   1091
   1092	data = bmread(dev, SROMCSR);
   1093	udelay(DelayValue);
   1094	val = (data >> SD0ShiftCount) & 1;
   1095
   1096	bmwrite(dev, SROMCSR, ChipSelect);
   1097	udelay(DelayValue);
   1098
   1099	return val;
   1100}
   1101
   1102static void
   1103bmac_clock_in_bit(struct net_device *dev, unsigned int val)
   1104{
   1105	unsigned short data;
   1106
   1107	if (val != 0 && val != 1) return;
   1108
   1109	data = (val << SDIShiftCount);
   1110	bmwrite(dev, SROMCSR, data | ChipSelect  );
   1111	udelay(DelayValue);
   1112
   1113	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
   1114	udelay(DelayValue);
   1115
   1116	bmwrite(dev, SROMCSR, data | ChipSelect);
   1117	udelay(DelayValue);
   1118}
   1119
   1120static void
   1121reset_and_select_srom(struct net_device *dev)
   1122{
   1123	/* first reset */
   1124	bmwrite(dev, SROMCSR, 0);
   1125	udelay(DelayValue);
   1126
   1127	/* send it the read command (110) */
   1128	bmac_clock_in_bit(dev, 1);
   1129	bmac_clock_in_bit(dev, 1);
   1130	bmac_clock_in_bit(dev, 0);
   1131}
   1132
   1133static unsigned short
   1134read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
   1135{
   1136	unsigned short data, val;
   1137	int i;
   1138
   1139	/* send out the address we want to read from */
   1140	for (i = 0; i < addr_len; i++)	{
   1141		val = addr >> (addr_len-i-1);
   1142		bmac_clock_in_bit(dev, val & 1);
   1143	}
   1144
   1145	/* Now read in the 16-bit data */
   1146	data = 0;
   1147	for (i = 0; i < 16; i++)	{
   1148		val = bmac_clock_out_bit(dev);
   1149		data <<= 1;
   1150		data |= val;
   1151	}
   1152	bmwrite(dev, SROMCSR, 0);
   1153
   1154	return data;
   1155}
   1156
   1157/*
   1158 * It looks like Cogent and SMC use different methods for calculating
   1159 * checksums. What a pain...
   1160 */
   1161
   1162static int
   1163bmac_verify_checksum(struct net_device *dev)
   1164{
   1165	unsigned short data, storedCS;
   1166
   1167	reset_and_select_srom(dev);
   1168	data = read_srom(dev, 3, SROMAddressBits);
   1169	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
   1170
       	/* The stored checksum is read back but never validated, since
       	 * vendors disagree on how to compute it; always report
       	 * success. */
   1171	return 0;
   1172}
   1173
   1174
   1175static void
   1176bmac_get_station_address(struct net_device *dev, unsigned char *ea)
   1177{
   1178	int i;
   1179	unsigned short data;
   1180
   1181	for (i = 0; i < 3; i++)
   1182		{
   1183			reset_and_select_srom(dev);
   1184			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
   1185			ea[2*i]   = bitrev8(data & 0x0ff);
   1186			ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
   1187		}
   1188}
   1189
   1190static void bmac_reset_and_enable(struct net_device *dev)
   1191{
   1192	struct bmac_data *bp = netdev_priv(dev);
   1193	unsigned long flags;
   1194	struct sk_buff *skb;
   1195	unsigned char *data;
   1196
   1197	spin_lock_irqsave(&bp->lock, flags);
   1198	bmac_enable_and_reset_chip(dev);
   1199	bmac_init_tx_ring(bp);
   1200	bmac_init_rx_ring(dev);
   1201	bmac_init_chip(dev);
   1202	bmac_start_chip(dev);
   1203	bmwrite(dev, INTDISABLE, EnableNormal);
   1204	bp->sleeping = 0;
   1205
   1206	/*
   1207	 * It seems that the bmac can't receive until it's transmitted
   1208	 * a packet.  So we give it a dummy packet to transmit.
   1209	 */
   1210	skb = netdev_alloc_skb(dev, ETHERMINPACKET);
   1211	if (skb != NULL) {
   1212		data = skb_put_zero(skb, ETHERMINPACKET);
   1213		memcpy(data, dev->dev_addr, ETH_ALEN);
   1214		memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
   1215		bmac_transmit_packet(skb, dev);
   1216	}
   1217	spin_unlock_irqrestore(&bp->lock, flags);
   1218}
   1219
   1220static const struct ethtool_ops bmac_ethtool_ops = {
   1221	.get_link		= ethtool_op_get_link,
   1222};
   1223
   1224static const struct net_device_ops bmac_netdev_ops = {
   1225	.ndo_open		= bmac_open,
   1226	.ndo_stop		= bmac_close,
   1227	.ndo_start_xmit		= bmac_output,
   1228	.ndo_set_rx_mode	= bmac_set_multicast,
   1229	.ndo_set_mac_address	= bmac_set_address,
   1230	.ndo_validate_addr	= eth_validate_addr,
   1231};
   1232
   1233static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
   1234{
   1235	int j, rev, ret;
   1236	struct bmac_data *bp;
   1237	const unsigned char *prop_addr;
   1238	unsigned char addr[6];
   1239	u8 macaddr[6];
   1240	struct net_device *dev;
   1241	int is_bmac_plus = ((long)match->data) != 0;
   1242
   1243	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
   1244		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
   1245		return -ENODEV;
   1246	}
   1247	prop_addr = of_get_property(macio_get_of_node(mdev),
   1248			"mac-address", NULL);
   1249	if (prop_addr == NULL) {
   1250		prop_addr = of_get_property(macio_get_of_node(mdev),
   1251				"local-mac-address", NULL);
   1252		if (prop_addr == NULL) {
   1253			printk(KERN_ERR "BMAC: Can't get mac-address\n");
   1254			return -ENODEV;
   1255		}
   1256	}
   1257	memcpy(addr, prop_addr, sizeof(addr));
   1258
   1259	dev = alloc_etherdev(PRIV_BYTES);
   1260	if (!dev)
   1261		return -ENOMEM;
   1262
   1263	bp = netdev_priv(dev);
   1264	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
   1265	macio_set_drvdata(mdev, dev);
   1266
   1267	bp->mdev = mdev;
   1268	spin_lock_init(&bp->lock);
   1269
   1270	if (macio_request_resources(mdev, "bmac")) {
   1271		printk(KERN_ERR "BMAC: can't request IO resource !\n");
   1272		goto out_free;
   1273	}
   1274
   1275	dev->base_addr = (unsigned long)
   1276		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
   1277	if (dev->base_addr == 0)
   1278		goto out_release;
   1279
   1280	dev->irq = macio_irq(mdev, 0);
   1281
   1282	bmac_enable_and_reset_chip(dev);
   1283	bmwrite(dev, INTDISABLE, DisableAll);
   1284
   1285	rev = addr[0] == 0 && addr[1] == 0xA0; /* some firmwares apparently store the address bit-reversed */
   1286	for (j = 0; j < 6; ++j)
   1287		macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
   1288
   1289	eth_hw_addr_set(dev, macaddr);
   1290
   1291	/* Enable chip without interrupts for now */
   1292	bmac_enable_and_reset_chip(dev);
   1293	bmwrite(dev, INTDISABLE, DisableAll);
   1294
   1295	dev->netdev_ops = &bmac_netdev_ops;
   1296	dev->ethtool_ops = &bmac_ethtool_ops;
   1297
   1298	bmac_get_station_address(dev, addr);
   1299	if (bmac_verify_checksum(dev) != 0)
   1300		goto err_out_iounmap;
   1301
   1302	bp->is_bmac_plus = is_bmac_plus;
   1303	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
   1304	if (!bp->tx_dma)
   1305		goto err_out_iounmap;
   1306	bp->tx_dma_intr = macio_irq(mdev, 1);
   1307	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
   1308	if (!bp->rx_dma)
   1309		goto err_out_iounmap_tx;
   1310	bp->rx_dma_intr = macio_irq(mdev, 2);
   1311
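       	/* Carve up the PRIV_BYTES tail of the private area: the
       	 * 16-byte-aligned tx command ring (plus one branch command),
       	 * then the rx ring (plus branch), then the sk_buff queue
       	 * head, matching the PRIV_BYTES sizing above. */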
   1312	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
   1313	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
   1314
   1315	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
   1316	skb_queue_head_init(bp->queue);
   1317
   1318	timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
   1319
   1320	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
   1321	if (ret) {
   1322		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
   1323		goto err_out_iounmap_rx;
   1324	}
   1325	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
   1326	if (ret) {
   1327		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
   1328		goto err_out_irq0;
   1329	}
   1330	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
   1331	if (ret) {
   1332		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
   1333		goto err_out_irq1;
   1334	}
   1335
   1336	/* Mask chip interrupts and disable chip, will be
   1337	 * re-enabled on open()
   1338	 */
   1339	disable_irq(dev->irq);
   1340	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
   1341
   1342	if (register_netdev(dev) != 0) {
   1343		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
   1344		goto err_out_irq2;
   1345	}
   1346
   1347	printk(KERN_INFO "%s: BMAC%s at %pM",
   1348	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
   1349	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
   1350	printk(KERN_CONT "\n");
   1351
   1352	return 0;
   1353
   1354err_out_irq2:
   1355	free_irq(bp->rx_dma_intr, dev);
   1356err_out_irq1:
   1357	free_irq(bp->tx_dma_intr, dev);
   1358err_out_irq0:
   1359	free_irq(dev->irq, dev);
   1360err_out_iounmap_rx:
   1361	iounmap(bp->rx_dma);
   1362err_out_iounmap_tx:
   1363	iounmap(bp->tx_dma);
   1364err_out_iounmap:
   1365	iounmap((void __iomem *)dev->base_addr);
   1366out_release:
   1367	macio_release_resources(mdev);
   1368out_free:
   1369	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
   1370	free_netdev(dev);
   1371
   1372	return -ENODEV;
   1373}
   1374
   1375static int bmac_open(struct net_device *dev)
   1376{
   1377	struct bmac_data *bp = netdev_priv(dev);
   1378	/* XXDEBUG(("bmac: enter open\n")); */
   1379	/* reset the chip */
   1380	bp->opened = 1;
   1381	bmac_reset_and_enable(dev);
   1382	enable_irq(dev->irq);
   1383	return 0;
   1384}
   1385
   1386static int bmac_close(struct net_device *dev)
   1387{
   1388	struct bmac_data *bp = netdev_priv(dev);
   1389	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
   1390	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
   1391	unsigned short config;
   1392	int i;
   1393
   1394	bp->sleeping = 1;
   1395
   1396	/* disable rx and tx */
   1397	config = bmread(dev, RXCFG);
   1398	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
   1399
   1400	config = bmread(dev, TXCFG);
   1401	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
   1402
   1403	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
   1404
   1405	/* disable rx and tx dma */
   1406	rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
   1407	td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
   1408
   1409	/* free some skb's */
   1410	XXDEBUG(("bmac: free rx bufs\n"));
   1411	for (i=0; i<N_RX_RING; i++) {
   1412		if (bp->rx_bufs[i] != NULL) {
   1413			dev_kfree_skb(bp->rx_bufs[i]);
   1414			bp->rx_bufs[i] = NULL;
   1415		}
   1416	}
   1417	XXDEBUG(("bmac: free tx bufs\n"));
   1418	for (i = 0; i<N_TX_RING; i++) {
   1419		if (bp->tx_bufs[i] != NULL) {
   1420			dev_kfree_skb(bp->tx_bufs[i]);
   1421			bp->tx_bufs[i] = NULL;
   1422		}
   1423	}
   1424	XXDEBUG(("bmac: all bufs freed\n"));
   1425
   1426	bp->opened = 0;
   1427	disable_irq(dev->irq);
   1428	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
   1429
   1430	return 0;
   1431}
   1432
   1433static void
   1434bmac_start(struct net_device *dev)
   1435{
   1436	struct bmac_data *bp = netdev_priv(dev);
   1437	int i;
   1438	struct sk_buff *skb;
   1439	unsigned long flags;
   1440
   1441	if (bp->sleeping)
   1442		return;
   1443
   1444	spin_lock_irqsave(&bp->lock, flags);
   1445	while (1) {
   1446		i = bp->tx_fill + 1;
   1447		if (i >= N_TX_RING)
   1448			i = 0;
   1449		if (i == bp->tx_empty)
   1450			break;
   1451		skb = skb_dequeue(bp->queue);
   1452		if (skb == NULL)
   1453			break;
   1454		bmac_transmit_packet(skb, dev);
   1455	}
   1456	spin_unlock_irqrestore(&bp->lock, flags);
   1457}
   1458
   1459static netdev_tx_t
   1460bmac_output(struct sk_buff *skb, struct net_device *dev)
   1461{
   1462	struct bmac_data *bp = netdev_priv(dev);
   1463	skb_queue_tail(bp->queue, skb);
   1464	bmac_start(dev);
   1465	return NETDEV_TX_OK;
   1466}
   1467
   1468static void bmac_tx_timeout(struct timer_list *t)
   1469{
   1470	struct bmac_data *bp = from_timer(bp, t, tx_timeout);
   1471	struct net_device *dev = macio_get_drvdata(bp->mdev);
   1472	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
   1473	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
   1474	volatile struct dbdma_cmd *cp;
   1475	unsigned long flags;
   1476	unsigned short config, oldConfig;
   1477	int i;
   1478
   1479	XXDEBUG(("bmac: tx_timeout called\n"));
   1480	spin_lock_irqsave(&bp->lock, flags);
   1481	bp->timeout_active = 0;
   1482
   1483	/* update various counters */
   1484/*     	bmac_handle_misc_intrs(bp, 0); */
   1485
   1486	cp = &bp->tx_cmds[bp->tx_empty];
   1487/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
   1488/* 	   le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */
   1489/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */
   1490
   1491	/* turn off both tx and rx and reset the chip */
   1492	config = bmread(dev, RXCFG);
   1493	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
   1494	config = bmread(dev, TXCFG);
   1495	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
   1496	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
   1497	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
   1498	bmac_enable_and_reset_chip(dev);
   1499
   1500	/* restart rx dma */
   1501	cp = bus_to_virt(le32_to_cpu(rd->cmdptr));
   1502	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
   1503	out_le16(&cp->xfer_status, 0);
   1504	out_le32(&rd->cmdptr, virt_to_bus(cp));
   1505	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
   1506
   1507	/* fix up the transmit side */
   1508	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
   1509		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
   1510	i = bp->tx_empty;
   1511	++dev->stats.tx_errors;
   1512	if (i != bp->tx_fill) {
   1513		dev_kfree_skb(bp->tx_bufs[i]);
   1514		bp->tx_bufs[i] = NULL;
   1515		if (++i >= N_TX_RING) i = 0;
   1516		bp->tx_empty = i;
   1517	}
   1518	bp->tx_fullup = 0;
   1519	netif_wake_queue(dev);
   1520	if (i != bp->tx_fill) {
   1521		cp = &bp->tx_cmds[i];
   1522		out_le16(&cp->xfer_status, 0);
   1523		out_le16(&cp->command, OUTPUT_LAST);
   1524		out_le32(&td->cmdptr, virt_to_bus(cp));
   1525		out_le32(&td->control, DBDMA_SET(RUN));
   1526		/* 	bmac_set_timeout(dev); */
   1527		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
   1528	}
   1529
   1530	/* turn it back on */
   1531	oldConfig = bmread(dev, RXCFG);
   1532	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
   1533	oldConfig = bmread(dev, TXCFG);
   1534	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
   1535
   1536	spin_unlock_irqrestore(&bp->lock, flags);
   1537}
   1538
   1539#if 0
   1540static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
   1541{
   1542	int i,*ip;
   1543
   1544	for (i=0;i< count;i++) {
   1545		ip = (int*)(cp+i);
   1546
   1547		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
   1548		       le32_to_cpup(ip+0),
   1549		       le32_to_cpup(ip+1),
   1550		       le32_to_cpup(ip+2),
   1551		       le32_to_cpup(ip+3));
   1552	}
   1553
   1554}
   1555#endif
   1556
   1557#if 0
   1558static int
   1559bmac_proc_info(char *buffer, char **start, off_t offset, int length)
   1560{
   1561	int len = 0;
   1562	off_t pos   = 0;
   1563	off_t begin = 0;
   1564	int i;
   1565
   1566	if (bmac_devs == NULL)
   1567		return -ENOSYS;
   1568
   1569	len += sprintf(buffer, "BMAC counters & registers\n");
   1570
   1571	for (i = 0; i<N_REG_ENTRIES; i++) {
   1572		len += sprintf(buffer + len, "%s: %#08x\n",
   1573			       reg_entries[i].name,
   1574			       bmread(bmac_devs, reg_entries[i].reg_offset));
   1575		pos = begin + len;
   1576
   1577		if (pos < offset) {
   1578			len = 0;
   1579			begin = pos;
   1580		}
   1581
   1582		if (pos > offset+length) break;
   1583	}
   1584
   1585	*start = buffer + (offset - begin);
   1586	len -= (offset - begin);
   1587
   1588	if (len > length) len = length;
   1589
   1590	return len;
   1591}
   1592#endif
   1593
   1594static int bmac_remove(struct macio_dev *mdev)
   1595{
   1596	struct net_device *dev = macio_get_drvdata(mdev);
   1597	struct bmac_data *bp = netdev_priv(dev);
   1598
   1599	unregister_netdev(dev);
   1600
   1601	free_irq(dev->irq, dev);
   1602	free_irq(bp->tx_dma_intr, dev);
   1603	free_irq(bp->rx_dma_intr, dev);
   1604
   1605	iounmap((void __iomem *)dev->base_addr);
   1606	iounmap(bp->tx_dma);
   1607	iounmap(bp->rx_dma);
   1608
   1609	macio_release_resources(mdev);
   1610
   1611	free_netdev(dev);
   1612
   1613	return 0;
   1614}
   1615
   1616static const struct of_device_id bmac_match[] =
   1617{
   1618	{
   1619	.name 		= "bmac",
   1620	.data		= (void *)0,
   1621	},
   1622	{
   1623	.type		= "network",
   1624	.compatible	= "bmac+",
   1625	.data		= (void *)1,
   1626	},
   1627	{},
   1628};
   1629MODULE_DEVICE_TABLE (of, bmac_match);
   1630
   1631static struct macio_driver bmac_driver =
   1632{
   1633	.driver = {
   1634		.name 		= "bmac",
   1635		.owner		= THIS_MODULE,
   1636		.of_match_table	= bmac_match,
   1637	},
   1638	.probe		= bmac_probe,
   1639	.remove		= bmac_remove,
   1640#ifdef CONFIG_PM
   1641	.suspend	= bmac_suspend,
   1642	.resume		= bmac_resume,
   1643#endif
   1644};
   1645
   1646
   1647static int __init bmac_init(void)
   1648{
   1649	if (bmac_emergency_rxbuf == NULL) {
   1650		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
   1651		if (bmac_emergency_rxbuf == NULL)
   1652			return -ENOMEM;
   1653	}
   1654
   1655	return macio_register_driver(&bmac_driver);
   1656}
   1657
   1658static void __exit bmac_exit(void)
   1659{
   1660	macio_unregister_driver(&bmac_driver);
   1661
   1662	kfree(bmac_emergency_rxbuf);
   1663	bmac_emergency_rxbuf = NULL;
   1664}
   1665
   1666MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
   1667MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
   1668MODULE_LICENSE("GPL");
   1669
   1670module_init(bmac_init);
   1671module_exit(bmac_exit);