cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

7990.c (17285B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2065 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/pgtable.h>
#include <asm/irq.h>
/* Used for the temporary inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"

#define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp)	in_be16(lp->base + LANCE_RDP)

#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if IS_ENABLED(CONFIG_MVME147_NET)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x)	(lp->writerap(lp, x))
#define WRITERDP(lp, x)	(lp->writerdp(lp, x))
#define READRDP(lp)	(lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* IS_ENABLED(CONFIG_HPLANCE) */
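
/*
 * Note on the register access model used throughout this file: the
 * LANCE exposes just two ports. RAP (Register Address Port) selects
 * one of the control/status registers CSR0..CSR3, and RDP (Register
 * Data Port) then reads or writes the selected register, so every
 * access below is a WRITERAP()/WRITERDP() (or READRDP()) pair,
 * e.g. (illustrative only):
 *
 *	WRITERAP(lp, LE_CSR0);		// select CSR0
 *	WRITERDP(lp, LE_C0_STOP);	// write STOP into CSR0
 *
 * The HP300 variants above additionally poll the board status register
 * for LE_ACK after each access, presumably because the glue logic
 * needs time to complete it.
 */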

/* debugging output macros, various flavours */
/* #define TEST_HITS */
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t = 0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
		       ib->brx_ring[t].length, \
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
	} \
	for (t = 0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
		       ib->btx_ring[t].length, \
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
	} \
} while (0)
#else
#define PRINT_RINGS()
#endif

/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);                    /* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);       /* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}
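
/*
 * Worked example for the CSR1/CSR2 split above (illustrative numbers):
 * if LANCE_ADDR() yields leptr == 0x00FD1000 for the init block, CSR1
 * gets the low 16 bits (0x1000) and CSR2 gets the high bits (0x00FD),
 * which together form the 24-bit bus address the chip will DMA the
 * init block from. CSR3 takes the board-specific busmaster value
 * (byte swap / ALE control / byte control) that the board drivers are
 * expected to fill into lp->busmaster_regval.
 */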

/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = LE_MO_PROM;                             /* Tx & Rx enabled; note LE_MO_PROM also sets promiscuous mode */

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
	       ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}
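
/*
 * A note on the init block built above, as I read the Am7990 datasheet:
 * rx_len/tx_len pack two things into one 16-bit word -- the top three
 * bits carry log2 of the ring size (hence the "<< 13") and the low
 * byte carries bits 16-23 of the 24-bit ring address, whose low 16
 * bits go into rx_ptr/tx_ptr. Ownership of each descriptor is tracked
 * through the OWN bit: the Rx descriptors start out LE_R1_OWN (owned
 * by the chip, ready to receive into rx_buf), while the Tx descriptors
 * start out owned by the host until lance_start_xmit() hands them over.
 */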

/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
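
/*
 * The INIT/IDON handshake above: writing LE_C0_INIT makes the chip
 * fetch the init block by DMA and then raise IDON ("initialization
 * done") in CSR0. CSR0 status bits are cleared by writing a 1 back to
 * them, which is why IDON is acknowledged with its own WRITERDP()
 * before INEA|STRT enables interrupts and starts the chip.
 */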

static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}

static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk("%s",
			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk("%s",
			      ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif
	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);     /* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring[lp->rx_new];     /* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);           /* 16 byte align */
			skb_put(skb, len);             /* make room */
			skb_copy_to_linear_data(skb,
					 (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
					 len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
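
/*
 * Receive path summary: the loop above walks brx_ring from rx_new and
 * stops at the first descriptor the chip still owns (LE_R1_OWN set).
 * Each completed frame is copied out of the fixed rx_buf area into a
 * freshly allocated skb, then the descriptor is recycled by clearing
 * mblength and handing ownership back to the chip before rx_new
 * advances. This is a copying driver: the LANCE always DMAs into the
 * buffers inside the init block, never into skbs directly.
 */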

static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}
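
/*
 * Transmit completion mirrors the receive side: tx_old chases tx_new
 * around the ring, reaping every descriptor the chip has handed back
 * (LE_T1_OWN clear). Errors that shut the transmitter down (buffer
 * error or FIFO underflow) are handled by stopping the chip, rebuilding
 * the rings and restarting, rather than trying to recover in place.
 */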

static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);              /* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;        /* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;       /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;       /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}
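
/*
 * Interrupt acknowledgement note: csr0 is read once and written back
 * with the control bits (INEA/TDMD/STOP/STRT/INIT) masked off, so only
 * the latched status bits are cleared -- writing 1 to a status bit
 * acknowledges it, while writing 1 to a control bit would issue that
 * command. The final WRITERDP() clears any remaining error/status bits
 * and re-arms INEA before the lock is dropped.
 */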

int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	spin_lock_init(&lp->devlock);
	netif_start_queue(dev);

	return res;
}
EXPORT_SYMBOL_GPL(lance_open);

int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);

void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);

netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	netif_stop_queue(dev);

	if (!TX_BUFFS_AVAIL) {
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev_consume_skb_any(skb);

	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
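
/*
 * The length written into btx_ring[].length above is the LANCE's
 * two's-complement byte count with the top four bits forced to one,
 * hence "(-len) | 0xf000". Illustrative example: for a 64-byte frame,
 * -64 is 0xffc0 as a 16-bit value, so the field stays 0xffc0 and the
 * low 12 bits (0xfc0) read back as -64, i.e. a 64-byte buffer. Frames
 * shorter than ETH_ZLEN are zero-padded in tx_buf before being handed
 * to the chip.
 */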

/* taken from the depca driver via a2065.c */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
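
/*
 * Worked example for the hash above (illustrative address): the
 * little-endian CRC-32 of the six address bytes is computed and its
 * top six bits (crc >> 26) pick one of the 64 bits in the logical
 * address filter. Say crc >> 26 == 0x2a: then 0x2a >> 4 == 2 selects
 * the third 16-bit word of the filter and 1 << (0x2a & 0xf) sets bit
 * 10 within it. The chip runs the same CRC over incoming multicast
 * destinations and accepts a frame if its filter bit is set, so false
 * positives are possible and the stack still filters precisely.
 */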


void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);

#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
EXPORT_SYMBOL_GPL(lance_poll);
#endif

MODULE_LICENSE("GPL");