cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ucc_geth.c (114344B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
       3 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
      4 *
      5 * Author: Shlomi Gridish <gridish@freescale.com>
      6 *	   Li Yang <leoli@freescale.com>
      7 *
      8 * Description:
      9 * QE UCC Gigabit Ethernet Driver
     10 */
     11
     12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     13
     14#include <linux/kernel.h>
     15#include <linux/init.h>
     16#include <linux/errno.h>
     17#include <linux/slab.h>
     18#include <linux/stddef.h>
     19#include <linux/module.h>
     20#include <linux/interrupt.h>
     21#include <linux/netdevice.h>
     22#include <linux/etherdevice.h>
     23#include <linux/skbuff.h>
     24#include <linux/spinlock.h>
     25#include <linux/mm.h>
     26#include <linux/dma-mapping.h>
     27#include <linux/mii.h>
     28#include <linux/phy.h>
     29#include <linux/phy_fixed.h>
     30#include <linux/workqueue.h>
     31#include <linux/of_address.h>
     32#include <linux/of_irq.h>
     33#include <linux/of_mdio.h>
     34#include <linux/of_net.h>
     35#include <linux/of_platform.h>
     36
     37#include <linux/uaccess.h>
     38#include <asm/irq.h>
     39#include <asm/io.h>
     40#include <soc/fsl/qe/immap_qe.h>
     41#include <soc/fsl/qe/qe.h>
     42#include <soc/fsl/qe/ucc.h>
     43#include <soc/fsl/qe/ucc_fast.h>
     44#include <asm/machdep.h>
     45
     46#include "ucc_geth.h"
     47
     48#undef DEBUG
     49
     50#define ugeth_printk(level, format, arg...)  \
     51        printk(level format "\n", ## arg)
     52
     53#define ugeth_dbg(format, arg...)            \
     54        ugeth_printk(KERN_DEBUG , format , ## arg)
     55
     56#ifdef UGETH_VERBOSE_DEBUG
     57#define ugeth_vdbg ugeth_dbg
     58#else
     59#define ugeth_vdbg(fmt, args...) do { } while (0)
     60#endif				/* UGETH_VERBOSE_DEBUG */
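        /* All message categories up to and including NETIF_MSG_IFUP
         * (0x20 in netdevice.h), i.e. a default mask of 0x3f. */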
      61#define UGETH_MSG_DEFAULT	((NETIF_MSG_IFUP << 1) - 1)
     62
     63
     64static DEFINE_SPINLOCK(ugeth_lock);
     65
     66static struct {
     67	u32 msg_enable;
     68} debug = { -1 };
     69
     70module_param_named(debug, debug.msg_enable, int, 0);
     71MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
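        /* The -1 default is out of range for netif_msg_init(), which
         * therefore falls back to UGETH_MSG_DEFAULT at probe time unless
         * the user overrides it on the command line. */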
     72
     73static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
     74{
     75	static const u8 count[] = {
     76		[UCC_GETH_NUM_OF_THREADS_1] = 1,
     77		[UCC_GETH_NUM_OF_THREADS_2] = 2,
     78		[UCC_GETH_NUM_OF_THREADS_4] = 4,
     79		[UCC_GETH_NUM_OF_THREADS_6] = 6,
     80		[UCC_GETH_NUM_OF_THREADS_8] = 8,
     81	};
     82	if (idx >= ARRAY_SIZE(count))
     83		return 0;
     84	return count[idx];
     85}
     86
     87static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
     88{
     89	return 1;
     90}
     91
     92static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
     93{
     94	return 1;
     95}
     96
     97static const struct ucc_geth_info ugeth_primary_info = {
     98	.uf_info = {
     99		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
    100		    .max_rx_buf_length = 1536,
    101		    /* adjusted at startup if max-speed 1000 */
    102		    .urfs = UCC_GETH_URFS_INIT,
    103		    .urfet = UCC_GETH_URFET_INIT,
    104		    .urfset = UCC_GETH_URFSET_INIT,
    105		    .utfs = UCC_GETH_UTFS_INIT,
    106		    .utfet = UCC_GETH_UTFET_INIT,
    107		    .utftt = UCC_GETH_UTFTT_INIT,
    108		    .ufpt = 256,
    109		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
    110		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
    111		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
    112		    .renc = UCC_FAST_RX_ENCODING_NRZ,
    113		    .tcrc = UCC_FAST_16_BIT_CRC,
    114		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
    115		    },
    116	.extendedFilteringChainPointer = ((uint32_t) NULL),
    117	.typeorlen = 3072 /*1536 */ ,
    118	.nonBackToBackIfgPart1 = 0x40,
    119	.nonBackToBackIfgPart2 = 0x60,
    120	.miminumInterFrameGapEnforcement = 0x50,
    121	.backToBackInterFrameGap = 0x60,
    122	.mblinterval = 128,
    123	.nortsrbytetime = 5,
    124	.fracsiz = 1,
    125	.strictpriorityq = 0xff,
    126	.altBebTruncation = 0xa,
    127	.excessDefer = 1,
    128	.maxRetransmission = 0xf,
    129	.collisionWindow = 0x37,
    130	.receiveFlowControl = 1,
    131	.transmitFlowControl = 1,
    132	.maxGroupAddrInHash = 4,
    133	.maxIndAddrInHash = 4,
    134	.prel = 7,
    135	.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
    136	.minFrameLength = 64,
    137	.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
    138	.maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
    139	.vlantype = 0x8100,
    140	.ecamptr = ((uint32_t) NULL),
    141	.eventRegMask = UCCE_OTHER,
    142	.pausePeriod = 0xf000,
    143	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
    144	.bdRingLenTx = {
    145			TX_BD_RING_LEN,
    146			TX_BD_RING_LEN,
    147			TX_BD_RING_LEN,
    148			TX_BD_RING_LEN,
    149			TX_BD_RING_LEN,
    150			TX_BD_RING_LEN,
    151			TX_BD_RING_LEN,
    152			TX_BD_RING_LEN},
    153
    154	.bdRingLenRx = {
    155			RX_BD_RING_LEN,
    156			RX_BD_RING_LEN,
    157			RX_BD_RING_LEN,
    158			RX_BD_RING_LEN,
    159			RX_BD_RING_LEN,
    160			RX_BD_RING_LEN,
    161			RX_BD_RING_LEN,
    162			RX_BD_RING_LEN},
    163
    164	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
    165	.largestexternallookupkeysize =
    166	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
    167	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
    168		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
    169		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
    170	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
    171	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
    172	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
    173	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
    174	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
    175	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
    176	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
    177	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
    178	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
    179};
    180
    181#ifdef DEBUG
    182static void mem_disp(u8 *addr, int size)
    183{
    184	u8 *i;
     185	int size16Align = (size >> 4) << 4;
     186	int size4Align = (size >> 2) << 2;
    187	int notAlign = 0;
    188	if (size % 16)
    189		notAlign = 1;
    190
     191	for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
    192		printk("0x%08x: %08x %08x %08x %08x\r\n",
    193		       (u32) i,
    194		       *((u32 *) (i)),
    195		       *((u32 *) (i + 4)),
    196		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
    197	if (notAlign == 1)
    198		printk("0x%08x: ", (u32) i);
     199	for (; (u32) i < (u32) addr + size4Align; i += 4)
    200		printk("%08x ", *((u32 *) (i)));
    201	for (; (u32) i < (u32) addr + size; i++)
    202		printk("%02x", *((i)));
    203	if (notAlign == 1)
    204		printk("\r\n");
    205}
    206#endif /* DEBUG */
    207
    208static struct list_head *dequeue(struct list_head *lh)
    209{
    210	unsigned long flags;
    211
    212	spin_lock_irqsave(&ugeth_lock, flags);
    213	if (!list_empty(lh)) {
    214		struct list_head *node = lh->next;
    215		list_del(node);
    216		spin_unlock_irqrestore(&ugeth_lock, flags);
    217		return node;
    218	} else {
    219		spin_unlock_irqrestore(&ugeth_lock, flags);
    220		return NULL;
    221	}
    222}
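        /*
         * No enqueue counterpart appears in this excerpt; for reference, a
         * minimal sketch under the same ugeth_lock discipline (hypothetical
         * helper, not part of this file) would be:
         *
         *	static void enqueue(struct list_head *node, struct list_head *lh)
         *	{
         *		unsigned long flags;
         *
         *		spin_lock_irqsave(&ugeth_lock, flags);
         *		list_add_tail(node, lh);
         *		spin_unlock_irqrestore(&ugeth_lock, flags);
         *	}
         */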
    223
    224static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
    225		u8 __iomem *bd)
    226{
    227	struct sk_buff *skb;
    228
    229	skb = netdev_alloc_skb(ugeth->ndev,
    230			       ugeth->ug_info->uf_info.max_rx_buf_length +
    231			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
    232	if (!skb)
    233		return NULL;
    234
    235	/* We need the data buffer to be aligned properly.  We will reserve
    236	 * as many bytes as needed to align the data properly
    237	 */
    238	skb_reserve(skb,
    239		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
    240		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
    241					      1)));
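        	/* Worked example, assuming UCC_GETH_RX_DATA_BUF_ALIGNMENT is 64
        	 * (as in ucc_geth.h): if skb->data ends in 0x24, then
        	 * 64 - (0x24 & 63) = 28 bytes are reserved and the buffer starts
        	 * on the next 64-byte boundary (...0x40).  When skb->data is
        	 * already aligned, a full 64 bytes are reserved. */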
    242
    243	out_be32(&((struct qe_bd __iomem *)bd)->buf,
    244		      dma_map_single(ugeth->dev,
    245				     skb->data,
    246				     ugeth->ug_info->uf_info.max_rx_buf_length +
    247				     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
    248				     DMA_FROM_DEVICE));
    249
    250	out_be32((u32 __iomem *)bd,
    251			(R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
    252
    253	return skb;
    254}
    255
    256static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
    257{
    258	u8 __iomem *bd;
    259	u32 bd_status;
    260	struct sk_buff *skb;
    261	int i;
    262
    263	bd = ugeth->p_rx_bd_ring[rxQ];
    264	i = 0;
    265
    266	do {
    267		bd_status = in_be32((u32 __iomem *)bd);
    268		skb = get_new_skb(ugeth, bd);
    269
     270		if (!skb)	/* If we cannot allocate a data buffer,
     271				abort; cleanup happens elsewhere */
    272			return -ENOMEM;
    273
    274		ugeth->rx_skbuff[rxQ][i] = skb;
    275
    276		/* advance the BD pointer */
    277		bd += sizeof(struct qe_bd);
    278		i++;
    279	} while (!(bd_status & R_W));
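        	/* R_W is the wrap bit marking the last BD of the ring, so the
        	 * loop above has given every descriptor, including the wrap BD
        	 * itself, a fresh buffer. */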
    280
    281	return 0;
    282}
    283
    284static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
    285				  u32 *p_start,
    286				  u8 num_entries,
    287				  u32 thread_size,
    288				  u32 thread_alignment,
    289				  unsigned int risc,
    290				  int skip_page_for_first_entry)
    291{
    292	u32 init_enet_offset;
    293	u8 i;
    294	int snum;
    295
    296	for (i = 0; i < num_entries; i++) {
    297		if ((snum = qe_get_snum()) < 0) {
    298			if (netif_msg_ifup(ugeth))
    299				pr_err("Can not get SNUM\n");
    300			return snum;
    301		}
    302		if ((i == 0) && skip_page_for_first_entry)
    303		/* First entry of Rx does not have page */
    304			init_enet_offset = 0;
    305		else {
    306			init_enet_offset =
    307			    qe_muram_alloc(thread_size, thread_alignment);
    308			if (IS_ERR_VALUE(init_enet_offset)) {
    309				if (netif_msg_ifup(ugeth))
    310					pr_err("Can not allocate DPRAM memory\n");
    311				qe_put_snum((u8) snum);
    312				return -ENOMEM;
    313			}
    314		}
    315		*(p_start++) =
    316		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
    317		    | risc;
    318	}
    319
    320	return 0;
    321}
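        /* Each 32-bit entry written above thus packs the SNUM (upper bits),
         * the MURAM offset of the thread parameter page, and the RISC
         * allocation field -- the same layout that return_init_enet_entries()
         * below takes apart with the ENET_INIT_PARAM_* masks. */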
    322
    323static int return_init_enet_entries(struct ucc_geth_private *ugeth,
    324				    u32 *p_start,
    325				    u8 num_entries,
    326				    unsigned int risc,
    327				    int skip_page_for_first_entry)
    328{
    329	u32 init_enet_offset;
    330	u8 i;
    331	int snum;
    332
    333	for (i = 0; i < num_entries; i++) {
    334		u32 val = *p_start;
    335
     336		/* Check that this entry was actually valid --
     337		   needed in case an allocation failed */
    338		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
    339			snum =
    340			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
    341			    ENET_INIT_PARAM_SNUM_SHIFT;
    342			qe_put_snum((u8) snum);
    343			if (!((i == 0) && skip_page_for_first_entry)) {
    344			/* First entry of Rx does not have page */
    345				init_enet_offset =
    346				    (val & ENET_INIT_PARAM_PTR_MASK);
    347				qe_muram_free(init_enet_offset);
    348			}
    349			*p_start++ = 0;
    350		}
    351	}
    352
    353	return 0;
    354}
    355
    356#ifdef DEBUG
    357static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
    358				  u32 __iomem *p_start,
    359				  u8 num_entries,
    360				  u32 thread_size,
    361				  unsigned int risc,
    362				  int skip_page_for_first_entry)
    363{
    364	u32 init_enet_offset;
    365	u8 i;
    366	int snum;
    367
    368	for (i = 0; i < num_entries; i++) {
    369		u32 val = in_be32(p_start);
    370
     371		/* Check that this entry was actually valid --
     372		   needed in case an allocation failed */
    373		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
    374			snum =
    375			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
    376			    ENET_INIT_PARAM_SNUM_SHIFT;
    377			qe_put_snum((u8) snum);
    378			if (!((i == 0) && skip_page_for_first_entry)) {
    379			/* First entry of Rx does not have page */
    380				init_enet_offset =
    381				    (in_be32(p_start) &
    382				     ENET_INIT_PARAM_PTR_MASK);
    383				pr_info("Init enet entry %d:\n", i);
    384				pr_info("Base address: 0x%08x\n",
    385					(u32)qe_muram_addr(init_enet_offset));
    386				mem_disp(qe_muram_addr(init_enet_offset),
    387					 thread_size);
    388			}
    389			p_start++;
    390		}
    391	}
    392
    393	return 0;
    394}
    395#endif
    396
    397static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
    398{
    399	kfree(enet_addr_cont);
    400}
    401
    402static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
    403{
    404	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
    405	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
    406	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
    407}
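        /* Worked example: for mac = 00:11:22:33:44:55 the writes above
         * yield reg[0] = 0x5544, reg[1] = 0x3322, reg[2] = 0x1100, i.e.
         * the byte-reversed station address. */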
    408
    409static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
    410{
    411	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
    412
    413	if (paddr_num >= NUM_OF_PADDRS) {
    414		pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
    415		return -EINVAL;
    416	}
    417
    418	p_82xx_addr_filt =
    419	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
    420	    addressfiltering;
    421
    422	/* Writing address ff.ff.ff.ff.ff.ff disables address
    423	recognition for this register */
    424	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
    425	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
    426	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
    427
    428	return 0;
    429}
    430
    431static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
    432                                u8 *p_enet_addr)
    433{
    434	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
    435	u32 cecr_subblock;
    436
    437	p_82xx_addr_filt =
    438	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
    439	    addressfiltering;
    440
    441	cecr_subblock =
    442	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
    443
     444	/* Ethernet frames are defined in Little Endian mode; therefore,
     445	 * to insert the address into the hash (Big Endian mode), we
     446	 * reverse the bytes. */
    447
    448	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
    449
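        	/* Per the QE programming model, this host command should make
        	 * the microcode hash the address written to taddr above and set
        	 * the matching bit in the group hash table in parameter RAM. */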
    450	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
    451		     QE_CR_PROTOCOL_ETHERNET, 0);
    452}
    453
    454#ifdef DEBUG
    455static void get_statistics(struct ucc_geth_private *ugeth,
    456			   struct ucc_geth_tx_firmware_statistics *
    457			   tx_firmware_statistics,
    458			   struct ucc_geth_rx_firmware_statistics *
    459			   rx_firmware_statistics,
    460			   struct ucc_geth_hardware_statistics *hardware_statistics)
    461{
    462	struct ucc_fast __iomem *uf_regs;
    463	struct ucc_geth __iomem *ug_regs;
    464	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
    465	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
    466
    467	ug_regs = ugeth->ug_regs;
    468	uf_regs = (struct ucc_fast __iomem *) ug_regs;
    469	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
    470	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
    471
    472	/* Tx firmware only if user handed pointer and driver actually
    473	gathers Tx firmware statistics */
    474	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
    475		tx_firmware_statistics->sicoltx =
    476		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
    477		tx_firmware_statistics->mulcoltx =
    478		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
    479		tx_firmware_statistics->latecoltxfr =
    480		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
    481		tx_firmware_statistics->frabortduecol =
    482		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
    483		tx_firmware_statistics->frlostinmactxer =
    484		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
    485		tx_firmware_statistics->carriersenseertx =
    486		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
    487		tx_firmware_statistics->frtxok =
    488		    in_be32(&p_tx_fw_statistics_pram->frtxok);
    489		tx_firmware_statistics->txfrexcessivedefer =
    490		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
    491		tx_firmware_statistics->txpkts256 =
    492		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
    493		tx_firmware_statistics->txpkts512 =
    494		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
    495		tx_firmware_statistics->txpkts1024 =
    496		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
    497		tx_firmware_statistics->txpktsjumbo =
    498		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
    499	}
    500
    501	/* Rx firmware only if user handed pointer and driver actually
    502	 * gathers Rx firmware statistics */
    503	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
    504		int i;
    505		rx_firmware_statistics->frrxfcser =
    506		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
    507		rx_firmware_statistics->fraligner =
    508		    in_be32(&p_rx_fw_statistics_pram->fraligner);
    509		rx_firmware_statistics->inrangelenrxer =
    510		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
    511		rx_firmware_statistics->outrangelenrxer =
    512		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
    513		rx_firmware_statistics->frtoolong =
    514		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
    515		rx_firmware_statistics->runt =
    516		    in_be32(&p_rx_fw_statistics_pram->runt);
    517		rx_firmware_statistics->verylongevent =
    518		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
    519		rx_firmware_statistics->symbolerror =
    520		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
    521		rx_firmware_statistics->dropbsy =
    522		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
    523		for (i = 0; i < 0x8; i++)
    524			rx_firmware_statistics->res0[i] =
    525			    p_rx_fw_statistics_pram->res0[i];
    526		rx_firmware_statistics->mismatchdrop =
    527		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
    528		rx_firmware_statistics->underpkts =
    529		    in_be32(&p_rx_fw_statistics_pram->underpkts);
    530		rx_firmware_statistics->pkts256 =
    531		    in_be32(&p_rx_fw_statistics_pram->pkts256);
    532		rx_firmware_statistics->pkts512 =
    533		    in_be32(&p_rx_fw_statistics_pram->pkts512);
    534		rx_firmware_statistics->pkts1024 =
    535		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
    536		rx_firmware_statistics->pktsjumbo =
    537		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
    538		rx_firmware_statistics->frlossinmacer =
    539		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
    540		rx_firmware_statistics->pausefr =
    541		    in_be32(&p_rx_fw_statistics_pram->pausefr);
    542		for (i = 0; i < 0x4; i++)
    543			rx_firmware_statistics->res1[i] =
    544			    p_rx_fw_statistics_pram->res1[i];
    545		rx_firmware_statistics->removevlan =
    546		    in_be32(&p_rx_fw_statistics_pram->removevlan);
    547		rx_firmware_statistics->replacevlan =
    548		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
    549		rx_firmware_statistics->insertvlan =
    550		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
    551	}
    552
    553	/* Hardware only if user handed pointer and driver actually
    554	gathers hardware statistics */
    555	if (hardware_statistics &&
    556	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
    557		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
    558		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
    559		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
    560		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
    561		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
    562		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
    563		hardware_statistics->txok = in_be32(&ug_regs->txok);
    564		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
    565		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
    566		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
    567		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
    568		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
    569		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
    570		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
    571		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
    572	}
    573}
    574
    575static void dump_bds(struct ucc_geth_private *ugeth)
    576{
    577	int i;
    578	int length;
    579
    580	for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
    581		if (ugeth->p_tx_bd_ring[i]) {
    582			length =
    583			    (ugeth->ug_info->bdRingLenTx[i] *
    584			     sizeof(struct qe_bd));
    585			pr_info("TX BDs[%d]\n", i);
    586			mem_disp(ugeth->p_tx_bd_ring[i], length);
    587		}
    588	}
    589	for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
    590		if (ugeth->p_rx_bd_ring[i]) {
    591			length =
    592			    (ugeth->ug_info->bdRingLenRx[i] *
    593			     sizeof(struct qe_bd));
    594			pr_info("RX BDs[%d]\n", i);
    595			mem_disp(ugeth->p_rx_bd_ring[i], length);
    596		}
    597	}
    598}
    599
    600static void dump_regs(struct ucc_geth_private *ugeth)
    601{
    602	int i;
    603
    604	pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
    605	pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
    606
    607	pr_info("maccfg1    : addr - 0x%08x, val - 0x%08x\n",
    608		(u32)&ugeth->ug_regs->maccfg1,
    609		in_be32(&ugeth->ug_regs->maccfg1));
    610	pr_info("maccfg2    : addr - 0x%08x, val - 0x%08x\n",
    611		(u32)&ugeth->ug_regs->maccfg2,
    612		in_be32(&ugeth->ug_regs->maccfg2));
    613	pr_info("ipgifg     : addr - 0x%08x, val - 0x%08x\n",
    614		(u32)&ugeth->ug_regs->ipgifg,
    615		in_be32(&ugeth->ug_regs->ipgifg));
    616	pr_info("hafdup     : addr - 0x%08x, val - 0x%08x\n",
    617		(u32)&ugeth->ug_regs->hafdup,
    618		in_be32(&ugeth->ug_regs->hafdup));
    619	pr_info("ifctl      : addr - 0x%08x, val - 0x%08x\n",
    620		(u32)&ugeth->ug_regs->ifctl,
    621		in_be32(&ugeth->ug_regs->ifctl));
    622	pr_info("ifstat     : addr - 0x%08x, val - 0x%08x\n",
    623		(u32)&ugeth->ug_regs->ifstat,
    624		in_be32(&ugeth->ug_regs->ifstat));
    625	pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
    626		(u32)&ugeth->ug_regs->macstnaddr1,
    627		in_be32(&ugeth->ug_regs->macstnaddr1));
    628	pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
    629		(u32)&ugeth->ug_regs->macstnaddr2,
    630		in_be32(&ugeth->ug_regs->macstnaddr2));
    631	pr_info("uempr      : addr - 0x%08x, val - 0x%08x\n",
    632		(u32)&ugeth->ug_regs->uempr,
    633		in_be32(&ugeth->ug_regs->uempr));
    634	pr_info("utbipar    : addr - 0x%08x, val - 0x%08x\n",
    635		(u32)&ugeth->ug_regs->utbipar,
    636		in_be32(&ugeth->ug_regs->utbipar));
    637	pr_info("uescr      : addr - 0x%08x, val - 0x%04x\n",
    638		(u32)&ugeth->ug_regs->uescr,
    639		in_be16(&ugeth->ug_regs->uescr));
    640	pr_info("tx64       : addr - 0x%08x, val - 0x%08x\n",
    641		(u32)&ugeth->ug_regs->tx64,
    642		in_be32(&ugeth->ug_regs->tx64));
    643	pr_info("tx127      : addr - 0x%08x, val - 0x%08x\n",
    644		(u32)&ugeth->ug_regs->tx127,
    645		in_be32(&ugeth->ug_regs->tx127));
    646	pr_info("tx255      : addr - 0x%08x, val - 0x%08x\n",
    647		(u32)&ugeth->ug_regs->tx255,
    648		in_be32(&ugeth->ug_regs->tx255));
    649	pr_info("rx64       : addr - 0x%08x, val - 0x%08x\n",
    650		(u32)&ugeth->ug_regs->rx64,
    651		in_be32(&ugeth->ug_regs->rx64));
    652	pr_info("rx127      : addr - 0x%08x, val - 0x%08x\n",
    653		(u32)&ugeth->ug_regs->rx127,
    654		in_be32(&ugeth->ug_regs->rx127));
    655	pr_info("rx255      : addr - 0x%08x, val - 0x%08x\n",
    656		(u32)&ugeth->ug_regs->rx255,
    657		in_be32(&ugeth->ug_regs->rx255));
    658	pr_info("txok       : addr - 0x%08x, val - 0x%08x\n",
    659		(u32)&ugeth->ug_regs->txok,
    660		in_be32(&ugeth->ug_regs->txok));
    661	pr_info("txcf       : addr - 0x%08x, val - 0x%04x\n",
    662		(u32)&ugeth->ug_regs->txcf,
    663		in_be16(&ugeth->ug_regs->txcf));
    664	pr_info("tmca       : addr - 0x%08x, val - 0x%08x\n",
    665		(u32)&ugeth->ug_regs->tmca,
    666		in_be32(&ugeth->ug_regs->tmca));
    667	pr_info("tbca       : addr - 0x%08x, val - 0x%08x\n",
    668		(u32)&ugeth->ug_regs->tbca,
    669		in_be32(&ugeth->ug_regs->tbca));
    670	pr_info("rxfok      : addr - 0x%08x, val - 0x%08x\n",
    671		(u32)&ugeth->ug_regs->rxfok,
    672		in_be32(&ugeth->ug_regs->rxfok));
    673	pr_info("rxbok      : addr - 0x%08x, val - 0x%08x\n",
    674		(u32)&ugeth->ug_regs->rxbok,
    675		in_be32(&ugeth->ug_regs->rxbok));
    676	pr_info("rbyt       : addr - 0x%08x, val - 0x%08x\n",
    677		(u32)&ugeth->ug_regs->rbyt,
    678		in_be32(&ugeth->ug_regs->rbyt));
    679	pr_info("rmca       : addr - 0x%08x, val - 0x%08x\n",
    680		(u32)&ugeth->ug_regs->rmca,
    681		in_be32(&ugeth->ug_regs->rmca));
    682	pr_info("rbca       : addr - 0x%08x, val - 0x%08x\n",
    683		(u32)&ugeth->ug_regs->rbca,
    684		in_be32(&ugeth->ug_regs->rbca));
    685	pr_info("scar       : addr - 0x%08x, val - 0x%08x\n",
    686		(u32)&ugeth->ug_regs->scar,
    687		in_be32(&ugeth->ug_regs->scar));
    688	pr_info("scam       : addr - 0x%08x, val - 0x%08x\n",
    689		(u32)&ugeth->ug_regs->scam,
    690		in_be32(&ugeth->ug_regs->scam));
    691
    692	if (ugeth->p_thread_data_tx) {
    693		int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
    694
    695		pr_info("Thread data TXs:\n");
    696		pr_info("Base address: 0x%08x\n",
    697			(u32)ugeth->p_thread_data_tx);
    698		for (i = 0; i < count; i++) {
    699			pr_info("Thread data TX[%d]:\n", i);
    700			pr_info("Base address: 0x%08x\n",
    701				(u32)&ugeth->p_thread_data_tx[i]);
    702			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
    703				 sizeof(struct ucc_geth_thread_data_tx));
    704		}
    705	}
    706	if (ugeth->p_thread_data_rx) {
    707		int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
    708
    709		pr_info("Thread data RX:\n");
    710		pr_info("Base address: 0x%08x\n",
    711			(u32)ugeth->p_thread_data_rx);
    712		for (i = 0; i < count; i++) {
    713			pr_info("Thread data RX[%d]:\n", i);
    714			pr_info("Base address: 0x%08x\n",
    715				(u32)&ugeth->p_thread_data_rx[i]);
    716			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
    717				 sizeof(struct ucc_geth_thread_data_rx));
    718		}
    719	}
    720	if (ugeth->p_exf_glbl_param) {
    721		pr_info("EXF global param:\n");
    722		pr_info("Base address: 0x%08x\n",
    723			(u32)ugeth->p_exf_glbl_param);
    724		mem_disp((u8 *) ugeth->p_exf_glbl_param,
    725			 sizeof(*ugeth->p_exf_glbl_param));
    726	}
    727	if (ugeth->p_tx_glbl_pram) {
    728		pr_info("TX global param:\n");
    729		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
    730		pr_info("temoder      : addr - 0x%08x, val - 0x%04x\n",
    731			(u32)&ugeth->p_tx_glbl_pram->temoder,
    732			in_be16(&ugeth->p_tx_glbl_pram->temoder));
     733		pr_info("sqptr        : addr - 0x%08x, val - 0x%08x\n",
    734			(u32)&ugeth->p_tx_glbl_pram->sqptr,
    735			in_be32(&ugeth->p_tx_glbl_pram->sqptr));
    736		pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
    737			(u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
    738			in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
    739		pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
    740			(u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
    741			in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
    742		pr_info("tstate       : addr - 0x%08x, val - 0x%08x\n",
    743			(u32)&ugeth->p_tx_glbl_pram->tstate,
    744			in_be32(&ugeth->p_tx_glbl_pram->tstate));
    745		pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
    746			(u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
    747			ugeth->p_tx_glbl_pram->iphoffset[0]);
    748		pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
    749			(u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
    750			ugeth->p_tx_glbl_pram->iphoffset[1]);
    751		pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
    752			(u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
    753			ugeth->p_tx_glbl_pram->iphoffset[2]);
    754		pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
    755			(u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
    756			ugeth->p_tx_glbl_pram->iphoffset[3]);
    757		pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
    758			(u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
    759			ugeth->p_tx_glbl_pram->iphoffset[4]);
    760		pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
    761			(u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
    762			ugeth->p_tx_glbl_pram->iphoffset[5]);
    763		pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
    764			(u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
    765			ugeth->p_tx_glbl_pram->iphoffset[6]);
    766		pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
    767			(u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
    768			ugeth->p_tx_glbl_pram->iphoffset[7]);
    769		pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
    770			(u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
    771			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
    772		pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
    773			(u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
    774			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
    775		pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
    776			(u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
    777			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
    778		pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
    779			(u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
    780			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
    781		pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
    782			(u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
    783			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
    784		pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
    785			(u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
    786			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
    787		pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
    788			(u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
    789			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
    790		pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
    791			(u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
    792			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
    793		pr_info("tqptr        : addr - 0x%08x, val - 0x%08x\n",
    794			(u32)&ugeth->p_tx_glbl_pram->tqptr,
    795			in_be32(&ugeth->p_tx_glbl_pram->tqptr));
    796	}
    797	if (ugeth->p_rx_glbl_pram) {
    798		pr_info("RX global param:\n");
    799		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
    800		pr_info("remoder         : addr - 0x%08x, val - 0x%08x\n",
    801			(u32)&ugeth->p_rx_glbl_pram->remoder,
    802			in_be32(&ugeth->p_rx_glbl_pram->remoder));
    803		pr_info("rqptr           : addr - 0x%08x, val - 0x%08x\n",
    804			(u32)&ugeth->p_rx_glbl_pram->rqptr,
    805			in_be32(&ugeth->p_rx_glbl_pram->rqptr));
    806		pr_info("typeorlen       : addr - 0x%08x, val - 0x%04x\n",
    807			(u32)&ugeth->p_rx_glbl_pram->typeorlen,
    808			in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
    809		pr_info("rxgstpack       : addr - 0x%08x, val - 0x%02x\n",
    810			(u32)&ugeth->p_rx_glbl_pram->rxgstpack,
    811			ugeth->p_rx_glbl_pram->rxgstpack);
    812		pr_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x\n",
    813			(u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
    814			in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
    815		pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
    816			(u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
    817			in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
    818		pr_info("rstate          : addr - 0x%08x, val - 0x%02x\n",
    819			(u32)&ugeth->p_rx_glbl_pram->rstate,
    820			ugeth->p_rx_glbl_pram->rstate);
    821		pr_info("mrblr           : addr - 0x%08x, val - 0x%04x\n",
    822			(u32)&ugeth->p_rx_glbl_pram->mrblr,
    823			in_be16(&ugeth->p_rx_glbl_pram->mrblr));
    824		pr_info("rbdqptr         : addr - 0x%08x, val - 0x%08x\n",
    825			(u32)&ugeth->p_rx_glbl_pram->rbdqptr,
    826			in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
    827		pr_info("mflr            : addr - 0x%08x, val - 0x%04x\n",
    828			(u32)&ugeth->p_rx_glbl_pram->mflr,
    829			in_be16(&ugeth->p_rx_glbl_pram->mflr));
    830		pr_info("minflr          : addr - 0x%08x, val - 0x%04x\n",
    831			(u32)&ugeth->p_rx_glbl_pram->minflr,
    832			in_be16(&ugeth->p_rx_glbl_pram->minflr));
    833		pr_info("maxd1           : addr - 0x%08x, val - 0x%04x\n",
    834			(u32)&ugeth->p_rx_glbl_pram->maxd1,
    835			in_be16(&ugeth->p_rx_glbl_pram->maxd1));
    836		pr_info("maxd2           : addr - 0x%08x, val - 0x%04x\n",
    837			(u32)&ugeth->p_rx_glbl_pram->maxd2,
    838			in_be16(&ugeth->p_rx_glbl_pram->maxd2));
    839		pr_info("ecamptr         : addr - 0x%08x, val - 0x%08x\n",
    840			(u32)&ugeth->p_rx_glbl_pram->ecamptr,
    841			in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
    842		pr_info("l2qt            : addr - 0x%08x, val - 0x%08x\n",
    843			(u32)&ugeth->p_rx_glbl_pram->l2qt,
    844			in_be32(&ugeth->p_rx_glbl_pram->l2qt));
    845		pr_info("l3qt[0]         : addr - 0x%08x, val - 0x%08x\n",
    846			(u32)&ugeth->p_rx_glbl_pram->l3qt[0],
    847			in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
    848		pr_info("l3qt[1]         : addr - 0x%08x, val - 0x%08x\n",
    849			(u32)&ugeth->p_rx_glbl_pram->l3qt[1],
    850			in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
    851		pr_info("l3qt[2]         : addr - 0x%08x, val - 0x%08x\n",
    852			(u32)&ugeth->p_rx_glbl_pram->l3qt[2],
    853			in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
    854		pr_info("l3qt[3]         : addr - 0x%08x, val - 0x%08x\n",
    855			(u32)&ugeth->p_rx_glbl_pram->l3qt[3],
    856			in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
    857		pr_info("l3qt[4]         : addr - 0x%08x, val - 0x%08x\n",
    858			(u32)&ugeth->p_rx_glbl_pram->l3qt[4],
    859			in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
    860		pr_info("l3qt[5]         : addr - 0x%08x, val - 0x%08x\n",
    861			(u32)&ugeth->p_rx_glbl_pram->l3qt[5],
    862			in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
    863		pr_info("l3qt[6]         : addr - 0x%08x, val - 0x%08x\n",
    864			(u32)&ugeth->p_rx_glbl_pram->l3qt[6],
    865			in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
    866		pr_info("l3qt[7]         : addr - 0x%08x, val - 0x%08x\n",
    867			(u32)&ugeth->p_rx_glbl_pram->l3qt[7],
    868			in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
    869		pr_info("vlantype        : addr - 0x%08x, val - 0x%04x\n",
    870			(u32)&ugeth->p_rx_glbl_pram->vlantype,
    871			in_be16(&ugeth->p_rx_glbl_pram->vlantype));
    872		pr_info("vlantci         : addr - 0x%08x, val - 0x%04x\n",
    873			(u32)&ugeth->p_rx_glbl_pram->vlantci,
    874			in_be16(&ugeth->p_rx_glbl_pram->vlantci));
    875		for (i = 0; i < 64; i++)
    876			pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
    877				i,
    878				(u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
    879				ugeth->p_rx_glbl_pram->addressfiltering[i]);
    880		pr_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x\n",
    881			(u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
    882			in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
    883	}
    884	if (ugeth->p_send_q_mem_reg) {
    885		pr_info("Send Q memory registers:\n");
    886		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
    887		for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
    888			pr_info("SQQD[%d]:\n", i);
    889			pr_info("Base address: 0x%08x\n",
    890				(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
    891			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
    892				 sizeof(struct ucc_geth_send_queue_qd));
    893		}
    894	}
    895	if (ugeth->p_scheduler) {
    896		pr_info("Scheduler:\n");
    897		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
    898		mem_disp((u8 *) ugeth->p_scheduler,
    899			 sizeof(*ugeth->p_scheduler));
    900	}
    901	if (ugeth->p_tx_fw_statistics_pram) {
    902		pr_info("TX FW statistics pram:\n");
    903		pr_info("Base address: 0x%08x\n",
    904			(u32)ugeth->p_tx_fw_statistics_pram);
    905		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
    906			 sizeof(*ugeth->p_tx_fw_statistics_pram));
    907	}
    908	if (ugeth->p_rx_fw_statistics_pram) {
    909		pr_info("RX FW statistics pram:\n");
    910		pr_info("Base address: 0x%08x\n",
    911			(u32)ugeth->p_rx_fw_statistics_pram);
    912		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
    913			 sizeof(*ugeth->p_rx_fw_statistics_pram));
    914	}
    915	if (ugeth->p_rx_irq_coalescing_tbl) {
    916		pr_info("RX IRQ coalescing tables:\n");
    917		pr_info("Base address: 0x%08x\n",
    918			(u32)ugeth->p_rx_irq_coalescing_tbl);
    919		for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
    920			pr_info("RX IRQ coalescing table entry[%d]:\n", i);
    921			pr_info("Base address: 0x%08x\n",
    922				(u32)&ugeth->p_rx_irq_coalescing_tbl->
    923				coalescingentry[i]);
    924			pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
    925				(u32)&ugeth->p_rx_irq_coalescing_tbl->
    926				coalescingentry[i].interruptcoalescingmaxvalue,
    927				in_be32(&ugeth->p_rx_irq_coalescing_tbl->
    928					coalescingentry[i].
    929					interruptcoalescingmaxvalue));
    930			pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
    931				(u32)&ugeth->p_rx_irq_coalescing_tbl->
    932				coalescingentry[i].interruptcoalescingcounter,
    933				in_be32(&ugeth->p_rx_irq_coalescing_tbl->
    934					coalescingentry[i].
    935					interruptcoalescingcounter));
    936		}
    937	}
    938	if (ugeth->p_rx_bd_qs_tbl) {
    939		pr_info("RX BD QS tables:\n");
    940		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
    941		for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
    942			pr_info("RX BD QS table[%d]:\n", i);
    943			pr_info("Base address: 0x%08x\n",
    944				(u32)&ugeth->p_rx_bd_qs_tbl[i]);
    945			pr_info("bdbaseptr        : addr - 0x%08x, val - 0x%08x\n",
    946				(u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
    947				in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
    948			pr_info("bdptr            : addr - 0x%08x, val - 0x%08x\n",
    949				(u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
    950				in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
    951			pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
    952				(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
    953				in_be32(&ugeth->p_rx_bd_qs_tbl[i].
    954					externalbdbaseptr));
    955			pr_info("externalbdptr    : addr - 0x%08x, val - 0x%08x\n",
    956				(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
    957				in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
    958			pr_info("ucode RX Prefetched BDs:\n");
    959			pr_info("Base address: 0x%08x\n",
    960				(u32)qe_muram_addr(in_be32
    961						   (&ugeth->p_rx_bd_qs_tbl[i].
    962						    bdbaseptr)));
    963			mem_disp((u8 *)
    964				 qe_muram_addr(in_be32
    965					       (&ugeth->p_rx_bd_qs_tbl[i].
    966						bdbaseptr)),
    967				 sizeof(struct ucc_geth_rx_prefetched_bds));
    968		}
    969	}
    970	if (ugeth->p_init_enet_param_shadow) {
    971		int size;
    972		pr_info("Init enet param shadow:\n");
    973		pr_info("Base address: 0x%08x\n",
    974			(u32) ugeth->p_init_enet_param_shadow);
    975		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
    976			 sizeof(*ugeth->p_init_enet_param_shadow));
    977
    978		size = sizeof(struct ucc_geth_thread_rx_pram);
    979		if (ugeth->ug_info->rxExtendedFiltering) {
    980			size +=
    981			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
    982			if (ugeth->ug_info->largestexternallookupkeysize ==
    983			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
    984				size +=
    985			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
    986			if (ugeth->ug_info->largestexternallookupkeysize ==
    987			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
    988				size +=
    989			THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
    990		}
    991
    992		dump_init_enet_entries(ugeth,
    993				       &(ugeth->p_init_enet_param_shadow->
    994					 txthread[0]),
    995				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
    996				       sizeof(struct ucc_geth_thread_tx_pram),
    997				       ugeth->ug_info->riscTx, 0);
    998		dump_init_enet_entries(ugeth,
    999				       &(ugeth->p_init_enet_param_shadow->
   1000					 rxthread[0]),
   1001				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
   1002				       ugeth->ug_info->riscRx, 1);
   1003	}
   1004}
   1005#endif /* DEBUG */
   1006
   1007static void init_default_reg_vals(u32 __iomem *upsmr_register,
   1008				  u32 __iomem *maccfg1_register,
   1009				  u32 __iomem *maccfg2_register)
   1010{
   1011	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
   1012	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
   1013	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
   1014}
   1015
   1016static int init_half_duplex_params(int alt_beb,
   1017				   int back_pressure_no_backoff,
   1018				   int no_backoff,
   1019				   int excess_defer,
   1020				   u8 alt_beb_truncation,
   1021				   u8 max_retransmissions,
   1022				   u8 collision_window,
   1023				   u32 __iomem *hafdup_register)
   1024{
   1025	u32 value = 0;
   1026
   1027	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
   1028	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
   1029	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
   1030		return -EINVAL;
   1031
   1032	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
   1033
   1034	if (alt_beb)
   1035		value |= HALFDUP_ALT_BEB;
   1036	if (back_pressure_no_backoff)
   1037		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
   1038	if (no_backoff)
   1039		value |= HALFDUP_NO_BACKOFF;
   1040	if (excess_defer)
   1041		value |= HALFDUP_EXCESSIVE_DEFER;
   1042
   1043	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
   1044
   1045	value |= collision_window;
   1046
   1047	out_be32(hafdup_register, value);
   1048	return 0;
   1049}
   1050
   1051static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
   1052				       u8 non_btb_ipg,
   1053				       u8 min_ifg,
   1054				       u8 btb_ipg,
   1055				       u32 __iomem *ipgifg_register)
   1056{
   1057	u32 value = 0;
   1058
   1059	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
   1060	IPG part 2 */
   1061	if (non_btb_cs_ipg > non_btb_ipg)
   1062		return -EINVAL;
   1063
   1064	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
   1065	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
   1066	    /*(min_ifg        > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
   1067	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
   1068		return -EINVAL;
   1069
   1070	value |=
   1071	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
   1072	     IPGIFG_NBTB_CS_IPG_MASK);
   1073	value |=
   1074	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
   1075	     IPGIFG_NBTB_IPG_MASK);
   1076	value |=
   1077	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
   1078	     IPGIFG_MIN_IFG_MASK);
   1079	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
   1080
   1081	out_be32(ipgifg_register, value);
   1082	return 0;
   1083}
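        /* Worked example with the ugeth_primary_info defaults
         * (0x40/0x60/0x50/0x60): assuming the four fields occupy bytes 3..0
         * of IPGIFG (shifts of 24/16/8/0), the packed value is 0x40605060. */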
   1084
   1085int init_flow_control_params(u32 automatic_flow_control_mode,
   1086				    int rx_flow_control_enable,
   1087				    int tx_flow_control_enable,
   1088				    u16 pause_period,
   1089				    u16 extension_field,
   1090				    u32 __iomem *upsmr_register,
   1091				    u32 __iomem *uempr_register,
   1092				    u32 __iomem *maccfg1_register)
   1093{
   1094	u32 value = 0;
   1095
   1096	/* Set UEMPR register */
   1097	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
   1098	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
   1099	out_be32(uempr_register, value);
   1100
   1101	/* Set UPSMR register */
   1102	setbits32(upsmr_register, automatic_flow_control_mode);
   1103
   1104	value = in_be32(maccfg1_register);
   1105	if (rx_flow_control_enable)
   1106		value |= MACCFG1_FLOW_RX;
   1107	if (tx_flow_control_enable)
   1108		value |= MACCFG1_FLOW_TX;
   1109	out_be32(maccfg1_register, value);
   1110
   1111	return 0;
   1112}
   1113
   1114static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
   1115					     int auto_zero_hardware_statistics,
   1116					     u32 __iomem *upsmr_register,
   1117					     u16 __iomem *uescr_register)
   1118{
   1119	u16 uescr_value = 0;
   1120
   1121	/* Enable hardware statistics gathering if requested */
   1122	if (enable_hardware_statistics)
   1123		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
   1124
   1125	/* Clear hardware statistics counters */
   1126	uescr_value = in_be16(uescr_register);
   1127	uescr_value |= UESCR_CLRCNT;
   1128	/* Automatically zero hardware statistics counters on read,
   1129	if requested */
   1130	if (auto_zero_hardware_statistics)
   1131		uescr_value |= UESCR_AUTOZ;
   1132	out_be16(uescr_register, uescr_value);
   1133
   1134	return 0;
   1135}
   1136
   1137static int init_firmware_statistics_gathering_mode(int
   1138		enable_tx_firmware_statistics,
   1139		int enable_rx_firmware_statistics,
   1140		u32 __iomem *tx_rmon_base_ptr,
   1141		u32 tx_firmware_statistics_structure_address,
   1142		u32 __iomem *rx_rmon_base_ptr,
   1143		u32 rx_firmware_statistics_structure_address,
   1144		u16 __iomem *temoder_register,
   1145		u32 __iomem *remoder_register)
   1146{
   1147	/* Note: this function does not check if */
   1148	/* the parameters it receives are NULL   */
   1149
   1150	if (enable_tx_firmware_statistics) {
   1151		out_be32(tx_rmon_base_ptr,
   1152			 tx_firmware_statistics_structure_address);
   1153		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
   1154	}
   1155
   1156	if (enable_rx_firmware_statistics) {
   1157		out_be32(rx_rmon_base_ptr,
   1158			 rx_firmware_statistics_structure_address);
   1159		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
   1160	}
   1161
   1162	return 0;
   1163}
   1164
   1165static int init_mac_station_addr_regs(u8 address_byte_0,
   1166				      u8 address_byte_1,
   1167				      u8 address_byte_2,
   1168				      u8 address_byte_3,
   1169				      u8 address_byte_4,
   1170				      u8 address_byte_5,
   1171				      u32 __iomem *macstnaddr1_register,
   1172				      u32 __iomem *macstnaddr2_register)
   1173{
   1174	u32 value = 0;
   1175
   1176	/* Example: for a station address of 0x12345678ABCD, */
   1177	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
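        	/* With that address, the code below ends up writing 0xCDAB7856
        	   to MACSTNADDR1 and 0x34120000 to MACSTNADDR2. */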
   1178
   1179	/* MACSTNADDR1 Register: */
   1180
   1181	/* 0                      7   8                      15  */
   1182	/* station address byte 5     station address byte 4     */
   1183	/* 16                     23  24                     31  */
   1184	/* station address byte 3     station address byte 2     */
   1185	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
   1186	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
   1187	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
   1188	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
   1189
   1190	out_be32(macstnaddr1_register, value);
   1191
   1192	/* MACSTNADDR2 Register: */
   1193
   1194	/* 0                      7   8                      15  */
   1195	/* station address byte 1     station address byte 0     */
   1196	/* 16                     23  24                     31  */
   1197	/*         reserved                   reserved           */
   1198	value = 0;
   1199	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
   1200	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
   1201
   1202	out_be32(macstnaddr2_register, value);
   1203
   1204	return 0;
   1205}
   1206
   1207static int init_check_frame_length_mode(int length_check,
   1208					u32 __iomem *maccfg2_register)
   1209{
   1210	u32 value = 0;
   1211
   1212	value = in_be32(maccfg2_register);
   1213
   1214	if (length_check)
   1215		value |= MACCFG2_LC;
   1216	else
   1217		value &= ~MACCFG2_LC;
   1218
   1219	out_be32(maccfg2_register, value);
   1220	return 0;
   1221}
   1222
   1223static int init_preamble_length(u8 preamble_length,
   1224				u32 __iomem *maccfg2_register)
   1225{
   1226	if ((preamble_length < 3) || (preamble_length > 7))
   1227		return -EINVAL;
   1228
   1229	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
   1230			preamble_length << MACCFG2_PREL_SHIFT);
   1231
   1232	return 0;
   1233}
   1234
   1235static int init_rx_parameters(int reject_broadcast,
   1236			      int receive_short_frames,
   1237			      int promiscuous, u32 __iomem *upsmr_register)
   1238{
   1239	u32 value = 0;
   1240
   1241	value = in_be32(upsmr_register);
   1242
   1243	if (reject_broadcast)
   1244		value |= UCC_GETH_UPSMR_BRO;
   1245	else
   1246		value &= ~UCC_GETH_UPSMR_BRO;
   1247
   1248	if (receive_short_frames)
   1249		value |= UCC_GETH_UPSMR_RSH;
   1250	else
   1251		value &= ~UCC_GETH_UPSMR_RSH;
   1252
   1253	if (promiscuous)
   1254		value |= UCC_GETH_UPSMR_PRO;
   1255	else
   1256		value &= ~UCC_GETH_UPSMR_PRO;
   1257
   1258	out_be32(upsmr_register, value);
   1259
   1260	return 0;
   1261}
   1262
   1263static int init_max_rx_buff_len(u16 max_rx_buf_len,
   1264				u16 __iomem *mrblr_register)
   1265{
   1266	/* max_rx_buf_len value must be a multiple of 128 */
   1267	if ((max_rx_buf_len == 0) ||
   1268	    (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
   1269		return -EINVAL;
   1270
   1271	out_be16(mrblr_register, max_rx_buf_len);
   1272	return 0;
   1273}
   1274
   1275static int init_min_frame_len(u16 min_frame_length,
   1276			      u16 __iomem *minflr_register,
   1277			      u16 __iomem *mrblr_register)
   1278{
   1279	u16 mrblr_value = 0;
   1280
   1281	mrblr_value = in_be16(mrblr_register);
   1282	if (min_frame_length >= (mrblr_value - 4))
   1283		return -EINVAL;
   1284
   1285	out_be16(minflr_register, min_frame_length);
   1286	return 0;
   1287}
   1288
   1289static int adjust_enet_interface(struct ucc_geth_private *ugeth)
   1290{
   1291	struct ucc_geth_info *ug_info;
   1292	struct ucc_geth __iomem *ug_regs;
   1293	struct ucc_fast __iomem *uf_regs;
   1294	int ret_val;
   1295	u32 upsmr, maccfg2;
   1296	u16 value;
   1297
   1298	ugeth_vdbg("%s: IN", __func__);
   1299
   1300	ug_info = ugeth->ug_info;
   1301	ug_regs = ugeth->ug_regs;
   1302	uf_regs = ugeth->uccf->uf_regs;
   1303
   1304	/*                    Set MACCFG2                    */
   1305	maccfg2 = in_be32(&ug_regs->maccfg2);
   1306	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
   1307	if ((ugeth->max_speed == SPEED_10) ||
   1308	    (ugeth->max_speed == SPEED_100))
   1309		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
   1310	else if (ugeth->max_speed == SPEED_1000)
   1311		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
   1312	maccfg2 |= ug_info->padAndCrc;
   1313	out_be32(&ug_regs->maccfg2, maccfg2);
   1314
   1315	/*                    Set UPSMR                      */
   1316	upsmr = in_be32(&uf_regs->upsmr);
   1317	upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
   1318		   UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
   1319	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
   1320	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
   1321	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
   1322	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
   1323	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
   1324	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
   1325		if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
   1326			upsmr |= UCC_GETH_UPSMR_RPM;
   1327		switch (ugeth->max_speed) {
   1328		case SPEED_10:
   1329			upsmr |= UCC_GETH_UPSMR_R10M;
   1330			fallthrough;
   1331		case SPEED_100:
   1332			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
   1333				upsmr |= UCC_GETH_UPSMR_RMM;
   1334		}
   1335	}
   1336	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
   1337	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
   1338		upsmr |= UCC_GETH_UPSMR_TBIM;
   1339	}
   1340	if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
   1341		upsmr |= UCC_GETH_UPSMR_SGMM;
   1342
   1343	out_be32(&uf_regs->upsmr, upsmr);
   1344
   1345	/* Disable autonegotiation in tbi mode, because by default it
   1346	comes up in autonegotiation mode. */
   1347	/* Note that this depends on proper setting in utbipar register. */
   1348	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
   1349	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
   1350		struct ucc_geth_info *ug_info = ugeth->ug_info;
   1351		struct phy_device *tbiphy;
   1352
   1353		if (!ug_info->tbi_node)
   1354			pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
   1355
   1356		tbiphy = of_phy_find_device(ug_info->tbi_node);
   1357		if (!tbiphy)
   1358			pr_warn("Could not get TBI device\n");
   1359
   1360		value = phy_read(tbiphy, ENET_TBI_MII_CR);
   1361		value &= ~0x1000;	/* Turn off autonegotiation */
   1362		phy_write(tbiphy, ENET_TBI_MII_CR, value);
   1363
   1364		put_device(&tbiphy->mdio.dev);
   1365	}
   1366
   1367	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
   1368
   1369	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
   1370	if (ret_val != 0) {
   1371		if (netif_msg_probe(ugeth))
   1372			pr_err("Preamble length must be between 3 and 7 inclusive\n");
   1373		return ret_val;
   1374	}
   1375
   1376	return 0;
   1377}
   1378
   1379static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
   1380{
   1381	struct ucc_fast_private *uccf;
   1382	u32 cecr_subblock;
   1383	u32 temp;
   1384	int i = 10;
   1385
   1386	uccf = ugeth->uccf;
   1387
   1388	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
   1389	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
   1390	out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA);  /* clear by writing 1 */
   1391
   1392	/* Issue host command */
   1393	cecr_subblock =
   1394	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
   1395	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
   1396		     QE_CR_PROTOCOL_ETHERNET, 0);
   1397
   1398	/* Wait for command to complete */
   1399	do {
   1400		msleep(10);
   1401		temp = in_be32(uccf->p_ucce);
   1402	} while (!(temp & UCC_GETH_UCCE_GRA) && --i);
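        	/* Worst case this polls for ~100 ms (10 x msleep(10)); note that
        	 * 0 is returned even if the GRA event never arrives. */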
   1403
   1404	uccf->stopped_tx = 1;
   1405
   1406	return 0;
   1407}
   1408
   1409static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
   1410{
   1411	struct ucc_fast_private *uccf;
   1412	u32 cecr_subblock;
   1413	u8 temp;
   1414	int i = 10;
   1415
   1416	uccf = ugeth->uccf;
   1417
   1418	/* Clear acknowledge bit */
   1419	temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
   1420	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
   1421	out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
   1422
    1423	/* Keep issuing the command and checking the acknowledge bit until
    1424	it is asserted, as the spec requires */
   1425	do {
   1426		/* Issue host command */
   1427		cecr_subblock =
   1428		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
   1429						ucc_num);
   1430		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
   1431			     QE_CR_PROTOCOL_ETHERNET, 0);
   1432		msleep(10);
   1433		temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
   1434	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
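        	/* Again roughly 100 ms worst case; as with Tx, the function
        	 * reports success even if the acknowledge bit was never seen. */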
   1435
   1436	uccf->stopped_rx = 1;
   1437
   1438	return 0;
   1439}
   1440
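/* Resume transmission after a graceful stop */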
   1441static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
   1442{
   1443	struct ucc_fast_private *uccf;
   1444	u32 cecr_subblock;
   1445
   1446	uccf = ugeth->uccf;
   1447
   1448	cecr_subblock =
   1449	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
   1450	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
   1451	uccf->stopped_tx = 0;
   1452
   1453	return 0;
   1454}
   1455
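/* Resume reception after a graceful stop */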
   1456static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
   1457{
   1458	struct ucc_fast_private *uccf;
   1459	u32 cecr_subblock;
   1460
   1461	uccf = ugeth->uccf;
   1462
   1463	cecr_subblock =
   1464	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
   1465	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
   1466		     0);
   1467	uccf->stopped_rx = 0;
   1468
   1469	return 0;
   1470}
   1471
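/*
 * Enable the requested directions (Tx, Rx or both) at the UCC level,
 * first restarting any direction that was gracefully stopped earlier.
 */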
   1472static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
   1473{
   1474	struct ucc_fast_private *uccf;
   1475	int enabled_tx, enabled_rx;
   1476
   1477	uccf = ugeth->uccf;
   1478
   1479	/* check if the UCC number is in range. */
   1480	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
   1481		if (netif_msg_probe(ugeth))
   1482			pr_err("ucc_num out of range\n");
   1483		return -EINVAL;
   1484	}
   1485
   1486	enabled_tx = uccf->enabled_tx;
   1487	enabled_rx = uccf->enabled_rx;
   1488
   1489	/* Get Tx and Rx going again, in case this channel was actively
   1490	disabled. */
   1491	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
   1492		ugeth_restart_tx(ugeth);
   1493	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
   1494		ugeth_restart_rx(ugeth);
   1495
   1496	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */
   1497
   1498	return 0;
}
   1501
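/*
 * Gracefully stop the requested directions (Tx, Rx or both), then
 * disable them at the UCC level.
 */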
   1502static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
   1503{
   1504	struct ucc_fast_private *uccf;
   1505
   1506	uccf = ugeth->uccf;
   1507
   1508	/* check if the UCC number is in range. */
   1509	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
   1510		if (netif_msg_probe(ugeth))
   1511			pr_err("ucc_num out of range\n");
   1512		return -EINVAL;
   1513	}
   1514
   1515	/* Stop any transmissions */
   1516	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
   1517		ugeth_graceful_stop_tx(ugeth);
   1518
   1519	/* Stop any receptions */
   1520	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
   1521		ugeth_graceful_stop_rx(ugeth);
   1522
   1523	ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
   1524
   1525	return 0;
   1526}
   1527
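/* Quiesce all driver activity: stop the Tx queues, the UCC interrupt and NAPI */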
   1528static void ugeth_quiesce(struct ucc_geth_private *ugeth)
   1529{
   1530	/* Prevent any further xmits */
   1531	netif_tx_stop_all_queues(ugeth->ndev);
   1532
   1533	/* Disable the interrupt to avoid NAPI rescheduling. */
   1534	disable_irq(ugeth->ug_info->uf_info.irq);
   1535
   1536	/* Stop NAPI, and possibly wait for its completion. */
   1537	napi_disable(&ugeth->napi);
   1538}
   1539
   1540static void ugeth_activate(struct ucc_geth_private *ugeth)
   1541{
   1542	napi_enable(&ugeth->napi);
   1543	enable_irq(ugeth->ug_info->uf_info.irq);
   1544
   1545	/* allow to xmit again  */
   1546	netif_tx_wake_all_queues(ugeth->ndev);
   1547	__netdev_watchdog_up(ugeth->ndev);
   1548}
   1549
/* Called every time the controller might need to be made
 * aware of a new link state.  The PHY code conveys this
 * information through the attached phy_device, and this
 * function converts it into the appropriate MACCFG2/UPSMR
 * register values, quiescing the driver while they are written.
 */
   1556
   1557static void adjust_link(struct net_device *dev)
   1558{
   1559	struct ucc_geth_private *ugeth = netdev_priv(dev);
   1560	struct ucc_geth __iomem *ug_regs;
   1561	struct ucc_fast __iomem *uf_regs;
   1562	struct phy_device *phydev = ugeth->phydev;
   1563	int new_state = 0;
   1564
   1565	ug_regs = ugeth->ug_regs;
   1566	uf_regs = ugeth->uccf->uf_regs;
   1567
   1568	if (phydev->link) {
   1569		u32 tempval = in_be32(&ug_regs->maccfg2);
   1570		u32 upsmr = in_be32(&uf_regs->upsmr);
   1571		/* Now we make sure that we can be in full duplex mode.
   1572		 * If not, we operate in half-duplex mode. */
   1573		if (phydev->duplex != ugeth->oldduplex) {
   1574			new_state = 1;
   1575			if (!(phydev->duplex))
   1576				tempval &= ~(MACCFG2_FDX);
   1577			else
   1578				tempval |= MACCFG2_FDX;
   1579			ugeth->oldduplex = phydev->duplex;
   1580		}
   1581
   1582		if (phydev->speed != ugeth->oldspeed) {
   1583			new_state = 1;
   1584			switch (phydev->speed) {
   1585			case SPEED_1000:
   1586				tempval = ((tempval &
   1587					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
   1588					    MACCFG2_INTERFACE_MODE_BYTE);
   1589				break;
   1590			case SPEED_100:
   1591			case SPEED_10:
   1592				tempval = ((tempval &
   1593					    ~(MACCFG2_INTERFACE_MODE_MASK)) |
   1594					    MACCFG2_INTERFACE_MODE_NIBBLE);
   1595				/* if reduced mode, re-set UPSMR.R10M */
   1596				if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
   1597				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
   1598				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
   1599				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
   1600				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
   1601				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
   1602					if (phydev->speed == SPEED_10)
   1603						upsmr |= UCC_GETH_UPSMR_R10M;
   1604					else
   1605						upsmr &= ~UCC_GETH_UPSMR_R10M;
   1606				}
   1607				break;
   1608			default:
				if (netif_msg_link(ugeth))
					pr_warn("%s: unsupported speed (%d); expected 10/100/1000\n",
						dev->name, phydev->speed);
   1613				break;
   1614			}
   1615			ugeth->oldspeed = phydev->speed;
   1616		}
   1617
   1618		if (!ugeth->oldlink) {
   1619			new_state = 1;
   1620			ugeth->oldlink = 1;
   1621		}
   1622
   1623		if (new_state) {
   1624			/*
   1625			 * To change the MAC configuration we need to disable
   1626			 * the controller. To do so, we have to either grab
   1627			 * ugeth->lock, which is a bad idea since 'graceful
   1628			 * stop' commands might take quite a while, or we can
   1629			 * quiesce driver's activity.
   1630			 */
   1631			ugeth_quiesce(ugeth);
   1632			ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
   1633
   1634			out_be32(&ug_regs->maccfg2, tempval);
   1635			out_be32(&uf_regs->upsmr, upsmr);
   1636
   1637			ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
   1638			ugeth_activate(ugeth);
   1639		}
	} else if (ugeth->oldlink) {
		new_state = 1;
		ugeth->oldlink = 0;
		ugeth->oldspeed = 0;
		ugeth->oldduplex = -1;
	}
   1646
   1647	if (new_state && netif_msg_link(ugeth))
   1648		phy_print_status(phydev);
   1649}
   1650
   1651/* Initialize TBI PHY interface for communicating with the
   1652 * SERDES lynx PHY on the chip.  We communicate with this PHY
   1653 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the UTBIPAR register.  We assume
 * that the UTBIPAR register is valid.  Either the MDIO bus code will set
   1656 * it to a value that doesn't conflict with other PHYs on the bus, or the
   1657 * value doesn't matter, as there are no other PHYs on the bus.
   1658 */
   1659static void uec_configure_serdes(struct net_device *dev)
   1660{
   1661	struct ucc_geth_private *ugeth = netdev_priv(dev);
   1662	struct ucc_geth_info *ug_info = ugeth->ug_info;
   1663	struct phy_device *tbiphy;
   1664
   1665	if (!ug_info->tbi_node) {
   1666		dev_warn(&dev->dev, "SGMII mode requires that the device "
   1667			"tree specify a tbi-handle\n");
   1668		return;
   1669	}
   1670
   1671	tbiphy = of_phy_find_device(ug_info->tbi_node);
   1672	if (!tbiphy) {
   1673		dev_err(&dev->dev, "error: Could not get TBI device\n");
   1674		return;
   1675	}
   1676
   1677	/*
   1678	 * If the link is already up, we must already be ok, and don't need to
   1679	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
   1680	 * everything for us?  Resetting it takes the link down and requires
   1681	 * several seconds for it to come back.
   1682	 */
   1683	if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
   1684		put_device(&tbiphy->mdio.dev);
   1685		return;
   1686	}
   1687
   1688	/* Single clk mode, mii mode off(for serdes communication) */
   1689	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
   1690
   1691	phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
   1692
   1693	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
   1694
   1695	put_device(&tbiphy->mdio.dev);
   1696}
   1697
/* Configure the PHY for dev.
 * Returns 0 on success, -ENODEV on failure.
 */
   1701static int init_phy(struct net_device *dev)
   1702{
   1703	struct ucc_geth_private *priv = netdev_priv(dev);
   1704	struct ucc_geth_info *ug_info = priv->ug_info;
   1705	struct phy_device *phydev;
   1706
   1707	priv->oldlink = 0;
   1708	priv->oldspeed = 0;
   1709	priv->oldduplex = -1;
   1710
   1711	phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
   1712				priv->phy_interface);
   1713	if (!phydev) {
   1714		dev_err(&dev->dev, "Could not attach to PHY\n");
   1715		return -ENODEV;
   1716	}
   1717
   1718	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
   1719		uec_configure_serdes(dev);
   1720
   1721	phy_set_max_speed(phydev, priv->max_speed);
   1722
   1723	priv->phydev = phydev;
   1724
   1725	return 0;
   1726}
   1727
   1728static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
   1729{
   1730#ifdef DEBUG
   1731	ucc_fast_dump_regs(ugeth->uccf);
   1732	dump_regs(ugeth);
   1733	dump_bds(ugeth);
   1734#endif
   1735}
   1736
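/*
 * Clear one of the 82xx-style hash filters (group or individual):
 * pause traffic if necessary, zero the hash registers and release any
 * address containers still queued for that filter.
 */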
static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *ugeth,
						       enum enet_addr_type enet_addr_type)
   1741{
   1742	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
   1743	struct ucc_fast_private *uccf;
   1744	enum comm_dir comm_dir;
   1745	struct list_head *p_lh;
   1746	u16 i, num;
   1747	u32 __iomem *addr_h;
   1748	u32 __iomem *addr_l;
   1749	u8 *p_counter;
   1750
   1751	uccf = ugeth->uccf;
   1752
   1753	p_82xx_addr_filt =
   1754	    (struct ucc_geth_82xx_address_filtering_pram __iomem *)
   1755	    ugeth->p_rx_glbl_pram->addressfiltering;
   1756
   1757	if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
   1758		addr_h = &(p_82xx_addr_filt->gaddr_h);
   1759		addr_l = &(p_82xx_addr_filt->gaddr_l);
   1760		p_lh = &ugeth->group_hash_q;
   1761		p_counter = &(ugeth->numGroupAddrInHash);
   1762	} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
   1763		addr_h = &(p_82xx_addr_filt->iaddr_h);
   1764		addr_l = &(p_82xx_addr_filt->iaddr_l);
   1765		p_lh = &ugeth->ind_hash_q;
   1766		p_counter = &(ugeth->numIndAddrInHash);
	} else {
		return -EINVAL;
	}
   1769
   1770	comm_dir = 0;
   1771	if (uccf->enabled_tx)
   1772		comm_dir |= COMM_DIR_TX;
   1773	if (uccf->enabled_rx)
   1774		comm_dir |= COMM_DIR_RX;
   1775	if (comm_dir)
   1776		ugeth_disable(ugeth, comm_dir);
   1777
   1778	/* Clear the hash table. */
   1779	out_be32(addr_h, 0x00000000);
   1780	out_be32(addr_l, 0x00000000);
   1781
   1782	if (!p_lh)
   1783		return 0;
   1784
   1785	num = *p_counter;
   1786
   1787	/* Delete all remaining CQ elements */
   1788	for (i = 0; i < num; i++)
   1789		put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
   1790
   1791	*p_counter = 0;
   1792
   1793	if (comm_dir)
   1794		ugeth_enable(ugeth, comm_dir);
   1795
   1796	return 0;
   1797}
   1798
   1799static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
   1800						    u8 paddr_num)
   1801{
   1802	ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
   1803	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
   1804}
   1805
   1806static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
   1807{
   1808	struct ucc_geth_info *ug_info;
   1809	struct ucc_fast_info *uf_info;
   1810	u16 i, j;
	u8 __iomem *bd;

	ug_info = ugeth->ug_info;
   1815	uf_info = &ug_info->uf_info;
   1816
   1817	for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
   1818		if (ugeth->p_rx_bd_ring[i]) {
   1819			/* Return existing data buffers in ring */
   1820			bd = ugeth->p_rx_bd_ring[i];
   1821			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
   1822				if (ugeth->rx_skbuff[i][j]) {
   1823					dma_unmap_single(ugeth->dev,
   1824						in_be32(&((struct qe_bd __iomem *)bd)->buf),
   1825						ugeth->ug_info->
   1826						uf_info.max_rx_buf_length +
   1827						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
   1828						DMA_FROM_DEVICE);
   1829					dev_kfree_skb_any(
   1830						ugeth->rx_skbuff[i][j]);
   1831					ugeth->rx_skbuff[i][j] = NULL;
   1832				}
   1833				bd += sizeof(struct qe_bd);
   1834			}
   1835
   1836			kfree(ugeth->rx_skbuff[i]);
   1837
   1838			kfree(ugeth->p_rx_bd_ring[i]);
   1839			ugeth->p_rx_bd_ring[i] = NULL;
   1840		}
   1841	}
}
   1844
   1845static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
   1846{
   1847	struct ucc_geth_info *ug_info;
   1848	struct ucc_fast_info *uf_info;
   1849	u16 i, j;
   1850	u8 __iomem *bd;
   1851
   1852	netdev_reset_queue(ugeth->ndev);
   1853
   1854	ug_info = ugeth->ug_info;
   1855	uf_info = &ug_info->uf_info;
   1856
   1857	for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
   1858		bd = ugeth->p_tx_bd_ring[i];
   1859		if (!bd)
   1860			continue;
   1861		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
   1862			if (ugeth->tx_skbuff[i][j]) {
   1863				dma_unmap_single(ugeth->dev,
   1864						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
   1865						 (in_be32((u32 __iomem *)bd) &
   1866						  BD_LENGTH_MASK),
   1867						 DMA_TO_DEVICE);
   1868				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
   1869				ugeth->tx_skbuff[i][j] = NULL;
   1870			}
   1871		}
   1872
   1873		kfree(ugeth->tx_skbuff[i]);
   1874
   1875		kfree(ugeth->p_tx_bd_ring[i]);
   1876		ugeth->p_tx_bd_ring[i] = NULL;
   1877	}
}
   1880
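/*
 * Undo ucc_geth_startup(): free the muram areas, BD rings and address
 * filtering lists, and unmap the register block.
 */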
   1881static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
   1882{
   1883	if (!ugeth)
   1884		return;
   1885
   1886	if (ugeth->uccf) {
   1887		ucc_fast_free(ugeth->uccf);
   1888		ugeth->uccf = NULL;
   1889	}
   1890
   1891	qe_muram_free_addr(ugeth->p_thread_data_tx);
   1892	ugeth->p_thread_data_tx = NULL;
   1893
   1894	qe_muram_free_addr(ugeth->p_thread_data_rx);
   1895	ugeth->p_thread_data_rx = NULL;
   1896
   1897	qe_muram_free_addr(ugeth->p_exf_glbl_param);
   1898	ugeth->p_exf_glbl_param = NULL;
   1899
   1900	qe_muram_free_addr(ugeth->p_rx_glbl_pram);
   1901	ugeth->p_rx_glbl_pram = NULL;
   1902
   1903	qe_muram_free_addr(ugeth->p_tx_glbl_pram);
   1904	ugeth->p_tx_glbl_pram = NULL;
   1905
   1906	qe_muram_free_addr(ugeth->p_send_q_mem_reg);
   1907	ugeth->p_send_q_mem_reg = NULL;
   1908
   1909	qe_muram_free_addr(ugeth->p_scheduler);
   1910	ugeth->p_scheduler = NULL;
   1911
   1912	qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
   1913	ugeth->p_tx_fw_statistics_pram = NULL;
   1914
   1915	qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
   1916	ugeth->p_rx_fw_statistics_pram = NULL;
   1917
   1918	qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
   1919	ugeth->p_rx_irq_coalescing_tbl = NULL;
   1920
   1921	qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
   1922	ugeth->p_rx_bd_qs_tbl = NULL;
   1923
   1924	if (ugeth->p_init_enet_param_shadow) {
   1925		return_init_enet_entries(ugeth,
   1926					 &(ugeth->p_init_enet_param_shadow->
   1927					   rxthread[0]),
   1928					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
   1929					 ugeth->ug_info->riscRx, 1);
   1930		return_init_enet_entries(ugeth,
   1931					 &(ugeth->p_init_enet_param_shadow->
   1932					   txthread[0]),
   1933					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
   1934					 ugeth->ug_info->riscTx, 0);
   1935		kfree(ugeth->p_init_enet_param_shadow);
   1936		ugeth->p_init_enet_param_shadow = NULL;
   1937	}
   1938	ucc_geth_free_tx(ugeth);
   1939	ucc_geth_free_rx(ugeth);
   1940	while (!list_empty(&ugeth->group_hash_q))
   1941		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
   1942					(dequeue(&ugeth->group_hash_q)));
   1943	while (!list_empty(&ugeth->ind_hash_q))
   1944		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
   1945					(dequeue(&ugeth->ind_hash_q)));
   1946	if (ugeth->ug_regs) {
   1947		iounmap(ugeth->ug_regs);
   1948		ugeth->ug_regs = NULL;
   1949	}
   1950}
   1951
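/* Bring the hardware multicast/promiscuous filtering in line with
 * dev->flags and the device's multicast list.
 */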
   1952static void ucc_geth_set_multi(struct net_device *dev)
   1953{
   1954	struct ucc_geth_private *ugeth;
   1955	struct netdev_hw_addr *ha;
   1956	struct ucc_fast __iomem *uf_regs;
   1957	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
   1958
   1959	ugeth = netdev_priv(dev);
   1960
   1961	uf_regs = ugeth->uccf->uf_regs;
   1962
   1963	if (dev->flags & IFF_PROMISC) {
   1964		setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
   1965	} else {
   1966		clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
   1967
   1968		p_82xx_addr_filt =
   1969		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
   1970		    p_rx_glbl_pram->addressfiltering;
   1971
   1972		if (dev->flags & IFF_ALLMULTI) {
   1973			/* Catch all multicast addresses, so set the
   1974			 * filter to all 1's.
   1975			 */
   1976			out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
   1977			out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
   1978		} else {
   1979			/* Clear filter and add the addresses in the list.
   1980			 */
   1981			out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
   1982			out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
   1983
   1984			netdev_for_each_mc_addr(ha, dev) {
   1985				/* Ask CPM to run CRC and set bit in
   1986				 * filter mask.
   1987				 */
   1988				hw_add_addr_in_hash(ugeth, ha->addr);
   1989			}
   1990		}
   1991	}
   1992}
   1993
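/* Bring the controller down and release everything startup allocated */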
   1994static void ucc_geth_stop(struct ucc_geth_private *ugeth)
   1995{
   1996	struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
   1997	struct phy_device *phydev = ugeth->phydev;
   1998
   1999	ugeth_vdbg("%s: IN", __func__);
   2000
   2001	/*
   2002	 * Tell the kernel the link is down.
   2003	 * Must be done before disabling the controller
   2004	 * or deadlock may happen.
   2005	 */
   2006	phy_stop(phydev);
   2007
   2008	/* Disable the controller */
   2009	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
   2010
   2011	/* Mask all interrupts */
   2012	out_be32(ugeth->uccf->p_uccm, 0x00000000);
   2013
   2014	/* Clear all interrupts */
   2015	out_be32(ugeth->uccf->p_ucce, 0xffffffff);
   2016
   2017	/* Disable Rx and Tx */
   2018	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
   2019
   2020	ucc_geth_memclean(ugeth);
   2021}
   2022
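/*
 * Sanity-check the configuration in ug_info, initialize the underlying
 * fast UCC and map the geth register block.
 */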
   2023static int ucc_struct_init(struct ucc_geth_private *ugeth)
   2024{
   2025	struct ucc_geth_info *ug_info;
   2026	struct ucc_fast_info *uf_info;
   2027	int i;
   2028
   2029	ug_info = ugeth->ug_info;
   2030	uf_info = &ug_info->uf_info;
   2031
   2032	/* Rx BD lengths */
   2033	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
   2034		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
   2035		    (ug_info->bdRingLenRx[i] %
   2036		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
   2037			if (netif_msg_probe(ugeth))
   2038				pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
   2039			return -EINVAL;
   2040		}
   2041	}
   2042
   2043	/* Tx BD lengths */
   2044	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
   2045		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
   2046			if (netif_msg_probe(ugeth))
   2047				pr_err("Tx BD ring length must be no smaller than 2\n");
   2048			return -EINVAL;
   2049		}
   2050	}
   2051
   2052	/* mrblr */
   2053	if ((uf_info->max_rx_buf_length == 0) ||
   2054	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
   2055		if (netif_msg_probe(ugeth))
   2056			pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
   2057		return -EINVAL;
   2058	}
   2059
   2060	/* num Tx queues */
   2061	if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
   2062		if (netif_msg_probe(ugeth))
   2063			pr_err("number of tx queues too large\n");
   2064		return -EINVAL;
   2065	}
   2066
   2067	/* num Rx queues */
   2068	if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
   2069		if (netif_msg_probe(ugeth))
   2070			pr_err("number of rx queues too large\n");
   2071		return -EINVAL;
   2072	}
   2073
   2074	/* l2qt */
   2075	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
   2076		if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
   2077			if (netif_msg_probe(ugeth))
   2078				pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
   2079			return -EINVAL;
   2080		}
   2081	}
   2082
   2083	/* l3qt */
   2084	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
   2085		if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
   2086			if (netif_msg_probe(ugeth))
   2087				pr_err("IP priority table entry must not be larger than number of Rx queues\n");
   2088			return -EINVAL;
   2089		}
   2090	}
   2091
   2092	if (ug_info->cam && !ug_info->ecamptr) {
   2093		if (netif_msg_probe(ugeth))
   2094			pr_err("If cam mode is chosen, must supply cam ptr\n");
   2095		return -EINVAL;
   2096	}
   2097
   2098	if ((ug_info->numStationAddresses !=
   2099	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
   2100	    ug_info->rxExtendedFiltering) {
   2101		if (netif_msg_probe(ugeth))
   2102			pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
   2103		return -EINVAL;
   2104	}
   2105
   2106	/* Generate uccm_mask for receive */
   2107	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
   2108	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
   2109		uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
   2110
   2111	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
   2112		uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
   2113	/* Initialize the general fast UCC block. */
   2114	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
   2115		if (netif_msg_probe(ugeth))
   2116			pr_err("Failed to init uccf\n");
   2117		return -ENOMEM;
   2118	}
   2119
   2120	/* read the number of risc engines, update the riscTx and riscRx
   2121	 * if there are 4 riscs in QE
   2122	 */
   2123	if (qe_get_num_of_risc() == 4) {
   2124		ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
   2125		ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
   2126	}
   2127
   2128	ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
   2129	if (!ugeth->ug_regs) {
   2130		if (netif_msg_probe(ugeth))
   2131			pr_err("Failed to ioremap regs\n");
   2132		return -ENOMEM;
   2133	}
   2134
   2135	return 0;
   2136}
   2137
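/*
 * Allocate and initialize the Tx BD rings and the matching skb pointer
 * arrays.  The last BD of each ring gets the Wrap bit so the ring
 * closes on itself.
 */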
   2138static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
   2139{
   2140	struct ucc_geth_info *ug_info;
   2141	struct ucc_fast_info *uf_info;
   2142	int length;
   2143	u16 i, j;
   2144	u8 __iomem *bd;
   2145
   2146	ug_info = ugeth->ug_info;
   2147	uf_info = &ug_info->uf_info;
   2148
   2149	/* Allocate Tx bds */
   2150	for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
   2151		u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
   2152				UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
   2153		u32 alloc;
   2154
   2155		length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
   2156		alloc = round_up(length, align);
   2157		alloc = roundup_pow_of_two(alloc);
   2158
   2159		ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
   2160
   2161		if (!ugeth->p_tx_bd_ring[j]) {
   2162			if (netif_msg_ifup(ugeth))
   2163				pr_err("Can not allocate memory for Tx bd rings\n");
   2164			return -ENOMEM;
   2165		}
   2166		/* Zero unused end of bd ring, according to spec */
   2167		memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
   2168	}
   2169
   2170	/* Init Tx bds */
   2171	for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
   2172		/* Setup the skbuff rings */
   2173		ugeth->tx_skbuff[j] =
   2174			kcalloc(ugeth->ug_info->bdRingLenTx[j],
   2175				sizeof(struct sk_buff *), GFP_KERNEL);
   2176
   2177		if (ugeth->tx_skbuff[j] == NULL) {
   2178			if (netif_msg_ifup(ugeth))
   2179				pr_err("Could not allocate tx_skbuff\n");
   2180			return -ENOMEM;
   2181		}
   2182
   2183		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
   2184		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
   2185		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
   2186			/* clear bd buffer */
   2187			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
   2188			/* set bd status and length */
   2189			out_be32((u32 __iomem *)bd, 0);
   2190			bd += sizeof(struct qe_bd);
   2191		}
   2192		bd -= sizeof(struct qe_bd);
   2193		/* set bd status and length */
   2194		out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
   2195	}
   2196
   2197	return 0;
   2198}
   2199
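/*
 * Allocate and initialize the Rx BD rings and the matching skb pointer
 * arrays.  Each Rx BD is armed to raise an interrupt (R_I), and the
 * last BD of each ring is marked with the Wrap bit.
 */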
   2200static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
   2201{
   2202	struct ucc_geth_info *ug_info;
   2203	struct ucc_fast_info *uf_info;
   2204	int length;
   2205	u16 i, j;
   2206	u8 __iomem *bd;
   2207
   2208	ug_info = ugeth->ug_info;
   2209	uf_info = &ug_info->uf_info;
   2210
   2211	/* Allocate Rx bds */
   2212	for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
   2213		u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
   2214		u32 alloc;
   2215
   2216		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
   2217		alloc = round_up(length, align);
   2218		alloc = roundup_pow_of_two(alloc);
   2219
   2220		ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
   2221		if (!ugeth->p_rx_bd_ring[j]) {
   2222			if (netif_msg_ifup(ugeth))
   2223				pr_err("Can not allocate memory for Rx bd rings\n");
   2224			return -ENOMEM;
   2225		}
   2226	}
   2227
   2228	/* Init Rx bds */
   2229	for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
   2230		/* Setup the skbuff rings */
   2231		ugeth->rx_skbuff[j] =
   2232			kcalloc(ugeth->ug_info->bdRingLenRx[j],
   2233				sizeof(struct sk_buff *), GFP_KERNEL);
   2234
   2235		if (ugeth->rx_skbuff[j] == NULL) {
   2236			if (netif_msg_ifup(ugeth))
   2237				pr_err("Could not allocate rx_skbuff\n");
   2238			return -ENOMEM;
   2239		}
   2240
   2241		ugeth->skb_currx[j] = 0;
   2242		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
   2243		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
   2244			/* set bd status and length */
   2245			out_be32((u32 __iomem *)bd, R_I);
   2246			/* clear bd buffer */
   2247			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
   2248			bd += sizeof(struct qe_bd);
   2249		}
   2250		bd -= sizeof(struct qe_bd);
   2251		/* set bd status and length */
   2252		out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
   2253	}
   2254
   2255	return 0;
   2256}
   2257
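/*
 * Bring the controller up: program the MAC registers, allocate the BD
 * rings and the parameter RAM (muram) structures, then issue the QE
 * INIT_TX_RX command that hands them over to the microcode.
 */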
   2258static int ucc_geth_startup(struct ucc_geth_private *ugeth)
   2259{
   2260	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
   2261	struct ucc_geth_init_pram __iomem *p_init_enet_pram;
   2262	struct ucc_fast_private *uccf;
   2263	struct ucc_geth_info *ug_info;
   2264	struct ucc_fast_info *uf_info;
   2265	struct ucc_fast __iomem *uf_regs;
   2266	struct ucc_geth __iomem *ug_regs;
   2267	int ret_val = -EINVAL;
   2268	u32 remoder = UCC_GETH_REMODER_INIT;
   2269	u32 init_enet_pram_offset, cecr_subblock, command;
   2270	u32 ifstat, i, j, size, l2qt, l3qt;
   2271	u16 temoder = UCC_GETH_TEMODER_INIT;
   2272	u8 function_code = 0;
   2273	u8 __iomem *endOfRing;
   2274	u8 numThreadsRxNumerical, numThreadsTxNumerical;
   2275	s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
   2276
   2277	ugeth_vdbg("%s: IN", __func__);
   2278	uccf = ugeth->uccf;
   2279	ug_info = ugeth->ug_info;
   2280	uf_info = &ug_info->uf_info;
   2281	uf_regs = uccf->uf_regs;
   2282	ug_regs = ugeth->ug_regs;
   2283
   2284	numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
   2285	if (!numThreadsRxNumerical) {
   2286		if (netif_msg_ifup(ugeth))
   2287			pr_err("Bad number of Rx threads value\n");
   2288		return -EINVAL;
   2289	}
   2290
   2291	numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
   2292	if (!numThreadsTxNumerical) {
   2293		if (netif_msg_ifup(ugeth))
   2294			pr_err("Bad number of Tx threads value\n");
   2295		return -EINVAL;
   2296	}
   2297
   2298	/* Calculate rx_extended_features */
   2299	ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
   2300	    ug_info->ipAddressAlignment ||
   2301	    (ug_info->numStationAddresses !=
   2302	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
   2303
   2304	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
   2305		(ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
   2306		(ug_info->vlanOperationNonTagged !=
   2307		 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
   2308
   2309	init_default_reg_vals(&uf_regs->upsmr,
   2310			      &ug_regs->maccfg1, &ug_regs->maccfg2);
   2311
   2312	/*                    Set UPSMR                      */
   2313	/* For more details see the hardware spec.           */
   2314	init_rx_parameters(ug_info->bro,
   2315			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
   2316
   2317	/* We're going to ignore other registers for now, */
   2318	/* except as needed to get up and running         */
   2319
   2320	/*                    Set MACCFG1                    */
   2321	/* For more details see the hardware spec.           */
   2322	init_flow_control_params(ug_info->aufc,
   2323				 ug_info->receiveFlowControl,
   2324				 ug_info->transmitFlowControl,
   2325				 ug_info->pausePeriod,
   2326				 ug_info->extensionField,
   2327				 &uf_regs->upsmr,
   2328				 &ug_regs->uempr, &ug_regs->maccfg1);
   2329
   2330	setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
   2331
   2332	/*                    Set IPGIFG                     */
   2333	/* For more details see the hardware spec.           */
	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
					      ug_info->nonBackToBackIfgPart2,
					      ug_info->miminumInterFrameGapEnforcement,
					      ug_info->backToBackInterFrameGap,
					      &ug_regs->ipgifg);
   2340	if (ret_val != 0) {
   2341		if (netif_msg_ifup(ugeth))
   2342			pr_err("IPGIFG initialization parameter too large\n");
   2343		return ret_val;
   2344	}
   2345
   2346	/*                    Set HAFDUP                     */
   2347	/* For more details see the hardware spec.           */
   2348	ret_val = init_half_duplex_params(ug_info->altBeb,
   2349					  ug_info->backPressureNoBackoff,
   2350					  ug_info->noBackoff,
   2351					  ug_info->excessDefer,
   2352					  ug_info->altBebTruncation,
   2353					  ug_info->maxRetransmission,
   2354					  ug_info->collisionWindow,
   2355					  &ug_regs->hafdup);
   2356	if (ret_val != 0) {
   2357		if (netif_msg_ifup(ugeth))
   2358			pr_err("Half Duplex initialization parameter too large\n");
   2359		return ret_val;
   2360	}
   2361
   2362	/*                    Set IFSTAT                     */
   2363	/* For more details see the hardware spec.           */
   2364	/* Read only - resets upon read                      */
   2365	ifstat = in_be32(&ug_regs->ifstat);
   2366
   2367	/*                    Clear UEMPR                    */
   2368	/* For more details see the hardware spec.           */
   2369	out_be32(&ug_regs->uempr, 0);
   2370
   2371	/*                    Set UESCR                      */
   2372	/* For more details see the hardware spec.           */
   2373	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
   2374				UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
   2375				0, &uf_regs->upsmr, &ug_regs->uescr);
   2376
   2377	ret_val = ucc_geth_alloc_tx(ugeth);
   2378	if (ret_val != 0)
   2379		return ret_val;
   2380
   2381	ret_val = ucc_geth_alloc_rx(ugeth);
   2382	if (ret_val != 0)
   2383		return ret_val;
   2384
   2385	/*
   2386	 * Global PRAM
   2387	 */
   2388	/* Tx global PRAM */
   2389	/* Allocate global tx parameter RAM page */
   2390	tx_glbl_pram_offset =
   2391	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
   2392			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
   2393	if (tx_glbl_pram_offset < 0) {
   2394		if (netif_msg_ifup(ugeth))
   2395			pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
   2396		return -ENOMEM;
   2397	}
   2398	ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
   2399	/* Fill global PRAM */
   2400
   2401	/* TQPTR */
   2402	/* Size varies with number of Tx threads */
   2403	ugeth->thread_dat_tx_offset =
   2404	    qe_muram_alloc(numThreadsTxNumerical *
   2405			   sizeof(struct ucc_geth_thread_data_tx) +
   2406			   32 * (numThreadsTxNumerical == 1),
   2407			   UCC_GETH_THREAD_DATA_ALIGNMENT);
   2408	if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
   2409		if (netif_msg_ifup(ugeth))
   2410			pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
   2411		return -ENOMEM;
   2412	}
   2413
   2414	ugeth->p_thread_data_tx =
   2415	    (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
   2416							thread_dat_tx_offset);
   2417	out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
   2418
   2419	/* vtagtable */
   2420	for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
   2421		out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
   2422			 ug_info->vtagtable[i]);
   2423
   2424	/* iphoffset */
   2425	for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
   2426		out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
   2427				ug_info->iphoffset[i]);
   2428
   2429	/* SQPTR */
   2430	/* Size varies with number of Tx queues */
   2431	ugeth->send_q_mem_reg_offset =
   2432	    qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
   2433			   sizeof(struct ucc_geth_send_queue_qd),
   2434			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
   2435	if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
   2436		if (netif_msg_ifup(ugeth))
   2437			pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
   2438		return -ENOMEM;
   2439	}
   2440
   2441	ugeth->p_send_q_mem_reg =
   2442	    (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
   2443			send_q_mem_reg_offset);
   2444	out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
   2445
   2446	/* Setup the table */
   2447	/* Assume BD rings are already established */
   2448	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
   2449		endOfRing =
   2450		    ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
   2451					      1) * sizeof(struct qe_bd);
   2452		out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
   2453			 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
   2454		out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
   2455			 last_bd_completed_address,
   2456			 (u32) virt_to_phys(endOfRing));
   2457	}
   2458
	/* schedulerbasepointer */
	if (ucc_geth_tx_queues(ug_info) > 1) {
		/* scheduler exists only if more than one Tx queue */
   2463		ugeth->scheduler_offset =
   2464		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
   2465				   UCC_GETH_SCHEDULER_ALIGNMENT);
   2466		if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
   2467			if (netif_msg_ifup(ugeth))
   2468				pr_err("Can not allocate DPRAM memory for p_scheduler\n");
   2469			return -ENOMEM;
   2470		}
   2471
   2472		ugeth->p_scheduler =
   2473		    (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
   2474							   scheduler_offset);
   2475		out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
   2476			 ugeth->scheduler_offset);
   2477
   2478		/* Set values in scheduler */
   2479		out_be32(&ugeth->p_scheduler->mblinterval,
   2480			 ug_info->mblinterval);
   2481		out_be16(&ugeth->p_scheduler->nortsrbytetime,
   2482			 ug_info->nortsrbytetime);
   2483		out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
   2484		out_8(&ugeth->p_scheduler->strictpriorityq,
   2485				ug_info->strictpriorityq);
   2486		out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
   2487		out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
   2488		for (i = 0; i < NUM_TX_QUEUES; i++)
   2489			out_8(&ugeth->p_scheduler->weightfactor[i],
   2490			    ug_info->weightfactor[i]);
   2491
   2492		/* Set pointers to cpucount registers in scheduler */
   2493		ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
   2494		ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
   2495		ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
   2496		ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
   2497		ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
   2498		ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
   2499		ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
   2500		ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
   2501	}
   2502
   2504	/* TxRMON_PTR (statistics) */
	if (ug_info->statisticsMode &
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
   2507		ugeth->tx_fw_statistics_pram_offset =
   2508		    qe_muram_alloc(sizeof
   2509				   (struct ucc_geth_tx_firmware_statistics_pram),
   2510				   UCC_GETH_TX_STATISTICS_ALIGNMENT);
   2511		if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
   2512			if (netif_msg_ifup(ugeth))
   2513				pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
   2514			return -ENOMEM;
   2515		}
   2516		ugeth->p_tx_fw_statistics_pram =
   2517		    (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
   2518		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
   2519	}
   2520
   2521	/* temoder */
   2522	/* Already has speed set */
   2523
   2524	if (ucc_geth_tx_queues(ug_info) > 1)
   2525		temoder |= TEMODER_SCHEDULER_ENABLE;
   2526	if (ug_info->ipCheckSumGenerate)
   2527		temoder |= TEMODER_IP_CHECKSUM_GENERATE;
   2528	temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
   2529	out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
   2530
	/* Function code register value to be used later; required for QE */
	function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
   2534
   2535	/* function code register */
   2536	out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
   2537
   2538	/* Rx global PRAM */
   2539	/* Allocate global rx parameter RAM page */
   2540	rx_glbl_pram_offset =
   2541	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
   2542			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
   2543	if (rx_glbl_pram_offset < 0) {
   2544		if (netif_msg_ifup(ugeth))
   2545			pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
   2546		return -ENOMEM;
   2547	}
   2548	ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
   2549	/* Fill global PRAM */
   2550
   2551	/* RQPTR */
   2552	/* Size varies with number of Rx threads */
   2553	ugeth->thread_dat_rx_offset =
   2554	    qe_muram_alloc(numThreadsRxNumerical *
   2555			   sizeof(struct ucc_geth_thread_data_rx),
   2556			   UCC_GETH_THREAD_DATA_ALIGNMENT);
   2557	if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
   2558		if (netif_msg_ifup(ugeth))
   2559			pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
   2560		return -ENOMEM;
   2561	}
   2562
   2563	ugeth->p_thread_data_rx =
   2564	    (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
   2565							thread_dat_rx_offset);
   2566	out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
   2567
   2568	/* typeorlen */
   2569	out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
   2570
   2571	/* rxrmonbaseptr (statistics) */
	if (ug_info->statisticsMode &
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
   2574		ugeth->rx_fw_statistics_pram_offset =
   2575		    qe_muram_alloc(sizeof
   2576				   (struct ucc_geth_rx_firmware_statistics_pram),
   2577				   UCC_GETH_RX_STATISTICS_ALIGNMENT);
   2578		if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
   2579			if (netif_msg_ifup(ugeth))
   2580				pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
   2581			return -ENOMEM;
   2582		}
   2583		ugeth->p_rx_fw_statistics_pram =
   2584		    (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
   2585		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
   2586	}
   2587
   2588	/* intCoalescingPtr */
   2589
   2590	/* Size varies with number of Rx queues */
   2591	ugeth->rx_irq_coalescing_tbl_offset =
   2592	    qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
   2593			   sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
   2594			   + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
   2595	if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
   2596		if (netif_msg_ifup(ugeth))
   2597			pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
   2598		return -ENOMEM;
   2599	}
   2600
   2601	ugeth->p_rx_irq_coalescing_tbl =
   2602	    (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
   2603	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
   2604	out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
   2605		 ugeth->rx_irq_coalescing_tbl_offset);
   2606
   2607	/* Fill interrupt coalescing table */
   2608	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
   2609		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
   2610			 interruptcoalescingmaxvalue,
   2611			 ug_info->interruptcoalescingmaxvalue[i]);
   2612		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
   2613			 interruptcoalescingcounter,
   2614			 ug_info->interruptcoalescingmaxvalue[i]);
   2615	}
   2616
   2617	/* MRBLR */
   2618	init_max_rx_buff_len(uf_info->max_rx_buf_length,
   2619			     &ugeth->p_rx_glbl_pram->mrblr);
   2620	/* MFLR */
   2621	out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
   2622	/* MINFLR */
   2623	init_min_frame_len(ug_info->minFrameLength,
   2624			   &ugeth->p_rx_glbl_pram->minflr,
   2625			   &ugeth->p_rx_glbl_pram->mrblr);
   2626	/* MAXD1 */
   2627	out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
   2628	/* MAXD2 */
   2629	out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
   2630
   2631	/* l2qt */
   2632	l2qt = 0;
   2633	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
   2634		l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
   2635	out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
   2636
   2637	/* l3qt */
   2638	for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
   2639		l3qt = 0;
   2640		for (i = 0; i < 8; i++)
   2641			l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
   2642		out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
   2643	}
   2644
   2645	/* vlantype */
   2646	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
   2647
   2648	/* vlantci */
   2649	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
   2650
   2651	/* ecamptr */
   2652	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
   2653
   2654	/* RBDQPTR */
   2655	/* Size varies with number of Rx queues */
   2656	ugeth->rx_bd_qs_tbl_offset =
   2657	    qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
   2658			   (sizeof(struct ucc_geth_rx_bd_queues_entry) +
   2659			    sizeof(struct ucc_geth_rx_prefetched_bds)),
   2660			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
   2661	if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
   2662		if (netif_msg_ifup(ugeth))
   2663			pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
   2664		return -ENOMEM;
   2665	}
   2666
   2667	ugeth->p_rx_bd_qs_tbl =
   2668	    (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
   2669				    rx_bd_qs_tbl_offset);
   2670	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
   2671
   2672	/* Setup the table */
   2673	/* Assume BD rings are already established */
   2674	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
   2675		out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
   2676			 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
   2677		/* rest of fields handled by QE */
   2678	}
   2679
   2680	/* remoder */
   2681	/* Already has speed set */
   2682
   2683	if (ugeth->rx_extended_features)
   2684		remoder |= REMODER_RX_EXTENDED_FEATURES;
   2685	if (ug_info->rxExtendedFiltering)
   2686		remoder |= REMODER_RX_EXTENDED_FILTERING;
   2687	if (ug_info->dynamicMaxFrameLength)
   2688		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
   2689	if (ug_info->dynamicMinFrameLength)
   2690		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
	remoder |= ug_info->vlanOperationTagged <<
		   REMODER_VLAN_OPERATION_TAGGED_SHIFT;
	remoder |= ug_info->vlanOperationNonTagged <<
		   REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
   2696	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
   2697	remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
   2698	if (ug_info->ipCheckSumCheck)
   2699		remoder |= REMODER_IP_CHECKSUM_CHECK;
   2700	if (ug_info->ipAddressAlignment)
   2701		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
   2702	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
   2703
	/*
	 * Note that this function must be called ONLY AFTER
	 * p_tx_fw_statistics_pram and p_rx_fw_statistics_pram
	 * are allocated!
	 */
   2707	init_firmware_statistics_gathering_mode((ug_info->
   2708		statisticsMode &
   2709		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
   2710		(ug_info->statisticsMode &
   2711		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
   2712		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
   2713		ugeth->tx_fw_statistics_pram_offset,
   2714		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
   2715		ugeth->rx_fw_statistics_pram_offset,
   2716		&ugeth->p_tx_glbl_pram->temoder,
   2717		&ugeth->p_rx_glbl_pram->remoder);
   2718
   2719	/* function code register */
   2720	out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
   2721
   2722	/* initialize extended filtering */
   2723	if (ug_info->rxExtendedFiltering) {
   2724		if (!ug_info->extendedFilteringChainPointer) {
   2725			if (netif_msg_ifup(ugeth))
   2726				pr_err("Null Extended Filtering Chain Pointer\n");
   2727			return -EINVAL;
   2728		}
   2729
		/* Allocate memory for the extended filtering mode global
		   parameters */
   2732		ugeth->exf_glbl_param_offset =
   2733		    qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
   2734		UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
   2735		if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
   2736			if (netif_msg_ifup(ugeth))
   2737				pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
   2738			return -ENOMEM;
   2739		}
   2740
   2741		ugeth->p_exf_glbl_param =
   2742		    (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
   2743				 exf_glbl_param_offset);
   2744		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
   2745			 ugeth->exf_glbl_param_offset);
   2746		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
   2747			 (u32) ug_info->extendedFilteringChainPointer);
   2748
   2749	} else {		/* initialize 82xx style address filtering */
   2750
   2751		/* Init individual address recognition registers to disabled */
   2752
   2753		for (j = 0; j < NUM_OF_PADDRS; j++)
   2754			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
   2755
   2756		p_82xx_addr_filt =
   2757		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
   2758		    p_rx_glbl_pram->addressfiltering;
   2759
   2760		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
   2761			ENET_ADDR_TYPE_GROUP);
   2762		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
   2763			ENET_ADDR_TYPE_INDIVIDUAL);
   2764	}
   2765
   2766	/*
   2767	 * Initialize UCC at QE level
   2768	 */
   2769
   2770	command = QE_INIT_TX_RX;
   2771
   2772	/* Allocate shadow InitEnet command parameter structure.
   2773	 * This is needed because after the InitEnet command is executed,
   2774	 * the structure in DPRAM is released, because DPRAM is a premium
   2775	 * resource.
   2776	 * This shadow structure keeps a copy of what was done so that the
   2777	 * allocated resources can be released when the channel is freed.
   2778	 */
	ugeth->p_init_enet_param_shadow =
	    kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL);
	if (!ugeth->p_init_enet_param_shadow) {
		if (netif_msg_ifup(ugeth))
			pr_err("Can not allocate memory for p_init_enet_param_shadow\n");
		return -ENOMEM;
	}
   2785
   2786	/* Fill shadow InitEnet command parameter structure */
   2787
   2788	ugeth->p_init_enet_param_shadow->resinit1 =
   2789	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
   2790	ugeth->p_init_enet_param_shadow->resinit2 =
   2791	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
   2792	ugeth->p_init_enet_param_shadow->resinit3 =
   2793	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
   2794	ugeth->p_init_enet_param_shadow->resinit4 =
   2795	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
   2796	ugeth->p_init_enet_param_shadow->resinit5 =
   2797	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
   2798	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
   2799	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
   2800	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
   2801	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
   2802
   2803	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
   2804	    rx_glbl_pram_offset | ug_info->riscRx;
   2805	if ((ug_info->largestexternallookupkeysize !=
   2806	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
   2807	    (ug_info->largestexternallookupkeysize !=
   2808	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
   2809	    (ug_info->largestexternallookupkeysize !=
   2810	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
   2811		if (netif_msg_ifup(ugeth))
   2812			pr_err("Invalid largest External Lookup Key Size\n");
   2813		return -EINVAL;
   2814	}
   2815	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
   2816	    ug_info->largestexternallookupkeysize;
   2817	size = sizeof(struct ucc_geth_thread_rx_pram);
   2818	if (ug_info->rxExtendedFiltering) {
   2819		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
   2820		if (ug_info->largestexternallookupkeysize ==
   2821		    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
   2822			size +=
   2823			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
   2824		if (ug_info->largestexternallookupkeysize ==
   2825		    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
   2826			size +=
   2827			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
   2828	}
   2829
	/* Rx needs one extra entry for the terminator */
	ret_val = fill_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->rxthread[0]),
					 (u8) (numThreadsRxNumerical + 1),
					 size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
					 ug_info->riscRx, 1);
	if (ret_val != 0) {
   2836		if (netif_msg_ifup(ugeth))
   2837			pr_err("Can not fill p_init_enet_param_shadow\n");
   2838		return ret_val;
   2839	}
   2840
   2841	ugeth->p_init_enet_param_shadow->txglobal =
   2842	    tx_glbl_pram_offset | ug_info->riscTx;
	ret_val = fill_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->txthread[0]),
					 numThreadsTxNumerical,
					 sizeof(struct ucc_geth_thread_tx_pram),
					 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
					 ug_info->riscTx, 0);
	if (ret_val != 0) {
   2850		if (netif_msg_ifup(ugeth))
   2851			pr_err("Can not fill p_init_enet_param_shadow\n");
   2852		return ret_val;
   2853	}
   2854
   2855	/* Load Rx bds with buffers */
   2856	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
		ret_val = rx_bd_buffer_set(ugeth, (u8) i);
		if (ret_val != 0) {
   2858			if (netif_msg_ifup(ugeth))
   2859				pr_err("Can not fill Rx bds with buffers\n");
   2860			return ret_val;
   2861		}
   2862	}
   2863
   2864	/* Allocate InitEnet command parameter structure */
   2865	init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
   2866	if (IS_ERR_VALUE(init_enet_pram_offset)) {
   2867		if (netif_msg_ifup(ugeth))
   2868			pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
   2869		return -ENOMEM;
   2870	}
   2871	p_init_enet_pram =
   2872	    (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
   2873
   2874	/* Copy shadow InitEnet command parameter structure into PRAM */
   2875	out_8(&p_init_enet_pram->resinit1,
   2876			ugeth->p_init_enet_param_shadow->resinit1);
   2877	out_8(&p_init_enet_pram->resinit2,
   2878			ugeth->p_init_enet_param_shadow->resinit2);
   2879	out_8(&p_init_enet_pram->resinit3,
   2880			ugeth->p_init_enet_param_shadow->resinit3);
   2881	out_8(&p_init_enet_pram->resinit4,
   2882			ugeth->p_init_enet_param_shadow->resinit4);
   2883	out_be16(&p_init_enet_pram->resinit5,
   2884		 ugeth->p_init_enet_param_shadow->resinit5);
   2885	out_8(&p_init_enet_pram->largestexternallookupkeysize,
   2886	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
   2887	out_be32(&p_init_enet_pram->rgftgfrxglobal,
   2888		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
   2889	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
   2890		out_be32(&p_init_enet_pram->rxthread[i],
   2891			 ugeth->p_init_enet_param_shadow->rxthread[i]);
   2892	out_be32(&p_init_enet_pram->txglobal,
   2893		 ugeth->p_init_enet_param_shadow->txglobal);
   2894	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
   2895		out_be32(&p_init_enet_pram->txthread[i],
   2896			 ugeth->p_init_enet_param_shadow->txthread[i]);
   2897
   2898	/* Issue QE command */
   2899	cecr_subblock =
   2900	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
   2901	qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
   2902		     init_enet_pram_offset);
   2903
   2904	/* Free InitEnet command parameter */
   2905	qe_muram_free(init_enet_pram_offset);
   2906
   2907	return 0;
   2908}
   2909
/* This is called by the kernel when a frame is ready for transmission.
 * It is hooked up to the ndo_start_xmit member of the netdev ops.
 */
   2912static netdev_tx_t
   2913ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
   2914{
   2915	struct ucc_geth_private *ugeth = netdev_priv(dev);
   2916#ifdef CONFIG_UGETH_TX_ON_DEMAND
   2917	struct ucc_fast_private *uccf;
   2918#endif
   2919	u8 __iomem *bd;			/* BD pointer */
   2920	u32 bd_status;
   2921	u8 txQ = 0;
   2922	unsigned long flags;
   2923
   2924	ugeth_vdbg("%s: IN", __func__);
   2925
   2926	netdev_sent_queue(dev, skb->len);
   2927	spin_lock_irqsave(&ugeth->lock, flags);
   2928
   2929	dev->stats.tx_bytes += skb->len;
   2930
   2931	/* Start from the next BD that should be filled */
   2932	bd = ugeth->txBd[txQ];
   2933	bd_status = in_be32((u32 __iomem *)bd);
   2934	/* Save the skb pointer so we can free it later */
   2935	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
   2936
   2937	/* Update the current skb pointer (wrapping if this was the last) */
   2938	ugeth->skb_curtx[txQ] =
   2939	    (ugeth->skb_curtx[txQ] +
   2940	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
   2941
   2942	/* set up the buffer descriptor */
   2943	out_be32(&((struct qe_bd __iomem *)bd)->buf,
   2944		      dma_map_single(ugeth->dev, skb->data,
   2945			      skb->len, DMA_TO_DEVICE));
   2946
   2949	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
   2950
   2951	/* set bd status and length */
   2952	out_be32((u32 __iomem *)bd, bd_status);
   2953
   2954	/* Move to next BD in the ring */
   2955	if (!(bd_status & T_W))
   2956		bd += sizeof(struct qe_bd);
   2957	else
   2958		bd = ugeth->p_tx_bd_ring[txQ];
   2959
   2960	/* If the next BD still needs to be cleaned up, then the bds
   2961	   are full.  We need to tell the kernel to stop sending us stuff. */
   2962	if (bd == ugeth->confBd[txQ]) {
   2963		if (!netif_queue_stopped(dev))
   2964			netif_stop_queue(dev);
   2965	}
   2966
   2967	ugeth->txBd[txQ] = bd;
   2968
   2969	skb_tx_timestamp(skb);
   2970
   2971	if (ugeth->p_scheduler) {
   2972		ugeth->cpucount[txQ]++;
		/* Indicate to the QE that more Tx BDs are ready for
		 * transmission by writing a running count of queued BDs
		 * to the scheduler PRAM. */
   2977		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
   2978	}
   2979
   2980#ifdef CONFIG_UGETH_TX_ON_DEMAND
   2981	uccf = ugeth->uccf;
   2982	out_be16(uccf->p_utodr, UCC_FAST_TOD);
   2983#endif
   2984	spin_unlock_irqrestore(&ugeth->lock, flags);
   2985
   2986	return NETDEV_TX_OK;
   2987}
   2988
   2989static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
   2990{
   2991	struct sk_buff *skb;
   2992	u8 __iomem *bd;
   2993	u16 length, howmany = 0;
   2994	u32 bd_status;
   2996	struct net_device *dev;
   2997
   2998	ugeth_vdbg("%s: IN", __func__);
   2999
   3000	dev = ugeth->ndev;
   3001
   3002	/* collect received buffers */
   3003	bd = ugeth->rxBd[rxQ];
   3004
   3005	bd_status = in_be32((u32 __iomem *)bd);
   3006
	/* Process BDs that the hardware has filled (R_E clear) until
	 * an empty BD or the work limit is reached */
	while (!(bd_status & R_E) && --rx_work_limit >= 0) {
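		/* The BD length includes the 4-byte Ethernet FCS;
		 * subtract it to get the frame length */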
   3010		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
   3011		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
   3012
		/* Determine whether the buffer is first, last, both
		 * (single-buffer frame) or middle (neither first nor
		 * last) */
   3015		if (!skb ||
   3016		    (!(bd_status & (R_F | R_L))) ||
   3017		    (bd_status & R_ERRORS_FATAL)) {
   3018			if (netif_msg_rx_err(ugeth))
				pr_err("%d: ERROR!!! skb - %p\n",
				       __LINE__, skb);
   3021			dev_kfree_skb(skb);
   3022
   3023			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
   3024			dev->stats.rx_dropped++;
   3025		} else {
   3026			dev->stats.rx_packets++;
   3027			howmany++;
   3028
   3029			/* Prep the skb for the packet */
   3030			skb_put(skb, length);
   3031
   3032			/* Tell the skb what kind of packet this is */
   3033			skb->protocol = eth_type_trans(skb, ugeth->ndev);
   3034
   3035			dev->stats.rx_bytes += length;
   3036			/* Send the packet up the stack */
   3037			netif_receive_skb(skb);
   3038		}
   3039
   3040		skb = get_new_skb(ugeth, bd);
   3041		if (!skb) {
   3042			if (netif_msg_rx_err(ugeth))
   3043				pr_warn("No Rx Data Buffer\n");
   3044			dev->stats.rx_dropped++;
   3045			break;
   3046		}
   3047
   3048		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
   3049
   3050		/* update to point at the next skb */
   3051		ugeth->skb_currx[rxQ] =
   3052		    (ugeth->skb_currx[rxQ] +
   3053		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
   3054
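		/* R_W marks the last BD in the ring: wrap back to the
		 * ring base, otherwise step to the next BD */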
   3055		if (bd_status & R_W)
   3056			bd = ugeth->p_rx_bd_ring[rxQ];
   3057		else
   3058			bd += sizeof(struct qe_bd);
   3059
   3060		bd_status = in_be32((u32 __iomem *)bd);
   3061	}
   3062
   3063	ugeth->rxBd[rxQ] = bd;
   3064	return howmany;
   3065}
   3066
   3067static int ucc_geth_tx(struct net_device *dev, u8 txQ)
   3068{
	/* Reclaim BDs that the QE has finished transmitting, starting
	 * at the confirmation pointer */
   3070	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3071	unsigned int bytes_sent = 0;
   3072	int howmany = 0;
   3073	u8 __iomem *bd;		/* BD pointer */
   3074	u32 bd_status;
   3075
   3076	bd = ugeth->confBd[txQ];
   3077	bd_status = in_be32((u32 __iomem *)bd);
   3078
   3079	/* Normal processing. */
   3080	while ((bd_status & T_R) == 0) {
   3081		struct sk_buff *skb;
   3082
		/* The BD holds an already-transmitted buffer: free the
		 * buffer and release the BD for reuse */
   3086
   3087		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
   3088		if (!skb)
   3089			break;
   3090		howmany++;
   3091		bytes_sent += skb->len;
   3092		dev->stats.tx_packets++;
   3093
   3094		dev_consume_skb_any(skb);
   3095
   3096		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
   3097		ugeth->skb_dirtytx[txQ] =
   3098		    (ugeth->skb_dirtytx[txQ] +
   3099		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
   3100
   3101		/* We freed a buffer, so now we can restart transmission */
   3102		if (netif_queue_stopped(dev))
   3103			netif_wake_queue(dev);
   3104
   3105		/* Advance the confirmation BD pointer */
   3106		if (!(bd_status & T_W))
   3107			bd += sizeof(struct qe_bd);
   3108		else
   3109			bd = ugeth->p_tx_bd_ring[txQ];
   3110		bd_status = in_be32((u32 __iomem *)bd);
   3111	}
   3112	ugeth->confBd[txQ] = bd;
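	/* Report completed frames and bytes to BQL; this pairs with
	 * netdev_sent_queue() in ucc_geth_start_xmit() */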
   3113	netdev_completed_queue(dev, howmany, bytes_sent);
   3114	return 0;
   3115}
   3116
   3117static int ucc_geth_poll(struct napi_struct *napi, int budget)
   3118{
   3119	struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
   3120	struct ucc_geth_info *ug_info;
   3121	int howmany, i;
   3122
   3123	ug_info = ugeth->ug_info;
   3124
	/* Tx completion; under ugeth->lock because it runs concurrently
	 * with ucc_geth_start_xmit() */
   3126	spin_lock(&ugeth->lock);
   3127	for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
   3128		ucc_geth_tx(ugeth->ndev, i);
   3129	spin_unlock(&ugeth->lock);
   3130
   3131	howmany = 0;
   3132	for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
   3133		howmany += ucc_geth_rx(ugeth, i, budget - howmany);
   3134
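	/* All pending work fit in the budget: leave polling mode and
	 * unmask the RX/TX events that the interrupt handler masked
	 * when scheduling NAPI */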
   3135	if (howmany < budget) {
   3136		napi_complete_done(napi, howmany);
   3137		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
   3138	}
   3139
   3140	return howmany;
   3141}
   3142
   3143static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
   3144{
   3145	struct net_device *dev = info;
   3146	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;
   3151
   3152	ugeth_vdbg("%s: IN", __func__);
   3153
	uccf = ugeth->uccf;
   3156
   3157	/* read and clear events */
	ucce = in_be32(uccf->p_ucce);
	uccm = in_be32(uccf->p_uccm);
   3160	ucce &= uccm;
   3161	out_be32(uccf->p_ucce, ucce);
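	/* UCCE is write-one-to-clear: writing the masked events back
	 * acknowledges exactly the events handled below */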
   3162
	/* check for receive and transmit events that require processing */
   3164	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
   3165		if (napi_schedule_prep(&ugeth->napi)) {
   3166			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
   3167			out_be32(uccf->p_uccm, uccm);
   3168			__napi_schedule(&ugeth->napi);
   3169		}
   3170	}
   3171
   3172	/* Errors and other events */
   3173	if (ucce & UCCE_OTHER) {
   3174		if (ucce & UCC_GETH_UCCE_BSY)
   3175			dev->stats.rx_errors++;
   3176		if (ucce & UCC_GETH_UCCE_TXE)
   3177			dev->stats.tx_errors++;
   3178	}
   3179
   3180	return IRQ_HANDLED;
   3181}
   3182
   3183#ifdef CONFIG_NET_POLL_CONTROLLER
   3184/*
   3185 * Polling 'interrupt' - used by things like netconsole to send skbs
   3186 * without having to re-enable interrupts. It's not called while
   3187 * the interrupt routine is executing.
   3188 */
   3189static void ucc_netpoll(struct net_device *dev)
   3190{
   3191	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3192	int irq = ugeth->ug_info->uf_info.irq;
   3193
   3194	disable_irq(irq);
   3195	ucc_geth_irq_handler(irq, dev);
   3196	enable_irq(irq);
   3197}
   3198#endif /* CONFIG_NET_POLL_CONTROLLER */
   3199
   3200static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
   3201{
   3202	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3203	struct sockaddr *addr = p;
   3204
   3205	if (!is_valid_ether_addr(addr->sa_data))
   3206		return -EADDRNOTAVAIL;
   3207
   3208	eth_hw_addr_set(dev, addr->sa_data);
   3209
   3210	/*
	 * If the device is not running, the MAC address registers will
	 * be set when the device is opened.
   3213	 */
   3214	if (!netif_running(dev))
   3215		return 0;
   3216
   3217	spin_lock_irq(&ugeth->lock);
   3218	init_mac_station_addr_regs(dev->dev_addr[0],
   3219				   dev->dev_addr[1],
   3220				   dev->dev_addr[2],
   3221				   dev->dev_addr[3],
   3222				   dev->dev_addr[4],
   3223				   dev->dev_addr[5],
   3224				   &ugeth->ug_regs->macstnaddr1,
   3225				   &ugeth->ug_regs->macstnaddr2);
   3226	spin_unlock_irq(&ugeth->lock);
   3227
   3228	return 0;
   3229}
   3230
   3231static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
   3232{
   3233	struct net_device *dev = ugeth->ndev;
   3234	int err;
   3235
   3236	err = ucc_struct_init(ugeth);
   3237	if (err) {
   3238		netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
   3239		goto err;
   3240	}
   3241
   3242	err = ucc_geth_startup(ugeth);
   3243	if (err) {
   3244		netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
   3245		goto err;
   3246	}
   3247
   3248	err = adjust_enet_interface(ugeth);
   3249	if (err) {
   3250		netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
   3251		goto err;
   3252	}
   3253
	/* Set MACSTNADDR1 and MACSTNADDR2; see the hardware spec for
	 * details */
   3256	init_mac_station_addr_regs(dev->dev_addr[0],
   3257				   dev->dev_addr[1],
   3258				   dev->dev_addr[2],
   3259				   dev->dev_addr[3],
   3260				   dev->dev_addr[4],
   3261				   dev->dev_addr[5],
   3262				   &ugeth->ug_regs->macstnaddr1,
   3263				   &ugeth->ug_regs->macstnaddr2);
   3264
   3265	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
   3266	if (err) {
   3267		netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
   3268		goto err;
   3269	}
   3270
   3271	return 0;
   3272err:
   3273	ucc_geth_stop(ugeth);
   3274	return err;
   3275}
   3276
/* Called when something needs to use the Ethernet device.
 * Returns 0 on success. */
   3279static int ucc_geth_open(struct net_device *dev)
   3280{
   3281	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3282	int err;
   3283
   3284	ugeth_vdbg("%s: IN", __func__);
   3285
	/* Test station address: the I/G bit must be clear (unicast) */
   3287	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
   3288		netif_err(ugeth, ifup, dev,
   3289			  "Multicast address used for station address - is this what you wanted?\n");
   3290		return -EINVAL;
   3291	}
   3292
   3293	err = init_phy(dev);
   3294	if (err) {
   3295		netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
   3296		return err;
   3297	}
   3298
   3299	err = ucc_geth_init_mac(ugeth);
   3300	if (err) {
   3301		netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
   3302		goto err;
   3303	}
   3304
   3305	err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
   3306			  0, "UCC Geth", dev);
   3307	if (err) {
   3308		netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
   3309		goto err;
   3310	}
   3311
   3312	phy_start(ugeth->phydev);
   3313	napi_enable(&ugeth->napi);
   3314	netdev_reset_queue(dev);
   3315	netif_start_queue(dev);
   3316
   3317	device_set_wakeup_capable(&dev->dev,
   3318			qe_alive_during_sleep() || ugeth->phydev->irq);
   3319	device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
   3320
   3321	return err;
   3322
   3323err:
   3324	ucc_geth_stop(ugeth);
   3325	return err;
   3326}
   3327
/* Stop the kernel queue and halt the controller */
   3329static int ucc_geth_close(struct net_device *dev)
   3330{
   3331	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3332
   3333	ugeth_vdbg("%s: IN", __func__);
   3334
   3335	napi_disable(&ugeth->napi);
   3336
   3337	cancel_work_sync(&ugeth->timeout_work);
   3338	ucc_geth_stop(ugeth);
   3339	phy_disconnect(ugeth->phydev);
   3340	ugeth->phydev = NULL;
   3341
   3342	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
   3343
   3344	netif_stop_queue(dev);
   3345	netdev_reset_queue(dev);
   3346
   3347	return 0;
   3348}
   3349
   3350/* Reopen device. This will reset the MAC and PHY. */
   3351static void ucc_geth_timeout_work(struct work_struct *work)
   3352{
   3353	struct ucc_geth_private *ugeth;
   3354	struct net_device *dev;
   3355
   3356	ugeth = container_of(work, struct ucc_geth_private, timeout_work);
   3357	dev = ugeth->ndev;
   3358
   3359	ugeth_vdbg("%s: IN", __func__);
   3360
   3361	dev->stats.tx_errors++;
   3362
   3363	ugeth_dump_regs(ugeth);
   3364
   3365	if (dev->flags & IFF_UP) {
   3366		/*
   3367		 * Must reset MAC *and* PHY. This is done by reopening
   3368		 * the device.
   3369		 */
   3370		netif_tx_stop_all_queues(dev);
   3371		ucc_geth_stop(ugeth);
   3372		ucc_geth_init_mac(ugeth);
   3373		/* Must start PHY here */
   3374		phy_start(ugeth->phydev);
   3375		netif_tx_start_all_queues(dev);
   3376	}
   3377
   3378	netif_tx_schedule_all(dev);
   3379}
   3380
   3381/*
   3382 * ucc_geth_timeout gets called when a packet has not been
   3383 * transmitted after a set amount of time.
   3384 */
   3385static void ucc_geth_timeout(struct net_device *dev, unsigned int txqueue)
   3386{
   3387	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3388
   3389	schedule_work(&ugeth->timeout_work);
   3390}
   3391
   3392
   3393#ifdef CONFIG_PM
   3394
   3395static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
   3396{
   3397	struct net_device *ndev = platform_get_drvdata(ofdev);
   3398	struct ucc_geth_private *ugeth = netdev_priv(ndev);
   3399
   3400	if (!netif_running(ndev))
   3401		return 0;
   3402
   3403	netif_device_detach(ndev);
   3404	napi_disable(&ugeth->napi);
   3405
   3406	/*
	 * Disable the controller, otherwise we'll wake up on any
	 * network activity.
   3409	 */
   3410	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
   3411
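	/* Wake-on-LAN: for magic-packet wakeup, unmask the magic-packet
	 * detected event (MPD), set the magic-packet enable bit (MPE)
	 * and keep the controller running so the MAC can see the frame;
	 * otherwise stop the PHY unless PHY-based wakeup is requested */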
   3412	if (ugeth->wol_en & WAKE_MAGIC) {
   3413		setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
   3414		setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
   3415		ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
   3416	} else if (!(ugeth->wol_en & WAKE_PHY)) {
   3417		phy_stop(ugeth->phydev);
   3418	}
   3419
   3420	return 0;
   3421}
   3422
   3423static int ucc_geth_resume(struct platform_device *ofdev)
   3424{
   3425	struct net_device *ndev = platform_get_drvdata(ofdev);
   3426	struct ucc_geth_private *ugeth = netdev_priv(ndev);
   3427	int err;
   3428
   3429	if (!netif_running(ndev))
   3430		return 0;
   3431
   3432	if (qe_alive_during_sleep()) {
   3433		if (ugeth->wol_en & WAKE_MAGIC) {
   3434			ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
   3435			clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
   3436			clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
   3437		}
   3438		ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
   3439	} else {
   3440		/*
   3441		 * Full reinitialization is required if QE shuts down
   3442		 * during sleep.
   3443		 */
   3444		ucc_geth_memclean(ugeth);
   3445
   3446		err = ucc_geth_init_mac(ugeth);
   3447		if (err) {
   3448			netdev_err(ndev, "Cannot initialize MAC, aborting\n");
   3449			return err;
   3450		}
   3451	}
   3452
   3453	ugeth->oldlink = 0;
   3454	ugeth->oldspeed = 0;
   3455	ugeth->oldduplex = -1;
   3456
   3457	phy_stop(ugeth->phydev);
   3458	phy_start(ugeth->phydev);
   3459
   3460	napi_enable(&ugeth->napi);
   3461	netif_device_attach(ndev);
   3462
   3463	return 0;
   3464}
   3465
   3466#else
   3467#define ucc_geth_suspend NULL
   3468#define ucc_geth_resume NULL
   3469#endif
   3470
   3471static phy_interface_t to_phy_interface(const char *phy_connection_type)
   3472{
   3473	if (strcasecmp(phy_connection_type, "mii") == 0)
   3474		return PHY_INTERFACE_MODE_MII;
   3475	if (strcasecmp(phy_connection_type, "gmii") == 0)
   3476		return PHY_INTERFACE_MODE_GMII;
   3477	if (strcasecmp(phy_connection_type, "tbi") == 0)
   3478		return PHY_INTERFACE_MODE_TBI;
   3479	if (strcasecmp(phy_connection_type, "rmii") == 0)
   3480		return PHY_INTERFACE_MODE_RMII;
   3481	if (strcasecmp(phy_connection_type, "rgmii") == 0)
   3482		return PHY_INTERFACE_MODE_RGMII;
   3483	if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
   3484		return PHY_INTERFACE_MODE_RGMII_ID;
   3485	if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
   3486		return PHY_INTERFACE_MODE_RGMII_TXID;
   3487	if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
   3488		return PHY_INTERFACE_MODE_RGMII_RXID;
   3489	if (strcasecmp(phy_connection_type, "rtbi") == 0)
   3490		return PHY_INTERFACE_MODE_RTBI;
   3491	if (strcasecmp(phy_connection_type, "sgmii") == 0)
   3492		return PHY_INTERFACE_MODE_SGMII;
   3493
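	/* Unrecognized or missing strings fall back to MII */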
   3494	return PHY_INTERFACE_MODE_MII;
   3495}
   3496
   3497static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
   3498{
   3499	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3500
   3501	if (!netif_running(dev))
   3502		return -EINVAL;
   3503
   3504	if (!ugeth->phydev)
   3505		return -ENODEV;
   3506
   3507	return phy_mii_ioctl(ugeth->phydev, rq, cmd);
   3508}
   3509
   3510static const struct net_device_ops ucc_geth_netdev_ops = {
   3511	.ndo_open		= ucc_geth_open,
   3512	.ndo_stop		= ucc_geth_close,
   3513	.ndo_start_xmit		= ucc_geth_start_xmit,
   3514	.ndo_validate_addr	= eth_validate_addr,
   3515	.ndo_change_carrier     = fixed_phy_change_carrier,
   3516	.ndo_set_mac_address	= ucc_geth_set_mac_addr,
   3517	.ndo_set_rx_mode	= ucc_geth_set_multi,
   3518	.ndo_tx_timeout		= ucc_geth_timeout,
   3519	.ndo_eth_ioctl		= ucc_geth_ioctl,
   3520#ifdef CONFIG_NET_POLL_CONTROLLER
   3521	.ndo_poll_controller	= ucc_netpoll,
   3522#endif
   3523};
   3524
   3525static int ucc_geth_parse_clock(struct device_node *np, const char *which,
   3526				enum qe_clock *out)
   3527{
   3528	const char *sprop;
   3529	char buf[24];
   3530
   3531	snprintf(buf, sizeof(buf), "%s-clock-name", which);
   3532	sprop = of_get_property(np, buf, NULL);
   3533	if (sprop) {
   3534		*out = qe_clock_source(sprop);
   3535	} else {
   3536		u32 val;
   3537
   3538		snprintf(buf, sizeof(buf), "%s-clock", which);
   3539		if (of_property_read_u32(np, buf, &val)) {
   3540			/* If both *-clock-name and *-clock are missing,
   3541			 * we want to tell people to use *-clock-name.
   3542			 */
   3543			pr_err("missing %s-clock-name property\n", buf);
   3544			return -EINVAL;
   3545		}
   3546		*out = val;
   3547	}
   3548	if (*out < QE_CLK_NONE || *out > QE_CLK24) {
   3549		pr_err("invalid %s property\n", buf);
   3550		return -EINVAL;
   3551	}
   3552	return 0;
   3553}
   3554
static int ucc_geth_probe(struct platform_device *ofdev)
   3556{
   3557	struct device *device = &ofdev->dev;
   3558	struct device_node *np = ofdev->dev.of_node;
   3559	struct net_device *dev = NULL;
   3560	struct ucc_geth_private *ugeth = NULL;
   3561	struct ucc_geth_info *ug_info;
   3562	struct resource res;
   3563	int err, ucc_num, max_speed = 0;
   3564	const unsigned int *prop;
   3565	phy_interface_t phy_interface;
   3566	static const int enet_to_speed[] = {
   3567		SPEED_10, SPEED_10, SPEED_10,
   3568		SPEED_100, SPEED_100, SPEED_100,
   3569		SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
   3570	};
   3571	static const phy_interface_t enet_to_phy_interface[] = {
   3572		PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
   3573		PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
   3574		PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
   3575		PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
   3576		PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
   3577		PHY_INTERFACE_MODE_SGMII,
   3578	};
   3579
   3580	ugeth_vdbg("%s: IN", __func__);
   3581
   3582	prop = of_get_property(np, "cell-index", NULL);
   3583	if (!prop) {
   3584		prop = of_get_property(np, "device-id", NULL);
   3585		if (!prop)
   3586			return -ENODEV;
   3587	}
   3588
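	/* cell-index/device-id is 1-based in the device tree; the
	 * driver's UCC numbering is 0-based */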
   3589	ucc_num = *prop - 1;
   3590	if ((ucc_num < 0) || (ucc_num > 7))
   3591		return -ENODEV;
   3592
   3593	ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
   3594	if (ug_info == NULL)
   3595		return -ENOMEM;
   3596
   3597	ug_info->uf_info.ucc_num = ucc_num;
   3598
   3599	err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
   3600	if (err)
   3601		goto err_free_info;
   3602	err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
   3603	if (err)
   3604		goto err_free_info;
   3605
   3606	err = of_address_to_resource(np, 0, &res);
   3607	if (err)
   3608		goto err_free_info;
   3609
   3610	ug_info->uf_info.regs = res.start;
   3611	ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
   3612
   3613	ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
   3614	if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
   3615		/*
   3616		 * In the case of a fixed PHY, the DT node associated
   3617		 * to the PHY is the Ethernet MAC DT node.
   3618		 */
   3619		err = of_phy_register_fixed_link(np);
   3620		if (err)
   3621			goto err_free_info;
   3622		ug_info->phy_node = of_node_get(np);
   3623	}
   3624
   3625	/* Find the TBI PHY node.  If it's not there, we don't support SGMII */
   3626	ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
   3627
   3628	/* get the phy interface type, or default to MII */
   3629	prop = of_get_property(np, "phy-connection-type", NULL);
   3630	if (!prop) {
   3631		/* handle interface property present in old trees */
   3632		prop = of_get_property(ug_info->phy_node, "interface", NULL);
   3633		if (prop != NULL) {
   3634			phy_interface = enet_to_phy_interface[*prop];
   3635			max_speed = enet_to_speed[*prop];
   3636		} else
   3637			phy_interface = PHY_INTERFACE_MODE_MII;
   3638	} else {
   3639		phy_interface = to_phy_interface((const char *)prop);
   3640	}
   3641
   3642	/* get speed, or derive from PHY interface */
   3643	if (max_speed == 0)
   3644		switch (phy_interface) {
   3645		case PHY_INTERFACE_MODE_GMII:
   3646		case PHY_INTERFACE_MODE_RGMII:
   3647		case PHY_INTERFACE_MODE_RGMII_ID:
   3648		case PHY_INTERFACE_MODE_RGMII_RXID:
   3649		case PHY_INTERFACE_MODE_RGMII_TXID:
   3650		case PHY_INTERFACE_MODE_TBI:
   3651		case PHY_INTERFACE_MODE_RTBI:
   3652		case PHY_INTERFACE_MODE_SGMII:
   3653			max_speed = SPEED_1000;
   3654			break;
   3655		default:
   3656			max_speed = SPEED_100;
   3657			break;
   3658		}
   3659
   3660	if (max_speed == SPEED_1000) {
   3661		unsigned int snums = qe_get_num_of_snums();
   3662
   3663		/* configure muram FIFOs for gigabit operation */
   3664		ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
   3665		ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
   3666		ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
   3667		ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
   3668		ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
   3669		ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
   3670		ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
   3671
		/* If the QE has 46 or 76 snums, four UECs must be able to
		 * run at 1000Base-T simultaneously, so allocate more
		 * threads to Rx.
   3675		 */
   3676		if ((snums == 76) || (snums == 46))
   3677			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
   3678		else
   3679			ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
   3680	}
   3681
   3682	if (netif_msg_probe(&debug))
   3683		pr_info("UCC%1d at 0x%8llx (irq = %d)\n",
   3684			ug_info->uf_info.ucc_num + 1,
   3685			(u64)ug_info->uf_info.regs,
   3686			ug_info->uf_info.irq);
   3687
   3688	/* Create an ethernet device instance */
   3689	dev = alloc_etherdev(sizeof(*ugeth));
   3690
   3691	if (dev == NULL) {
   3692		err = -ENOMEM;
   3693		goto err_deregister_fixed_link;
   3694	}
   3695
   3696	ugeth = netdev_priv(dev);
   3697	spin_lock_init(&ugeth->lock);
   3698
	/* Initialize the group and individual address hash filter queues */
   3700	INIT_LIST_HEAD(&ugeth->group_hash_q);
   3701	INIT_LIST_HEAD(&ugeth->ind_hash_q);
   3702
   3703	dev_set_drvdata(device, dev);
   3704
	/* Set dev->base_addr to the UCC register region */
   3706	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
   3707
   3708	SET_NETDEV_DEV(dev, device);
   3709
   3710	/* Fill in the dev structure */
   3711	uec_set_ethtool_ops(dev);
   3712	dev->netdev_ops = &ucc_geth_netdev_ops;
   3713	dev->watchdog_timeo = TX_TIMEOUT;
   3714	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
   3715	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
   3716	dev->mtu = 1500;
   3717	dev->max_mtu = 1518;
   3718
   3719	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
   3720	ugeth->phy_interface = phy_interface;
   3721	ugeth->max_speed = max_speed;
   3722
   3723	/* Carrier starts down, phylib will bring it up */
   3724	netif_carrier_off(dev);
   3725
   3726	err = register_netdev(dev);
   3727	if (err) {
   3728		if (netif_msg_probe(ugeth))
   3729			pr_err("%s: Cannot register net device, aborting\n",
   3730			       dev->name);
   3731		goto err_free_netdev;
   3732	}
   3733
   3734	of_get_ethdev_address(np, dev);
   3735
   3736	ugeth->ug_info = ug_info;
   3737	ugeth->dev = device;
   3738	ugeth->ndev = dev;
   3739	ugeth->node = np;
   3740
   3741	return 0;
   3742
   3743err_free_netdev:
   3744	free_netdev(dev);
   3745err_deregister_fixed_link:
   3746	if (of_phy_is_fixed_link(np))
   3747		of_phy_deregister_fixed_link(np);
   3748	of_node_put(ug_info->tbi_node);
   3749	of_node_put(ug_info->phy_node);
   3750err_free_info:
   3751	kfree(ug_info);
   3752
   3753	return err;
   3754}
   3755
static int ucc_geth_remove(struct platform_device *ofdev)
   3757{
   3758	struct net_device *dev = platform_get_drvdata(ofdev);
   3759	struct ucc_geth_private *ugeth = netdev_priv(dev);
   3760	struct device_node *np = ofdev->dev.of_node;
   3761
   3762	unregister_netdev(dev);
   3763	ucc_geth_memclean(ugeth);
   3764	if (of_phy_is_fixed_link(np))
   3765		of_phy_deregister_fixed_link(np);
   3766	of_node_put(ugeth->ug_info->tbi_node);
   3767	of_node_put(ugeth->ug_info->phy_node);
   3768	kfree(ugeth->ug_info);
   3769	free_netdev(dev);
   3770
   3771	return 0;
   3772}
   3773
   3774static const struct of_device_id ucc_geth_match[] = {
   3775	{
   3776		.type = "network",
   3777		.compatible = "ucc_geth",
   3778	},
   3779	{},
   3780};
   3781
   3782MODULE_DEVICE_TABLE(of, ucc_geth_match);
   3783
   3784static struct platform_driver ucc_geth_driver = {
   3785	.driver = {
   3786		.name = DRV_NAME,
   3787		.of_match_table = ucc_geth_match,
   3788	},
   3789	.probe		= ucc_geth_probe,
   3790	.remove		= ucc_geth_remove,
   3791	.suspend	= ucc_geth_suspend,
   3792	.resume		= ucc_geth_resume,
   3793};
   3794
   3795static int __init ucc_geth_init(void)
   3796{
   3797	if (netif_msg_drv(&debug))
   3798		pr_info(DRV_DESC "\n");
   3799
   3800	return platform_driver_register(&ucc_geth_driver);
   3801}
   3802
   3803static void __exit ucc_geth_exit(void)
   3804{
   3805	platform_driver_unregister(&ucc_geth_driver);
   3806}
   3807
   3808module_init(ucc_geth_init);
   3809module_exit(ucc_geth_exit);
   3810
   3811MODULE_AUTHOR("Freescale Semiconductor, Inc");
   3812MODULE_DESCRIPTION(DRV_DESC);
   3813MODULE_LICENSE("GPL");