cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xgmac.c (57000B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright 2010-2011 Calxeda, Inc.
      4 */
      5#include <linux/module.h>
      6#include <linux/mod_devicetable.h>
      7#include <linux/kernel.h>
      8#include <linux/circ_buf.h>
      9#include <linux/interrupt.h>
     10#include <linux/etherdevice.h>
     11#include <linux/platform_device.h>
     12#include <linux/skbuff.h>
     13#include <linux/ethtool.h>
     14#include <linux/if.h>
     15#include <linux/crc32.h>
     16#include <linux/dma-mapping.h>
     17#include <linux/slab.h>
     18
     19/* XGMAC Register definitions */
     20#define XGMAC_CONTROL		0x00000000	/* MAC Configuration */
     21#define XGMAC_FRAME_FILTER	0x00000004	/* MAC Frame Filter */
     22#define XGMAC_FLOW_CTRL		0x00000018	/* MAC Flow Control */
     23#define XGMAC_VLAN_TAG		0x0000001C	/* VLAN Tags */
     24#define XGMAC_VERSION		0x00000020	/* Version */
     25#define XGMAC_VLAN_INCL		0x00000024	/* VLAN tag for tx frames */
     26#define XGMAC_LPI_CTRL		0x00000028	/* LPI Control and Status */
     27#define XGMAC_LPI_TIMER		0x0000002C	/* LPI Timers Control */
     28#define XGMAC_TX_PACE		0x00000030	/* Transmit Pace and Stretch */
     29#define XGMAC_VLAN_HASH		0x00000034	/* VLAN Hash Table */
     30#define XGMAC_DEBUG		0x00000038	/* Debug */
     31#define XGMAC_INT_STAT		0x0000003C	/* Interrupt and Control */
     32#define XGMAC_ADDR_HIGH(reg)	(0x00000040 + ((reg) * 8))
     33#define XGMAC_ADDR_LOW(reg)	(0x00000044 + ((reg) * 8))
     34#define XGMAC_HASH(n)		(0x00000300 + (n) * 4) /* HASH table regs */
     35#define XGMAC_NUM_HASH		16
     36#define XGMAC_OMR		0x00000400
     37#define XGMAC_REMOTE_WAKE	0x00000700	/* Remote Wake-Up Frm Filter */
     38#define XGMAC_PMT		0x00000704	/* PMT Control and Status */
     39#define XGMAC_MMC_CTRL		0x00000800	/* XGMAC MMC Control */
     40#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
     41#define XGMAC_MMC_INTR_TX	0x00000808	/* Transmit Interrupt */
     42#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
     43#define XGMAC_MMC_INTR_MASK_TX	0x00000810	/* Transmit Interrupt Mask */
     44
     45/* Hardware TX Statistics Counters */
     46#define XGMAC_MMC_TXOCTET_GB_LO	0x00000814
     47#define XGMAC_MMC_TXOCTET_GB_HI	0x00000818
     48#define XGMAC_MMC_TXFRAME_GB_LO	0x0000081C
     49#define XGMAC_MMC_TXFRAME_GB_HI	0x00000820
     50#define XGMAC_MMC_TXBCFRAME_G	0x00000824
     51#define XGMAC_MMC_TXMCFRAME_G	0x0000082C
     52#define XGMAC_MMC_TXUCFRAME_GB	0x00000864
     53#define XGMAC_MMC_TXMCFRAME_GB	0x0000086C
     54#define XGMAC_MMC_TXBCFRAME_GB	0x00000874
     55#define XGMAC_MMC_TXUNDERFLOW	0x0000087C
     56#define XGMAC_MMC_TXOCTET_G_LO	0x00000884
     57#define XGMAC_MMC_TXOCTET_G_HI	0x00000888
     58#define XGMAC_MMC_TXFRAME_G_LO	0x0000088C
     59#define XGMAC_MMC_TXFRAME_G_HI	0x00000890
     60#define XGMAC_MMC_TXPAUSEFRAME	0x00000894
     61#define XGMAC_MMC_TXVLANFRAME	0x0000089C
     62
     63/* Hardware RX Statistics Counters */
     64#define XGMAC_MMC_RXFRAME_GB_LO	0x00000900
     65#define XGMAC_MMC_RXFRAME_GB_HI	0x00000904
     66#define XGMAC_MMC_RXOCTET_GB_LO	0x00000908
     67#define XGMAC_MMC_RXOCTET_GB_HI	0x0000090C
     68#define XGMAC_MMC_RXOCTET_G_LO	0x00000910
     69#define XGMAC_MMC_RXOCTET_G_HI	0x00000914
     70#define XGMAC_MMC_RXBCFRAME_G	0x00000918
     71#define XGMAC_MMC_RXMCFRAME_G	0x00000920
     72#define XGMAC_MMC_RXCRCERR	0x00000928
     73#define XGMAC_MMC_RXRUNT	0x00000930
     74#define XGMAC_MMC_RXJABBER	0x00000934
     75#define XGMAC_MMC_RXUCFRAME_G	0x00000970
     76#define XGMAC_MMC_RXLENGTHERR	0x00000978
     77#define XGMAC_MMC_RXPAUSEFRAME	0x00000988
     78#define XGMAC_MMC_RXOVERFLOW	0x00000990
     79#define XGMAC_MMC_RXVLANFRAME	0x00000998
     80#define XGMAC_MMC_RXWATCHDOG	0x000009a0
     81
     82/* DMA Control and Status Registers */
     83#define XGMAC_DMA_BUS_MODE	0x00000f00	/* Bus Mode */
     84#define XGMAC_DMA_TX_POLL	0x00000f04	/* Transmit Poll Demand */
      85#define XGMAC_DMA_RX_POLL	0x00000f08	/* Receive Poll Demand */
     86#define XGMAC_DMA_RX_BASE_ADDR	0x00000f0c	/* Receive List Base */
     87#define XGMAC_DMA_TX_BASE_ADDR	0x00000f10	/* Transmit List Base */
     88#define XGMAC_DMA_STATUS	0x00000f14	/* Status Register */
     89#define XGMAC_DMA_CONTROL	0x00000f18	/* Ctrl (Operational Mode) */
     90#define XGMAC_DMA_INTR_ENA	0x00000f1c	/* Interrupt Enable */
     91#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20	/* Missed Frame Counter */
     92#define XGMAC_DMA_RI_WDOG_TIMER	0x00000f24	/* RX Intr Watchdog Timer */
     93#define XGMAC_DMA_AXI_BUS	0x00000f28	/* AXI Bus Mode */
     94#define XGMAC_DMA_AXI_STATUS	0x00000f2C	/* AXI Status */
     95#define XGMAC_DMA_HW_FEATURE	0x00000f58	/* Enabled Hardware Features */
     96
     97#define XGMAC_ADDR_AE		0x80000000
     98
     99/* PMT Control and Status */
    100#define XGMAC_PMT_POINTER_RESET	0x80000000
    101#define XGMAC_PMT_GLBL_UNICAST	0x00000200
    102#define XGMAC_PMT_WAKEUP_RX_FRM	0x00000040
    103#define XGMAC_PMT_MAGIC_PKT	0x00000020
    104#define XGMAC_PMT_WAKEUP_FRM_EN	0x00000004
    105#define XGMAC_PMT_MAGIC_PKT_EN	0x00000002
    106#define XGMAC_PMT_POWERDOWN	0x00000001
    107
    108#define XGMAC_CONTROL_SPD	0x40000000	/* Speed control */
    109#define XGMAC_CONTROL_SPD_MASK	0x60000000
    110#define XGMAC_CONTROL_SPD_1G	0x60000000
    111#define XGMAC_CONTROL_SPD_2_5G	0x40000000
    112#define XGMAC_CONTROL_SPD_10G	0x00000000
    113#define XGMAC_CONTROL_SARC	0x10000000	/* Source Addr Insert/Replace */
    114#define XGMAC_CONTROL_SARK_MASK	0x18000000
    115#define XGMAC_CONTROL_CAR	0x04000000	/* CRC Addition/Replacement */
    116#define XGMAC_CONTROL_CAR_MASK	0x06000000
    117#define XGMAC_CONTROL_DP	0x01000000	/* Disable Padding */
    118#define XGMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on rx */
    119#define XGMAC_CONTROL_JD	0x00400000	/* Jabber disable */
    120#define XGMAC_CONTROL_JE	0x00100000	/* Jumbo frame */
    121#define XGMAC_CONTROL_LM	0x00001000	/* Loop-back mode */
    122#define XGMAC_CONTROL_IPC	0x00000400	/* Checksum Offload */
    123#define XGMAC_CONTROL_ACS	0x00000080	/* Automatic Pad/FCS Strip */
    124#define XGMAC_CONTROL_DDIC	0x00000010	/* Disable Deficit Idle Count */
    125#define XGMAC_CONTROL_TE	0x00000008	/* Transmitter Enable */
    126#define XGMAC_CONTROL_RE	0x00000004	/* Receiver Enable */
    127
    128/* XGMAC Frame Filter defines */
    129#define XGMAC_FRAME_FILTER_PR	0x00000001	/* Promiscuous Mode */
    130#define XGMAC_FRAME_FILTER_HUC	0x00000002	/* Hash Unicast */
    131#define XGMAC_FRAME_FILTER_HMC	0x00000004	/* Hash Multicast */
    132#define XGMAC_FRAME_FILTER_DAIF	0x00000008	/* DA Inverse Filtering */
    133#define XGMAC_FRAME_FILTER_PM	0x00000010	/* Pass all multicast */
    134#define XGMAC_FRAME_FILTER_DBF	0x00000020	/* Disable Broadcast frames */
     135#define XGMAC_FRAME_FILTER_SAIF	0x00000100	/* SA Inverse Filtering */
    136#define XGMAC_FRAME_FILTER_SAF	0x00000200	/* Source Address Filter */
    137#define XGMAC_FRAME_FILTER_HPF	0x00000400	/* Hash or perfect Filter */
    138#define XGMAC_FRAME_FILTER_VHF	0x00000800	/* VLAN Hash Filter */
    139#define XGMAC_FRAME_FILTER_VPF	0x00001000	/* VLAN Perfect Filter */
    140#define XGMAC_FRAME_FILTER_RA	0x80000000	/* Receive all mode */
    141
    142/* XGMAC FLOW CTRL defines */
    143#define XGMAC_FLOW_CTRL_PT_MASK	0xffff0000	/* Pause Time Mask */
    144#define XGMAC_FLOW_CTRL_PT_SHIFT	16
    145#define XGMAC_FLOW_CTRL_DZQP	0x00000080	/* Disable Zero-Quanta Phase */
    146#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
    147#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030	/* PLT MASK */
    148#define XGMAC_FLOW_CTRL_UP	0x00000008	/* Unicast Pause Frame Detect */
    149#define XGMAC_FLOW_CTRL_RFE	0x00000004	/* Rx Flow Control Enable */
    150#define XGMAC_FLOW_CTRL_TFE	0x00000002	/* Tx Flow Control Enable */
    151#define XGMAC_FLOW_CTRL_FCB_BPA	0x00000001	/* Flow Control Busy ... */
    152
    153/* XGMAC_INT_STAT reg */
    154#define XGMAC_INT_STAT_PMTIM	0x00800000	/* PMT Interrupt Mask */
    155#define XGMAC_INT_STAT_PMT	0x0080		/* PMT Interrupt Status */
    156#define XGMAC_INT_STAT_LPI	0x0040		/* LPI Interrupt Status */
    157
    158/* DMA Bus Mode register defines */
    159#define DMA_BUS_MODE_SFT_RESET	0x00000001	/* Software Reset */
    160#define DMA_BUS_MODE_DSL_MASK	0x0000007c	/* Descriptor Skip Length */
    161#define DMA_BUS_MODE_DSL_SHIFT	2		/* (in DWORDS) */
    162#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */
    163
    164/* Programmable burst length */
    165#define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
    166#define DMA_BUS_MODE_PBL_SHIFT	8
    167#define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
    168#define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
    169#define DMA_BUS_MODE_RPBL_SHIFT	17
     170#define DMA_BUS_MODE_USP	0x00800000	/* Use Separate PBL */
     171#define DMA_BUS_MODE_8PBL	0x01000000	/* 8x PBL Mode */
     172#define DMA_BUS_MODE_AAL	0x02000000	/* Address-Aligned Beats */
    173
    174/* DMA Bus Mode register defines */
    175#define DMA_BUS_PR_RATIO_MASK	0x0000c000	/* Rx/Tx priority ratio */
    176#define DMA_BUS_PR_RATIO_SHIFT	14
    177#define DMA_BUS_FB		0x00010000	/* Fixed Burst */
    178
    179/* DMA Control register defines */
    180#define DMA_CONTROL_ST		0x00002000	/* Start/Stop Transmission */
    181#define DMA_CONTROL_SR		0x00000002	/* Start/Stop Receive */
    182#define DMA_CONTROL_DFF		0x01000000	/* Disable flush of rx frames */
    183#define DMA_CONTROL_OSF		0x00000004	/* Operate on 2nd tx frame */
    184
    185/* DMA Normal interrupt */
    186#define DMA_INTR_ENA_NIE	0x00010000	/* Normal Summary */
    187#define DMA_INTR_ENA_AIE	0x00008000	/* Abnormal Summary */
    188#define DMA_INTR_ENA_ERE	0x00004000	/* Early Receive */
    189#define DMA_INTR_ENA_FBE	0x00002000	/* Fatal Bus Error */
    190#define DMA_INTR_ENA_ETE	0x00000400	/* Early Transmit */
    191#define DMA_INTR_ENA_RWE	0x00000200	/* Receive Watchdog */
    192#define DMA_INTR_ENA_RSE	0x00000100	/* Receive Stopped */
    193#define DMA_INTR_ENA_RUE	0x00000080	/* Receive Buffer Unavailable */
    194#define DMA_INTR_ENA_RIE	0x00000040	/* Receive Interrupt */
    195#define DMA_INTR_ENA_UNE	0x00000020	/* Tx Underflow */
    196#define DMA_INTR_ENA_OVE	0x00000010	/* Receive Overflow */
    197#define DMA_INTR_ENA_TJE	0x00000008	/* Transmit Jabber */
    198#define DMA_INTR_ENA_TUE	0x00000004	/* Transmit Buffer Unavail */
    199#define DMA_INTR_ENA_TSE	0x00000002	/* Transmit Stopped */
    200#define DMA_INTR_ENA_TIE	0x00000001	/* Transmit Interrupt */
    201
    202#define DMA_INTR_NORMAL		(DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
    203				 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
    204
    205#define DMA_INTR_ABNORMAL	(DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
    206				 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
    207				 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
    208				 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
    209				 DMA_INTR_ENA_TSE)
    210
    211/* DMA default interrupt mask */
    212#define DMA_INTR_DEFAULT_MASK	(DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
    213
    214/* DMA Status register defines */
    215#define DMA_STATUS_GMI		0x08000000	/* MMC interrupt */
    216#define DMA_STATUS_GLI		0x04000000	/* GMAC Line interface int */
    217#define DMA_STATUS_EB_MASK	0x00380000	/* Error Bits Mask */
    218#define DMA_STATUS_EB_TX_ABORT	0x00080000	/* Error Bits - TX Abort */
    219#define DMA_STATUS_EB_RX_ABORT	0x00100000	/* Error Bits - RX Abort */
    220#define DMA_STATUS_TS_MASK	0x00700000	/* Transmit Process State */
    221#define DMA_STATUS_TS_SHIFT	20
    222#define DMA_STATUS_RS_MASK	0x000e0000	/* Receive Process State */
    223#define DMA_STATUS_RS_SHIFT	17
    224#define DMA_STATUS_NIS		0x00010000	/* Normal Interrupt Summary */
    225#define DMA_STATUS_AIS		0x00008000	/* Abnormal Interrupt Summary */
    226#define DMA_STATUS_ERI		0x00004000	/* Early Receive Interrupt */
    227#define DMA_STATUS_FBI		0x00002000	/* Fatal Bus Error Interrupt */
    228#define DMA_STATUS_ETI		0x00000400	/* Early Transmit Interrupt */
    229#define DMA_STATUS_RWT		0x00000200	/* Receive Watchdog Timeout */
    230#define DMA_STATUS_RPS		0x00000100	/* Receive Process Stopped */
    231#define DMA_STATUS_RU		0x00000080	/* Receive Buffer Unavailable */
    232#define DMA_STATUS_RI		0x00000040	/* Receive Interrupt */
    233#define DMA_STATUS_UNF		0x00000020	/* Transmit Underflow */
    234#define DMA_STATUS_OVF		0x00000010	/* Receive Overflow */
    235#define DMA_STATUS_TJT		0x00000008	/* Transmit Jabber Timeout */
    236#define DMA_STATUS_TU		0x00000004	/* Transmit Buffer Unavail */
    237#define DMA_STATUS_TPS		0x00000002	/* Transmit Process Stopped */
    238#define DMA_STATUS_TI		0x00000001	/* Transmit Interrupt */
    239
    240/* Common MAC defines */
    241#define MAC_ENABLE_TX		0x00000008	/* Transmitter Enable */
    242#define MAC_ENABLE_RX		0x00000004	/* Receiver Enable */
    243
    244/* XGMAC Operation Mode Register */
    245#define XGMAC_OMR_TSF		0x00200000	/* TX FIFO Store and Forward */
    246#define XGMAC_OMR_FTF		0x00100000	/* Flush Transmit FIFO */
    247#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
    248#define XGMAC_OMR_TTC_MASK	0x00030000
    249#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
    250#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
    251#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
    252#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
    253#define XGMAC_OMR_EFC		0x00000100	/* Enable Hardware FC */
    254#define XGMAC_OMR_FEF		0x00000080	/* Forward Error Frames */
    255#define XGMAC_OMR_DT		0x00000040	/* Drop TCP/IP csum Errors */
    256#define XGMAC_OMR_RSF		0x00000020	/* RX FIFO Store and Forward */
    257#define XGMAC_OMR_RTC_256	0x00000018	/* RX Threshold Ctrl */
    258#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */
    259
    260/* XGMAC HW Features Register */
    261#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* TX Checksum offload */
    262
    263#define XGMAC_MMC_CTRL_CNT_FRZ	0x00000008
    264
    265/* XGMAC Descriptor Defines */
    266#define MAX_DESC_BUF_SZ		(0x2000 - 8)
    267
    268#define RXDESC_EXT_STATUS	0x00000001
    269#define RXDESC_CRC_ERR		0x00000002
    270#define RXDESC_RX_ERR		0x00000008
    271#define RXDESC_RX_WDOG		0x00000010
    272#define RXDESC_FRAME_TYPE	0x00000020
    273#define RXDESC_GIANT_FRAME	0x00000080
    274#define RXDESC_LAST_SEG		0x00000100
    275#define RXDESC_FIRST_SEG	0x00000200
    276#define RXDESC_VLAN_FRAME	0x00000400
    277#define RXDESC_OVERFLOW_ERR	0x00000800
    278#define RXDESC_LENGTH_ERR	0x00001000
    279#define RXDESC_SA_FILTER_FAIL	0x00002000
    280#define RXDESC_DESCRIPTOR_ERR	0x00004000
    281#define RXDESC_ERROR_SUMMARY	0x00008000
    282#define RXDESC_FRAME_LEN_OFFSET	16
    283#define RXDESC_FRAME_LEN_MASK	0x3fff0000
    284#define RXDESC_DA_FILTER_FAIL	0x40000000
    285
    286#define RXDESC1_END_RING	0x00008000
    287
    288#define RXDESC_IP_PAYLOAD_MASK	0x00000003
    289#define RXDESC_IP_PAYLOAD_UDP	0x00000001
    290#define RXDESC_IP_PAYLOAD_TCP	0x00000002
    291#define RXDESC_IP_PAYLOAD_ICMP	0x00000003
    292#define RXDESC_IP_HEADER_ERR	0x00000008
    293#define RXDESC_IP_PAYLOAD_ERR	0x00000010
    294#define RXDESC_IPV4_PACKET	0x00000040
    295#define RXDESC_IPV6_PACKET	0x00000080
    296#define TXDESC_UNDERFLOW_ERR	0x00000001
    297#define TXDESC_JABBER_TIMEOUT	0x00000002
    298#define TXDESC_LOCAL_FAULT	0x00000004
    299#define TXDESC_REMOTE_FAULT	0x00000008
    300#define TXDESC_VLAN_FRAME	0x00000010
    301#define TXDESC_FRAME_FLUSHED	0x00000020
    302#define TXDESC_IP_HEADER_ERR	0x00000040
    303#define TXDESC_PAYLOAD_CSUM_ERR	0x00000080
    304#define TXDESC_ERROR_SUMMARY	0x00008000
    305#define TXDESC_SA_CTRL_INSERT	0x00040000
    306#define TXDESC_SA_CTRL_REPLACE	0x00080000
    307#define TXDESC_2ND_ADDR_CHAINED	0x00100000
    308#define TXDESC_END_RING		0x00200000
    309#define TXDESC_CSUM_IP		0x00400000
    310#define TXDESC_CSUM_IP_PAYLD	0x00800000
    311#define TXDESC_CSUM_ALL		0x00C00000
    312#define TXDESC_CRC_EN_REPLACE	0x01000000
    313#define TXDESC_CRC_EN_APPEND	0x02000000
    314#define TXDESC_DISABLE_PAD	0x04000000
    315#define TXDESC_FIRST_SEG	0x10000000
    316#define TXDESC_LAST_SEG		0x20000000
    317#define TXDESC_INTERRUPT	0x40000000
    318
    319#define DESC_OWN		0x80000000
    320#define DESC_BUFFER1_SZ_MASK	0x00001fff
    321#define DESC_BUFFER2_SZ_MASK	0x1fff0000
    322#define DESC_BUFFER2_SZ_OFFSET	16
    323
    324struct xgmac_dma_desc {
    325	__le32 flags;
    326	__le32 buf_size;
    327	__le32 buf1_addr;		/* Buffer 1 Address Pointer */
    328	__le32 buf2_addr;		/* Buffer 2 Address Pointer */
    329	__le32 ext_status;
    330	__le32 res[3];
    331};
    332
    333struct xgmac_extra_stats {
    334	/* Transmit errors */
    335	unsigned long tx_jabber;
    336	unsigned long tx_frame_flushed;
    337	unsigned long tx_payload_error;
    338	unsigned long tx_ip_header_error;
    339	unsigned long tx_local_fault;
    340	unsigned long tx_remote_fault;
    341	/* Receive errors */
    342	unsigned long rx_watchdog;
    343	unsigned long rx_da_filter_fail;
    344	unsigned long rx_payload_error;
    345	unsigned long rx_ip_header_error;
    346	/* Tx/Rx IRQ errors */
    347	unsigned long tx_process_stopped;
    348	unsigned long rx_buf_unav;
    349	unsigned long rx_process_stopped;
    350	unsigned long tx_early;
    351	unsigned long fatal_bus_error;
    352};
    353
    354struct xgmac_priv {
    355	struct xgmac_dma_desc *dma_rx;
    356	struct sk_buff **rx_skbuff;
    357	unsigned int rx_tail;
    358	unsigned int rx_head;
    359
    360	struct xgmac_dma_desc *dma_tx;
    361	struct sk_buff **tx_skbuff;
    362	unsigned int tx_head;
    363	unsigned int tx_tail;
    364	int tx_irq_cnt;
    365
    366	void __iomem *base;
    367	unsigned int dma_buf_sz;
    368	dma_addr_t dma_rx_phy;
    369	dma_addr_t dma_tx_phy;
    370
    371	struct net_device *dev;
    372	struct device *device;
    373	struct napi_struct napi;
    374
    375	int max_macs;
    376	struct xgmac_extra_stats xstats;
    377
    378	spinlock_t stats_lock;
    379	int pmt_irq;
    380	char rx_pause;
    381	char tx_pause;
    382	int wolopts;
    383	struct work_struct tx_timeout_work;
    384};
    385
    386/* XGMAC Configuration Settings */
    387#define XGMAC_MAX_MTU		9000
    388#define PAUSE_TIME		0x400
    389
    390#define DMA_RX_RING_SZ		256
    391#define DMA_TX_RING_SZ		128
    392/* minimum number of free TX descriptors required to wake up TX process */
    393#define TX_THRESH		(DMA_TX_RING_SZ/4)
    394
    395/* DMA descriptor ring helpers */
    396#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
    397#define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
    398#define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
    399
    400#define tx_dma_ring_space(p) \
    401	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
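/*
 * Editorial note: a minimal worked example of the ring helpers above,
 * which assume the ring sizes are powers of two so that masking with
 * (size - 1) gives cheap modular wrap-around.  With DMA_TX_RING_SZ = 128:
 * dma_ring_incr(127, 128) = (127 + 1) & 127 = 0, and with head = 5 and
 * tail = 120, dma_ring_space() = CIRC_SPACE(5, 120, 128) = 114 free
 * descriptors.
 */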
    402
    403/* XGMAC Descriptor Access Helpers */
    404static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
    405{
    406	if (buf_sz > MAX_DESC_BUF_SZ)
    407		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
    408			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
    409	else
    410		p->buf_size = cpu_to_le32(buf_sz);
    411}
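/*
 * Editorial note: a worked example of the split performed by
 * desc_set_buf_len() above.  For a 12 KiB buffer (buf_sz = 0x3000),
 * buffer 1 carries MAX_DESC_BUF_SZ = 0x1ff8 bytes and the remaining
 * 0x1008 bytes go into the buffer-2 size field at bit 16, so
 * buf_size = 0x1ff8 | (0x1008 << 16) = 0x10081ff8.
 */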
    412
    413static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
    414{
    415	u32 len = le32_to_cpu(p->buf_size);
    416	return (len & DESC_BUFFER1_SZ_MASK) +
    417		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
    418}
    419
    420static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
    421				     int buf_sz)
    422{
    423	struct xgmac_dma_desc *end = p + ring_size - 1;
    424
    425	memset(p, 0, sizeof(*p) * ring_size);
    426
    427	for (; p <= end; p++)
    428		desc_set_buf_len(p, buf_sz);
    429
    430	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
    431}
    432
    433static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
    434{
    435	memset(p, 0, sizeof(*p) * ring_size);
    436	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
    437}
    438
    439static inline int desc_get_owner(struct xgmac_dma_desc *p)
    440{
    441	return le32_to_cpu(p->flags) & DESC_OWN;
    442}
    443
    444static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
    445{
    446	/* Clear all fields and set the owner */
    447	p->flags = cpu_to_le32(DESC_OWN);
    448}
    449
    450static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
    451{
    452	u32 tmpflags = le32_to_cpu(p->flags);
    453	tmpflags &= TXDESC_END_RING;
    454	tmpflags |= flags | DESC_OWN;
    455	p->flags = cpu_to_le32(tmpflags);
    456}
    457
    458static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
    459{
    460	u32 tmpflags = le32_to_cpu(p->flags);
    461	tmpflags &= TXDESC_END_RING;
    462	p->flags = cpu_to_le32(tmpflags);
    463}
    464
    465static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
    466{
    467	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
    468}
    469
    470static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
    471{
    472	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
    473}
    474
    475static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
    476{
    477	return le32_to_cpu(p->buf1_addr);
    478}
    479
    480static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
    481				     u32 paddr, int len)
    482{
    483	p->buf1_addr = cpu_to_le32(paddr);
    484	if (len > MAX_DESC_BUF_SZ)
    485		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
    486}
    487
    488static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
    489					      u32 paddr, int len)
    490{
    491	desc_set_buf_len(p, len);
    492	desc_set_buf_addr(p, paddr, len);
    493}
    494
    495static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
    496{
    497	u32 data = le32_to_cpu(p->flags);
    498	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
    499	if (data & RXDESC_FRAME_TYPE)
    500		len -= ETH_FCS_LEN;
    501
    502	return len;
    503}
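/*
 * Editorial note: the frame length occupies flags bits 29:16.  For
 * example, flags = 0x004a0320 yields len = (0x004a0320 & 0x3fff0000)
 * >> 16 = 74; RXDESC_FRAME_TYPE (0x20) is also set, so ETH_FCS_LEN (4)
 * is subtracted and 70 bytes are reported.
 */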
    504
    505static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
    506{
    507	int timeout = 1000;
    508	u32 reg = readl(ioaddr + XGMAC_OMR);
    509	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
    510
    511	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
    512		udelay(1);
    513}
    514
    515static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
    516{
    517	struct xgmac_extra_stats *x = &priv->xstats;
    518	u32 status = le32_to_cpu(p->flags);
    519
    520	if (!(status & TXDESC_ERROR_SUMMARY))
    521		return 0;
    522
    523	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
    524	if (status & TXDESC_JABBER_TIMEOUT)
    525		x->tx_jabber++;
    526	if (status & TXDESC_FRAME_FLUSHED)
    527		x->tx_frame_flushed++;
    528	if (status & TXDESC_UNDERFLOW_ERR)
    529		xgmac_dma_flush_tx_fifo(priv->base);
    530	if (status & TXDESC_IP_HEADER_ERR)
    531		x->tx_ip_header_error++;
    532	if (status & TXDESC_LOCAL_FAULT)
    533		x->tx_local_fault++;
    534	if (status & TXDESC_REMOTE_FAULT)
    535		x->tx_remote_fault++;
    536	if (status & TXDESC_PAYLOAD_CSUM_ERR)
    537		x->tx_payload_error++;
    538
    539	return -1;
    540}
    541
    542static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
    543{
    544	struct xgmac_extra_stats *x = &priv->xstats;
    545	int ret = CHECKSUM_UNNECESSARY;
    546	u32 status = le32_to_cpu(p->flags);
    547	u32 ext_status = le32_to_cpu(p->ext_status);
    548
    549	if (status & RXDESC_DA_FILTER_FAIL) {
    550		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
    551		x->rx_da_filter_fail++;
    552		return -1;
    553	}
    554
    555	/* All frames should fit into a single buffer */
    556	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
    557		return -1;
    558
    559	/* Check if packet has checksum already */
    560	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
    561		!(ext_status & RXDESC_IP_PAYLOAD_MASK))
    562		ret = CHECKSUM_NONE;
    563
    564	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
    565		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
    566
    567	if (!(status & RXDESC_ERROR_SUMMARY))
    568		return ret;
    569
    570	/* Handle any errors */
    571	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
    572		RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
    573		return -1;
    574
    575	if (status & RXDESC_EXT_STATUS) {
    576		if (ext_status & RXDESC_IP_HEADER_ERR)
    577			x->rx_ip_header_error++;
    578		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
    579			x->rx_payload_error++;
    580		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
    581			   ext_status);
    582		return CHECKSUM_NONE;
    583	}
    584
    585	return ret;
    586}
    587
    588static inline void xgmac_mac_enable(void __iomem *ioaddr)
    589{
    590	u32 value = readl(ioaddr + XGMAC_CONTROL);
    591	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
    592	writel(value, ioaddr + XGMAC_CONTROL);
    593
    594	value = readl(ioaddr + XGMAC_DMA_CONTROL);
    595	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
    596	writel(value, ioaddr + XGMAC_DMA_CONTROL);
    597}
    598
    599static inline void xgmac_mac_disable(void __iomem *ioaddr)
    600{
    601	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
    602	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
    603	writel(value, ioaddr + XGMAC_DMA_CONTROL);
    604
    605	value = readl(ioaddr + XGMAC_CONTROL);
    606	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
    607	writel(value, ioaddr + XGMAC_CONTROL);
    608}
    609
    610static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
    611			       int num)
    612{
    613	u32 data;
    614
    615	if (addr) {
    616		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
    617		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
    618		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
    619		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
    620	} else {
    621		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
    622		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
    623	}
    624}
    625
    626static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
    627			       int num)
    628{
    629	u32 hi_addr, lo_addr;
    630
    631	/* Read the MAC address from the hardware */
    632	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
    633	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
    634
    635	/* Extract the MAC address from the high and low words */
    636	addr[0] = lo_addr & 0xff;
    637	addr[1] = (lo_addr >> 8) & 0xff;
    638	addr[2] = (lo_addr >> 16) & 0xff;
    639	addr[3] = (lo_addr >> 24) & 0xff;
    640	addr[4] = hi_addr & 0xff;
    641	addr[5] = (hi_addr >> 8) & 0xff;
    642}
    643
    644static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
    645{
    646	u32 reg;
    647	unsigned int flow = 0;
    648
    649	priv->rx_pause = rx;
    650	priv->tx_pause = tx;
    651
    652	if (rx || tx) {
    653		if (rx)
    654			flow |= XGMAC_FLOW_CTRL_RFE;
    655		if (tx)
    656			flow |= XGMAC_FLOW_CTRL_TFE;
    657
    658		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
    659		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
    660
    661		writel(flow, priv->base + XGMAC_FLOW_CTRL);
    662
    663		reg = readl(priv->base + XGMAC_OMR);
    664		reg |= XGMAC_OMR_EFC;
    665		writel(reg, priv->base + XGMAC_OMR);
    666	} else {
    667		writel(0, priv->base + XGMAC_FLOW_CTRL);
    668
    669		reg = readl(priv->base + XGMAC_OMR);
    670		reg &= ~XGMAC_OMR_EFC;
    671		writel(reg, priv->base + XGMAC_OMR);
    672	}
    673
    674	return 0;
    675}
    676
    677static void xgmac_rx_refill(struct xgmac_priv *priv)
    678{
    679	struct xgmac_dma_desc *p;
    680	dma_addr_t paddr;
    681	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
    682
    683	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
    684		int entry = priv->rx_head;
    685		struct sk_buff *skb;
    686
    687		p = priv->dma_rx + entry;
    688
    689		if (priv->rx_skbuff[entry] == NULL) {
    690			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
    691			if (unlikely(skb == NULL))
    692				break;
    693
    694			paddr = dma_map_single(priv->device, skb->data,
    695					       priv->dma_buf_sz - NET_IP_ALIGN,
    696					       DMA_FROM_DEVICE);
    697			if (dma_mapping_error(priv->device, paddr)) {
    698				dev_kfree_skb_any(skb);
    699				break;
    700			}
    701			priv->rx_skbuff[entry] = skb;
    702			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
    703		}
    704
    705		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
    706			priv->rx_head, priv->rx_tail);
    707
    708		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
    709		desc_set_rx_owner(p);
    710	}
    711}
    712
    713/**
    714 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
    715 * @dev: net device structure
    716 * Description:  this function initializes the DMA RX/TX descriptors
    717 * and allocates the socket buffers.
    718 */
    719static int xgmac_dma_desc_rings_init(struct net_device *dev)
    720{
    721	struct xgmac_priv *priv = netdev_priv(dev);
    722	unsigned int bfsize;
    723
    724	/* Set the Buffer size according to the MTU;
    725	 * The total buffer size including any IP offset must be a multiple
    726	 * of 8 bytes.
    727	 */
    728	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
    729
    730	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
    731
    732	priv->rx_skbuff = kcalloc(DMA_RX_RING_SZ, sizeof(struct sk_buff *),
    733				  GFP_KERNEL);
    734	if (!priv->rx_skbuff)
    735		return -ENOMEM;
    736
    737	priv->dma_rx = dma_alloc_coherent(priv->device,
    738					  DMA_RX_RING_SZ *
    739					  sizeof(struct xgmac_dma_desc),
    740					  &priv->dma_rx_phy,
    741					  GFP_KERNEL);
    742	if (!priv->dma_rx)
    743		goto err_dma_rx;
    744
    745	priv->tx_skbuff = kcalloc(DMA_TX_RING_SZ, sizeof(struct sk_buff *),
    746				  GFP_KERNEL);
    747	if (!priv->tx_skbuff)
    748		goto err_tx_skb;
    749
    750	priv->dma_tx = dma_alloc_coherent(priv->device,
    751					  DMA_TX_RING_SZ *
    752					  sizeof(struct xgmac_dma_desc),
    753					  &priv->dma_tx_phy,
    754					  GFP_KERNEL);
    755	if (!priv->dma_tx)
    756		goto err_dma_tx;
    757
    758	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
    759	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
    760	    priv->dma_rx, priv->dma_tx,
    761	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
    762
    763	priv->rx_tail = 0;
    764	priv->rx_head = 0;
    765	priv->dma_buf_sz = bfsize;
    766	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
    767	xgmac_rx_refill(priv);
    768
    769	priv->tx_tail = 0;
    770	priv->tx_head = 0;
    771	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
    772
    773	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
    774	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
    775
    776	return 0;
    777
    778err_dma_tx:
    779	kfree(priv->tx_skbuff);
    780err_tx_skb:
    781	dma_free_coherent(priv->device,
    782			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
    783			  priv->dma_rx, priv->dma_rx_phy);
    784err_dma_rx:
    785	kfree(priv->rx_skbuff);
    786	return -ENOMEM;
    787}
    788
    789static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
    790{
    791	int i;
    792	struct xgmac_dma_desc *p;
    793
    794	if (!priv->rx_skbuff)
    795		return;
    796
    797	for (i = 0; i < DMA_RX_RING_SZ; i++) {
    798		struct sk_buff *skb = priv->rx_skbuff[i];
    799		if (skb == NULL)
    800			continue;
    801
    802		p = priv->dma_rx + i;
    803		dma_unmap_single(priv->device, desc_get_buf_addr(p),
    804				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
    805		dev_kfree_skb_any(skb);
    806		priv->rx_skbuff[i] = NULL;
    807	}
    808}
    809
    810static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
    811{
    812	int i;
    813	struct xgmac_dma_desc *p;
    814
    815	if (!priv->tx_skbuff)
    816		return;
    817
    818	for (i = 0; i < DMA_TX_RING_SZ; i++) {
    819		if (priv->tx_skbuff[i] == NULL)
    820			continue;
    821
    822		p = priv->dma_tx + i;
    823		if (desc_get_tx_fs(p))
    824			dma_unmap_single(priv->device, desc_get_buf_addr(p),
    825					 desc_get_buf_len(p), DMA_TO_DEVICE);
    826		else
    827			dma_unmap_page(priv->device, desc_get_buf_addr(p),
    828				       desc_get_buf_len(p), DMA_TO_DEVICE);
    829
    830		if (desc_get_tx_ls(p))
    831			dev_kfree_skb_any(priv->tx_skbuff[i]);
    832		priv->tx_skbuff[i] = NULL;
    833	}
    834}
    835
    836static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
    837{
    838	/* Release the DMA TX/RX socket buffers */
    839	xgmac_free_rx_skbufs(priv);
    840	xgmac_free_tx_skbufs(priv);
    841
    842	/* Free the consistent memory allocated for descriptor rings */
    843	if (priv->dma_tx) {
    844		dma_free_coherent(priv->device,
    845				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
    846				  priv->dma_tx, priv->dma_tx_phy);
    847		priv->dma_tx = NULL;
    848	}
    849	if (priv->dma_rx) {
    850		dma_free_coherent(priv->device,
    851				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
    852				  priv->dma_rx, priv->dma_rx_phy);
    853		priv->dma_rx = NULL;
    854	}
    855	kfree(priv->rx_skbuff);
    856	priv->rx_skbuff = NULL;
    857	kfree(priv->tx_skbuff);
    858	priv->tx_skbuff = NULL;
    859}
    860
    861/**
    862 * xgmac_tx_complete:
    863 * @priv: private driver structure
    864 * Description: it reclaims resources after transmission completes.
    865 */
    866static void xgmac_tx_complete(struct xgmac_priv *priv)
    867{
    868	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
    869		unsigned int entry = priv->tx_tail;
    870		struct sk_buff *skb = priv->tx_skbuff[entry];
    871		struct xgmac_dma_desc *p = priv->dma_tx + entry;
    872
    873		/* Check if the descriptor is owned by the DMA. */
    874		if (desc_get_owner(p))
    875			break;
    876
    877		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
    878			priv->tx_head, priv->tx_tail);
    879
    880		if (desc_get_tx_fs(p))
    881			dma_unmap_single(priv->device, desc_get_buf_addr(p),
    882					 desc_get_buf_len(p), DMA_TO_DEVICE);
    883		else
    884			dma_unmap_page(priv->device, desc_get_buf_addr(p),
    885				       desc_get_buf_len(p), DMA_TO_DEVICE);
    886
    887		/* Check tx error on the last segment */
    888		if (desc_get_tx_ls(p)) {
    889			desc_get_tx_status(priv, p);
    890			dev_consume_skb_any(skb);
    891		}
    892
    893		priv->tx_skbuff[entry] = NULL;
    894		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
    895	}
    896
    897	/* Ensure tx_tail is visible to xgmac_xmit */
    898	smp_mb();
    899	if (unlikely(netif_queue_stopped(priv->dev) &&
    900	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
    901		netif_wake_queue(priv->dev);
    902}
    903
    904static void xgmac_tx_timeout_work(struct work_struct *work)
    905{
    906	u32 reg, value;
    907	struct xgmac_priv *priv =
    908		container_of(work, struct xgmac_priv, tx_timeout_work);
    909
    910	napi_disable(&priv->napi);
    911
    912	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
    913
    914	netif_tx_lock(priv->dev);
    915
    916	reg = readl(priv->base + XGMAC_DMA_CONTROL);
    917	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
     918	do {	/* wait for the TX process to stop or suspend */
     919		value = readl(priv->base + XGMAC_DMA_STATUS) & DMA_STATUS_TS_MASK;
     920	} while (value && (value != 0x600000));
    921
    922	xgmac_free_tx_skbufs(priv);
    923	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
    924	priv->tx_tail = 0;
    925	priv->tx_head = 0;
    926	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
    927	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
    928
    929	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
    930		priv->base + XGMAC_DMA_STATUS);
    931
    932	netif_tx_unlock(priv->dev);
    933	netif_wake_queue(priv->dev);
    934
    935	napi_enable(&priv->napi);
    936
    937	/* Enable interrupts */
    938	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
    939	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
    940}
    941
    942static int xgmac_hw_init(struct net_device *dev)
    943{
    944	u32 value, ctrl;
    945	int limit;
    946	struct xgmac_priv *priv = netdev_priv(dev);
    947	void __iomem *ioaddr = priv->base;
    948
    949	/* Save the ctrl register value */
    950	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
    951
    952	/* SW reset */
    953	value = DMA_BUS_MODE_SFT_RESET;
    954	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
    955	limit = 15000;
    956	while (limit-- &&
    957		(readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
    958		cpu_relax();
    959	if (limit < 0)
    960		return -EBUSY;
    961
    962	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
    963		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
    964		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
    965	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
    966
    967	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
    968
    969	/* Mask power mgt interrupt */
    970	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
    971
    972	/* XGMAC requires AXI bus init. This is a 'magic number' for now */
    973	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
    974
    975	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
    976		XGMAC_CONTROL_CAR;
    977	if (dev->features & NETIF_F_RXCSUM)
    978		ctrl |= XGMAC_CONTROL_IPC;
    979	writel(ctrl, ioaddr + XGMAC_CONTROL);
    980
    981	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
    982
    983	/* Set the HW DMA mode and the COE */
    984	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
    985		XGMAC_OMR_RTC_256,
    986		ioaddr + XGMAC_OMR);
    987
    988	/* Reset the MMC counters */
    989	writel(1, ioaddr + XGMAC_MMC_CTRL);
    990	return 0;
    991}
    992
    993/**
    994 *  xgmac_open - open entry point of the driver
    995 *  @dev : pointer to the device structure.
    996 *  Description:
    997 *  This function is the open entry point of the driver.
    998 *  Return value:
    999 *  0 on success and an appropriate (-)ve integer as defined in errno.h
   1000 *  file on failure.
   1001 */
   1002static int xgmac_open(struct net_device *dev)
   1003{
   1004	int ret;
   1005	struct xgmac_priv *priv = netdev_priv(dev);
   1006	void __iomem *ioaddr = priv->base;
   1007
    1008	/* Check that the MAC address is valid.  If it's not, fall back
    1009	 * to a randomly generated address. The user may set a fixed
    1010	 * address using the following linux command:
    1011	 *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
   1012	if (!is_valid_ether_addr(dev->dev_addr)) {
   1013		eth_hw_addr_random(dev);
   1014		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
   1015			dev->dev_addr);
   1016	}
   1017
   1018	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
   1019
   1020	/* Initialize the XGMAC and descriptors */
   1021	xgmac_hw_init(dev);
   1022	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
   1023	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
   1024
   1025	ret = xgmac_dma_desc_rings_init(dev);
   1026	if (ret < 0)
   1027		return ret;
   1028
   1029	/* Enable the MAC Rx/Tx */
   1030	xgmac_mac_enable(ioaddr);
   1031
   1032	napi_enable(&priv->napi);
   1033	netif_start_queue(dev);
   1034
   1035	/* Enable interrupts */
   1036	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
   1037	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
   1038
   1039	return 0;
   1040}
   1041
   1042/**
   1043 *  xgmac_stop - close entry point of the driver
   1044 *  @dev : device pointer.
   1045 *  Description:
   1046 *  This is the stop entry point of the driver.
   1047 */
   1048static int xgmac_stop(struct net_device *dev)
   1049{
   1050	struct xgmac_priv *priv = netdev_priv(dev);
   1051
   1052	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
   1053		napi_disable(&priv->napi);
   1054
   1055	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
   1056
   1057	netif_tx_disable(dev);
   1058
   1059	/* Disable the MAC core */
   1060	xgmac_mac_disable(priv->base);
   1061
   1062	/* Release and free the Rx/Tx resources */
   1063	xgmac_free_dma_desc_rings(priv);
   1064
   1065	return 0;
   1066}
   1067
   1068/**
   1069 *  xgmac_xmit:
   1070 *  @skb : the socket buffer
   1071 *  @dev : device pointer
   1072 *  Description : Tx entry point of the driver.
   1073 */
   1074static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
   1075{
   1076	struct xgmac_priv *priv = netdev_priv(dev);
   1077	unsigned int entry;
   1078	int i;
   1079	u32 irq_flag;
   1080	int nfrags = skb_shinfo(skb)->nr_frags;
   1081	struct xgmac_dma_desc *desc, *first;
   1082	unsigned int desc_flags;
   1083	unsigned int len;
   1084	dma_addr_t paddr;
   1085
   1086	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
   1087	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
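	/*
	 * Editorial note: this coalesces TX completion interrupts.  With
	 * DMA_TX_RING_SZ = 128 the counter wraps modulo 32, so only every
	 * 32nd frame sets TXDESC_INTERRUPT; the remaining completions are
	 * reaped opportunistically from the NAPI poll loop.
	 */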
   1088
   1089	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
   1090		TXDESC_CSUM_ALL : 0;
   1091	entry = priv->tx_head;
   1092	desc = priv->dma_tx + entry;
   1093	first = desc;
   1094
   1095	len = skb_headlen(skb);
   1096	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
   1097	if (dma_mapping_error(priv->device, paddr)) {
   1098		dev_kfree_skb_any(skb);
   1099		return NETDEV_TX_OK;
   1100	}
   1101	priv->tx_skbuff[entry] = skb;
   1102	desc_set_buf_addr_and_size(desc, paddr, len);
   1103
   1104	for (i = 0; i < nfrags; i++) {
   1105		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   1106
   1107		len = skb_frag_size(frag);
   1108
   1109		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
   1110					 DMA_TO_DEVICE);
   1111		if (dma_mapping_error(priv->device, paddr))
   1112			goto dma_err;
   1113
   1114		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
   1115		desc = priv->dma_tx + entry;
   1116		priv->tx_skbuff[entry] = skb;
   1117
   1118		desc_set_buf_addr_and_size(desc, paddr, len);
   1119		if (i < (nfrags - 1))
   1120			desc_set_tx_owner(desc, desc_flags);
   1121	}
   1122
    1123	/* Interrupt on completion only for the last segment */
   1124	if (desc != first)
   1125		desc_set_tx_owner(desc, desc_flags |
   1126			TXDESC_LAST_SEG | irq_flag);
   1127	else
   1128		desc_flags |= TXDESC_LAST_SEG | irq_flag;
   1129
   1130	/* Set owner on first desc last to avoid race condition */
   1131	wmb();
   1132	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
   1133
   1134	writel(1, priv->base + XGMAC_DMA_TX_POLL);
   1135
   1136	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
   1137
   1138	/* Ensure tx_head update is visible to tx completion */
   1139	smp_mb();
   1140	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
   1141		netif_stop_queue(dev);
   1142		/* Ensure netif_stop_queue is visible to tx completion */
   1143		smp_mb();
   1144		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
   1145			netif_start_queue(dev);
   1146	}
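	/*
	 * Editorial note: the stop-then-recheck sequence above is the
	 * standard defence against a lost wake-up: xgmac_tx_complete() may
	 * free descriptors between the space check and netif_stop_queue(),
	 * so after stopping we re-test the space (ordered by the smp_mb()
	 * barriers on both sides) and restart the queue ourselves if the
	 * completion path raced with us.
	 */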
   1147	return NETDEV_TX_OK;
   1148
   1149dma_err:
   1150	entry = priv->tx_head;
   1151	for ( ; i > 0; i--) {
   1152		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
   1153		desc = priv->dma_tx + entry;
   1154		priv->tx_skbuff[entry] = NULL;
   1155		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
   1156			       desc_get_buf_len(desc), DMA_TO_DEVICE);
   1157		desc_clear_tx_owner(desc);
   1158	}
   1159	desc = first;
   1160	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
   1161			 desc_get_buf_len(desc), DMA_TO_DEVICE);
   1162	dev_kfree_skb_any(skb);
   1163	return NETDEV_TX_OK;
   1164}
   1165
   1166static int xgmac_rx(struct xgmac_priv *priv, int limit)
   1167{
   1168	unsigned int entry;
   1169	unsigned int count = 0;
   1170	struct xgmac_dma_desc *p;
   1171
   1172	while (count < limit) {
   1173		int ip_checksum;
   1174		struct sk_buff *skb;
   1175		int frame_len;
   1176
   1177		if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
   1178			break;
   1179
   1180		entry = priv->rx_tail;
   1181		p = priv->dma_rx + entry;
   1182		if (desc_get_owner(p))
   1183			break;
   1184
   1185		count++;
   1186		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
   1187
   1188		/* read the status of the incoming frame */
   1189		ip_checksum = desc_get_rx_status(priv, p);
   1190		if (ip_checksum < 0)
   1191			continue;
   1192
   1193		skb = priv->rx_skbuff[entry];
   1194		if (unlikely(!skb)) {
   1195			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
   1196			break;
   1197		}
   1198		priv->rx_skbuff[entry] = NULL;
   1199
   1200		frame_len = desc_get_rx_frame_len(p);
   1201		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
   1202			frame_len, ip_checksum);
   1203
   1204		skb_put(skb, frame_len);
   1205		dma_unmap_single(priv->device, desc_get_buf_addr(p),
   1206				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
   1207
   1208		skb->protocol = eth_type_trans(skb, priv->dev);
   1209		skb->ip_summed = ip_checksum;
   1210		if (ip_checksum == CHECKSUM_NONE)
   1211			netif_receive_skb(skb);
   1212		else
   1213			napi_gro_receive(&priv->napi, skb);
   1214	}
   1215
   1216	xgmac_rx_refill(priv);
   1217
   1218	return count;
   1219}
   1220
   1221/**
   1222 *  xgmac_poll - xgmac poll method (NAPI)
   1223 *  @napi : pointer to the napi structure.
   1224 *  @budget : maximum number of packets that the current CPU can receive from
   1225 *	      all interfaces.
   1226 *  Description :
   1227 *   This function implements the reception process.
   1228 *   Also it runs the TX completion thread
   1229 */
   1230static int xgmac_poll(struct napi_struct *napi, int budget)
   1231{
   1232	struct xgmac_priv *priv = container_of(napi,
   1233				       struct xgmac_priv, napi);
   1234	int work_done = 0;
   1235
   1236	xgmac_tx_complete(priv);
   1237	work_done = xgmac_rx(priv, budget);
   1238
   1239	if (work_done < budget) {
   1240		napi_complete_done(napi, work_done);
   1241		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
   1242	}
   1243	return work_done;
   1244}
   1245
   1246/**
   1247 *  xgmac_tx_timeout
   1248 *  @dev : Pointer to net device structure
   1249 *  @txqueue: index of the hung transmit queue
   1250 *
   1251 *  Description: this function is called when a packet transmission fails to
    1252 *   complete within a reasonable time. The driver will mark the error in the
   1253 *   netdev structure and arrange for the device to be reset to a sane state
   1254 *   in order to transmit a new packet.
   1255 */
   1256static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
   1257{
   1258	struct xgmac_priv *priv = netdev_priv(dev);
   1259	schedule_work(&priv->tx_timeout_work);
   1260}
   1261
   1262/**
   1263 *  xgmac_set_rx_mode - entry point for multicast addressing
   1264 *  @dev : pointer to the device structure
   1265 *  Description:
   1266 *  This function is a driver entry point which gets called by the kernel
   1267 *  whenever multicast addresses must be enabled/disabled.
   1268 *  Return value:
   1269 *  void.
   1270 */
   1271static void xgmac_set_rx_mode(struct net_device *dev)
   1272{
   1273	int i;
   1274	struct xgmac_priv *priv = netdev_priv(dev);
   1275	void __iomem *ioaddr = priv->base;
   1276	unsigned int value = 0;
   1277	u32 hash_filter[XGMAC_NUM_HASH];
   1278	int reg = 1;
   1279	struct netdev_hw_addr *ha;
   1280	bool use_hash = false;
   1281
   1282	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
   1283		 netdev_mc_count(dev), netdev_uc_count(dev));
   1284
   1285	if (dev->flags & IFF_PROMISC)
   1286		value |= XGMAC_FRAME_FILTER_PR;
   1287
   1288	memset(hash_filter, 0, sizeof(hash_filter));
   1289
   1290	if (netdev_uc_count(dev) > priv->max_macs) {
   1291		use_hash = true;
   1292		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
   1293	}
   1294	netdev_for_each_uc_addr(ha, dev) {
   1295		if (use_hash) {
   1296			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
   1297
    1298			/* The most significant 4 bits select one of the 16
    1299			 * hash registers while the lower 5 bits select the
    1300			 * bit within that register. */
   1301			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
   1302		} else {
   1303			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
   1304			reg++;
   1305		}
   1306	}
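	/*
	 * Editorial note: a worked example of the hash placement above,
	 * assuming a CRC that yields bit_nr = 0x153 (9 bits used).  Then
	 * bit_nr >> 5 = 10 selects hash_filter[10] and bit_nr & 31 = 19
	 * selects bit 19, i.e. hash_filter[10] |= 1 << 19.
	 */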
   1307
   1308	if (dev->flags & IFF_ALLMULTI) {
   1309		value |= XGMAC_FRAME_FILTER_PM;
   1310		goto out;
   1311	}
   1312
   1313	if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
   1314		use_hash = true;
   1315		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
   1316	} else {
   1317		use_hash = false;
   1318	}
   1319	netdev_for_each_mc_addr(ha, dev) {
   1320		if (use_hash) {
   1321			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
   1322
    1323			/* The most significant 4 bits select one of the 16
    1324			 * hash registers while the lower 5 bits select the
    1325			 * bit within that register. */
   1326			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
   1327		} else {
   1328			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
   1329			reg++;
   1330		}
   1331	}
   1332
   1333out:
   1334	for (i = reg; i <= priv->max_macs; i++)
   1335		xgmac_set_mac_addr(ioaddr, NULL, i);
   1336	for (i = 0; i < XGMAC_NUM_HASH; i++)
   1337		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
   1338
   1339	writel(value, ioaddr + XGMAC_FRAME_FILTER);
   1340}
   1341
   1342/**
   1343 *  xgmac_change_mtu - entry point to change MTU size for the device.
   1344 *  @dev : device pointer.
   1345 *  @new_mtu : the new MTU size for the device.
   1346 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
   1347 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
   1348 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
   1349 *  Return value:
   1350 *  0 on success and an appropriate (-)ve integer as defined in errno.h
   1351 *  file on failure.
   1352 */
   1353static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
   1354{
   1355	/* Stop everything, get ready to change the MTU */
   1356	if (!netif_running(dev))
   1357		return 0;
   1358
   1359	/* Bring interface down, change mtu and bring interface back up */
   1360	xgmac_stop(dev);
   1361	dev->mtu = new_mtu;
   1362	return xgmac_open(dev);
   1363}
   1364
   1365static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
   1366{
   1367	u32 intr_status;
   1368	struct net_device *dev = (struct net_device *)dev_id;
   1369	struct xgmac_priv *priv = netdev_priv(dev);
   1370	void __iomem *ioaddr = priv->base;
   1371
   1372	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
   1373	if (intr_status & XGMAC_INT_STAT_PMT) {
   1374		netdev_dbg(priv->dev, "received Magic frame\n");
   1375		/* clear the PMT bits 5 and 6 by reading the PMT */
   1376		readl(ioaddr + XGMAC_PMT);
   1377	}
   1378	return IRQ_HANDLED;
   1379}
   1380
   1381static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
   1382{
   1383	u32 intr_status;
   1384	struct net_device *dev = (struct net_device *)dev_id;
   1385	struct xgmac_priv *priv = netdev_priv(dev);
   1386	struct xgmac_extra_stats *x = &priv->xstats;
   1387
   1388	/* read the status register (CSR5) */
   1389	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
   1390	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
   1391	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
   1392
    1393	/* The DMA status register (CSR5) reports the DMA process states */
   1394	/* ABNORMAL interrupts */
   1395	if (unlikely(intr_status & DMA_STATUS_AIS)) {
   1396		if (intr_status & DMA_STATUS_TJT) {
   1397			netdev_err(priv->dev, "transmit jabber\n");
   1398			x->tx_jabber++;
   1399		}
   1400		if (intr_status & DMA_STATUS_RU)
   1401			x->rx_buf_unav++;
   1402		if (intr_status & DMA_STATUS_RPS) {
   1403			netdev_err(priv->dev, "receive process stopped\n");
   1404			x->rx_process_stopped++;
   1405		}
   1406		if (intr_status & DMA_STATUS_ETI) {
   1407			netdev_err(priv->dev, "transmit early interrupt\n");
   1408			x->tx_early++;
   1409		}
   1410		if (intr_status & DMA_STATUS_TPS) {
   1411			netdev_err(priv->dev, "transmit process stopped\n");
   1412			x->tx_process_stopped++;
   1413			schedule_work(&priv->tx_timeout_work);
   1414		}
   1415		if (intr_status & DMA_STATUS_FBI) {
   1416			netdev_err(priv->dev, "fatal bus error\n");
   1417			x->fatal_bus_error++;
   1418		}
   1419	}
   1420
   1421	/* TX/RX NORMAL interrupts */
   1422	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
   1423		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
   1424		napi_schedule(&priv->napi);
   1425	}
   1426
   1427	return IRQ_HANDLED;
   1428}
   1429
   1430#ifdef CONFIG_NET_POLL_CONTROLLER
   1431/* Polling receive - used by NETCONSOLE and other diagnostic tools
   1432 * to allow network I/O with interrupts disabled. */
   1433static void xgmac_poll_controller(struct net_device *dev)
   1434{
   1435	disable_irq(dev->irq);
   1436	xgmac_interrupt(dev->irq, dev);
   1437	enable_irq(dev->irq);
   1438}
   1439#endif
   1440
   1441static void
   1442xgmac_get_stats64(struct net_device *dev,
   1443		  struct rtnl_link_stats64 *storage)
   1444{
   1445	struct xgmac_priv *priv = netdev_priv(dev);
   1446	void __iomem *base = priv->base;
   1447	u32 count;
   1448
   1449	spin_lock_bh(&priv->stats_lock);
   1450	writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
   1451
   1452	storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
   1453	storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
   1454
   1455	storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
   1456	storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
   1457	storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
   1458	storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
   1459	storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
   1460
   1461	storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
   1462	storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
   1463
   1464	count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
   1465	storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
   1466	storage->tx_packets = count;
   1467	storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
   1468
   1469	writel(0, base + XGMAC_MMC_CTRL);
   1470	spin_unlock_bh(&priv->stats_lock);
   1471}
   1472
   1473static int xgmac_set_mac_address(struct net_device *dev, void *p)
   1474{
   1475	struct xgmac_priv *priv = netdev_priv(dev);
   1476	void __iomem *ioaddr = priv->base;
   1477	struct sockaddr *addr = p;
   1478
   1479	if (!is_valid_ether_addr(addr->sa_data))
   1480		return -EADDRNOTAVAIL;
   1481
   1482	eth_hw_addr_set(dev, addr->sa_data);
   1483
   1484	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
   1485
   1486	return 0;
   1487}
   1488
   1489static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
   1490{
   1491	u32 ctrl;
   1492	struct xgmac_priv *priv = netdev_priv(dev);
   1493	void __iomem *ioaddr = priv->base;
   1494	netdev_features_t changed = dev->features ^ features;
   1495
   1496	if (!(changed & NETIF_F_RXCSUM))
   1497		return 0;
   1498
   1499	ctrl = readl(ioaddr + XGMAC_CONTROL);
   1500	if (features & NETIF_F_RXCSUM)
   1501		ctrl |= XGMAC_CONTROL_IPC;
   1502	else
   1503		ctrl &= ~XGMAC_CONTROL_IPC;
   1504	writel(ctrl, ioaddr + XGMAC_CONTROL);
   1505
   1506	return 0;
   1507}
   1508
   1509static const struct net_device_ops xgmac_netdev_ops = {
   1510	.ndo_open = xgmac_open,
   1511	.ndo_start_xmit = xgmac_xmit,
   1512	.ndo_stop = xgmac_stop,
   1513	.ndo_change_mtu = xgmac_change_mtu,
   1514	.ndo_set_rx_mode = xgmac_set_rx_mode,
   1515	.ndo_tx_timeout = xgmac_tx_timeout,
   1516	.ndo_get_stats64 = xgmac_get_stats64,
   1517#ifdef CONFIG_NET_POLL_CONTROLLER
   1518	.ndo_poll_controller = xgmac_poll_controller,
   1519#endif
   1520	.ndo_set_mac_address = xgmac_set_mac_address,
   1521	.ndo_set_features = xgmac_set_features,
   1522};
   1523
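        /* There is no PHY or autonegotiation on this MAC; the link is a
         * fixed full-duplex 10G attachment, so report exactly that and leave
         * the supported/advertised mode masks empty.
         */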
   1524static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
   1525					    struct ethtool_link_ksettings *cmd)
   1526{
    1527	cmd->base.autoneg = AUTONEG_DISABLE;
    1528	cmd->base.duplex = DUPLEX_FULL;
    1529	cmd->base.speed = SPEED_10000;
   1530	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0);
   1531	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0);
   1532	return 0;
   1533}
   1534
   1535static void xgmac_get_pauseparam(struct net_device *netdev,
   1536				      struct ethtool_pauseparam *pause)
   1537{
   1538	struct xgmac_priv *priv = netdev_priv(netdev);
   1539
   1540	pause->rx_pause = priv->rx_pause;
   1541	pause->tx_pause = priv->tx_pause;
   1542}
   1543
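        /* Pause frames are purely a software policy decision here; with no
         * autonegotiation on the fixed link, autoneg'd pause is rejected and
         * the requested settings are programmed directly.
         */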
   1544static int xgmac_set_pauseparam(struct net_device *netdev,
   1545				     struct ethtool_pauseparam *pause)
   1546{
   1547	struct xgmac_priv *priv = netdev_priv(netdev);
   1548
   1549	if (pause->autoneg)
   1550		return -EINVAL;
   1551
   1552	return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
   1553}
   1554
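        /* ethtool statistics come from two places: software counters kept in
         * struct xgmac_priv (stat_offset is then an offset into that
         * structure) and, when is_reg is set, hardware MMC registers
         * (stat_offset is then a register offset from priv->base).
         */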
   1555struct xgmac_stats {
   1556	char stat_string[ETH_GSTRING_LEN];
   1557	int stat_offset;
   1558	bool is_reg;
   1559};
   1560
   1561#define XGMAC_STAT(m)	\
   1562	{ #m, offsetof(struct xgmac_priv, xstats.m), false }
   1563#define XGMAC_HW_STAT(m, reg_offset)	\
   1564	{ #m, reg_offset, true }
   1565
   1566static const struct xgmac_stats xgmac_gstrings_stats[] = {
   1567	XGMAC_STAT(tx_frame_flushed),
   1568	XGMAC_STAT(tx_payload_error),
   1569	XGMAC_STAT(tx_ip_header_error),
   1570	XGMAC_STAT(tx_local_fault),
   1571	XGMAC_STAT(tx_remote_fault),
   1572	XGMAC_STAT(tx_early),
   1573	XGMAC_STAT(tx_process_stopped),
   1574	XGMAC_STAT(tx_jabber),
   1575	XGMAC_STAT(rx_buf_unav),
   1576	XGMAC_STAT(rx_process_stopped),
   1577	XGMAC_STAT(rx_payload_error),
   1578	XGMAC_STAT(rx_ip_header_error),
   1579	XGMAC_STAT(rx_da_filter_fail),
   1580	XGMAC_STAT(fatal_bus_error),
   1581	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
   1582	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
   1583	XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
   1584	XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
   1585	XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
   1586};
   1587#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
   1588
   1589static void xgmac_get_ethtool_stats(struct net_device *dev,
   1590					 struct ethtool_stats *dummy,
   1591					 u64 *data)
   1592{
   1593	struct xgmac_priv *priv = netdev_priv(dev);
   1594	void *p = priv;
   1595	int i;
   1596
   1597	for (i = 0; i < XGMAC_STATS_LEN; i++) {
   1598		if (xgmac_gstrings_stats[i].is_reg)
   1599			*data++ = readl(priv->base +
   1600				xgmac_gstrings_stats[i].stat_offset);
   1601		else
   1602			*data++ = *(u32 *)(p +
   1603				xgmac_gstrings_stats[i].stat_offset);
   1604	}
   1605}
   1606
   1607static int xgmac_get_sset_count(struct net_device *netdev, int sset)
   1608{
   1609	switch (sset) {
   1610	case ETH_SS_STATS:
   1611		return XGMAC_STATS_LEN;
   1612	default:
   1613		return -EINVAL;
   1614	}
   1615}
   1616
   1617static void xgmac_get_strings(struct net_device *dev, u32 stringset,
   1618				   u8 *data)
   1619{
   1620	int i;
   1621	u8 *p = data;
   1622
   1623	switch (stringset) {
   1624	case ETH_SS_STATS:
   1625		for (i = 0; i < XGMAC_STATS_LEN; i++) {
   1626			memcpy(p, xgmac_gstrings_stats[i].stat_string,
   1627			       ETH_GSTRING_LEN);
   1628			p += ETH_GSTRING_LEN;
   1629		}
   1630		break;
   1631	default:
   1632		WARN_ON(1);
   1633		break;
   1634	}
   1635}
   1636
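        /* Wake-on-LAN: the PMT block can match magic packets and global
         * unicast frames, so WAKE_MAGIC and WAKE_UCAST are the only modes
         * advertised.
         */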
   1637static void xgmac_get_wol(struct net_device *dev,
   1638			       struct ethtool_wolinfo *wol)
   1639{
   1640	struct xgmac_priv *priv = netdev_priv(dev);
   1641
   1642	if (device_can_wakeup(priv->device)) {
   1643		wol->supported = WAKE_MAGIC | WAKE_UCAST;
   1644		wol->wolopts = priv->wolopts;
   1645	}
   1646}
   1647
   1648static int xgmac_set_wol(struct net_device *dev,
   1649			      struct ethtool_wolinfo *wol)
   1650{
   1651	struct xgmac_priv *priv = netdev_priv(dev);
   1652	u32 support = WAKE_MAGIC | WAKE_UCAST;
   1653
   1654	if (!device_can_wakeup(priv->device))
    1655		return -EOPNOTSUPP;
   1656
   1657	if (wol->wolopts & ~support)
   1658		return -EINVAL;
   1659
   1660	priv->wolopts = wol->wolopts;
   1661
   1662	if (wol->wolopts) {
   1663		device_set_wakeup_enable(priv->device, 1);
   1664		enable_irq_wake(dev->irq);
   1665	} else {
   1666		device_set_wakeup_enable(priv->device, 0);
   1667		disable_irq_wake(dev->irq);
   1668	}
   1669
   1670	return 0;
   1671}
   1672
   1673static const struct ethtool_ops xgmac_ethtool_ops = {
   1674	.get_link = ethtool_op_get_link,
   1675	.get_pauseparam = xgmac_get_pauseparam,
   1676	.set_pauseparam = xgmac_set_pauseparam,
   1677	.get_ethtool_stats = xgmac_get_ethtool_stats,
   1678	.get_strings = xgmac_get_strings,
   1679	.get_wol = xgmac_get_wol,
   1680	.set_wol = xgmac_set_wol,
   1681	.get_sset_count = xgmac_get_sset_count,
   1682	.get_link_ksettings = xgmac_ethtool_get_link_ksettings,
   1683};
   1684
   1685/**
   1686 * xgmac_probe
   1687 * @pdev: platform device pointer
    1688 * Description: platform bus probe; maps the register window, requests
 * the MAC and PMT interrupts, detects hardware features and registers
 * the net_device.
   1689 */
   1690static int xgmac_probe(struct platform_device *pdev)
   1691{
   1692	int ret = 0;
   1693	struct resource *res;
   1694	struct net_device *ndev = NULL;
   1695	struct xgmac_priv *priv = NULL;
   1696	u8 addr[ETH_ALEN];
   1697	u32 uid;
   1698
   1699	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1700	if (!res)
   1701		return -ENODEV;
   1702
   1703	if (!request_mem_region(res->start, resource_size(res), pdev->name))
   1704		return -EBUSY;
   1705
   1706	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
   1707	if (!ndev) {
   1708		ret = -ENOMEM;
   1709		goto err_alloc;
   1710	}
   1711
   1712	SET_NETDEV_DEV(ndev, &pdev->dev);
   1713	priv = netdev_priv(ndev);
   1714	platform_set_drvdata(pdev, ndev);
   1715	ndev->netdev_ops = &xgmac_netdev_ops;
   1716	ndev->ethtool_ops = &xgmac_ethtool_ops;
   1717	spin_lock_init(&priv->stats_lock);
   1718	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
   1719
   1720	priv->device = &pdev->dev;
   1721	priv->dev = ndev;
   1722	priv->rx_pause = 1;
   1723	priv->tx_pause = 1;
   1724
   1725	priv->base = ioremap(res->start, resource_size(res));
   1726	if (!priv->base) {
   1727		netdev_err(ndev, "ioremap failed\n");
   1728		ret = -ENOMEM;
   1729		goto err_io;
   1730	}
   1731
   1732	uid = readl(priv->base + XGMAC_VERSION);
   1733	netdev_info(ndev, "h/w version is 0x%x\n", uid);
   1734
    1735	/* Figure out how many valid mac address filter registers we have:
	 * if a write to the highest ADDR_HIGH register reads back, the MAC
	 * was synthesized with the larger bank of address filters. */
   1736	writel(1, priv->base + XGMAC_ADDR_HIGH(31));
   1737	if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
   1738		priv->max_macs = 31;
   1739	else
   1740		priv->max_macs = 7;
   1741
   1742	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
   1743	ndev->irq = platform_get_irq(pdev, 0);
    1744	if (ndev->irq < 0) {
   1745		netdev_err(ndev, "No irq resource\n");
   1746		ret = ndev->irq;
   1747		goto err_irq;
   1748	}
   1749
   1750	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
   1751			  dev_name(&pdev->dev), ndev);
   1752	if (ret < 0) {
    1753		netdev_err(ndev, "Could not request irq %d - ret %d\n",
   1754			ndev->irq, ret);
   1755		goto err_irq;
   1756	}
   1757
   1758	priv->pmt_irq = platform_get_irq(pdev, 1);
    1759	if (priv->pmt_irq < 0) {
   1760		netdev_err(ndev, "No pmt irq resource\n");
   1761		ret = priv->pmt_irq;
   1762		goto err_pmt_irq;
   1763	}
   1764
   1765	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
   1766			  dev_name(&pdev->dev), ndev);
   1767	if (ret < 0) {
    1768		netdev_err(ndev, "Could not request irq %d - ret %d\n",
   1769			priv->pmt_irq, ret);
   1770		goto err_pmt_irq;
   1771	}
   1772
   1773	device_set_wakeup_capable(&pdev->dev, 1);
   1774	if (device_can_wakeup(priv->device))
   1775		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
   1776
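	/* Scatter-gather and highmem DMA always work; checksum offload is an
	 * optional synthesis feature, so only advertise it when the DMA
	 * hardware-feature register reports a checksum engine (TXCOESEL).
	 */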
   1777	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
   1778	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
   1779		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   1780				     NETIF_F_RXCSUM;
   1781	ndev->features |= ndev->hw_features;
   1782	ndev->priv_flags |= IFF_UNICAST_FLT;
   1783
   1784	/* MTU range: 46 - 9000 */
   1785	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
   1786	ndev->max_mtu = XGMAC_MAX_MTU;
   1787
   1788	/* Get the MAC address */
   1789	xgmac_get_mac_addr(priv->base, addr, 0);
   1790	eth_hw_addr_set(ndev, addr);
   1791	if (!is_valid_ether_addr(ndev->dev_addr))
    1792		netdev_warn(ndev, "MAC address %pM not valid\n",
   1793			 ndev->dev_addr);
   1794
   1795	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
   1796	ret = register_netdev(ndev);
   1797	if (ret)
   1798		goto err_reg;
   1799
   1800	return 0;
   1801
   1802err_reg:
   1803	netif_napi_del(&priv->napi);
   1804	free_irq(priv->pmt_irq, ndev);
   1805err_pmt_irq:
   1806	free_irq(ndev->irq, ndev);
   1807err_irq:
   1808	iounmap(priv->base);
   1809err_io:
   1810	free_netdev(ndev);
   1811err_alloc:
   1812	release_mem_region(res->start, resource_size(res));
   1813	return ret;
   1814}
   1815
   1816/**
   1817 * xgmac_remove
   1818 * @pdev: platform device pointer
    1819 * Description: this function disables the MAC RX/TX, frees the IRQ
    1820 * lines, unregisters the net_device and unmaps the allocated register
    1821 * region.
   1822 */
   1823static int xgmac_remove(struct platform_device *pdev)
   1824{
   1825	struct net_device *ndev = platform_get_drvdata(pdev);
   1826	struct xgmac_priv *priv = netdev_priv(ndev);
   1827	struct resource *res;
   1828
   1829	xgmac_mac_disable(priv->base);
   1830
   1831	/* Free the IRQ lines */
   1832	free_irq(ndev->irq, ndev);
   1833	free_irq(priv->pmt_irq, ndev);
   1834
   1835	unregister_netdev(ndev);
   1836	netif_napi_del(&priv->napi);
   1837
   1838	iounmap(priv->base);
   1839	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1840	release_mem_region(res->start, resource_size(res));
   1841
   1842	free_netdev(ndev);
   1843
   1844	return 0;
   1845}
   1846
   1847#ifdef CONFIG_PM_SLEEP
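        /* Program the PMT block for the requested wake-up modes; any mode
         * also sets the power-down bit so the MAC waits for a matching
         * frame.  Writing 0 (as done on resume) takes the MAC back out of
         * power-down.
         */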
   1848static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
   1849{
   1850	unsigned int pmt = 0;
   1851
   1852	if (mode & WAKE_MAGIC)
   1853		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
   1854	if (mode & WAKE_UCAST)
   1855		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
   1856
   1857	writel(pmt, ioaddr + XGMAC_PMT);
   1858}
   1859
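        /* Suspend policy: if the device may wake the system, leave the MAC
         * running but stop the TX/RX DMA engines, so the frame filter can
         * still match wake-up frames without DMA touching memory; otherwise
         * the MAC is disabled entirely.
         */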
   1860static int xgmac_suspend(struct device *dev)
   1861{
   1862	struct net_device *ndev = dev_get_drvdata(dev);
   1863	struct xgmac_priv *priv = netdev_priv(ndev);
   1864	u32 value;
   1865
   1866	if (!ndev || !netif_running(ndev))
   1867		return 0;
   1868
   1869	netif_device_detach(ndev);
   1870	napi_disable(&priv->napi);
   1871	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
   1872
   1873	if (device_may_wakeup(priv->device)) {
   1874		/* Stop TX/RX DMA Only */
   1875		value = readl(priv->base + XGMAC_DMA_CONTROL);
   1876		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
   1877		writel(value, priv->base + XGMAC_DMA_CONTROL);
   1878
   1879		xgmac_pmt(priv->base, priv->wolopts);
    1880	} else {
    1881		xgmac_mac_disable(priv->base);
	}
   1882
   1883	return 0;
   1884}
   1885
   1886static int xgmac_resume(struct device *dev)
   1887{
   1888	struct net_device *ndev = dev_get_drvdata(dev);
   1889	struct xgmac_priv *priv = netdev_priv(ndev);
   1890	void __iomem *ioaddr = priv->base;
   1891
   1892	if (!netif_running(ndev))
   1893		return 0;
   1894
   1895	xgmac_pmt(ioaddr, 0);
   1896
   1897	/* Enable the MAC and DMA */
   1898	xgmac_mac_enable(ioaddr);
   1899	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
   1900	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
   1901
   1902	netif_device_attach(ndev);
   1903	napi_enable(&priv->napi);
   1904
   1905	return 0;
   1906}
   1907#endif /* CONFIG_PM_SLEEP */
   1908
   1909static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
   1910
   1911static const struct of_device_id xgmac_of_match[] = {
   1912	{ .compatible = "calxeda,hb-xgmac", },
   1913	{},
   1914};
   1915MODULE_DEVICE_TABLE(of, xgmac_of_match);
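        /* A minimal sketch of a matching device tree node; the unit address
         * and interrupt specifiers below are illustrative, not copied from a
         * real Highbank dts:
         *
         *	ethernet@fff50000 {
         *		compatible = "calxeda,hb-xgmac";
         *		reg = <0xfff50000 0x1000>;
         *		interrupts = <0 77 4>, <0 78 4>;
         *	};
         *
         * The first interrupt is the main MAC/DMA interrupt and the second
         * the PMT wake-up interrupt, matching the platform_get_irq(pdev, 0)
         * and platform_get_irq(pdev, 1) calls in xgmac_probe().
         */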
   1916
   1917static struct platform_driver xgmac_driver = {
   1918	.driver = {
   1919		.name = "calxedaxgmac",
   1920		.of_match_table = xgmac_of_match,
   1921		.pm = &xgmac_pm_ops,
   1922	},
   1923	.probe = xgmac_probe,
   1924	.remove = xgmac_remove,
   1925};
   1926
   1927module_platform_driver(xgmac_driver);
   1928
   1929MODULE_AUTHOR("Calxeda, Inc.");
   1930MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
   1931MODULE_LICENSE("GPL v2");