cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sh_eth.c (88127B)


// SPDX-License-Identifier: GPL-2.0
/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

/* Use some intentionally tricky logic here to initialize the whole array to
 * 0xffff, but then override certain fields. This requires us to indicate that
 * we "know" there are overrides in these tables, so we disable the
 * corresponding warning in W=1 builds. GCC has supported -Woverride-init
 * since 4.2.x, but the macros to suppress it are only defined for GCC 8 and
 * later.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Woverride-init",
	      "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]      = 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};
__diag_pop();

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}

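/* Read-modify-write helper: clear the bits in @clear, then set those in @set */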
static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}

static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
{
	return mdp->reg_offset[enum_index];
}

static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
			     int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->tsu_addr + offset);
}

static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->tsu_addr + offset);
}

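/* Swap each 32-bit word of the buffer in place on little-endian hosts (a
 * no-op on big-endian); used when the controller lacks the hw_swap feature.
 */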
static void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);
#endif
}

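/* Program the RMII_MII register according to the PHY interface mode
 * (RGMII, GMII, MII or RMII) selected in the platform data or device tree.
 */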
static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up; defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}

static int sh_eth_soft_reset(struct net_device *ndev)
{
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
	mdelay(3);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);

	return 0;
}

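/* Poll up to 100 ms for the GETHER soft-reset bit to self-clear; return
 * -ETIMEDOUT if the device never comes out of reset.
 */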
static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}

static int sh_eth_soft_reset_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

	ret = sh_eth_check_soft_reset(ndev);
	if (ret)
		return ret;

	/* Table Init */
	sh_eth_write(ndev, 0, TDLAR);
	sh_eth_write(ndev, 0, TDFAR);
	sh_eth_write(ndev, 0, TDFXR);
	sh_eth_write(ndev, 0, TDFFR);
	sh_eth_write(ndev, 0, RDLAR);
	sh_eth_write(ndev, 0, RDFAR);
	sh_eth_write(ndev, 0, RDFXR);
	sh_eth_write(ndev, 0, RDFFR);

	/* Reset HW CRC register */
	if (mdp->cd->csmr)
		sh_eth_write(ndev, 0, CSMR);

	/* Select MII mode */
	if (mdp->cd->select_mii)
		sh_eth_select_mii(ndev);

	return ret;
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.no_tx_cntrs	= 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = TRSCER_RMAFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
	.rmiimode	= 1,
	.magic		= 1,
};

/* R8A77980 */
static struct sh_eth_cpu_data r8a77980_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type  = SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check       = EESR_FTC | EESR_CD | EESR_TRO,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER |
			  EESR_TFE | EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.nbst		= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* R7S9210 */
static struct sh_eth_cpu_data r7s9210_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rtrate		= 1,
	.dual_port	= 1,
};

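/* sh_eth_chip_reset_giga() below saves the per-port MAC address registers
 * around the ARSTR reset (which would otherwise clobber them), accessing
 * them via their fixed physical addresses on the SH7757 GETHERC.
 */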
#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.cexcr		= 1,
	.dual_port	= 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
	.magic		= 1,
	.cexcr		= 1,
	.rx_csum	= 1,
	.dual_port	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.trscer_err_mask = TRSCER_RMAFCE,

	.tsu		= 1,
	.dual_port	= 1,
};

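/* Fill in safe defaults for any per-SoC sh_eth_cpu_data fields left at zero */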
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

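/* Reserve skb headroom so that skb->data is aligned to SH_ETH_RX_ALIGN */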
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get the MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet device has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g), so the bootloader must program a MAC address
 * before this device can be used.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		eth_hw_addr_set(ndev, mac);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

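/* Bit-banged MDIO state: the PIR register address plus an optional hook
 * used to gate bus access before each register read or write.
 */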
struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
					  &mdp->rx_desc_dma, GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
					  &mdp->tx_desc_dma, GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}

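/* Bring the controller up: soft-reset, program the descriptor rings, the
 * MAC address and the interrupt masks, then start reception.
 */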
static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = mdp->cd->soft_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	/* DMA transfer burst mode */
	if (mdp->cd->nbst)
		sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);

	/* Burst cycle count upper-limit */
	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, 1, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, 1, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	mdp->cd->soft_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}

static void sh_eth_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is 2 bytes appended to packet data */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* In case of almost all GETHER/ETHERs, the Receive Frame State
		 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
		 * bit 0. However, in case of the R8A7740 and R7S72100
		 * the RFS bits are from bit 25 to bit 16. So, the
		 * driver needs right shifting by 16.
		 */
		if (mdp->cd->csmr)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else	if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&mdp->pdev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				sh_eth_rx_csum(skb);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is aligned to a 32-byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next reception if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* E-MAC interrupt handler */
static void sh_eth_emac_interrupt(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;

	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
	if (felic_stat & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (felic_stat & ECSR_MPD)
		pm_wakeup_event(&mdp->pdev->dev, 0);
	if (felic_stat & ECSR_LCHNG) {
		/* Link Changed */
		if (mdp->cd->no_psr || mdp->no_ether_link)
			return;
		link_stat = sh_eth_read(ndev, PSR);
		if (mdp->ether_link_active_low)
			link_stat = ~link_stat;
		if (!(link_stat & PSR_LMON)) {
			sh_eth_rcv_snd_disable(ndev);
		} else {
			/* Link Up */
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
			/* clear int */
			sh_eth_modify(ndev, ECSR, 0, 0);
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
			/* enable tx and rx */
			sh_eth_rcv_snd_enable(ndev);
		}
	}
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 mask;

	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* SH7712 BUG */
		if (edtrr ^ mdp->cd->edtrr_trns) {
			/* tx dma start */
			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

   1842static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
   1843{
   1844	struct net_device *ndev = netdev;
   1845	struct sh_eth_private *mdp = netdev_priv(ndev);
   1846	struct sh_eth_cpu_data *cd = mdp->cd;
   1847	irqreturn_t ret = IRQ_NONE;
   1848	u32 intr_status, intr_enable;
   1849
   1850	spin_lock(&mdp->lock);
   1851
   1852	/* Get interrupt status */
   1853	intr_status = sh_eth_read(ndev, EESR);
    1854	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
    1855	 * always enabled since it's the one that comes through regardless of
    1856	 * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
    1857	 * in order to quench it, as it doesn't get cleared by just writing 1
    1858	 * to the ECI bit...
    1859	 */
   1860	intr_enable = sh_eth_read(ndev, EESIPR);
   1861	intr_status &= intr_enable | EESIPR_ECIIP;
   1862	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
   1863			   cd->eesr_err_check))
   1864		ret = IRQ_HANDLED;
   1865	else
   1866		goto out;
   1867
   1868	if (unlikely(!mdp->irq_enabled)) {
   1869		sh_eth_write(ndev, 0, EESIPR);
   1870		goto out;
   1871	}
   1872
   1873	if (intr_status & EESR_RX_CHECK) {
   1874		if (napi_schedule_prep(&mdp->napi)) {
   1875			/* Mask Rx interrupts */
   1876			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
   1877				     EESIPR);
   1878			__napi_schedule(&mdp->napi);
   1879		} else {
   1880			netdev_warn(ndev,
   1881				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
   1882				    intr_status, intr_enable);
   1883		}
   1884	}
   1885
   1886	/* Tx Check */
   1887	if (intr_status & cd->tx_check) {
   1888		/* Clear Tx interrupts */
   1889		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
   1890
   1891		sh_eth_tx_free(ndev, true);
   1892		netif_wake_queue(ndev);
   1893	}
   1894
   1895	/* E-MAC interrupt */
   1896	if (intr_status & EESR_ECI)
   1897		sh_eth_emac_interrupt(ndev);
   1898
   1899	if (intr_status & cd->eesr_err_check) {
   1900		/* Clear error interrupts */
   1901		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
   1902
   1903		sh_eth_error(ndev, intr_status);
   1904	}
   1905
   1906out:
   1907	spin_unlock(&mdp->lock);
   1908
   1909	return ret;
   1910}
   1911
   1912static int sh_eth_poll(struct napi_struct *napi, int budget)
   1913{
   1914	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
   1915						  napi);
   1916	struct net_device *ndev = napi->dev;
   1917	int quota = budget;
   1918	u32 intr_status;
   1919
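        	/* Drain the Rx ring until the controller stops flagging new
        	 * frames or the quota runs out.
        	 */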
   1920	for (;;) {
   1921		intr_status = sh_eth_read(ndev, EESR);
   1922		if (!(intr_status & EESR_RX_CHECK))
   1923			break;
   1924		/* Clear Rx interrupts */
   1925		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
   1926
   1927		if (sh_eth_rx(ndev, intr_status, &quota))
   1928			goto out;
   1929	}
   1930
   1931	napi_complete(napi);
   1932
   1933	/* Reenable Rx interrupts */
   1934	if (mdp->irq_enabled)
   1935		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
   1936out:
   1937	return budget - quota;
   1938}
   1939
   1940/* PHY state control function */
   1941static void sh_eth_adjust_link(struct net_device *ndev)
   1942{
   1943	struct sh_eth_private *mdp = netdev_priv(ndev);
   1944	struct phy_device *phydev = ndev->phydev;
   1945	unsigned long flags;
   1946	int new_state = 0;
   1947
   1948	spin_lock_irqsave(&mdp->lock, flags);
   1949
    1950	/* Disable TX and RX right here if the E-MAC link change is ignored */
   1951	if (mdp->cd->no_psr || mdp->no_ether_link)
   1952		sh_eth_rcv_snd_disable(ndev);
   1953
   1954	if (phydev->link) {
   1955		if (phydev->duplex != mdp->duplex) {
   1956			new_state = 1;
   1957			mdp->duplex = phydev->duplex;
   1958			if (mdp->cd->set_duplex)
   1959				mdp->cd->set_duplex(ndev);
   1960		}
   1961
   1962		if (phydev->speed != mdp->speed) {
   1963			new_state = 1;
   1964			mdp->speed = phydev->speed;
   1965			if (mdp->cd->set_rate)
   1966				mdp->cd->set_rate(ndev);
   1967		}
   1968		if (!mdp->link) {
   1969			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
   1970			new_state = 1;
   1971			mdp->link = phydev->link;
   1972		}
   1973	} else if (mdp->link) {
   1974		new_state = 1;
   1975		mdp->link = 0;
   1976		mdp->speed = 0;
   1977		mdp->duplex = -1;
   1978	}
   1979
    1980	/* Enable TX and RX right here if the E-MAC link change is ignored */
   1981	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
   1982		sh_eth_rcv_snd_enable(ndev);
   1983
   1984	spin_unlock_irqrestore(&mdp->lock, flags);
   1985
   1986	if (new_state && netif_msg_link(mdp))
   1987		phy_print_status(phydev);
   1988}
   1989
   1990/* PHY init function */
   1991static int sh_eth_phy_init(struct net_device *ndev)
   1992{
   1993	struct device_node *np = ndev->dev.parent->of_node;
   1994	struct sh_eth_private *mdp = netdev_priv(ndev);
   1995	struct phy_device *phydev;
   1996
   1997	mdp->link = 0;
   1998	mdp->speed = 0;
   1999	mdp->duplex = -1;
   2000
    2001	/* Try to connect to the PHY */
   2002	if (np) {
   2003		struct device_node *pn;
   2004
   2005		pn = of_parse_phandle(np, "phy-handle", 0);
   2006		phydev = of_phy_connect(ndev, pn,
   2007					sh_eth_adjust_link, 0,
   2008					mdp->phy_interface);
   2009
   2010		of_node_put(pn);
   2011		if (!phydev)
   2012			phydev = ERR_PTR(-ENOENT);
   2013	} else {
   2014		char phy_id[MII_BUS_ID_SIZE + 3];
   2015
   2016		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
   2017			 mdp->mii_bus->id, mdp->phy_id);
   2018
   2019		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
   2020				     mdp->phy_interface);
   2021	}
   2022
   2023	if (IS_ERR(phydev)) {
   2024		netdev_err(ndev, "failed to connect PHY\n");
   2025		return PTR_ERR(phydev);
   2026	}
   2027
   2028	/* mask with MAC supported features */
   2029	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
   2030		phy_set_max_speed(phydev, SPEED_100);
   2031
   2032	phy_attached_info(phydev);
   2033
   2034	return 0;
   2035}
   2036
   2037/* PHY control start function */
   2038static int sh_eth_phy_start(struct net_device *ndev)
   2039{
   2040	int ret;
   2041
   2042	ret = sh_eth_phy_init(ndev);
   2043	if (ret)
   2044		return ret;
   2045
   2046	phy_start(ndev->phydev);
   2047
   2048	return 0;
   2049}
   2050
   2051/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
   2052 * version must be bumped as well.  Just adding registers up to that
   2053 * limit is fine, as long as the existing register indices don't
   2054 * change.
   2055 */
   2056#define SH_ETH_REG_DUMP_VERSION		1
   2057#define SH_ETH_REG_DUMP_MAX_REGS	256
   2058
   2059static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
   2060{
   2061	struct sh_eth_private *mdp = netdev_priv(ndev);
   2062	struct sh_eth_cpu_data *cd = mdp->cd;
   2063	u32 *valid_map;
   2064	size_t len;
   2065
   2066	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
   2067
   2068	/* Dump starts with a bitmap that tells ethtool which
   2069	 * registers are defined for this chip.
   2070	 */
   2071	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
   2072	if (buf) {
   2073		valid_map = buf;
   2074		buf += len;
   2075	} else {
   2076		valid_map = NULL;
   2077	}
   2078
   2079	/* Add a register to the dump, if it has a defined offset.
   2080	 * This automatically skips most undefined registers, but for
   2081	 * some it is also necessary to check a capability flag in
   2082	 * struct sh_eth_cpu_data.
   2083	 */
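        	/* When buf is NULL (the get_regs_len path), only the length is
        	 * counted; nothing is marked valid or stored.
        	 */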
   2084#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
   2085#define add_reg_from(reg, read_expr) do {				\
   2086		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
   2087			if (buf) {					\
   2088				mark_reg_valid(reg);			\
   2089				*buf++ = read_expr;			\
   2090			}						\
   2091			++len;						\
   2092		}							\
   2093	} while (0)
   2094#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
   2095#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
   2096
   2097	add_reg(EDSR);
   2098	add_reg(EDMR);
   2099	add_reg(EDTRR);
   2100	add_reg(EDRRR);
   2101	add_reg(EESR);
   2102	add_reg(EESIPR);
   2103	add_reg(TDLAR);
   2104	if (!cd->no_xdfar)
   2105		add_reg(TDFAR);
   2106	add_reg(TDFXR);
   2107	add_reg(TDFFR);
   2108	add_reg(RDLAR);
   2109	if (!cd->no_xdfar)
   2110		add_reg(RDFAR);
   2111	add_reg(RDFXR);
   2112	add_reg(RDFFR);
   2113	add_reg(TRSCER);
   2114	add_reg(RMFCR);
   2115	add_reg(TFTR);
   2116	add_reg(FDR);
   2117	add_reg(RMCR);
   2118	add_reg(TFUCR);
   2119	add_reg(RFOCR);
   2120	if (cd->rmiimode)
   2121		add_reg(RMIIMODE);
   2122	add_reg(FCFTR);
   2123	if (cd->rpadir)
   2124		add_reg(RPADIR);
   2125	if (!cd->no_trimd)
   2126		add_reg(TRIMD);
   2127	add_reg(ECMR);
   2128	add_reg(ECSR);
   2129	add_reg(ECSIPR);
   2130	add_reg(PIR);
   2131	if (!cd->no_psr)
   2132		add_reg(PSR);
   2133	add_reg(RDMLR);
   2134	add_reg(RFLR);
   2135	add_reg(IPGR);
   2136	if (cd->apr)
   2137		add_reg(APR);
   2138	if (cd->mpr)
   2139		add_reg(MPR);
   2140	add_reg(RFCR);
   2141	add_reg(RFCF);
   2142	if (cd->tpauser)
   2143		add_reg(TPAUSER);
   2144	add_reg(TPAUSECR);
   2145	if (cd->gecmr)
   2146		add_reg(GECMR);
   2147	if (cd->bculr)
   2148		add_reg(BCULR);
   2149	add_reg(MAHR);
   2150	add_reg(MALR);
   2151	if (!cd->no_tx_cntrs) {
   2152		add_reg(TROCR);
   2153		add_reg(CDCR);
   2154		add_reg(LCCR);
   2155		add_reg(CNDCR);
   2156	}
   2157	add_reg(CEFCR);
   2158	add_reg(FRECR);
   2159	add_reg(TSFRCR);
   2160	add_reg(TLFRCR);
   2161	if (cd->cexcr) {
   2162		add_reg(CERCR);
   2163		add_reg(CEECR);
   2164	}
   2165	add_reg(MAFCR);
   2166	if (cd->rtrate)
   2167		add_reg(RTRATE);
   2168	if (cd->csmr)
   2169		add_reg(CSMR);
   2170	if (cd->select_mii)
   2171		add_reg(RMII_MII);
   2172	if (cd->tsu) {
   2173		add_tsu_reg(ARSTR);
   2174		add_tsu_reg(TSU_CTRST);
   2175		if (cd->dual_port) {
   2176			add_tsu_reg(TSU_FWEN0);
   2177			add_tsu_reg(TSU_FWEN1);
   2178			add_tsu_reg(TSU_FCM);
   2179			add_tsu_reg(TSU_BSYSL0);
   2180			add_tsu_reg(TSU_BSYSL1);
   2181			add_tsu_reg(TSU_PRISL0);
   2182			add_tsu_reg(TSU_PRISL1);
   2183			add_tsu_reg(TSU_FWSL0);
   2184			add_tsu_reg(TSU_FWSL1);
   2185		}
   2186		add_tsu_reg(TSU_FWSLC);
   2187		if (cd->dual_port) {
   2188			add_tsu_reg(TSU_QTAGM0);
   2189			add_tsu_reg(TSU_QTAGM1);
   2190			add_tsu_reg(TSU_FWSR);
   2191			add_tsu_reg(TSU_FWINMK);
   2192			add_tsu_reg(TSU_ADQT0);
   2193			add_tsu_reg(TSU_ADQT1);
   2194			add_tsu_reg(TSU_VTAG0);
   2195			add_tsu_reg(TSU_VTAG1);
   2196		}
   2197		add_tsu_reg(TSU_ADSBSY);
   2198		add_tsu_reg(TSU_TEN);
   2199		add_tsu_reg(TSU_POST1);
   2200		add_tsu_reg(TSU_POST2);
   2201		add_tsu_reg(TSU_POST3);
   2202		add_tsu_reg(TSU_POST4);
   2203		/* This is the start of a table, not just a single register. */
   2204		if (buf) {
   2205			unsigned int i;
   2206
   2207			mark_reg_valid(TSU_ADRH0);
   2208			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
   2209				*buf++ = ioread32(mdp->tsu_addr +
   2210						  mdp->reg_offset[TSU_ADRH0] +
   2211						  i * 4);
   2212		}
   2213		len += SH_ETH_TSU_CAM_ENTRIES * 2;
   2214	}
   2215
   2216#undef mark_reg_valid
   2217#undef add_reg_from
   2218#undef add_reg
   2219#undef add_tsu_reg
   2220
   2221	return len * 4;
   2222}
   2223
   2224static int sh_eth_get_regs_len(struct net_device *ndev)
   2225{
   2226	return __sh_eth_get_regs(ndev, NULL);
   2227}
   2228
   2229static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
   2230			    void *buf)
   2231{
   2232	struct sh_eth_private *mdp = netdev_priv(ndev);
   2233
   2234	regs->version = SH_ETH_REG_DUMP_VERSION;
   2235
   2236	pm_runtime_get_sync(&mdp->pdev->dev);
   2237	__sh_eth_get_regs(ndev, buf);
   2238	pm_runtime_put_sync(&mdp->pdev->dev);
   2239}
   2240
   2241static u32 sh_eth_get_msglevel(struct net_device *ndev)
   2242{
   2243	struct sh_eth_private *mdp = netdev_priv(ndev);
   2244	return mdp->msg_enable;
   2245}
   2246
   2247static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
   2248{
   2249	struct sh_eth_private *mdp = netdev_priv(ndev);
   2250	mdp->msg_enable = value;
   2251}
   2252
   2253static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
   2254	"rx_current", "tx_current",
   2255	"rx_dirty", "tx_dirty",
   2256};
   2257#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
   2258
   2259static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
   2260{
   2261	switch (sset) {
   2262	case ETH_SS_STATS:
   2263		return SH_ETH_STATS_LEN;
   2264	default:
   2265		return -EOPNOTSUPP;
   2266	}
   2267}
   2268
   2269static void sh_eth_get_ethtool_stats(struct net_device *ndev,
   2270				     struct ethtool_stats *stats, u64 *data)
   2271{
   2272	struct sh_eth_private *mdp = netdev_priv(ndev);
   2273	int i = 0;
   2274
   2275	/* device-specific stats */
   2276	data[i++] = mdp->cur_rx;
   2277	data[i++] = mdp->cur_tx;
   2278	data[i++] = mdp->dirty_rx;
   2279	data[i++] = mdp->dirty_tx;
   2280}
   2281
   2282static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
   2283{
   2284	switch (stringset) {
   2285	case ETH_SS_STATS:
   2286		memcpy(data, sh_eth_gstrings_stats,
   2287		       sizeof(sh_eth_gstrings_stats));
   2288		break;
   2289	}
   2290}
   2291
   2292static void sh_eth_get_ringparam(struct net_device *ndev,
   2293				 struct ethtool_ringparam *ring,
   2294				 struct kernel_ethtool_ringparam *kernel_ring,
   2295				 struct netlink_ext_ack *extack)
   2296{
   2297	struct sh_eth_private *mdp = netdev_priv(ndev);
   2298
   2299	ring->rx_max_pending = RX_RING_MAX;
   2300	ring->tx_max_pending = TX_RING_MAX;
   2301	ring->rx_pending = mdp->num_rx_ring;
   2302	ring->tx_pending = mdp->num_tx_ring;
   2303}
   2304
   2305static int sh_eth_set_ringparam(struct net_device *ndev,
   2306				struct ethtool_ringparam *ring,
   2307				struct kernel_ethtool_ringparam *kernel_ring,
   2308				struct netlink_ext_ack *extack)
   2309{
   2310	struct sh_eth_private *mdp = netdev_priv(ndev);
   2311	int ret;
   2312
   2313	if (ring->tx_pending > TX_RING_MAX ||
   2314	    ring->rx_pending > RX_RING_MAX ||
   2315	    ring->tx_pending < TX_RING_MIN ||
   2316	    ring->rx_pending < RX_RING_MIN)
   2317		return -EINVAL;
   2318	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
   2319		return -EINVAL;
   2320
   2321	if (netif_running(ndev)) {
   2322		netif_device_detach(ndev);
   2323		netif_tx_disable(ndev);
   2324
   2325		/* Serialise with the interrupt handler and NAPI, then
   2326		 * disable interrupts.  We have to clear the
   2327		 * irq_enabled flag first to ensure that interrupts
   2328		 * won't be re-enabled.
   2329		 */
   2330		mdp->irq_enabled = false;
   2331		synchronize_irq(ndev->irq);
   2332		napi_synchronize(&mdp->napi);
   2333		sh_eth_write(ndev, 0x0000, EESIPR);
   2334
   2335		sh_eth_dev_exit(ndev);
   2336
   2337		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
   2338		sh_eth_ring_free(ndev);
   2339	}
   2340
   2341	/* Set new parameters */
   2342	mdp->num_rx_ring = ring->rx_pending;
   2343	mdp->num_tx_ring = ring->tx_pending;
   2344
   2345	if (netif_running(ndev)) {
   2346		ret = sh_eth_ring_init(ndev);
   2347		if (ret < 0) {
   2348			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
   2349				   __func__);
   2350			return ret;
   2351		}
   2352		ret = sh_eth_dev_init(ndev);
   2353		if (ret < 0) {
   2354			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
   2355				   __func__);
   2356			return ret;
   2357		}
   2358
   2359		netif_device_attach(ndev);
   2360	}
   2361
   2362	return 0;
   2363}
   2364
   2365static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   2366{
   2367	struct sh_eth_private *mdp = netdev_priv(ndev);
   2368
   2369	wol->supported = 0;
   2370	wol->wolopts = 0;
   2371
   2372	if (mdp->cd->magic) {
   2373		wol->supported = WAKE_MAGIC;
   2374		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
   2375	}
   2376}
   2377
   2378static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   2379{
   2380	struct sh_eth_private *mdp = netdev_priv(ndev);
   2381
   2382	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
   2383		return -EOPNOTSUPP;
   2384
   2385	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
   2386
   2387	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
   2388
   2389	return 0;
   2390}
   2391
   2392static const struct ethtool_ops sh_eth_ethtool_ops = {
   2393	.get_regs_len	= sh_eth_get_regs_len,
   2394	.get_regs	= sh_eth_get_regs,
   2395	.nway_reset	= phy_ethtool_nway_reset,
   2396	.get_msglevel	= sh_eth_get_msglevel,
   2397	.set_msglevel	= sh_eth_set_msglevel,
   2398	.get_link	= ethtool_op_get_link,
   2399	.get_strings	= sh_eth_get_strings,
   2400	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
   2401	.get_sset_count     = sh_eth_get_sset_count,
   2402	.get_ringparam	= sh_eth_get_ringparam,
   2403	.set_ringparam	= sh_eth_set_ringparam,
   2404	.get_link_ksettings = phy_ethtool_get_link_ksettings,
   2405	.set_link_ksettings = phy_ethtool_set_link_ksettings,
   2406	.get_wol	= sh_eth_get_wol,
   2407	.set_wol	= sh_eth_set_wol,
   2408};
   2409
   2410/* network device open function */
   2411static int sh_eth_open(struct net_device *ndev)
   2412{
   2413	struct sh_eth_private *mdp = netdev_priv(ndev);
   2414	int ret;
   2415
   2416	pm_runtime_get_sync(&mdp->pdev->dev);
   2417
   2418	napi_enable(&mdp->napi);
   2419
   2420	ret = request_irq(ndev->irq, sh_eth_interrupt,
   2421			  mdp->cd->irq_flags, ndev->name, ndev);
   2422	if (ret) {
   2423		netdev_err(ndev, "Can not assign IRQ number\n");
   2424		goto out_napi_off;
   2425	}
   2426
   2427	/* Descriptor set */
   2428	ret = sh_eth_ring_init(ndev);
   2429	if (ret)
   2430		goto out_free_irq;
   2431
   2432	/* device init */
   2433	ret = sh_eth_dev_init(ndev);
   2434	if (ret)
   2435		goto out_free_irq;
   2436
    2437	/* PHY control start */
   2438	ret = sh_eth_phy_start(ndev);
   2439	if (ret)
   2440		goto out_free_irq;
   2441
   2442	netif_start_queue(ndev);
   2443
   2444	mdp->is_opened = 1;
   2445
   2446	return ret;
   2447
   2448out_free_irq:
   2449	free_irq(ndev->irq, ndev);
   2450out_napi_off:
   2451	napi_disable(&mdp->napi);
   2452	pm_runtime_put_sync(&mdp->pdev->dev);
   2453	return ret;
   2454}
   2455
   2456/* Timeout function */
   2457static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
   2458{
   2459	struct sh_eth_private *mdp = netdev_priv(ndev);
   2460	struct sh_eth_rxdesc *rxdesc;
   2461	int i;
   2462
   2463	netif_stop_queue(ndev);
   2464
   2465	netif_err(mdp, timer, ndev,
   2466		  "transmit timed out, status %8.8x, resetting...\n",
   2467		  sh_eth_read(ndev, EESR));
   2468
   2469	/* tx_errors count up */
   2470	ndev->stats.tx_errors++;
   2471
   2472	/* Free all the skbuffs in the Rx queue. */
   2473	for (i = 0; i < mdp->num_rx_ring; i++) {
   2474		rxdesc = &mdp->rx_ring[i];
   2475		rxdesc->status = cpu_to_le32(0);
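        		/* 0xBADF00D0 is a poison pattern, making any stale use
        		 * of the descriptor easy to spot.
        		 */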
   2476		rxdesc->addr = cpu_to_le32(0xBADF00D0);
   2477		dev_kfree_skb(mdp->rx_skbuff[i]);
   2478		mdp->rx_skbuff[i] = NULL;
   2479	}
   2480	for (i = 0; i < mdp->num_tx_ring; i++) {
   2481		dev_kfree_skb(mdp->tx_skbuff[i]);
   2482		mdp->tx_skbuff[i] = NULL;
   2483	}
   2484
   2485	/* device init */
   2486	sh_eth_dev_init(ndev);
   2487
   2488	netif_start_queue(ndev);
   2489}
   2490
   2491/* Packet transmit function */
   2492static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
   2493				     struct net_device *ndev)
   2494{
   2495	struct sh_eth_private *mdp = netdev_priv(ndev);
   2496	struct sh_eth_txdesc *txdesc;
   2497	dma_addr_t dma_addr;
   2498	u32 entry;
   2499	unsigned long flags;
   2500
   2501	spin_lock_irqsave(&mdp->lock, flags);
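        	/* Keep a few descriptors of headroom; if the ring is nearly
        	 * full, try to reclaim completed slots before stopping the
        	 * queue.
        	 */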
   2502	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
   2503		if (!sh_eth_tx_free(ndev, true)) {
   2504			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
   2505			netif_stop_queue(ndev);
   2506			spin_unlock_irqrestore(&mdp->lock, flags);
   2507			return NETDEV_TX_BUSY;
   2508		}
   2509	}
   2510	spin_unlock_irqrestore(&mdp->lock, flags);
   2511
   2512	if (skb_put_padto(skb, ETH_ZLEN))
   2513		return NETDEV_TX_OK;
   2514
   2515	entry = mdp->cur_tx % mdp->num_tx_ring;
   2516	mdp->tx_skbuff[entry] = skb;
   2517	txdesc = &mdp->tx_ring[entry];
    2518	/* Byte-swap the payload in software when the controller lacks HW swap */
   2519	if (!mdp->cd->hw_swap)
   2520		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
   2521	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
   2522				  DMA_TO_DEVICE);
   2523	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
   2524		kfree_skb(skb);
   2525		return NETDEV_TX_OK;
   2526	}
   2527	txdesc->addr = cpu_to_le32(dma_addr);
   2528	txdesc->len  = cpu_to_le32(skb->len << 16);
   2529
   2530	dma_wmb(); /* TACT bit must be set after all the above writes */
   2531	if (entry >= mdp->num_tx_ring - 1)
   2532		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
   2533	else
   2534		txdesc->status |= cpu_to_le32(TD_TACT);
   2535
   2536	wmb(); /* cur_tx must be incremented after TACT bit was set */
   2537	mdp->cur_tx++;
   2538
   2539	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
   2540		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
   2541
   2542	return NETDEV_TX_OK;
   2543}
   2544
   2545/* The statistics registers have write-clear behaviour, which means we
   2546 * will lose any increment between the read and write.  We mitigate
   2547 * this by only clearing when we read a non-zero value, so we will
   2548 * never falsely report a total of zero.
   2549 */
   2550static void
   2551sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
   2552{
   2553	u32 delta = sh_eth_read(ndev, reg);
   2554
   2555	if (delta) {
   2556		*stat += delta;
   2557		sh_eth_write(ndev, 0, reg);
   2558	}
   2559}
   2560
   2561static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
   2562{
   2563	struct sh_eth_private *mdp = netdev_priv(ndev);
   2564
   2565	if (mdp->cd->no_tx_cntrs)
   2566		return &ndev->stats;
   2567
   2568	if (!mdp->is_opened)
   2569		return &ndev->stats;
   2570
   2571	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
   2572	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
   2573	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
   2574
   2575	if (mdp->cd->cexcr) {
   2576		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
   2577				   CERCR);
   2578		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
   2579				   CEECR);
   2580	} else {
   2581		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
   2582				   CNDCR);
   2583	}
   2584
   2585	return &ndev->stats;
   2586}
   2587
   2588/* device close function */
   2589static int sh_eth_close(struct net_device *ndev)
   2590{
   2591	struct sh_eth_private *mdp = netdev_priv(ndev);
   2592
   2593	netif_stop_queue(ndev);
   2594
   2595	/* Serialise with the interrupt handler and NAPI, then disable
   2596	 * interrupts.  We have to clear the irq_enabled flag first to
   2597	 * ensure that interrupts won't be re-enabled.
   2598	 */
   2599	mdp->irq_enabled = false;
   2600	synchronize_irq(ndev->irq);
   2601	napi_disable(&mdp->napi);
   2602	sh_eth_write(ndev, 0x0000, EESIPR);
   2603
   2604	sh_eth_dev_exit(ndev);
   2605
   2606	/* PHY Disconnect */
   2607	if (ndev->phydev) {
   2608		phy_stop(ndev->phydev);
   2609		phy_disconnect(ndev->phydev);
   2610	}
   2611
   2612	free_irq(ndev->irq, ndev);
   2613
   2614	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
   2615	sh_eth_ring_free(ndev);
   2616
   2617	mdp->is_opened = 0;
   2618
   2619	pm_runtime_put(&mdp->pdev->dev);
   2620
   2621	return 0;
   2622}
   2623
   2624static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
   2625{
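        	/* The MTU can only be changed while the interface is down. */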
   2626	if (netif_running(ndev))
   2627		return -EBUSY;
   2628
   2629	ndev->mtu = new_mtu;
   2630	netdev_update_features(ndev);
   2631
   2632	return 0;
   2633}
   2634
    2635	/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
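        /* Each TSU_POSTn register packs eight CAM entries, four bits per
         * entry; within a nibble each port has its own enable bit.
         */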
   2636static u32 sh_eth_tsu_get_post_mask(int entry)
   2637{
   2638	return 0x0f << (28 - ((entry % 8) * 4));
   2639}
   2640
   2641static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
   2642{
   2643	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
   2644}
   2645
   2646static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
   2647					     int entry)
   2648{
   2649	struct sh_eth_private *mdp = netdev_priv(ndev);
   2650	int reg = TSU_POST1 + entry / 8;
   2651	u32 tmp;
   2652
   2653	tmp = sh_eth_tsu_read(mdp, reg);
   2654	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
   2655}
   2656
   2657static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
   2658					      int entry)
   2659{
   2660	struct sh_eth_private *mdp = netdev_priv(ndev);
   2661	int reg = TSU_POST1 + entry / 8;
   2662	u32 post_mask, ref_mask, tmp;
   2663
   2664	post_mask = sh_eth_tsu_get_post_mask(entry);
   2665	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
   2666
   2667	tmp = sh_eth_tsu_read(mdp, reg);
   2668	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
   2669
    2670	/* If the other port still enables this entry, return "true" */
   2671	return tmp & ref_mask;
   2672}
   2673
   2674static int sh_eth_tsu_busy(struct net_device *ndev)
   2675{
   2676	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
   2677	struct sh_eth_private *mdp = netdev_priv(ndev);
   2678
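        	/* Poll ADSBSY every 10 us, i.e. 100 iterations per millisecond
        	 * of SH_ETH_TSU_TIMEOUT_MS.
        	 */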
   2679	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
   2680		udelay(10);
   2681		timeout--;
   2682		if (timeout <= 0) {
   2683			netdev_err(ndev, "%s: timeout\n", __func__);
   2684			return -ETIMEDOUT;
   2685		}
   2686	}
   2687
   2688	return 0;
   2689}
   2690
   2691static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
   2692				  const u8 *addr)
   2693{
   2694	struct sh_eth_private *mdp = netdev_priv(ndev);
   2695	u32 val;
   2696
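        	/* A CAM entry spans two 32-bit words: bytes 0-3 of the MAC
        	 * address in the first, bytes 4-5 in the second; the TSU goes
        	 * busy after each word is written.
        	 */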
   2697	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
   2698	iowrite32(val, mdp->tsu_addr + offset);
   2699	if (sh_eth_tsu_busy(ndev) < 0)
   2700		return -EBUSY;
   2701
   2702	val = addr[4] << 8 | addr[5];
   2703	iowrite32(val, mdp->tsu_addr + offset + 4);
   2704	if (sh_eth_tsu_busy(ndev) < 0)
   2705		return -EBUSY;
   2706
   2707	return 0;
   2708}
   2709
   2710static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
   2711{
   2712	struct sh_eth_private *mdp = netdev_priv(ndev);
   2713	u32 val;
   2714
   2715	val = ioread32(mdp->tsu_addr + offset);
   2716	addr[0] = (val >> 24) & 0xff;
   2717	addr[1] = (val >> 16) & 0xff;
   2718	addr[2] = (val >> 8) & 0xff;
   2719	addr[3] = val & 0xff;
   2720	val = ioread32(mdp->tsu_addr + offset + 4);
   2721	addr[4] = (val >> 8) & 0xff;
   2722	addr[5] = val & 0xff;
   2723}
   2724
   2726static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
   2727{
   2728	struct sh_eth_private *mdp = netdev_priv(ndev);
   2729	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
   2730	int i;
   2731	u8 c_addr[ETH_ALEN];
   2732
   2733	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
   2734		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
   2735		if (ether_addr_equal(addr, c_addr))
   2736			return i;
   2737	}
   2738
   2739	return -ENOENT;
   2740}
   2741
   2742static int sh_eth_tsu_find_empty(struct net_device *ndev)
   2743{
   2744	u8 blank[ETH_ALEN];
   2745	int entry;
   2746
   2747	memset(blank, 0, sizeof(blank));
   2748	entry = sh_eth_tsu_find_entry(ndev, blank);
   2749	return (entry < 0) ? -ENOMEM : entry;
   2750}
   2751
   2752static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
   2753					      int entry)
   2754{
   2755	struct sh_eth_private *mdp = netdev_priv(ndev);
   2756	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
   2757	int ret;
   2758	u8 blank[ETH_ALEN];
   2759
   2760	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
   2761			 ~(1 << (31 - entry)), TSU_TEN);
   2762
   2763	memset(blank, 0, sizeof(blank));
   2764	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
   2765	if (ret < 0)
   2766		return ret;
   2767	return 0;
   2768}
   2769
   2770static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
   2771{
   2772	struct sh_eth_private *mdp = netdev_priv(ndev);
   2773	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
   2774	int i, ret;
   2775
   2776	if (!mdp->cd->tsu)
   2777		return 0;
   2778
   2779	i = sh_eth_tsu_find_entry(ndev, addr);
   2780	if (i < 0) {
   2781		/* No entry found, create one */
   2782		i = sh_eth_tsu_find_empty(ndev);
   2783		if (i < 0)
   2784			return -ENOMEM;
   2785		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
   2786		if (ret < 0)
   2787			return ret;
   2788
   2789		/* Enable the entry */
   2790		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
   2791				 (1 << (31 - i)), TSU_TEN);
   2792	}
   2793
   2794	/* Entry found or created, enable POST */
   2795	sh_eth_tsu_enable_cam_entry_post(ndev, i);
   2796
   2797	return 0;
   2798}
   2799
   2800static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
   2801{
   2802	struct sh_eth_private *mdp = netdev_priv(ndev);
   2803	int i, ret;
   2804
   2805	if (!mdp->cd->tsu)
   2806		return 0;
   2807
    2808		i = sh_eth_tsu_find_entry(ndev, addr);
    2809		if (i < 0)
    2810			goto done;
    2811	
    2812		/* Entry found */
    2813		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
    2814			goto done;
    2815		/* Disable the entry if both ports are disabled */
    2816		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
    2817		if (ret < 0)
    2818			return ret;
   2819done:
   2820	return 0;
   2821}
   2822
   2823static int sh_eth_tsu_purge_all(struct net_device *ndev)
   2824{
   2825	struct sh_eth_private *mdp = netdev_priv(ndev);
   2826	int i, ret;
   2827
   2828	if (!mdp->cd->tsu)
   2829		return 0;
   2830
   2831	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
   2832		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
   2833			continue;
   2834
    2835			/* Disable the entry if both ports are disabled */
   2836		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
   2837		if (ret < 0)
   2838			return ret;
   2839	}
   2840
   2841	return 0;
   2842}
   2843
   2844static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
   2845{
   2846	struct sh_eth_private *mdp = netdev_priv(ndev);
   2847	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
   2848	u8 addr[ETH_ALEN];
   2849	int i;
   2850
   2851	if (!mdp->cd->tsu)
   2852		return;
   2853
   2854	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
   2855		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
   2856		if (is_multicast_ether_addr(addr))
   2857			sh_eth_tsu_del_entry(ndev, addr);
   2858	}
   2859}
   2860
   2861/* Update promiscuous flag and multicast filter */
   2862static void sh_eth_set_rx_mode(struct net_device *ndev)
   2863{
   2864	struct sh_eth_private *mdp = netdev_priv(ndev);
   2865	u32 ecmr_bits;
   2866	int mcast_all = 0;
   2867	unsigned long flags;
   2868
   2869	spin_lock_irqsave(&mdp->lock, flags);
   2870	/* Initial condition is MCT = 1, PRM = 0.
   2871	 * Depending on ndev->flags, set PRM or clear MCT
   2872	 */
   2873	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
   2874	if (mdp->cd->tsu)
   2875		ecmr_bits |= ECMR_MCT;
   2876
   2877	if (!(ndev->flags & IFF_MULTICAST)) {
   2878		sh_eth_tsu_purge_mcast(ndev);
   2879		mcast_all = 1;
   2880	}
   2881	if (ndev->flags & IFF_ALLMULTI) {
   2882		sh_eth_tsu_purge_mcast(ndev);
   2883		ecmr_bits &= ~ECMR_MCT;
   2884		mcast_all = 1;
   2885	}
   2886
   2887	if (ndev->flags & IFF_PROMISC) {
   2888		sh_eth_tsu_purge_all(ndev);
   2889		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
   2890	} else if (mdp->cd->tsu) {
   2891		struct netdev_hw_addr *ha;
   2892		netdev_for_each_mc_addr(ha, ndev) {
   2893			if (mcast_all && is_multicast_ether_addr(ha->addr))
   2894				continue;
   2895
   2896			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
   2897				if (!mcast_all) {
   2898					sh_eth_tsu_purge_mcast(ndev);
   2899					ecmr_bits &= ~ECMR_MCT;
   2900					mcast_all = 1;
   2901				}
   2902			}
   2903		}
   2904	}
   2905
   2906	/* update the ethernet mode */
   2907	sh_eth_write(ndev, ecmr_bits, ECMR);
   2908
   2909	spin_unlock_irqrestore(&mdp->lock, flags);
   2910}
   2911
   2912static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
   2913{
   2914	struct sh_eth_private *mdp = netdev_priv(ndev);
   2915	unsigned long flags;
   2916
   2917	spin_lock_irqsave(&mdp->lock, flags);
   2918
   2919	/* Disable TX and RX */
   2920	sh_eth_rcv_snd_disable(ndev);
   2921
   2922	/* Modify RX Checksum setting */
   2923	sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
   2924
   2925	/* Enable TX and RX */
   2926	sh_eth_rcv_snd_enable(ndev);
   2927
   2928	spin_unlock_irqrestore(&mdp->lock, flags);
   2929}
   2930
   2931static int sh_eth_set_features(struct net_device *ndev,
   2932			       netdev_features_t features)
   2933{
   2934	netdev_features_t changed = ndev->features ^ features;
   2935	struct sh_eth_private *mdp = netdev_priv(ndev);
   2936
   2937	if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
   2938		sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
   2939
   2940	ndev->features = features;
   2941
   2942	return 0;
   2943}
   2944
   2945static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
   2946{
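        	/* Each of the two ports sharing a TSU has its own VTAG register */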
   2947	if (!mdp->port)
   2948		return TSU_VTAG0;
   2949	else
   2950		return TSU_VTAG1;
   2951}
   2952
   2953static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
   2954				  __be16 proto, u16 vid)
   2955{
   2956	struct sh_eth_private *mdp = netdev_priv(ndev);
   2957	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
   2958
   2959	if (unlikely(!mdp->cd->tsu))
   2960		return -EPERM;
   2961
   2962	/* No filtering if vid = 0 */
   2963	if (!vid)
   2964		return 0;
   2965
   2966	mdp->vlan_num_ids++;
   2967
    2968		/* The controller has a single VLAN tag HW filter. So, if more
    2969		 * than one VID is added, the driver disables the filter entirely.
    2970		 */
   2971	if (mdp->vlan_num_ids > 1) {
   2972		/* disable VLAN filter */
   2973		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
   2974		return 0;
   2975	}
   2976
   2977	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
   2978			 vtag_reg_index);
   2979
   2980	return 0;
   2981}
   2982
   2983static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
   2984				   __be16 proto, u16 vid)
   2985{
   2986	struct sh_eth_private *mdp = netdev_priv(ndev);
   2987	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
   2988
   2989	if (unlikely(!mdp->cd->tsu))
   2990		return -EPERM;
   2991
   2992	/* No filtering if vid = 0 */
   2993	if (!vid)
   2994		return 0;
   2995
   2996	mdp->vlan_num_ids--;
   2997	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
   2998
   2999	return 0;
   3000}
   3001
   3002/* SuperH's TSU register init function */
   3003static void sh_eth_tsu_init(struct sh_eth_private *mdp)
   3004{
   3005	if (!mdp->cd->dual_port) {
   3006		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
   3007		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
   3008				 TSU_FWSLC);	/* Enable POST registers */
   3009		return;
   3010	}
   3011
   3012	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
   3013	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
   3014	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
   3015	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
   3016	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
   3017	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
   3018	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
   3019	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
   3020	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
   3021	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
   3022	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
   3023	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
   3024	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
   3025	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
   3026	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
   3027	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
   3028	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
   3029	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
   3030	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
   3031}
   3032
   3033/* MDIO bus release function */
   3034static int sh_mdio_release(struct sh_eth_private *mdp)
   3035{
   3036	/* unregister mdio bus */
   3037	mdiobus_unregister(mdp->mii_bus);
   3038
   3039	/* free bitbang info */
   3040	free_mdio_bitbang(mdp->mii_bus);
   3041
   3042	return 0;
   3043}
   3044
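        /* MDIO bitbang accessors, wrapped so the device stays powered
         * (runtime PM) for the duration of the bus access.
         */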
   3045static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
   3046{
   3047	int res;
   3048
   3049	pm_runtime_get_sync(bus->parent);
   3050	res = mdiobb_read(bus, phy, reg);
   3051	pm_runtime_put(bus->parent);
   3052
   3053	return res;
   3054}
   3055
   3056static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
   3057{
   3058	int res;
   3059
   3060	pm_runtime_get_sync(bus->parent);
   3061	res = mdiobb_write(bus, phy, reg, val);
   3062	pm_runtime_put(bus->parent);
   3063
   3064	return res;
   3065}
   3066
   3067/* MDIO bus init function */
   3068static int sh_mdio_init(struct sh_eth_private *mdp,
   3069			struct sh_eth_plat_data *pd)
   3070{
   3071	int ret;
   3072	struct bb_info *bitbang;
   3073	struct platform_device *pdev = mdp->pdev;
   3074	struct device *dev = &mdp->pdev->dev;
   3075
   3076	/* create bit control struct for PHY */
   3077	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
   3078	if (!bitbang)
   3079		return -ENOMEM;
   3080
   3081	/* bitbang init */
   3082	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
   3083	bitbang->set_gate = pd->set_mdio_gate;
   3084	bitbang->ctrl.ops = &bb_ops;
   3085
   3086	/* MII controller setting */
   3087	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
   3088	if (!mdp->mii_bus)
   3089		return -ENOMEM;
   3090
   3091	/* Wrap accessors with Runtime PM-aware ops */
   3092	mdp->mii_bus->read = sh_mdiobb_read;
   3093	mdp->mii_bus->write = sh_mdiobb_write;
   3094
   3095	/* Hook up MII support for ethtool */
   3096	mdp->mii_bus->name = "sh_mii";
   3097	mdp->mii_bus->parent = dev;
   3098	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
   3099		 pdev->name, pdev->id);
   3100
   3101	/* register MDIO bus */
   3102	if (pd->phy_irq > 0)
   3103		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
   3104
   3105	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
   3106	if (ret)
   3107		goto out_free_bus;
   3108
   3109	return 0;
   3110
   3111out_free_bus:
   3112	free_mdio_bitbang(mdp->mii_bus);
   3113	return ret;
   3114}
   3115
   3116static const u16 *sh_eth_get_register_offset(int register_type)
   3117{
   3118	const u16 *reg_offset = NULL;
   3119
   3120	switch (register_type) {
   3121	case SH_ETH_REG_GIGABIT:
   3122		reg_offset = sh_eth_offset_gigabit;
   3123		break;
   3124	case SH_ETH_REG_FAST_RCAR:
   3125		reg_offset = sh_eth_offset_fast_rcar;
   3126		break;
   3127	case SH_ETH_REG_FAST_SH4:
   3128		reg_offset = sh_eth_offset_fast_sh4;
   3129		break;
   3130	case SH_ETH_REG_FAST_SH3_SH2:
   3131		reg_offset = sh_eth_offset_fast_sh3_sh2;
   3132		break;
   3133	}
   3134
   3135	return reg_offset;
   3136}
   3137
   3138static const struct net_device_ops sh_eth_netdev_ops = {
   3139	.ndo_open		= sh_eth_open,
   3140	.ndo_stop		= sh_eth_close,
   3141	.ndo_start_xmit		= sh_eth_start_xmit,
   3142	.ndo_get_stats		= sh_eth_get_stats,
   3143	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
   3144	.ndo_tx_timeout		= sh_eth_tx_timeout,
   3145	.ndo_eth_ioctl		= phy_do_ioctl_running,
   3146	.ndo_change_mtu		= sh_eth_change_mtu,
   3147	.ndo_validate_addr	= eth_validate_addr,
   3148	.ndo_set_mac_address	= eth_mac_addr,
   3149	.ndo_set_features	= sh_eth_set_features,
   3150};
   3151
   3152static const struct net_device_ops sh_eth_netdev_ops_tsu = {
   3153	.ndo_open		= sh_eth_open,
   3154	.ndo_stop		= sh_eth_close,
   3155	.ndo_start_xmit		= sh_eth_start_xmit,
   3156	.ndo_get_stats		= sh_eth_get_stats,
   3157	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
   3158	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
   3159	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
   3160	.ndo_tx_timeout		= sh_eth_tx_timeout,
   3161	.ndo_eth_ioctl		= phy_do_ioctl_running,
   3162	.ndo_change_mtu		= sh_eth_change_mtu,
   3163	.ndo_validate_addr	= eth_validate_addr,
   3164	.ndo_set_mac_address	= eth_mac_addr,
   3165	.ndo_set_features	= sh_eth_set_features,
   3166};
   3167
   3168#ifdef CONFIG_OF
   3169static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
   3170{
   3171	struct device_node *np = dev->of_node;
   3172	struct sh_eth_plat_data *pdata;
   3173	phy_interface_t interface;
   3174	int ret;
   3175
   3176	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
   3177	if (!pdata)
   3178		return NULL;
   3179
   3180	ret = of_get_phy_mode(np, &interface);
   3181	if (ret)
   3182		return NULL;
   3183	pdata->phy_interface = interface;
   3184
   3185	of_get_mac_address(np, pdata->mac_addr);
   3186
   3187	pdata->no_ether_link =
   3188		of_property_read_bool(np, "renesas,no-ether-link");
   3189	pdata->ether_link_active_low =
   3190		of_property_read_bool(np, "renesas,ether-link-active-low");
   3191
   3192	return pdata;
   3193}
   3194
   3195static const struct of_device_id sh_eth_match_table[] = {
   3196	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
   3197	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
   3198	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
   3199	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
   3200	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
   3201	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
   3202	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
   3203	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
   3204	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
   3205	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
   3206	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
   3207	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
   3208	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
   3209	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
   3210	{ }
   3211};
   3212MODULE_DEVICE_TABLE(of, sh_eth_match_table);
   3213#else
   3214static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
   3215{
   3216	return NULL;
   3217}
   3218#endif
   3219
   3220static int sh_eth_drv_probe(struct platform_device *pdev)
   3221{
   3222	struct resource *res;
   3223	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
   3224	const struct platform_device_id *id = platform_get_device_id(pdev);
   3225	struct sh_eth_private *mdp;
   3226	struct net_device *ndev;
   3227	int ret;
   3228
   3229	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
   3230	if (!ndev)
   3231		return -ENOMEM;
   3232
   3233	pm_runtime_enable(&pdev->dev);
   3234	pm_runtime_get_sync(&pdev->dev);
   3235
   3236	ret = platform_get_irq(pdev, 0);
   3237	if (ret < 0)
   3238		goto out_release;
   3239	ndev->irq = ret;
   3240
   3241	SET_NETDEV_DEV(ndev, &pdev->dev);
   3242
   3243	mdp = netdev_priv(ndev);
   3244	mdp->num_tx_ring = TX_RING_SIZE;
   3245	mdp->num_rx_ring = RX_RING_SIZE;
   3246	mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
   3247	if (IS_ERR(mdp->addr)) {
   3248		ret = PTR_ERR(mdp->addr);
   3249		goto out_release;
   3250	}
   3251
   3252	ndev->base_addr = res->start;
   3253
   3254	spin_lock_init(&mdp->lock);
   3255	mdp->pdev = pdev;
   3256
   3257	if (pdev->dev.of_node)
   3258		pd = sh_eth_parse_dt(&pdev->dev);
   3259	if (!pd) {
   3260		dev_err(&pdev->dev, "no platform data\n");
   3261		ret = -EINVAL;
   3262		goto out_release;
   3263	}
   3264
   3265	/* get PHY ID */
   3266	mdp->phy_id = pd->phy;
   3267	mdp->phy_interface = pd->phy_interface;
   3268	mdp->no_ether_link = pd->no_ether_link;
   3269	mdp->ether_link_active_low = pd->ether_link_active_low;
   3270
   3271	/* set cpu data */
   3272	if (id)
   3273		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
   3274	else
   3275		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
   3276
   3277	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
   3278	if (!mdp->reg_offset) {
   3279		dev_err(&pdev->dev, "Unknown register type (%d)\n",
   3280			mdp->cd->register_type);
   3281		ret = -EINVAL;
   3282		goto out_release;
   3283	}
   3284	sh_eth_set_default_cpu_data(mdp->cd);
   3285
   3286	/* User's manual states max MTU should be 2048 but due to the
   3287	 * alignment calculations in sh_eth_ring_init() the practical
   3288	 * MTU is a bit less. Maybe this can be optimized some more.
   3289	 */
   3290	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
   3291	ndev->min_mtu = ETH_MIN_MTU;
   3292
   3293	if (mdp->cd->rx_csum) {
   3294		ndev->features = NETIF_F_RXCSUM;
   3295		ndev->hw_features = NETIF_F_RXCSUM;
   3296	}
   3297
   3298	/* set function */
   3299	if (mdp->cd->tsu)
   3300		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
   3301	else
   3302		ndev->netdev_ops = &sh_eth_netdev_ops;
   3303	ndev->ethtool_ops = &sh_eth_ethtool_ops;
   3304	ndev->watchdog_timeo = TX_TIMEOUT;
   3305
   3306	/* debug message level */
   3307	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
   3308
   3309	/* read and set MAC address */
   3310	read_mac_address(ndev, pd->mac_addr);
   3311	if (!is_valid_ether_addr(ndev->dev_addr)) {
   3312		dev_warn(&pdev->dev,
   3313			 "no valid MAC address supplied, using a random one.\n");
   3314		eth_hw_addr_random(ndev);
   3315	}
   3316
   3317	if (mdp->cd->tsu) {
   3318		int port = pdev->id < 0 ? 0 : pdev->id % 2;
   3319		struct resource *rtsu;
   3320
   3321		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
   3322		if (!rtsu) {
   3323			dev_err(&pdev->dev, "no TSU resource\n");
   3324			ret = -ENODEV;
   3325			goto out_release;
   3326		}
    3327			/* We can only request the TSU region for the first port of
    3328			 * the two sharing this TSU for the probe to succeed...
    3329			 */
   3330		if (port == 0 &&
   3331		    !devm_request_mem_region(&pdev->dev, rtsu->start,
   3332					     resource_size(rtsu),
   3333					     dev_name(&pdev->dev))) {
   3334			dev_err(&pdev->dev, "can't request TSU resource.\n");
   3335			ret = -EBUSY;
   3336			goto out_release;
   3337		}
   3338		/* ioremap the TSU registers */
   3339		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
   3340					     resource_size(rtsu));
   3341		if (!mdp->tsu_addr) {
   3342			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
   3343			ret = -ENOMEM;
   3344			goto out_release;
   3345		}
   3346		mdp->port = port;
   3347		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
   3348
   3349		/* Need to init only the first port of the two sharing a TSU */
   3350		if (port == 0) {
   3351			if (mdp->cd->chip_reset)
   3352				mdp->cd->chip_reset(ndev);
   3353
    3354				/* TSU init (first port only) */
   3355			sh_eth_tsu_init(mdp);
   3356		}
   3357	}
   3358
   3359	if (mdp->cd->rmiimode)
   3360		sh_eth_write(ndev, 0x1, RMIIMODE);
   3361
   3362	/* MDIO bus init */
   3363	ret = sh_mdio_init(mdp, pd);
   3364	if (ret) {
   3365		dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
   3366		goto out_release;
   3367	}
   3368
   3369	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
   3370
   3371	/* network device register */
   3372	ret = register_netdev(ndev);
   3373	if (ret)
   3374		goto out_napi_del;
   3375
   3376	if (mdp->cd->magic)
   3377		device_set_wakeup_capable(&pdev->dev, 1);
   3378
   3379	/* print device information */
   3380	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
   3381		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
   3382
   3383	pm_runtime_put(&pdev->dev);
   3384	platform_set_drvdata(pdev, ndev);
   3385
   3386	return ret;
   3387
   3388out_napi_del:
   3389	netif_napi_del(&mdp->napi);
   3390	sh_mdio_release(mdp);
   3391
   3392out_release:
   3393	/* net_dev free */
   3394	free_netdev(ndev);
   3395
   3396	pm_runtime_put(&pdev->dev);
   3397	pm_runtime_disable(&pdev->dev);
   3398	return ret;
   3399}
   3400
   3401static int sh_eth_drv_remove(struct platform_device *pdev)
   3402{
   3403	struct net_device *ndev = platform_get_drvdata(pdev);
   3404	struct sh_eth_private *mdp = netdev_priv(ndev);
   3405
   3406	unregister_netdev(ndev);
   3407	netif_napi_del(&mdp->napi);
   3408	sh_mdio_release(mdp);
   3409	pm_runtime_disable(&pdev->dev);
   3410	free_netdev(ndev);
   3411
   3412	return 0;
   3413}
   3414
   3415#ifdef CONFIG_PM
   3416#ifdef CONFIG_PM_SLEEP
   3417static int sh_eth_wol_setup(struct net_device *ndev)
   3418{
   3419	struct sh_eth_private *mdp = netdev_priv(ndev);
   3420
   3421	/* Only allow ECI interrupts */
   3422	synchronize_irq(ndev->irq);
   3423	napi_disable(&mdp->napi);
   3424	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
   3425
   3426	/* Enable MagicPacket */
   3427	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
   3428
   3429	return enable_irq_wake(ndev->irq);
   3430}
   3431
   3432static int sh_eth_wol_restore(struct net_device *ndev)
   3433{
   3434	struct sh_eth_private *mdp = netdev_priv(ndev);
   3435	int ret;
   3436
   3437	napi_enable(&mdp->napi);
   3438
   3439	/* Disable MagicPacket */
   3440	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
   3441
   3442	/* The device needs to be reset to restore MagicPacket logic
   3443	 * for next wakeup. If we close and open the device it will
   3444	 * both be reset and all registers restored. This is what
   3445	 * happens during suspend and resume without WoL enabled.
   3446	 */
   3447	sh_eth_close(ndev);
   3448	ret = sh_eth_open(ndev);
   3449	if (ret < 0)
   3450		return ret;
   3451
   3452	return disable_irq_wake(ndev->irq);
   3453}
   3454
   3455static int sh_eth_suspend(struct device *dev)
   3456{
   3457	struct net_device *ndev = dev_get_drvdata(dev);
   3458	struct sh_eth_private *mdp = netdev_priv(ndev);
   3459	int ret;
   3460
   3461	if (!netif_running(ndev))
   3462		return 0;
   3463
   3464	netif_device_detach(ndev);
   3465
   3466	if (mdp->wol_enabled)
   3467		ret = sh_eth_wol_setup(ndev);
   3468	else
   3469		ret = sh_eth_close(ndev);
   3470
   3471	return ret;
   3472}
   3473
   3474static int sh_eth_resume(struct device *dev)
   3475{
   3476	struct net_device *ndev = dev_get_drvdata(dev);
   3477	struct sh_eth_private *mdp = netdev_priv(ndev);
   3478	int ret;
   3479
   3480	if (!netif_running(ndev))
   3481		return 0;
   3482
   3483	if (mdp->wol_enabled)
   3484		ret = sh_eth_wol_restore(ndev);
   3485	else
   3486		ret = sh_eth_open(ndev);
   3487
   3488	if (ret < 0)
   3489		return ret;
   3490
   3491	netif_device_attach(ndev);
   3492
   3493	return ret;
   3494}
   3495#endif
   3496
   3497static int sh_eth_runtime_nop(struct device *dev)
   3498{
   3499	/* Runtime PM callback shared between ->runtime_suspend()
   3500	 * and ->runtime_resume(). Simply returns success.
   3501	 *
   3502	 * This driver re-initializes all registers after
   3503	 * pm_runtime_get_sync() anyway so there is no need
   3504	 * to save and restore registers here.
   3505	 */
   3506	return 0;
   3507}
   3508
   3509static const struct dev_pm_ops sh_eth_dev_pm_ops = {
   3510	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
   3511	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
   3512};
   3513#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
   3514#else
   3515#define SH_ETH_PM_OPS NULL
   3516#endif
   3517
   3518static const struct platform_device_id sh_eth_id_table[] = {
   3519	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
   3520	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
   3521	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
   3522	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
   3523	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
   3524	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
   3525	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
   3526	{ }
   3527};
   3528MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
   3529
   3530static struct platform_driver sh_eth_driver = {
   3531	.probe = sh_eth_drv_probe,
   3532	.remove = sh_eth_drv_remove,
   3533	.id_table = sh_eth_id_table,
   3534	.driver = {
   3535		   .name = CARDNAME,
   3536		   .pm = SH_ETH_PM_OPS,
   3537		   .of_match_table = of_match_ptr(sh_eth_match_table),
   3538	},
   3539};
   3540
   3541module_platform_driver(sh_eth_driver);
   3542
   3543MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
   3544MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
   3545MODULE_LICENSE("GPL v2");