cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ravb_main.c (80715B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Renesas Ethernet AVB device driver
      3 *
      4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
      5 * Copyright (C) 2015 Renesas Solutions Corp.
      6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
      7 *
      8 * Based on the SuperH Ethernet driver
      9 */
     10
     11#include <linux/cache.h>
     12#include <linux/clk.h>
     13#include <linux/delay.h>
     14#include <linux/dma-mapping.h>
     15#include <linux/err.h>
     16#include <linux/etherdevice.h>
     17#include <linux/ethtool.h>
     18#include <linux/if_vlan.h>
     19#include <linux/kernel.h>
     20#include <linux/list.h>
     21#include <linux/module.h>
     22#include <linux/net_tstamp.h>
     23#include <linux/of.h>
     24#include <linux/of_device.h>
     25#include <linux/of_irq.h>
     26#include <linux/of_mdio.h>
     27#include <linux/of_net.h>
     28#include <linux/pm_runtime.h>
     29#include <linux/slab.h>
     30#include <linux/spinlock.h>
     31#include <linux/sys_soc.h>
     32#include <linux/reset.h>
     33#include <linux/math64.h>
     34
     35#include "ravb.h"
     36
     37#define RAVB_DEF_MSG_ENABLE \
     38		(NETIF_MSG_LINK	  | \
     39		 NETIF_MSG_TIMER  | \
     40		 NETIF_MSG_RX_ERR | \
     41		 NETIF_MSG_TX_ERR)
     42
     43static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
     44	"ch0", /* RAVB_BE */
     45	"ch1", /* RAVB_NC */
     46};
     47
     48static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
     49	"ch18", /* RAVB_BE */
     50	"ch19", /* RAVB_NC */
     51};
     52
     53void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
     54		 u32 set)
     55{
     56	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
     57}
     58
     59int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
     60{
     61	int i;
     62
     63	for (i = 0; i < 10000; i++) {
     64		if ((ravb_read(ndev, reg) & mask) == value)
     65			return 0;
     66		udelay(10);
     67	}
     68	return -ETIMEDOUT;
     69}
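/* ravb_wait() busy-polls @reg every 10 us for up to 10000 iterations
 * (about 100 ms in total) and returns -ETIMEDOUT if the @mask-ed bits
 * never reach @value. The usual pattern is "request, then wait", as in
 * ravb_config() below:
 *
 *	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
 *	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
 */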
     70
     71static int ravb_config(struct net_device *ndev)
     72{
     73	int error;
     74
     75	/* Set config mode */
     76	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
     77	/* Check if the operating mode is changed to the config mode */
     78	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
     79	if (error)
     80		netdev_err(ndev, "failed to switch device to config mode\n");
     81
     82	return error;
     83}
     84
     85static void ravb_set_rate_gbeth(struct net_device *ndev)
     86{
     87	struct ravb_private *priv = netdev_priv(ndev);
     88
     89	switch (priv->speed) {
     90	case 10:                /* 10BASE */
     91		ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
     92		break;
     93	case 100:               /* 100BASE */
     94		ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
     95		break;
     96	case 1000:              /* 1000BASE */
     97		ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
     98		break;
     99	}
    100}
    101
    102static void ravb_set_rate_rcar(struct net_device *ndev)
    103{
    104	struct ravb_private *priv = netdev_priv(ndev);
    105
    106	switch (priv->speed) {
    107	case 100:		/* 100BASE */
    108		ravb_write(ndev, GECMR_SPEED_100, GECMR);
    109		break;
    110	case 1000:		/* 1000BASE */
    111		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
    112		break;
    113	}
    114}
    115
    116static void ravb_set_buffer_align(struct sk_buff *skb)
    117{
    118	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
    119
    120	if (reserve)
    121		skb_reserve(skb, RAVB_ALIGN - reserve);
    122}
    123
    124/* Get MAC address from the MAC address registers
    125 *
    126 * Ethernet AVB device doesn't have ROM for MAC address.
    127 * This function gets the MAC address that was used by a bootloader.
    128 */
    129static void ravb_read_mac_address(struct device_node *np,
    130				  struct net_device *ndev)
    131{
    132	int ret;
    133
    134	ret = of_get_ethdev_address(np, ndev);
    135	if (ret) {
    136		u32 mahr = ravb_read(ndev, MAHR);
    137		u32 malr = ravb_read(ndev, MALR);
    138		u8 addr[ETH_ALEN];
    139
    140		addr[0] = (mahr >> 24) & 0xFF;
    141		addr[1] = (mahr >> 16) & 0xFF;
    142		addr[2] = (mahr >>  8) & 0xFF;
    143		addr[3] = (mahr >>  0) & 0xFF;
    144		addr[4] = (malr >>  8) & 0xFF;
    145		addr[5] = (malr >>  0) & 0xFF;
    146		eth_hw_addr_set(ndev, addr);
    147	}
    148}
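/* Register layout used above: MAHR carries the first four address octets
 * (addr[0] in bits 31:24 down to addr[3] in bits 7:0) and MALR carries the
 * last two (addr[4] in bits 15:8, addr[5] in bits 7:0). The E-MAC init
 * functions further down write the registers back in the same order.
 */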
    149
    150static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
    151{
    152	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
    153						 mdiobb);
    154
    155	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
    156}
    157
    158/* MDC pin control */
    159static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
    160{
    161	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
    162}
    163
    164/* Data I/O pin control */
    165static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
    166{
    167	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
    168}
    169
    170/* Set data bit */
    171static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
    172{
    173	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
    174}
    175
    176/* Get data bit */
    177static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
    178{
    179	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
    180						 mdiobb);
    181
    182	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
    183}
    184
    185/* MDIO bus control struct */
    186static const struct mdiobb_ops bb_ops = {
    187	.owner = THIS_MODULE,
    188	.set_mdc = ravb_set_mdc,
    189	.set_mdio_dir = ravb_set_mdio_dir,
    190	.set_mdio_data = ravb_set_mdio_data,
    191	.get_mdio_data = ravb_get_mdio_data,
    192};
    193
    194/* Free TX skb function for AVB-IP */
    195static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
    196{
    197	struct ravb_private *priv = netdev_priv(ndev);
    198	struct net_device_stats *stats = &priv->stats[q];
    199	unsigned int num_tx_desc = priv->num_tx_desc;
    200	struct ravb_tx_desc *desc;
    201	unsigned int entry;
    202	int free_num = 0;
    203	u32 size;
    204
    205	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
    206		bool txed;
    207
    208		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
    209					     num_tx_desc);
    210		desc = &priv->tx_ring[q][entry];
    211		txed = desc->die_dt == DT_FEMPTY;
    212		if (free_txed_only && !txed)
    213			break;
    214		/* Descriptor type must be checked before all other reads */
    215		dma_rmb();
    216		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
    217		/* Free the original skb. */
    218		if (priv->tx_skb[q][entry / num_tx_desc]) {
    219			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
    220					 size, DMA_TO_DEVICE);
    221			/* Last packet descriptor? */
    222			if (entry % num_tx_desc == num_tx_desc - 1) {
    223				entry /= num_tx_desc;
    224				dev_kfree_skb_any(priv->tx_skb[q][entry]);
    225				priv->tx_skb[q][entry] = NULL;
    226				if (txed)
    227					stats->tx_packets++;
    228			}
    229			free_num++;
    230		}
    231		if (txed)
    232			stats->tx_bytes += size;
    233		desc->die_dt = DT_EEMPTY;
    234	}
    235	return free_num;
    236}
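/* Note on ravb_tx_free(): when num_tx_desc > 1 each transmitted skb spans
 * num_tx_desc consecutive descriptors, so the skb itself is only released
 * once the last descriptor of the packet
 * (entry % num_tx_desc == num_tx_desc - 1) has been reclaimed; tx_packets
 * is counted once per packet while tx_bytes accumulates per descriptor.
 */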
    237
    238static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
    239{
    240	struct ravb_private *priv = netdev_priv(ndev);
    241	unsigned int ring_size;
    242	unsigned int i;
    243
    244	if (!priv->gbeth_rx_ring)
    245		return;
    246
    247	for (i = 0; i < priv->num_rx_ring[q]; i++) {
    248		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
    249
    250		if (!dma_mapping_error(ndev->dev.parent,
    251				       le32_to_cpu(desc->dptr)))
    252			dma_unmap_single(ndev->dev.parent,
    253					 le32_to_cpu(desc->dptr),
    254					 GBETH_RX_BUFF_MAX,
    255					 DMA_FROM_DEVICE);
    256	}
    257	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
    258	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
    259			  priv->rx_desc_dma[q]);
    260	priv->gbeth_rx_ring = NULL;
    261}
    262
    263static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
    264{
    265	struct ravb_private *priv = netdev_priv(ndev);
    266	unsigned int ring_size;
    267	unsigned int i;
    268
    269	if (!priv->rx_ring[q])
    270		return;
    271
    272	for (i = 0; i < priv->num_rx_ring[q]; i++) {
    273		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
    274
    275		if (!dma_mapping_error(ndev->dev.parent,
    276				       le32_to_cpu(desc->dptr)))
    277			dma_unmap_single(ndev->dev.parent,
    278					 le32_to_cpu(desc->dptr),
    279					 RX_BUF_SZ,
    280					 DMA_FROM_DEVICE);
    281	}
    282	ring_size = sizeof(struct ravb_ex_rx_desc) *
    283		    (priv->num_rx_ring[q] + 1);
    284	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
    285			  priv->rx_desc_dma[q]);
    286	priv->rx_ring[q] = NULL;
    287}
    288
    289/* Free skb's and DMA buffers for Ethernet AVB */
    290static void ravb_ring_free(struct net_device *ndev, int q)
    291{
    292	struct ravb_private *priv = netdev_priv(ndev);
    293	const struct ravb_hw_info *info = priv->info;
    294	unsigned int num_tx_desc = priv->num_tx_desc;
    295	unsigned int ring_size;
    296	unsigned int i;
    297
    298	info->rx_ring_free(ndev, q);
    299
    300	if (priv->tx_ring[q]) {
    301		ravb_tx_free(ndev, q, false);
    302
    303		ring_size = sizeof(struct ravb_tx_desc) *
    304			    (priv->num_tx_ring[q] * num_tx_desc + 1);
    305		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
    306				  priv->tx_desc_dma[q]);
    307		priv->tx_ring[q] = NULL;
    308	}
    309
    310	/* Free RX skb ringbuffer */
    311	if (priv->rx_skb[q]) {
    312		for (i = 0; i < priv->num_rx_ring[q]; i++)
    313			dev_kfree_skb(priv->rx_skb[q][i]);
    314	}
    315	kfree(priv->rx_skb[q]);
    316	priv->rx_skb[q] = NULL;
    317
    318	/* Free aligned TX buffers */
    319	kfree(priv->tx_align[q]);
    320	priv->tx_align[q] = NULL;
    321
    322	/* Free TX skb ringbuffer.
    323	 * SKBs are freed by ravb_tx_free() call above.
    324	 */
    325	kfree(priv->tx_skb[q]);
    326	priv->tx_skb[q] = NULL;
    327}
    328
    329static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
    330{
    331	struct ravb_private *priv = netdev_priv(ndev);
    332	struct ravb_rx_desc *rx_desc;
    333	unsigned int rx_ring_size;
    334	dma_addr_t dma_addr;
    335	unsigned int i;
    336
    337	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
    338	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
    339	/* Build RX ring buffer */
    340	for (i = 0; i < priv->num_rx_ring[q]; i++) {
    341		/* RX descriptor */
    342		rx_desc = &priv->gbeth_rx_ring[i];
    343		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
    344		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
    345					  GBETH_RX_BUFF_MAX,
    346					  DMA_FROM_DEVICE);
    347		/* We just set the data size to 0 for a failed mapping which
    348		 * should prevent DMA from happening...
    349		 */
    350		if (dma_mapping_error(ndev->dev.parent, dma_addr))
    351			rx_desc->ds_cc = cpu_to_le16(0);
    352		rx_desc->dptr = cpu_to_le32(dma_addr);
    353		rx_desc->die_dt = DT_FEMPTY;
    354	}
    355	rx_desc = &priv->gbeth_rx_ring[i];
    356	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
    357	rx_desc->die_dt = DT_LINKFIX; /* type */
    358}
    359
    360static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
    361{
    362	struct ravb_private *priv = netdev_priv(ndev);
    363	struct ravb_ex_rx_desc *rx_desc;
    364	unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
    365	dma_addr_t dma_addr;
    366	unsigned int i;
    367
    368	memset(priv->rx_ring[q], 0, rx_ring_size);
    369	/* Build RX ring buffer */
    370	for (i = 0; i < priv->num_rx_ring[q]; i++) {
    371		/* RX descriptor */
    372		rx_desc = &priv->rx_ring[q][i];
    373		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
    374		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
    375					  RX_BUF_SZ,
    376					  DMA_FROM_DEVICE);
    377		/* We just set the data size to 0 for a failed mapping which
    378		 * should prevent DMA from happening...
    379		 */
    380		if (dma_mapping_error(ndev->dev.parent, dma_addr))
    381			rx_desc->ds_cc = cpu_to_le16(0);
    382		rx_desc->dptr = cpu_to_le32(dma_addr);
    383		rx_desc->die_dt = DT_FEMPTY;
    384	}
    385	rx_desc = &priv->rx_ring[q][i];
    386	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
    387	rx_desc->die_dt = DT_LINKFIX; /* type */
    388}
    389
    390/* Format skb and descriptor buffer for Ethernet AVB */
    391static void ravb_ring_format(struct net_device *ndev, int q)
    392{
    393	struct ravb_private *priv = netdev_priv(ndev);
    394	const struct ravb_hw_info *info = priv->info;
    395	unsigned int num_tx_desc = priv->num_tx_desc;
    396	struct ravb_tx_desc *tx_desc;
    397	struct ravb_desc *desc;
    398	unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
    399				    num_tx_desc;
    400	unsigned int i;
    401
    402	priv->cur_rx[q] = 0;
    403	priv->cur_tx[q] = 0;
    404	priv->dirty_rx[q] = 0;
    405	priv->dirty_tx[q] = 0;
    406
    407	info->rx_ring_format(ndev, q);
    408
    409	memset(priv->tx_ring[q], 0, tx_ring_size);
    410	/* Build TX ring buffer */
    411	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
    412	     i++, tx_desc++) {
    413		tx_desc->die_dt = DT_EEMPTY;
    414		if (num_tx_desc > 1) {
    415			tx_desc++;
    416			tx_desc->die_dt = DT_EEMPTY;
    417		}
    418	}
    419	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
    420	tx_desc->die_dt = DT_LINKFIX; /* type */
    421
    422	/* RX descriptor base address for best effort */
    423	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
    424	desc->die_dt = DT_LINKFIX; /* type */
    425	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
    426
    427	/* TX descriptor base address for best effort */
    428	desc = &priv->desc_bat[q];
    429	desc->die_dt = DT_LINKFIX; /* type */
    430	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
    431}
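/* Both rings are terminated with a DT_LINKFIX descriptor whose dptr points
 * back at the ring base (rx_desc_dma/tx_desc_dma), so the DMAC follows the
 * link and wraps around instead of running off the end of the ring. The
 * descriptor base address table (desc_bat) is then patched so the hardware
 * can find the first descriptor of each queue.
 */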
    432
    433static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
    434{
    435	struct ravb_private *priv = netdev_priv(ndev);
    436	unsigned int ring_size;
    437
    438	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
    439
    440	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
    441						 &priv->rx_desc_dma[q],
    442						 GFP_KERNEL);
    443	return priv->gbeth_rx_ring;
    444}
    445
    446static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
    447{
    448	struct ravb_private *priv = netdev_priv(ndev);
    449	unsigned int ring_size;
    450
    451	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
    452
    453	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
    454					      &priv->rx_desc_dma[q],
    455					      GFP_KERNEL);
    456	return priv->rx_ring[q];
    457}
    458
    459/* Init skb and descriptor buffer for Ethernet AVB */
    460static int ravb_ring_init(struct net_device *ndev, int q)
    461{
    462	struct ravb_private *priv = netdev_priv(ndev);
    463	const struct ravb_hw_info *info = priv->info;
    464	unsigned int num_tx_desc = priv->num_tx_desc;
    465	unsigned int ring_size;
    466	struct sk_buff *skb;
    467	unsigned int i;
    468
    469	/* Allocate RX and TX skb rings */
    470	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
    471				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
    472	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
    473				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
    474	if (!priv->rx_skb[q] || !priv->tx_skb[q])
    475		goto error;
    476
    477	for (i = 0; i < priv->num_rx_ring[q]; i++) {
    478		skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
    479		if (!skb)
    480			goto error;
    481		ravb_set_buffer_align(skb);
    482		priv->rx_skb[q][i] = skb;
    483	}
    484
    485	if (num_tx_desc > 1) {
    486		/* Allocate rings for the aligned buffers */
    487		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
    488					    DPTR_ALIGN - 1, GFP_KERNEL);
    489		if (!priv->tx_align[q])
    490			goto error;
    491	}
    492
    493	/* Allocate all RX descriptors. */
    494	if (!info->alloc_rx_desc(ndev, q))
    495		goto error;
    496
    497	priv->dirty_rx[q] = 0;
    498
    499	/* Allocate all TX descriptors. */
    500	ring_size = sizeof(struct ravb_tx_desc) *
    501		    (priv->num_tx_ring[q] * num_tx_desc + 1);
    502	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
    503					      &priv->tx_desc_dma[q],
    504					      GFP_KERNEL);
    505	if (!priv->tx_ring[q])
    506		goto error;
    507
    508	return 0;
    509
    510error:
    511	ravb_ring_free(ndev, q);
    512
    513	return -ENOMEM;
    514}
    515
    516static void ravb_emac_init_gbeth(struct net_device *ndev)
    517{
    518	struct ravb_private *priv = netdev_priv(ndev);
    519
    520	/* Receive frame limit set register */
    521	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
    522
    523	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
    524	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
    525			 ECMR_TE | ECMR_RE | ECMR_RCPT |
    526			 ECMR_TXF | ECMR_RXF, ECMR);
    527
    528	ravb_set_rate_gbeth(ndev);
    529
    530	/* Set MAC address */
    531	ravb_write(ndev,
    532		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
    533		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
    534	ravb_write(ndev, (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
    535
    536	/* E-MAC status register clear */
    537	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
    538	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
    539
    540	/* E-MAC interrupt enable register */
    541	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
    542
    543	ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
    544}
    545
    546static void ravb_emac_init_rcar(struct net_device *ndev)
    547{
    548	/* Receive frame limit set register */
    549	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
    550
    551	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
    552	ravb_write(ndev, ECMR_ZPF | ECMR_DM |
    553		   (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
    554		   ECMR_TE | ECMR_RE, ECMR);
    555
    556	ravb_set_rate_rcar(ndev);
    557
    558	/* Set MAC address */
    559	ravb_write(ndev,
    560		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
    561		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
    562	ravb_write(ndev,
    563		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);
    564
    565	/* E-MAC status register clear */
    566	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
    567
    568	/* E-MAC interrupt enable register */
    569	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
    570}
    571
    572/* E-MAC init function */
    573static void ravb_emac_init(struct net_device *ndev)
    574{
    575	struct ravb_private *priv = netdev_priv(ndev);
    576	const struct ravb_hw_info *info = priv->info;
    577
    578	info->emac_init(ndev);
    579}
    580
    581static int ravb_dmac_init_gbeth(struct net_device *ndev)
    582{
    583	int error;
    584
    585	error = ravb_ring_init(ndev, RAVB_BE);
    586	if (error)
    587		return error;
    588
    589	/* Descriptor format */
    590	ravb_ring_format(ndev, RAVB_BE);
    591
    592	/* Set DMAC RX */
    593	ravb_write(ndev, 0x60000000, RCR);
    594
    595	/* Set Max Frame Length (RTC) */
    596	ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
    597
    598	/* Set FIFO size */
    599	ravb_write(ndev, 0x00222200, TGC);
    600
    601	ravb_write(ndev, 0, TCCR);
    602
    603	/* Frame receive */
    604	ravb_write(ndev, RIC0_FRE0, RIC0);
    605	/* Disable FIFO full warning */
    606	ravb_write(ndev, 0x0, RIC1);
    607	/* Receive FIFO full error, descriptor empty */
    608	ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
    609
    610	ravb_write(ndev, TIC_FTE0, TIC);
    611
    612	return 0;
    613}
    614
    615static int ravb_dmac_init_rcar(struct net_device *ndev)
    616{
    617	struct ravb_private *priv = netdev_priv(ndev);
    618	const struct ravb_hw_info *info = priv->info;
    619	int error;
    620
    621	error = ravb_ring_init(ndev, RAVB_BE);
    622	if (error)
    623		return error;
    624	error = ravb_ring_init(ndev, RAVB_NC);
    625	if (error) {
    626		ravb_ring_free(ndev, RAVB_BE);
    627		return error;
    628	}
    629
    630	/* Descriptor format */
    631	ravb_ring_format(ndev, RAVB_BE);
    632	ravb_ring_format(ndev, RAVB_NC);
    633
    634	/* Set AVB RX */
    635	ravb_write(ndev,
    636		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
    637
    638	/* Set FIFO size */
    639	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
    640
    641	/* Timestamp enable */
    642	ravb_write(ndev, TCCR_TFEN, TCCR);
    643
    644	/* Interrupt init: */
    645	if (info->multi_irqs) {
    646		/* Clear DIL.DPLx */
    647		ravb_write(ndev, 0, DIL);
    648		/* Set queue specific interrupt */
    649		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
    650	}
    651	/* Frame receive */
    652	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
    653	/* Disable FIFO full warning */
    654	ravb_write(ndev, 0, RIC1);
    655	/* Receive FIFO full error, descriptor empty */
    656	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
    657	/* Frame transmitted, timestamp FIFO updated */
    658	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
    659
    660	return 0;
    661}
    662
    663/* Device init function for Ethernet AVB */
    664static int ravb_dmac_init(struct net_device *ndev)
    665{
    666	struct ravb_private *priv = netdev_priv(ndev);
    667	const struct ravb_hw_info *info = priv->info;
    668	int error;
    669
    670	/* Set CONFIG mode */
    671	error = ravb_config(ndev);
    672	if (error)
    673		return error;
    674
    675	error = info->dmac_init(ndev);
    676	if (error)
    677		return error;
    678
    679	/* Setting the control will start the AVB-DMAC process. */
    680	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
    681
    682	return 0;
    683}
    684
    685static void ravb_get_tx_tstamp(struct net_device *ndev)
    686{
    687	struct ravb_private *priv = netdev_priv(ndev);
    688	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
    689	struct skb_shared_hwtstamps shhwtstamps;
    690	struct sk_buff *skb;
    691	struct timespec64 ts;
    692	u16 tag, tfa_tag;
    693	int count;
    694	u32 tfa2;
    695
    696	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
    697	while (count--) {
    698		tfa2 = ravb_read(ndev, TFA2);
    699		tfa_tag = (tfa2 & TFA2_TST) >> 16;
    700		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
    701		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
    702			    ravb_read(ndev, TFA1);
    703		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
    704		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
    705		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
    706					 list) {
    707			skb = ts_skb->skb;
    708			tag = ts_skb->tag;
    709			list_del(&ts_skb->list);
    710			kfree(ts_skb);
    711			if (tag == tfa_tag) {
    712				skb_tstamp_tx(skb, &shhwtstamps);
    713				dev_consume_skb_any(skb);
    714				break;
    715			} else {
    716				dev_kfree_skb_any(skb);
    717			}
    718		}
    719		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
    720	}
    721}
    722
    723static void ravb_rx_csum(struct sk_buff *skb)
    724{
    725	u8 *hw_csum;
    726
    727	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
    728	 * appended to packet data
    729	 */
    730	if (unlikely(skb->len < sizeof(__sum16)))
    731		return;
    732	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
    733	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
    734	skb->ip_summed = CHECKSUM_COMPLETE;
    735	skb_trim(skb, skb->len - sizeof(__sum16));
    736}
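/* The hardware appends the 2-byte checksum to the received frame data, so
 * ravb_rx_csum() copies it into skb->csum, marks the skb CHECKSUM_COMPLETE
 * and trims those two bytes off again so the stack never sees them as
 * payload.
 */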
    737
    738static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
    739					  struct ravb_rx_desc *desc)
    740{
    741	struct ravb_private *priv = netdev_priv(ndev);
    742	struct sk_buff *skb;
    743
    744	skb = priv->rx_skb[RAVB_BE][entry];
    745	priv->rx_skb[RAVB_BE][entry] = NULL;
    746	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
    747			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
    748
    749	return skb;
    750}
    751
    752/* Packet receive function for Gigabit Ethernet */
    753static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
    754{
    755	struct ravb_private *priv = netdev_priv(ndev);
    756	const struct ravb_hw_info *info = priv->info;
    757	struct net_device_stats *stats;
    758	struct ravb_rx_desc *desc;
    759	struct sk_buff *skb;
    760	dma_addr_t dma_addr;
    761	u8  desc_status;
    762	int boguscnt;
    763	u16 pkt_len;
    764	u8  die_dt;
    765	int entry;
    766	int limit;
    767
    768	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
    769	boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
    770	stats = &priv->stats[q];
    771
    772	boguscnt = min(boguscnt, *quota);
    773	limit = boguscnt;
    774	desc = &priv->gbeth_rx_ring[entry];
    775	while (desc->die_dt != DT_FEMPTY) {
    776		/* Descriptor type must be checked before all other reads */
    777		dma_rmb();
    778		desc_status = desc->msc;
    779		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
    780
    781		if (--boguscnt < 0)
    782			break;
    783
    784		/* We use 0-byte descriptors to mark the DMA mapping errors */
    785		if (!pkt_len)
    786			continue;
    787
    788		if (desc_status & MSC_MC)
    789			stats->multicast++;
    790
    791		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
    792			stats->rx_errors++;
    793			if (desc_status & MSC_CRC)
    794				stats->rx_crc_errors++;
    795			if (desc_status & MSC_RFE)
    796				stats->rx_frame_errors++;
    797			if (desc_status & (MSC_RTLF | MSC_RTSF))
    798				stats->rx_length_errors++;
    799			if (desc_status & MSC_CEEF)
    800				stats->rx_missed_errors++;
    801		} else {
    802			die_dt = desc->die_dt & 0xF0;
    803			switch (die_dt) {
    804			case DT_FSINGLE:
    805				skb = ravb_get_skb_gbeth(ndev, entry, desc);
    806				skb_put(skb, pkt_len);
    807				skb->protocol = eth_type_trans(skb, ndev);
    808				napi_gro_receive(&priv->napi[q], skb);
    809				stats->rx_packets++;
    810				stats->rx_bytes += pkt_len;
    811				break;
    812			case DT_FSTART:
    813				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
    814				skb_put(priv->rx_1st_skb, pkt_len);
    815				break;
    816			case DT_FMID:
    817				skb = ravb_get_skb_gbeth(ndev, entry, desc);
    818				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
    819							       priv->rx_1st_skb->len,
    820							       skb->data,
    821							       pkt_len);
    822				skb_put(priv->rx_1st_skb, pkt_len);
    823				dev_kfree_skb(skb);
    824				break;
    825			case DT_FEND:
    826				skb = ravb_get_skb_gbeth(ndev, entry, desc);
    827				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
    828							       priv->rx_1st_skb->len,
    829							       skb->data,
    830							       pkt_len);
    831				skb_put(priv->rx_1st_skb, pkt_len);
    832				dev_kfree_skb(skb);
    833				priv->rx_1st_skb->protocol =
    834					eth_type_trans(priv->rx_1st_skb, ndev);
    835				napi_gro_receive(&priv->napi[q],
    836						 priv->rx_1st_skb);
    837				stats->rx_packets++;
    838				stats->rx_bytes += priv->rx_1st_skb->len;
    839				break;
    840			}
    841		}
    842
    843		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
    844		desc = &priv->gbeth_rx_ring[entry];
    845	}
    846
    847	/* Refill the RX ring buffers. */
    848	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
    849		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
    850		desc = &priv->gbeth_rx_ring[entry];
    851		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
    852
    853		if (!priv->rx_skb[q][entry]) {
    854			skb = netdev_alloc_skb(ndev, info->max_rx_len);
    855			if (!skb)
    856				break;
    857			ravb_set_buffer_align(skb);
    858			dma_addr = dma_map_single(ndev->dev.parent,
    859						  skb->data,
    860						  GBETH_RX_BUFF_MAX,
    861						  DMA_FROM_DEVICE);
    862			skb_checksum_none_assert(skb);
    863			/* We just set the data size to 0 for a failed mapping
     864			 * which should prevent DMA from happening...
    865			 */
    866			if (dma_mapping_error(ndev->dev.parent, dma_addr))
    867				desc->ds_cc = cpu_to_le16(0);
    868			desc->dptr = cpu_to_le32(dma_addr);
    869			priv->rx_skb[q][entry] = skb;
    870		}
    871		/* Descriptor type must be set after all the above writes */
    872		dma_wmb();
    873		desc->die_dt = DT_FEMPTY;
    874	}
    875
    876	*quota -= limit - (++boguscnt);
    877
    878	return boguscnt <= 0;
    879}
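/* Budget accounting in ravb_rx_gbeth() above and ravb_rx_rcar() below:
 * boguscnt starts as min(outstanding descriptors, *quota) and is
 * pre-decremented for every descriptor examined, so the loop stops either
 * when it reaches a descriptor still marked DT_FEMPTY (nothing more
 * received) or when the NAPI budget is used up. The final
 * "*quota -= limit - (++boguscnt)" charges that work against the budget,
 * and the handler returns true only in the budget-exhausted case so that
 * ravb_poll() knows not to complete NAPI yet.
 */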
    880
    881/* Packet receive function for Ethernet AVB */
    882static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
    883{
    884	struct ravb_private *priv = netdev_priv(ndev);
    885	const struct ravb_hw_info *info = priv->info;
    886	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
    887	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
    888			priv->cur_rx[q];
    889	struct net_device_stats *stats = &priv->stats[q];
    890	struct ravb_ex_rx_desc *desc;
    891	struct sk_buff *skb;
    892	dma_addr_t dma_addr;
    893	struct timespec64 ts;
    894	u8  desc_status;
    895	u16 pkt_len;
    896	int limit;
    897
    898	boguscnt = min(boguscnt, *quota);
    899	limit = boguscnt;
    900	desc = &priv->rx_ring[q][entry];
    901	while (desc->die_dt != DT_FEMPTY) {
    902		/* Descriptor type must be checked before all other reads */
    903		dma_rmb();
    904		desc_status = desc->msc;
    905		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
    906
    907		if (--boguscnt < 0)
    908			break;
    909
    910		/* We use 0-byte descriptors to mark the DMA mapping errors */
    911		if (!pkt_len)
    912			continue;
    913
    914		if (desc_status & MSC_MC)
    915			stats->multicast++;
    916
    917		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
    918				   MSC_CEEF)) {
    919			stats->rx_errors++;
    920			if (desc_status & MSC_CRC)
    921				stats->rx_crc_errors++;
    922			if (desc_status & MSC_RFE)
    923				stats->rx_frame_errors++;
    924			if (desc_status & (MSC_RTLF | MSC_RTSF))
    925				stats->rx_length_errors++;
    926			if (desc_status & MSC_CEEF)
    927				stats->rx_missed_errors++;
    928		} else {
    929			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
    930
    931			skb = priv->rx_skb[q][entry];
    932			priv->rx_skb[q][entry] = NULL;
    933			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
    934					 RX_BUF_SZ,
    935					 DMA_FROM_DEVICE);
    936			get_ts &= (q == RAVB_NC) ?
    937					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
    938					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
    939			if (get_ts) {
    940				struct skb_shared_hwtstamps *shhwtstamps;
    941
    942				shhwtstamps = skb_hwtstamps(skb);
    943				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
    944				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
    945					     32) | le32_to_cpu(desc->ts_sl);
    946				ts.tv_nsec = le32_to_cpu(desc->ts_n);
    947				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
    948			}
    949
    950			skb_put(skb, pkt_len);
    951			skb->protocol = eth_type_trans(skb, ndev);
    952			if (ndev->features & NETIF_F_RXCSUM)
    953				ravb_rx_csum(skb);
    954			napi_gro_receive(&priv->napi[q], skb);
    955			stats->rx_packets++;
    956			stats->rx_bytes += pkt_len;
    957		}
    958
    959		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
    960		desc = &priv->rx_ring[q][entry];
    961	}
    962
    963	/* Refill the RX ring buffers. */
    964	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
    965		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
    966		desc = &priv->rx_ring[q][entry];
    967		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
    968
    969		if (!priv->rx_skb[q][entry]) {
    970			skb = netdev_alloc_skb(ndev, info->max_rx_len);
    971			if (!skb)
    972				break;	/* Better luck next round. */
    973			ravb_set_buffer_align(skb);
    974			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
    975						  le16_to_cpu(desc->ds_cc),
    976						  DMA_FROM_DEVICE);
    977			skb_checksum_none_assert(skb);
    978			/* We just set the data size to 0 for a failed mapping
     979			 * which should prevent DMA from happening...
    980			 */
    981			if (dma_mapping_error(ndev->dev.parent, dma_addr))
    982				desc->ds_cc = cpu_to_le16(0);
    983			desc->dptr = cpu_to_le32(dma_addr);
    984			priv->rx_skb[q][entry] = skb;
    985		}
    986		/* Descriptor type must be set after all the above writes */
    987		dma_wmb();
    988		desc->die_dt = DT_FEMPTY;
    989	}
    990
    991	*quota -= limit - (++boguscnt);
    992
    993	return boguscnt <= 0;
    994}
    995
    996/* Packet receive function for Ethernet AVB */
    997static bool ravb_rx(struct net_device *ndev, int *quota, int q)
    998{
    999	struct ravb_private *priv = netdev_priv(ndev);
   1000	const struct ravb_hw_info *info = priv->info;
   1001
   1002	return info->receive(ndev, quota, q);
   1003}
   1004
   1005static void ravb_rcv_snd_disable(struct net_device *ndev)
   1006{
   1007	/* Disable TX and RX */
   1008	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
   1009}
   1010
   1011static void ravb_rcv_snd_enable(struct net_device *ndev)
   1012{
   1013	/* Enable TX and RX */
   1014	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
   1015}
   1016
    1017/* Wait until the DMA processes have finished */
   1018static int ravb_stop_dma(struct net_device *ndev)
   1019{
   1020	struct ravb_private *priv = netdev_priv(ndev);
   1021	const struct ravb_hw_info *info = priv->info;
   1022	int error;
   1023
   1024	/* Wait for stopping the hardware TX process */
   1025	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
   1026
   1027	if (error)
   1028		return error;
   1029
   1030	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
   1031			  0);
   1032	if (error)
   1033		return error;
   1034
   1035	/* Stop the E-MAC's RX/TX processes. */
   1036	ravb_rcv_snd_disable(ndev);
   1037
   1038	/* Wait for stopping the RX DMA process */
   1039	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
   1040	if (error)
   1041		return error;
   1042
   1043	/* Stop AVB-DMAC process */
   1044	return ravb_config(ndev);
   1045}
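/* Stop order used above: wait for the transmit requests and per-queue TX
 * processes (TCCR, CSR.TPOx) to go idle, disable the E-MAC receiver and
 * transmitter, wait for the RX DMA process (CSR.RPO) to stop, and finally
 * switch the AVB-DMAC back to CONFIG mode via ravb_config().
 */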
   1046
   1047/* E-MAC interrupt handler */
   1048static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
   1049{
   1050	struct ravb_private *priv = netdev_priv(ndev);
   1051	u32 ecsr, psr;
   1052
   1053	ecsr = ravb_read(ndev, ECSR);
   1054	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
   1055
   1056	if (ecsr & ECSR_MPD)
   1057		pm_wakeup_event(&priv->pdev->dev, 0);
   1058	if (ecsr & ECSR_ICD)
   1059		ndev->stats.tx_carrier_errors++;
   1060	if (ecsr & ECSR_LCHNG) {
   1061		/* Link changed */
   1062		if (priv->no_avb_link)
   1063			return;
   1064		psr = ravb_read(ndev, PSR);
   1065		if (priv->avb_link_active_low)
   1066			psr ^= PSR_LMON;
   1067		if (!(psr & PSR_LMON)) {
    1068			/* Disable RX and TX */
   1069			ravb_rcv_snd_disable(ndev);
   1070		} else {
   1071			/* Enable RX and TX */
   1072			ravb_rcv_snd_enable(ndev);
   1073		}
   1074	}
   1075}
   1076
   1077static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
   1078{
   1079	struct net_device *ndev = dev_id;
   1080	struct ravb_private *priv = netdev_priv(ndev);
   1081
   1082	spin_lock(&priv->lock);
   1083	ravb_emac_interrupt_unlocked(ndev);
   1084	spin_unlock(&priv->lock);
   1085	return IRQ_HANDLED;
   1086}
   1087
   1088/* Error interrupt handler */
   1089static void ravb_error_interrupt(struct net_device *ndev)
   1090{
   1091	struct ravb_private *priv = netdev_priv(ndev);
   1092	u32 eis, ris2;
   1093
   1094	eis = ravb_read(ndev, EIS);
   1095	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
   1096	if (eis & EIS_QFS) {
   1097		ris2 = ravb_read(ndev, RIS2);
   1098		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
   1099			   RIS2);
   1100
   1101		/* Receive Descriptor Empty int */
   1102		if (ris2 & RIS2_QFF0)
   1103			priv->stats[RAVB_BE].rx_over_errors++;
   1104
    1105		/* Receive Descriptor Empty int */
   1106		if (ris2 & RIS2_QFF1)
   1107			priv->stats[RAVB_NC].rx_over_errors++;
   1108
   1109		/* Receive FIFO Overflow int */
   1110		if (ris2 & RIS2_RFFF)
   1111			priv->rx_fifo_errors++;
   1112	}
   1113}
   1114
   1115static bool ravb_queue_interrupt(struct net_device *ndev, int q)
   1116{
   1117	struct ravb_private *priv = netdev_priv(ndev);
   1118	const struct ravb_hw_info *info = priv->info;
   1119	u32 ris0 = ravb_read(ndev, RIS0);
   1120	u32 ric0 = ravb_read(ndev, RIC0);
   1121	u32 tis  = ravb_read(ndev, TIS);
   1122	u32 tic  = ravb_read(ndev, TIC);
   1123
   1124	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
   1125		if (napi_schedule_prep(&priv->napi[q])) {
   1126			/* Mask RX and TX interrupts */
   1127			if (!info->irq_en_dis) {
   1128				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
   1129				ravb_write(ndev, tic & ~BIT(q), TIC);
   1130			} else {
   1131				ravb_write(ndev, BIT(q), RID0);
   1132				ravb_write(ndev, BIT(q), TID);
   1133			}
   1134			__napi_schedule(&priv->napi[q]);
   1135		} else {
   1136			netdev_warn(ndev,
   1137				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
   1138				    ris0, ric0);
   1139			netdev_warn(ndev,
   1140				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
   1141				    tis, tic);
   1142		}
   1143		return true;
   1144	}
   1145	return false;
   1146}
   1147
   1148static bool ravb_timestamp_interrupt(struct net_device *ndev)
   1149{
   1150	u32 tis = ravb_read(ndev, TIS);
   1151
   1152	if (tis & TIS_TFUF) {
   1153		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
   1154		ravb_get_tx_tstamp(ndev);
   1155		return true;
   1156	}
   1157	return false;
   1158}
   1159
   1160static irqreturn_t ravb_interrupt(int irq, void *dev_id)
   1161{
   1162	struct net_device *ndev = dev_id;
   1163	struct ravb_private *priv = netdev_priv(ndev);
   1164	const struct ravb_hw_info *info = priv->info;
   1165	irqreturn_t result = IRQ_NONE;
   1166	u32 iss;
   1167
   1168	spin_lock(&priv->lock);
   1169	/* Get interrupt status */
   1170	iss = ravb_read(ndev, ISS);
   1171
   1172	/* Received and transmitted interrupts */
   1173	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
   1174		int q;
   1175
   1176		/* Timestamp updated */
   1177		if (ravb_timestamp_interrupt(ndev))
   1178			result = IRQ_HANDLED;
   1179
   1180		/* Network control and best effort queue RX/TX */
   1181		if (info->nc_queues) {
   1182			for (q = RAVB_NC; q >= RAVB_BE; q--) {
   1183				if (ravb_queue_interrupt(ndev, q))
   1184					result = IRQ_HANDLED;
   1185			}
   1186		} else {
   1187			if (ravb_queue_interrupt(ndev, RAVB_BE))
   1188				result = IRQ_HANDLED;
   1189		}
   1190	}
   1191
   1192	/* E-MAC status summary */
   1193	if (iss & ISS_MS) {
   1194		ravb_emac_interrupt_unlocked(ndev);
   1195		result = IRQ_HANDLED;
   1196	}
   1197
   1198	/* Error status summary */
   1199	if (iss & ISS_ES) {
   1200		ravb_error_interrupt(ndev);
   1201		result = IRQ_HANDLED;
   1202	}
   1203
   1204	/* gPTP interrupt status summary */
   1205	if (iss & ISS_CGIS) {
   1206		ravb_ptp_interrupt(ndev);
   1207		result = IRQ_HANDLED;
   1208	}
   1209
   1210	spin_unlock(&priv->lock);
   1211	return result;
   1212}
   1213
   1214/* Timestamp/Error/gPTP interrupt handler */
   1215static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
   1216{
   1217	struct net_device *ndev = dev_id;
   1218	struct ravb_private *priv = netdev_priv(ndev);
   1219	irqreturn_t result = IRQ_NONE;
   1220	u32 iss;
   1221
   1222	spin_lock(&priv->lock);
   1223	/* Get interrupt status */
   1224	iss = ravb_read(ndev, ISS);
   1225
   1226	/* Timestamp updated */
   1227	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
   1228		result = IRQ_HANDLED;
   1229
   1230	/* Error status summary */
   1231	if (iss & ISS_ES) {
   1232		ravb_error_interrupt(ndev);
   1233		result = IRQ_HANDLED;
   1234	}
   1235
   1236	/* gPTP interrupt status summary */
   1237	if (iss & ISS_CGIS) {
   1238		ravb_ptp_interrupt(ndev);
   1239		result = IRQ_HANDLED;
   1240	}
   1241
   1242	spin_unlock(&priv->lock);
   1243	return result;
   1244}
   1245
   1246static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
   1247{
   1248	struct net_device *ndev = dev_id;
   1249	struct ravb_private *priv = netdev_priv(ndev);
   1250	irqreturn_t result = IRQ_NONE;
   1251
   1252	spin_lock(&priv->lock);
   1253
   1254	/* Network control/Best effort queue RX/TX */
   1255	if (ravb_queue_interrupt(ndev, q))
   1256		result = IRQ_HANDLED;
   1257
   1258	spin_unlock(&priv->lock);
   1259	return result;
   1260}
   1261
   1262static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
   1263{
   1264	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
   1265}
   1266
   1267static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
   1268{
   1269	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
   1270}
   1271
   1272static int ravb_poll(struct napi_struct *napi, int budget)
   1273{
   1274	struct net_device *ndev = napi->dev;
   1275	struct ravb_private *priv = netdev_priv(ndev);
   1276	const struct ravb_hw_info *info = priv->info;
   1277	bool gptp = info->gptp || info->ccc_gac;
   1278	struct ravb_rx_desc *desc;
   1279	unsigned long flags;
   1280	int q = napi - priv->napi;
   1281	int mask = BIT(q);
   1282	int quota = budget;
   1283	unsigned int entry;
   1284
   1285	if (!gptp) {
   1286		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
   1287		desc = &priv->gbeth_rx_ring[entry];
   1288	}
   1289	/* Processing RX Descriptor Ring */
   1290	/* Clear RX interrupt */
   1291	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
   1292	if (gptp || desc->die_dt != DT_FEMPTY) {
   1293		if (ravb_rx(ndev, &quota, q))
   1294			goto out;
   1295	}
   1296
   1297	/* Processing TX Descriptor Ring */
   1298	spin_lock_irqsave(&priv->lock, flags);
   1299	/* Clear TX interrupt */
   1300	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
   1301	ravb_tx_free(ndev, q, true);
   1302	netif_wake_subqueue(ndev, q);
   1303	spin_unlock_irqrestore(&priv->lock, flags);
   1304
   1305	napi_complete(napi);
   1306
   1307	/* Re-enable RX/TX interrupts */
   1308	spin_lock_irqsave(&priv->lock, flags);
   1309	if (!info->irq_en_dis) {
   1310		ravb_modify(ndev, RIC0, mask, mask);
   1311		ravb_modify(ndev, TIC,  mask, mask);
   1312	} else {
   1313		ravb_write(ndev, mask, RIE0);
   1314		ravb_write(ndev, mask, TIE);
   1315	}
   1316	spin_unlock_irqrestore(&priv->lock, flags);
   1317
   1318	/* Receive error message handling */
   1319	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
   1320	if (info->nc_queues)
   1321		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
   1322	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
   1323		ndev->stats.rx_over_errors = priv->rx_over_errors;
   1324	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
   1325		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
   1326out:
   1327	return budget - quota;
   1328}
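/* NAPI flow in ravb_poll(): the RX interrupt status for this queue is
 * cleared first, then up to "budget" packets are processed; if ravb_rx()
 * reports the budget as exhausted, the function bails out early without
 * completing NAPI so it gets rescheduled. Otherwise completed TX
 * descriptors are reclaimed, the subqueue is woken, NAPI is completed and
 * the per-queue RX/TX interrupts are re-enabled (RIC0/TIC on most SoCs,
 * RIE0/TIE on the irq_en_dis variants).
 */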
   1329
   1330static void ravb_set_duplex_gbeth(struct net_device *ndev)
   1331{
   1332	struct ravb_private *priv = netdev_priv(ndev);
   1333
   1334	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
   1335}
   1336
   1337/* PHY state control function */
   1338static void ravb_adjust_link(struct net_device *ndev)
   1339{
   1340	struct ravb_private *priv = netdev_priv(ndev);
   1341	const struct ravb_hw_info *info = priv->info;
   1342	struct phy_device *phydev = ndev->phydev;
   1343	bool new_state = false;
   1344	unsigned long flags;
   1345
   1346	spin_lock_irqsave(&priv->lock, flags);
   1347
    1348	/* Disable TX and RX right away if the E-MAC link change is ignored */
   1349	if (priv->no_avb_link)
   1350		ravb_rcv_snd_disable(ndev);
   1351
   1352	if (phydev->link) {
   1353		if (info->half_duplex && phydev->duplex != priv->duplex) {
   1354			new_state = true;
   1355			priv->duplex = phydev->duplex;
   1356			ravb_set_duplex_gbeth(ndev);
   1357		}
   1358
   1359		if (phydev->speed != priv->speed) {
   1360			new_state = true;
   1361			priv->speed = phydev->speed;
   1362			info->set_rate(ndev);
   1363		}
   1364		if (!priv->link) {
   1365			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
   1366			new_state = true;
   1367			priv->link = phydev->link;
   1368		}
   1369	} else if (priv->link) {
   1370		new_state = true;
   1371		priv->link = 0;
   1372		priv->speed = 0;
   1373		if (info->half_duplex)
   1374			priv->duplex = -1;
   1375	}
   1376
    1377	/* Enable TX and RX right away if the E-MAC link change is ignored */
   1378	if (priv->no_avb_link && phydev->link)
   1379		ravb_rcv_snd_enable(ndev);
   1380
   1381	spin_unlock_irqrestore(&priv->lock, flags);
   1382
   1383	if (new_state && netif_msg_link(priv))
   1384		phy_print_status(phydev);
   1385}
   1386
   1387static const struct soc_device_attribute r8a7795es10[] = {
   1388	{ .soc_id = "r8a7795", .revision = "ES1.0", },
   1389	{ /* sentinel */ }
   1390};
   1391
   1392/* PHY init function */
   1393static int ravb_phy_init(struct net_device *ndev)
   1394{
   1395	struct device_node *np = ndev->dev.parent->of_node;
   1396	struct ravb_private *priv = netdev_priv(ndev);
   1397	const struct ravb_hw_info *info = priv->info;
   1398	struct phy_device *phydev;
   1399	struct device_node *pn;
   1400	phy_interface_t iface;
   1401	int err;
   1402
   1403	priv->link = 0;
   1404	priv->speed = 0;
   1405	priv->duplex = -1;
   1406
   1407	/* Try connecting to PHY */
   1408	pn = of_parse_phandle(np, "phy-handle", 0);
   1409	if (!pn) {
   1410		/* In the case of a fixed PHY, the DT node associated
   1411		 * to the PHY is the Ethernet MAC DT node.
   1412		 */
   1413		if (of_phy_is_fixed_link(np)) {
   1414			err = of_phy_register_fixed_link(np);
   1415			if (err)
   1416				return err;
   1417		}
   1418		pn = of_node_get(np);
   1419	}
   1420
   1421	iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
   1422				     : priv->phy_interface;
   1423	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
   1424	of_node_put(pn);
   1425	if (!phydev) {
   1426		netdev_err(ndev, "failed to connect PHY\n");
   1427		err = -ENOENT;
   1428		goto err_deregister_fixed_link;
   1429	}
   1430
    1431	/* This driver only supports 10/100 Mbit/s speeds on R-Car H3 ES1.0
   1432	 * at this time.
   1433	 */
   1434	if (soc_device_match(r8a7795es10)) {
   1435		phy_set_max_speed(phydev, SPEED_100);
   1436
   1437		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
   1438	}
   1439
   1440	if (!info->half_duplex) {
    1441		/* 10BASE, Pause and Asym Pause are not supported */
   1442		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
   1443		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
   1444		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
   1445		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
   1446
   1447		/* Half Duplex is not supported */
   1448		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
   1449		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
   1450	}
   1451
   1452	phy_attached_info(phydev);
   1453
   1454	return 0;
   1455
   1456err_deregister_fixed_link:
   1457	if (of_phy_is_fixed_link(np))
   1458		of_phy_deregister_fixed_link(np);
   1459
   1460	return err;
   1461}
   1462
   1463/* PHY control start function */
   1464static int ravb_phy_start(struct net_device *ndev)
   1465{
   1466	int error;
   1467
   1468	error = ravb_phy_init(ndev);
   1469	if (error)
   1470		return error;
   1471
   1472	phy_start(ndev->phydev);
   1473
   1474	return 0;
   1475}
   1476
   1477static u32 ravb_get_msglevel(struct net_device *ndev)
   1478{
   1479	struct ravb_private *priv = netdev_priv(ndev);
   1480
   1481	return priv->msg_enable;
   1482}
   1483
   1484static void ravb_set_msglevel(struct net_device *ndev, u32 value)
   1485{
   1486	struct ravb_private *priv = netdev_priv(ndev);
   1487
   1488	priv->msg_enable = value;
   1489}
   1490
   1491static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
   1492	"rx_queue_0_current",
   1493	"tx_queue_0_current",
   1494	"rx_queue_0_dirty",
   1495	"tx_queue_0_dirty",
   1496	"rx_queue_0_packets",
   1497	"tx_queue_0_packets",
   1498	"rx_queue_0_bytes",
   1499	"tx_queue_0_bytes",
   1500	"rx_queue_0_mcast_packets",
   1501	"rx_queue_0_errors",
   1502	"rx_queue_0_crc_errors",
   1503	"rx_queue_0_frame_errors",
   1504	"rx_queue_0_length_errors",
   1505	"rx_queue_0_csum_offload_errors",
   1506	"rx_queue_0_over_errors",
   1507};
   1508
   1509static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
   1510	"rx_queue_0_current",
   1511	"tx_queue_0_current",
   1512	"rx_queue_0_dirty",
   1513	"tx_queue_0_dirty",
   1514	"rx_queue_0_packets",
   1515	"tx_queue_0_packets",
   1516	"rx_queue_0_bytes",
   1517	"tx_queue_0_bytes",
   1518	"rx_queue_0_mcast_packets",
   1519	"rx_queue_0_errors",
   1520	"rx_queue_0_crc_errors",
   1521	"rx_queue_0_frame_errors",
   1522	"rx_queue_0_length_errors",
   1523	"rx_queue_0_missed_errors",
   1524	"rx_queue_0_over_errors",
   1525
   1526	"rx_queue_1_current",
   1527	"tx_queue_1_current",
   1528	"rx_queue_1_dirty",
   1529	"tx_queue_1_dirty",
   1530	"rx_queue_1_packets",
   1531	"tx_queue_1_packets",
   1532	"rx_queue_1_bytes",
   1533	"tx_queue_1_bytes",
   1534	"rx_queue_1_mcast_packets",
   1535	"rx_queue_1_errors",
   1536	"rx_queue_1_crc_errors",
   1537	"rx_queue_1_frame_errors",
   1538	"rx_queue_1_length_errors",
   1539	"rx_queue_1_missed_errors",
   1540	"rx_queue_1_over_errors",
   1541};
   1542
   1543static int ravb_get_sset_count(struct net_device *netdev, int sset)
   1544{
   1545	struct ravb_private *priv = netdev_priv(netdev);
   1546	const struct ravb_hw_info *info = priv->info;
   1547
   1548	switch (sset) {
   1549	case ETH_SS_STATS:
   1550		return info->stats_len;
   1551	default:
   1552		return -EOPNOTSUPP;
   1553	}
   1554}
   1555
   1556static void ravb_get_ethtool_stats(struct net_device *ndev,
   1557				   struct ethtool_stats *estats, u64 *data)
   1558{
   1559	struct ravb_private *priv = netdev_priv(ndev);
   1560	const struct ravb_hw_info *info = priv->info;
   1561	int num_rx_q;
   1562	int i = 0;
   1563	int q;
   1564
   1565	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
   1566	/* Device-specific stats */
   1567	for (q = RAVB_BE; q < num_rx_q; q++) {
   1568		struct net_device_stats *stats = &priv->stats[q];
   1569
   1570		data[i++] = priv->cur_rx[q];
   1571		data[i++] = priv->cur_tx[q];
   1572		data[i++] = priv->dirty_rx[q];
   1573		data[i++] = priv->dirty_tx[q];
   1574		data[i++] = stats->rx_packets;
   1575		data[i++] = stats->tx_packets;
   1576		data[i++] = stats->rx_bytes;
   1577		data[i++] = stats->tx_bytes;
   1578		data[i++] = stats->multicast;
   1579		data[i++] = stats->rx_errors;
   1580		data[i++] = stats->rx_crc_errors;
   1581		data[i++] = stats->rx_frame_errors;
   1582		data[i++] = stats->rx_length_errors;
   1583		data[i++] = stats->rx_missed_errors;
   1584		data[i++] = stats->rx_over_errors;
   1585	}
   1586}
   1587
   1588static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
   1589{
   1590	struct ravb_private *priv = netdev_priv(ndev);
   1591	const struct ravb_hw_info *info = priv->info;
   1592
   1593	switch (stringset) {
   1594	case ETH_SS_STATS:
   1595		memcpy(data, info->gstrings_stats, info->gstrings_size);
   1596		break;
   1597	}
   1598}
   1599
   1600static void ravb_get_ringparam(struct net_device *ndev,
   1601			       struct ethtool_ringparam *ring,
   1602			       struct kernel_ethtool_ringparam *kernel_ring,
   1603			       struct netlink_ext_ack *extack)
   1604{
   1605	struct ravb_private *priv = netdev_priv(ndev);
   1606
   1607	ring->rx_max_pending = BE_RX_RING_MAX;
   1608	ring->tx_max_pending = BE_TX_RING_MAX;
   1609	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
   1610	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
   1611}
   1612
   1613static int ravb_set_ringparam(struct net_device *ndev,
   1614			      struct ethtool_ringparam *ring,
   1615			      struct kernel_ethtool_ringparam *kernel_ring,
   1616			      struct netlink_ext_ack *extack)
   1617{
   1618	struct ravb_private *priv = netdev_priv(ndev);
   1619	const struct ravb_hw_info *info = priv->info;
   1620	int error;
   1621
   1622	if (ring->tx_pending > BE_TX_RING_MAX ||
   1623	    ring->rx_pending > BE_RX_RING_MAX ||
   1624	    ring->tx_pending < BE_TX_RING_MIN ||
   1625	    ring->rx_pending < BE_RX_RING_MIN)
   1626		return -EINVAL;
   1627	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
   1628		return -EINVAL;
   1629
   1630	if (netif_running(ndev)) {
   1631		netif_device_detach(ndev);
   1632		/* Stop PTP Clock driver */
   1633		if (info->gptp)
   1634			ravb_ptp_stop(ndev);
   1635		/* Wait for DMA stopping */
   1636		error = ravb_stop_dma(ndev);
   1637		if (error) {
   1638			netdev_err(ndev,
   1639				   "cannot set ringparam! Any AVB processes are still running?\n");
   1640			return error;
   1641		}
   1642		synchronize_irq(ndev->irq);
   1643
   1644		/* Free all the skb's in the RX queue and the DMA buffers. */
   1645		ravb_ring_free(ndev, RAVB_BE);
   1646		if (info->nc_queues)
   1647			ravb_ring_free(ndev, RAVB_NC);
   1648	}
   1649
   1650	/* Set new parameters */
   1651	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
   1652	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
   1653
   1654	if (netif_running(ndev)) {
   1655		error = ravb_dmac_init(ndev);
   1656		if (error) {
   1657			netdev_err(ndev,
   1658				   "%s: ravb_dmac_init() failed, error %d\n",
   1659				   __func__, error);
   1660			return error;
   1661		}
   1662
   1663		ravb_emac_init(ndev);
   1664
   1665		/* Initialise PTP Clock driver */
   1666		if (info->gptp)
   1667			ravb_ptp_init(ndev, priv->pdev);
   1668
   1669		netif_device_attach(ndev);
   1670	}
   1671
   1672	return 0;
   1673}
   1674
   1675static int ravb_get_ts_info(struct net_device *ndev,
   1676			    struct ethtool_ts_info *info)
   1677{
   1678	struct ravb_private *priv = netdev_priv(ndev);
   1679	const struct ravb_hw_info *hw_info = priv->info;
   1680
   1681	info->so_timestamping =
   1682		SOF_TIMESTAMPING_TX_SOFTWARE |
   1683		SOF_TIMESTAMPING_RX_SOFTWARE |
   1684		SOF_TIMESTAMPING_SOFTWARE |
   1685		SOF_TIMESTAMPING_TX_HARDWARE |
   1686		SOF_TIMESTAMPING_RX_HARDWARE |
   1687		SOF_TIMESTAMPING_RAW_HARDWARE;
   1688	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
   1689	info->rx_filters =
   1690		(1 << HWTSTAMP_FILTER_NONE) |
   1691		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
   1692		(1 << HWTSTAMP_FILTER_ALL);
   1693	if (hw_info->gptp || hw_info->ccc_gac)
   1694		info->phc_index = ptp_clock_index(priv->ptp.clock);
   1695
   1696	return 0;
   1697}
   1698
   1699static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   1700{
   1701	struct ravb_private *priv = netdev_priv(ndev);
   1702
   1703	wol->supported = WAKE_MAGIC;
   1704	wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
   1705}
   1706
   1707static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
   1708{
   1709	struct ravb_private *priv = netdev_priv(ndev);
   1710	const struct ravb_hw_info *info = priv->info;
   1711
   1712	if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
   1713		return -EOPNOTSUPP;
   1714
   1715	priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
   1716
   1717	device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled);
   1718
   1719	return 0;
   1720}
   1721
   1722static const struct ethtool_ops ravb_ethtool_ops = {
   1723	.nway_reset		= phy_ethtool_nway_reset,
   1724	.get_msglevel		= ravb_get_msglevel,
   1725	.set_msglevel		= ravb_set_msglevel,
   1726	.get_link		= ethtool_op_get_link,
   1727	.get_strings		= ravb_get_strings,
   1728	.get_ethtool_stats	= ravb_get_ethtool_stats,
   1729	.get_sset_count		= ravb_get_sset_count,
   1730	.get_ringparam		= ravb_get_ringparam,
   1731	.set_ringparam		= ravb_set_ringparam,
   1732	.get_ts_info		= ravb_get_ts_info,
   1733	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
   1734	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
   1735	.get_wol		= ravb_get_wol,
   1736	.set_wol		= ravb_set_wol,
   1737};
   1738
   1739static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
   1740				struct net_device *ndev, struct device *dev,
   1741				const char *ch)
   1742{
   1743	char *name;
   1744	int error;
   1745
   1746	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
   1747	if (!name)
   1748		return -ENOMEM;
   1749	error = request_irq(irq, handler, 0, name, ndev);
   1750	if (error)
   1751		netdev_err(ndev, "cannot request IRQ %s\n", name);
   1752
   1753	return error;
   1754}
   1755
   1756/* Network device open function for Ethernet AVB */
   1757static int ravb_open(struct net_device *ndev)
   1758{
   1759	struct ravb_private *priv = netdev_priv(ndev);
   1760	const struct ravb_hw_info *info = priv->info;
   1761	struct platform_device *pdev = priv->pdev;
   1762	struct device *dev = &pdev->dev;
   1763	int error;
   1764
   1765	napi_enable(&priv->napi[RAVB_BE]);
   1766	if (info->nc_queues)
   1767		napi_enable(&priv->napi[RAVB_NC]);
   1768
   1769	if (!info->multi_irqs) {
   1770		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
   1771				    ndev->name, ndev);
   1772		if (error) {
   1773			netdev_err(ndev, "cannot request IRQ\n");
   1774			goto out_napi_off;
   1775		}
   1776	} else {
   1777		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
   1778				      dev, "ch22:multi");
   1779		if (error)
   1780			goto out_napi_off;
   1781		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
   1782				      dev, "ch24:emac");
   1783		if (error)
   1784			goto out_free_irq;
   1785		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
   1786				      ndev, dev, "ch0:rx_be");
   1787		if (error)
   1788			goto out_free_irq_emac;
   1789		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
   1790				      ndev, dev, "ch18:tx_be");
   1791		if (error)
   1792			goto out_free_irq_be_rx;
   1793		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
   1794				      ndev, dev, "ch1:rx_nc");
   1795		if (error)
   1796			goto out_free_irq_be_tx;
   1797		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
   1798				      ndev, dev, "ch19:tx_nc");
   1799		if (error)
   1800			goto out_free_irq_nc_rx;
   1801
   1802		if (info->err_mgmt_irqs) {
   1803			error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
   1804					      ndev, dev, "err_a");
   1805			if (error)
   1806				goto out_free_irq_nc_tx;
   1807			error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
   1808					      ndev, dev, "mgmt_a");
   1809			if (error)
   1810				goto out_free_irq_erra;
   1811		}
   1812	}
   1813
   1814	/* Device init */
   1815	error = ravb_dmac_init(ndev);
   1816	if (error)
   1817		goto out_free_irq_mgmta;
   1818	ravb_emac_init(ndev);
   1819
   1820	/* Initialise PTP Clock driver */
   1821	if (info->gptp)
   1822		ravb_ptp_init(ndev, priv->pdev);
   1823
   1824	netif_tx_start_all_queues(ndev);
   1825
   1826	/* PHY control start */
   1827	error = ravb_phy_start(ndev);
   1828	if (error)
   1829		goto out_ptp_stop;
   1830
   1831	return 0;
   1832
   1833out_ptp_stop:
   1834	/* Stop PTP Clock driver */
   1835	if (info->gptp)
   1836		ravb_ptp_stop(ndev);
   1837out_free_irq_mgmta:
   1838	if (!info->multi_irqs)
   1839		goto out_free_irq;
   1840	if (info->err_mgmt_irqs)
   1841		free_irq(priv->mgmta_irq, ndev);
   1842out_free_irq_erra:
   1843	if (info->err_mgmt_irqs)
   1844		free_irq(priv->erra_irq, ndev);
   1845out_free_irq_nc_tx:
   1846	free_irq(priv->tx_irqs[RAVB_NC], ndev);
   1847out_free_irq_nc_rx:
   1848	free_irq(priv->rx_irqs[RAVB_NC], ndev);
   1849out_free_irq_be_tx:
   1850	free_irq(priv->tx_irqs[RAVB_BE], ndev);
   1851out_free_irq_be_rx:
   1852	free_irq(priv->rx_irqs[RAVB_BE], ndev);
   1853out_free_irq_emac:
   1854	free_irq(priv->emac_irq, ndev);
   1855out_free_irq:
   1856	free_irq(ndev->irq, ndev);
   1857out_napi_off:
   1858	if (info->nc_queues)
   1859		napi_disable(&priv->napi[RAVB_NC]);
   1860	napi_disable(&priv->napi[RAVB_BE]);
   1861	return error;
   1862}
   1863
   1864/* Timeout function for Ethernet AVB */
   1865static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
   1866{
   1867	struct ravb_private *priv = netdev_priv(ndev);
   1868
   1869	netif_err(priv, tx_err, ndev,
   1870		  "transmit timed out, status %08x, resetting...\n",
   1871		  ravb_read(ndev, ISS));
   1872
   1873	/* tx_errors count up */
   1874	ndev->stats.tx_errors++;
   1875
   1876	schedule_work(&priv->work);
   1877}
   1878
   1879static void ravb_tx_timeout_work(struct work_struct *work)
   1880{
   1881	struct ravb_private *priv = container_of(work, struct ravb_private,
   1882						 work);
   1883	const struct ravb_hw_info *info = priv->info;
   1884	struct net_device *ndev = priv->ndev;
   1885	int error;
   1886
   1887	netif_tx_stop_all_queues(ndev);
   1888
   1889	/* Stop PTP Clock driver */
   1890	if (info->gptp)
   1891		ravb_ptp_stop(ndev);
   1892
   1893	/* Wait for DMA stopping */
   1894	if (ravb_stop_dma(ndev)) {
    1895		/* If ravb_stop_dma() fails, the hardware is still operating
    1896		 * for TX and/or RX, so the functions below must not be
    1897		 * called, since ravb_dmac_init() may fail as well. Also,
    1898		 * ravb_stop_dma() should not be retried over and over here,
    1899		 * because that could wait forever. So, just re-enable TX
    1900		 * and RX and skip the following re-initialization
    1901		 * procedure.
    1902		 */
   1903		ravb_rcv_snd_enable(ndev);
   1904		goto out;
   1905	}
   1906
   1907	ravb_ring_free(ndev, RAVB_BE);
   1908	if (info->nc_queues)
   1909		ravb_ring_free(ndev, RAVB_NC);
   1910
   1911	/* Device init */
   1912	error = ravb_dmac_init(ndev);
   1913	if (error) {
    1914		/* If ravb_dmac_init() fails, the descriptors have been freed,
    1915		 * so return here to avoid re-enabling TX and RX in
    1916		 * ravb_emac_init().
    1917		 */
   1918		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
   1919			   __func__, error);
   1920		return;
   1921	}
   1922	ravb_emac_init(ndev);
   1923
   1924out:
   1925	/* Initialise PTP Clock driver */
   1926	if (info->gptp)
   1927		ravb_ptp_init(ndev, priv->pdev);
   1928
   1929	netif_tx_start_all_queues(ndev);
   1930}
   1931
   1932/* Packet transmit function for Ethernet AVB */
   1933static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
   1934{
   1935	struct ravb_private *priv = netdev_priv(ndev);
   1936	const struct ravb_hw_info *info = priv->info;
   1937	unsigned int num_tx_desc = priv->num_tx_desc;
   1938	u16 q = skb_get_queue_mapping(skb);
   1939	struct ravb_tstamp_skb *ts_skb;
   1940	struct ravb_tx_desc *desc;
   1941	unsigned long flags;
   1942	u32 dma_addr;
   1943	void *buffer;
   1944	u32 entry;
   1945	u32 len;
   1946
   1947	spin_lock_irqsave(&priv->lock, flags);
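	/* cur_tx and dirty_tx count descriptors: the queue is treated as
	 * full once more than (num_tx_ring - 1) entries, each consuming
	 * num_tx_desc descriptors, are in flight.
	 */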
   1948	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
   1949	    num_tx_desc) {
   1950		netif_err(priv, tx_queued, ndev,
   1951			  "still transmitting with the full ring!\n");
   1952		netif_stop_subqueue(ndev, q);
   1953		spin_unlock_irqrestore(&priv->lock, flags);
   1954		return NETDEV_TX_BUSY;
   1955	}
   1956
   1957	if (skb_put_padto(skb, ETH_ZLEN))
   1958		goto exit;
   1959
   1960	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
   1961	priv->tx_skb[q][entry / num_tx_desc] = skb;
   1962
   1963	if (num_tx_desc > 1) {
   1964		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
   1965			 entry / num_tx_desc * DPTR_ALIGN;
   1966		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
   1967
   1968		/* Zero length DMA descriptors are problematic as they seem
   1969		 * to terminate DMA transfers. Avoid them by simply using a
   1970		 * length of DPTR_ALIGN (4) when skb data is aligned to
   1971		 * DPTR_ALIGN.
   1972		 *
   1973		 * As skb is guaranteed to have at least ETH_ZLEN (60)
   1974		 * bytes of data by the call to skb_put_padto() above this
   1975		 * is safe with respect to both the length of the first DMA
   1976		 * descriptor (len) overflowing the available data and the
   1977		 * length of the second DMA descriptor (skb->len - len)
   1978		 * being negative.
   1979		 */
   1980		if (len == 0)
   1981			len = DPTR_ALIGN;
   1982
   1983		memcpy(buffer, skb->data, len);
   1984		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
   1985					  DMA_TO_DEVICE);
   1986		if (dma_mapping_error(ndev->dev.parent, dma_addr))
   1987			goto drop;
   1988
   1989		desc = &priv->tx_ring[q][entry];
   1990		desc->ds_tagl = cpu_to_le16(len);
   1991		desc->dptr = cpu_to_le32(dma_addr);
   1992
   1993		buffer = skb->data + len;
   1994		len = skb->len - len;
   1995		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
   1996					  DMA_TO_DEVICE);
   1997		if (dma_mapping_error(ndev->dev.parent, dma_addr))
   1998			goto unmap;
   1999
   2000		desc++;
   2001	} else {
   2002		desc = &priv->tx_ring[q][entry];
   2003		len = skb->len;
   2004		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
   2005					  DMA_TO_DEVICE);
   2006		if (dma_mapping_error(ndev->dev.parent, dma_addr))
   2007			goto drop;
   2008	}
   2009	desc->ds_tagl = cpu_to_le16(len);
   2010	desc->dptr = cpu_to_le32(dma_addr);
   2011
   2012	/* TX timestamp required */
   2013	if (info->gptp || info->ccc_gac) {
   2014		if (q == RAVB_NC) {
   2015			ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
   2016			if (!ts_skb) {
   2017				if (num_tx_desc > 1) {
   2018					desc--;
   2019					dma_unmap_single(ndev->dev.parent, dma_addr,
   2020							 len, DMA_TO_DEVICE);
   2021				}
   2022				goto unmap;
   2023			}
   2024			ts_skb->skb = skb_get(skb);
   2025			ts_skb->tag = priv->ts_skb_tag++;
   2026			priv->ts_skb_tag &= 0x3ff;
   2027			list_add_tail(&ts_skb->list, &priv->ts_skb_list);
   2028
   2029			/* TAG and timestamp required flag */
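			/* The 10-bit tag is split across the descriptor:
			 * bits 9..4 go into tagh_tsr together with the
			 * timestamp-request flag (TX_TSR), and bits 3..0
			 * into bits 15..12 of ds_tagl.
			 */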
   2030			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
   2031			desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
   2032			desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
   2033		}
   2034
   2035		skb_tx_timestamp(skb);
   2036	}
   2037	/* Descriptor type must be set after all the above writes */
   2038	dma_wmb();
   2039	if (num_tx_desc > 1) {
   2040		desc->die_dt = DT_FEND;
   2041		desc--;
   2042		desc->die_dt = DT_FSTART;
   2043	} else {
   2044		desc->die_dt = DT_FSINGLE;
   2045	}
   2046	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
   2047
   2048	priv->cur_tx[q] += num_tx_desc;
   2049	if (priv->cur_tx[q] - priv->dirty_tx[q] >
   2050	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
   2051	    !ravb_tx_free(ndev, q, true))
   2052		netif_stop_subqueue(ndev, q);
   2053
   2054exit:
   2055	spin_unlock_irqrestore(&priv->lock, flags);
   2056	return NETDEV_TX_OK;
   2057
   2058unmap:
   2059	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
   2060			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
   2061drop:
   2062	dev_kfree_skb_any(skb);
   2063	priv->tx_skb[q][entry / num_tx_desc] = NULL;
   2064	goto exit;
   2065}
   2066
   2067static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
   2068			     struct net_device *sb_dev)
   2069{
   2070	/* If skb needs TX timestamp, it is handled in network control queue */
   2071	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
   2072							       RAVB_BE;
    2074	}
   2075
   2076static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
   2077{
   2078	struct ravb_private *priv = netdev_priv(ndev);
   2079	const struct ravb_hw_info *info = priv->info;
   2080	struct net_device_stats *nstats, *stats0, *stats1;
   2081
   2082	nstats = &ndev->stats;
   2083	stats0 = &priv->stats[RAVB_BE];
   2084
   2085	if (info->tx_counters) {
   2086		nstats->tx_dropped += ravb_read(ndev, TROCR);
   2087		ravb_write(ndev, 0, TROCR);	/* (write clear) */
   2088	}
   2089
   2090	if (info->carrier_counters) {
   2091		nstats->collisions += ravb_read(ndev, CXR41);
   2092		ravb_write(ndev, 0, CXR41);	/* (write clear) */
   2093		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
   2094		ravb_write(ndev, 0, CXR42);	/* (write clear) */
   2095	}
   2096
   2097	nstats->rx_packets = stats0->rx_packets;
   2098	nstats->tx_packets = stats0->tx_packets;
   2099	nstats->rx_bytes = stats0->rx_bytes;
   2100	nstats->tx_bytes = stats0->tx_bytes;
   2101	nstats->multicast = stats0->multicast;
   2102	nstats->rx_errors = stats0->rx_errors;
   2103	nstats->rx_crc_errors = stats0->rx_crc_errors;
   2104	nstats->rx_frame_errors = stats0->rx_frame_errors;
   2105	nstats->rx_length_errors = stats0->rx_length_errors;
   2106	nstats->rx_missed_errors = stats0->rx_missed_errors;
   2107	nstats->rx_over_errors = stats0->rx_over_errors;
   2108	if (info->nc_queues) {
   2109		stats1 = &priv->stats[RAVB_NC];
   2110
   2111		nstats->rx_packets += stats1->rx_packets;
   2112		nstats->tx_packets += stats1->tx_packets;
   2113		nstats->rx_bytes += stats1->rx_bytes;
   2114		nstats->tx_bytes += stats1->tx_bytes;
   2115		nstats->multicast += stats1->multicast;
   2116		nstats->rx_errors += stats1->rx_errors;
   2117		nstats->rx_crc_errors += stats1->rx_crc_errors;
   2118		nstats->rx_frame_errors += stats1->rx_frame_errors;
   2119		nstats->rx_length_errors += stats1->rx_length_errors;
   2120		nstats->rx_missed_errors += stats1->rx_missed_errors;
   2121		nstats->rx_over_errors += stats1->rx_over_errors;
   2122	}
   2123
   2124	return nstats;
   2125}
   2126
   2127/* Update promiscuous bit */
   2128static void ravb_set_rx_mode(struct net_device *ndev)
   2129{
   2130	struct ravb_private *priv = netdev_priv(ndev);
   2131	unsigned long flags;
   2132
   2133	spin_lock_irqsave(&priv->lock, flags);
   2134	ravb_modify(ndev, ECMR, ECMR_PRM,
   2135		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
   2136	spin_unlock_irqrestore(&priv->lock, flags);
   2137}
   2138
   2139/* Device close function for Ethernet AVB */
   2140static int ravb_close(struct net_device *ndev)
   2141{
   2142	struct device_node *np = ndev->dev.parent->of_node;
   2143	struct ravb_private *priv = netdev_priv(ndev);
   2144	const struct ravb_hw_info *info = priv->info;
   2145	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
   2146
   2147	netif_tx_stop_all_queues(ndev);
   2148
   2149	/* Disable interrupts by clearing the interrupt masks. */
   2150	ravb_write(ndev, 0, RIC0);
   2151	ravb_write(ndev, 0, RIC2);
   2152	ravb_write(ndev, 0, TIC);
   2153
   2154	/* Stop PTP Clock driver */
   2155	if (info->gptp)
   2156		ravb_ptp_stop(ndev);
   2157
   2158	/* Set the config mode to stop the AVB-DMAC's processes */
   2159	if (ravb_stop_dma(ndev) < 0)
   2160		netdev_err(ndev,
   2161			   "device will be stopped after h/w processes are done.\n");
   2162
   2163	/* Clear the timestamp list */
   2164	if (info->gptp || info->ccc_gac) {
   2165		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
   2166			list_del(&ts_skb->list);
   2167			kfree_skb(ts_skb->skb);
   2168			kfree(ts_skb);
   2169		}
   2170	}
   2171
   2172	/* PHY disconnect */
   2173	if (ndev->phydev) {
   2174		phy_stop(ndev->phydev);
   2175		phy_disconnect(ndev->phydev);
   2176		if (of_phy_is_fixed_link(np))
   2177			of_phy_deregister_fixed_link(np);
   2178	}
   2179
   2180	if (info->multi_irqs) {
   2181		free_irq(priv->tx_irqs[RAVB_NC], ndev);
   2182		free_irq(priv->rx_irqs[RAVB_NC], ndev);
   2183		free_irq(priv->tx_irqs[RAVB_BE], ndev);
   2184		free_irq(priv->rx_irqs[RAVB_BE], ndev);
   2185		free_irq(priv->emac_irq, ndev);
   2186		if (info->err_mgmt_irqs) {
   2187			free_irq(priv->erra_irq, ndev);
   2188			free_irq(priv->mgmta_irq, ndev);
   2189		}
   2190	}
   2191	free_irq(ndev->irq, ndev);
   2192
   2193	if (info->nc_queues)
   2194		napi_disable(&priv->napi[RAVB_NC]);
   2195	napi_disable(&priv->napi[RAVB_BE]);
   2196
   2197	/* Free all the skb's in the RX queue and the DMA buffers. */
   2198	ravb_ring_free(ndev, RAVB_BE);
   2199	if (info->nc_queues)
   2200		ravb_ring_free(ndev, RAVB_NC);
   2201
   2202	return 0;
   2203}
   2204
   2205static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
   2206{
   2207	struct ravb_private *priv = netdev_priv(ndev);
   2208	struct hwtstamp_config config;
   2209
   2210	config.flags = 0;
   2211	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
   2212						HWTSTAMP_TX_OFF;
   2213	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
   2214	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
   2215		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
   2216		break;
   2217	case RAVB_RXTSTAMP_TYPE_ALL:
   2218		config.rx_filter = HWTSTAMP_FILTER_ALL;
   2219		break;
   2220	default:
   2221		config.rx_filter = HWTSTAMP_FILTER_NONE;
   2222	}
   2223
   2224	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
   2225		-EFAULT : 0;
   2226}
   2227
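/* A minimal user-space sketch of how this path is typically exercised;
 * "eth0" and the socket descriptor "sock" are placeholders:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 */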
   2228/* Control hardware time stamping */
   2229static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
   2230{
   2231	struct ravb_private *priv = netdev_priv(ndev);
   2232	struct hwtstamp_config config;
   2233	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
   2234	u32 tstamp_tx_ctrl;
   2235
   2236	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
   2237		return -EFAULT;
   2238
   2239	switch (config.tx_type) {
   2240	case HWTSTAMP_TX_OFF:
   2241		tstamp_tx_ctrl = 0;
   2242		break;
   2243	case HWTSTAMP_TX_ON:
   2244		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
   2245		break;
   2246	default:
   2247		return -ERANGE;
   2248	}
   2249
   2250	switch (config.rx_filter) {
   2251	case HWTSTAMP_FILTER_NONE:
   2252		tstamp_rx_ctrl = 0;
   2253		break;
   2254	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
   2255		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
   2256		break;
   2257	default:
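		/* Unsupported filters fall back to timestamping all received
		 * packets; the adjusted filter is reported back to user space
		 * via the copy_to_user() below.
		 */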
   2258		config.rx_filter = HWTSTAMP_FILTER_ALL;
   2259		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
   2260	}
   2261
   2262	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
   2263	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
   2264
   2265	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
   2266		-EFAULT : 0;
   2267}
   2268
   2269/* ioctl to device function */
   2270static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
   2271{
   2272	struct phy_device *phydev = ndev->phydev;
   2273
   2274	if (!netif_running(ndev))
   2275		return -EINVAL;
   2276
   2277	if (!phydev)
   2278		return -ENODEV;
   2279
   2280	switch (cmd) {
   2281	case SIOCGHWTSTAMP:
   2282		return ravb_hwtstamp_get(ndev, req);
   2283	case SIOCSHWTSTAMP:
   2284		return ravb_hwtstamp_set(ndev, req);
   2285	}
   2286
   2287	return phy_mii_ioctl(phydev, req, cmd);
   2288}
   2289
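/* Update the MTU; if the interface is running, the E-MAC is re-initialized
 * so the new setting takes effect in hardware.
 */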
   2290static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
   2291{
   2292	struct ravb_private *priv = netdev_priv(ndev);
   2293
   2294	ndev->mtu = new_mtu;
   2295
   2296	if (netif_running(ndev)) {
   2297		synchronize_irq(priv->emac_irq);
   2298		ravb_emac_init(ndev);
   2299	}
   2300
   2301	netdev_update_features(ndev);
   2302
   2303	return 0;
   2304}
   2305
   2306static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
   2307{
   2308	struct ravb_private *priv = netdev_priv(ndev);
   2309	unsigned long flags;
   2310
   2311	spin_lock_irqsave(&priv->lock, flags);
   2312
   2313	/* Disable TX and RX */
   2314	ravb_rcv_snd_disable(ndev);
   2315
   2316	/* Modify RX Checksum setting */
   2317	ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
   2318
   2319	/* Enable TX and RX */
   2320	ravb_rcv_snd_enable(ndev);
   2321
   2322	spin_unlock_irqrestore(&priv->lock, flags);
   2323}
   2324
   2325static int ravb_set_features_gbeth(struct net_device *ndev,
   2326				   netdev_features_t features)
   2327{
    2328		/* Placeholder */
   2329	return 0;
   2330}
   2331
   2332static int ravb_set_features_rcar(struct net_device *ndev,
   2333				  netdev_features_t features)
   2334{
   2335	netdev_features_t changed = ndev->features ^ features;
   2336
   2337	if (changed & NETIF_F_RXCSUM)
   2338		ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
   2339
   2340	ndev->features = features;
   2341
   2342	return 0;
   2343}
   2344
   2345static int ravb_set_features(struct net_device *ndev,
   2346			     netdev_features_t features)
   2347{
   2348	struct ravb_private *priv = netdev_priv(ndev);
   2349	const struct ravb_hw_info *info = priv->info;
   2350
   2351	return info->set_feature(ndev, features);
   2352}
   2353
   2354static const struct net_device_ops ravb_netdev_ops = {
   2355	.ndo_open		= ravb_open,
   2356	.ndo_stop		= ravb_close,
   2357	.ndo_start_xmit		= ravb_start_xmit,
   2358	.ndo_select_queue	= ravb_select_queue,
   2359	.ndo_get_stats		= ravb_get_stats,
   2360	.ndo_set_rx_mode	= ravb_set_rx_mode,
   2361	.ndo_tx_timeout		= ravb_tx_timeout,
   2362	.ndo_eth_ioctl		= ravb_do_ioctl,
   2363	.ndo_change_mtu		= ravb_change_mtu,
   2364	.ndo_validate_addr	= eth_validate_addr,
   2365	.ndo_set_mac_address	= eth_mac_addr,
   2366	.ndo_set_features	= ravb_set_features,
   2367};
   2368
   2369/* MDIO bus init function */
   2370static int ravb_mdio_init(struct ravb_private *priv)
   2371{
   2372	struct platform_device *pdev = priv->pdev;
   2373	struct device *dev = &pdev->dev;
   2374	int error;
   2375
   2376	/* Bitbang init */
   2377	priv->mdiobb.ops = &bb_ops;
   2378
   2379	/* MII controller setting */
   2380	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
   2381	if (!priv->mii_bus)
   2382		return -ENOMEM;
   2383
   2384	/* Hook up MII support for ethtool */
   2385	priv->mii_bus->name = "ravb_mii";
   2386	priv->mii_bus->parent = dev;
   2387	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
   2388		 pdev->name, pdev->id);
   2389
   2390	/* Register MDIO bus */
   2391	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
   2392	if (error)
   2393		goto out_free_bus;
   2394
   2395	return 0;
   2396
   2397out_free_bus:
   2398	free_mdio_bitbang(priv->mii_bus);
   2399	return error;
   2400}
   2401
   2402/* MDIO bus release function */
   2403static int ravb_mdio_release(struct ravb_private *priv)
   2404{
   2405	/* Unregister mdio bus */
   2406	mdiobus_unregister(priv->mii_bus);
   2407
   2408	/* Free bitbang info */
   2409	free_mdio_bitbang(priv->mii_bus);
   2410
   2411	return 0;
   2412}
   2413
   2414static const struct ravb_hw_info ravb_gen3_hw_info = {
   2415	.rx_ring_free = ravb_rx_ring_free_rcar,
   2416	.rx_ring_format = ravb_rx_ring_format_rcar,
   2417	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
   2418	.receive = ravb_rx_rcar,
   2419	.set_rate = ravb_set_rate_rcar,
   2420	.set_feature = ravb_set_features_rcar,
   2421	.dmac_init = ravb_dmac_init_rcar,
   2422	.emac_init = ravb_emac_init_rcar,
   2423	.gstrings_stats = ravb_gstrings_stats,
   2424	.gstrings_size = sizeof(ravb_gstrings_stats),
   2425	.net_hw_features = NETIF_F_RXCSUM,
   2426	.net_features = NETIF_F_RXCSUM,
   2427	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
   2428	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
   2429	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
   2430	.rx_max_buf_size = SZ_2K,
   2431	.internal_delay = 1,
   2432	.tx_counters = 1,
   2433	.multi_irqs = 1,
   2434	.irq_en_dis = 1,
   2435	.ccc_gac = 1,
   2436	.nc_queues = 1,
   2437	.magic_pkt = 1,
   2438};
   2439
   2440static const struct ravb_hw_info ravb_gen2_hw_info = {
   2441	.rx_ring_free = ravb_rx_ring_free_rcar,
   2442	.rx_ring_format = ravb_rx_ring_format_rcar,
   2443	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
   2444	.receive = ravb_rx_rcar,
   2445	.set_rate = ravb_set_rate_rcar,
   2446	.set_feature = ravb_set_features_rcar,
   2447	.dmac_init = ravb_dmac_init_rcar,
   2448	.emac_init = ravb_emac_init_rcar,
   2449	.gstrings_stats = ravb_gstrings_stats,
   2450	.gstrings_size = sizeof(ravb_gstrings_stats),
   2451	.net_hw_features = NETIF_F_RXCSUM,
   2452	.net_features = NETIF_F_RXCSUM,
   2453	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
   2454	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
   2455	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
   2456	.rx_max_buf_size = SZ_2K,
   2457	.aligned_tx = 1,
   2458	.gptp = 1,
   2459	.nc_queues = 1,
   2460	.magic_pkt = 1,
   2461};
   2462
   2463static const struct ravb_hw_info ravb_rzv2m_hw_info = {
   2464	.rx_ring_free = ravb_rx_ring_free_rcar,
   2465	.rx_ring_format = ravb_rx_ring_format_rcar,
   2466	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
   2467	.receive = ravb_rx_rcar,
   2468	.set_rate = ravb_set_rate_rcar,
   2469	.set_feature = ravb_set_features_rcar,
   2470	.dmac_init = ravb_dmac_init_rcar,
   2471	.emac_init = ravb_emac_init_rcar,
   2472	.gstrings_stats = ravb_gstrings_stats,
   2473	.gstrings_size = sizeof(ravb_gstrings_stats),
   2474	.net_hw_features = NETIF_F_RXCSUM,
   2475	.net_features = NETIF_F_RXCSUM,
   2476	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
   2477	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
   2478	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
   2479	.rx_max_buf_size = SZ_2K,
   2480	.multi_irqs = 1,
   2481	.err_mgmt_irqs = 1,
   2482	.gptp = 1,
   2483	.gptp_ref_clk = 1,
   2484	.nc_queues = 1,
   2485	.magic_pkt = 1,
   2486};
   2487
   2488static const struct ravb_hw_info gbeth_hw_info = {
   2489	.rx_ring_free = ravb_rx_ring_free_gbeth,
   2490	.rx_ring_format = ravb_rx_ring_format_gbeth,
   2491	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
   2492	.receive = ravb_rx_gbeth,
   2493	.set_rate = ravb_set_rate_gbeth,
   2494	.set_feature = ravb_set_features_gbeth,
   2495	.dmac_init = ravb_dmac_init_gbeth,
   2496	.emac_init = ravb_emac_init_gbeth,
   2497	.gstrings_stats = ravb_gstrings_stats_gbeth,
   2498	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
   2499	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
   2500	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
   2501	.tccr_mask = TCCR_TSRQ0,
   2502	.rx_max_buf_size = SZ_8K,
   2503	.aligned_tx = 1,
   2504	.tx_counters = 1,
   2505	.carrier_counters = 1,
   2506	.half_duplex = 1,
   2507};
   2508
   2509static const struct of_device_id ravb_match_table[] = {
   2510	{ .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
   2511	{ .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
   2512	{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
   2513	{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
   2514	{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
   2515	{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
   2516	{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
   2517	{ }
   2518};
   2519MODULE_DEVICE_TABLE(of, ravb_match_table);
   2520
   2521static int ravb_set_gti(struct net_device *ndev)
   2522{
   2523	struct ravb_private *priv = netdev_priv(ndev);
   2524	const struct ravb_hw_info *info = priv->info;
   2525	struct device *dev = ndev->dev.parent;
   2526	unsigned long rate;
   2527	uint64_t inc;
   2528
   2529	if (info->gptp_ref_clk)
   2530		rate = clk_get_rate(priv->gptp_clk);
   2531	else
   2532		rate = clk_get_rate(priv->clk);
   2533	if (!rate)
   2534		return -EINVAL;
   2535
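	/* GTI.TIV holds the timer increment per gPTP clock cycle as a
	 * fixed-point value with 20 fractional bits (in nanoseconds), i.e.
	 * inc = 10^9 * 2^20 / rate. A 125 MHz clock, for example, gives
	 * 8 ns per cycle, i.e. 8 << 20 = 0x800000.
	 */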
   2536	inc = div64_ul(1000000000ULL << 20, rate);
   2537
   2538	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
   2539		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
   2540			inc, GTI_TIV_MIN, GTI_TIV_MAX);
   2541		return -EINVAL;
   2542	}
   2543
   2544	ravb_write(ndev, inc, GTI);
   2545
   2546	return 0;
   2547}
   2548
   2549static void ravb_set_config_mode(struct net_device *ndev)
   2550{
   2551	struct ravb_private *priv = netdev_priv(ndev);
   2552	const struct ravb_hw_info *info = priv->info;
   2553
   2554	if (info->gptp) {
   2555		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
   2556		/* Set CSEL value */
   2557		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
   2558	} else if (info->ccc_gac) {
   2559		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
   2560			    CCC_GAC | CCC_CSEL_HPB);
   2561	} else {
   2562		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
   2563	}
   2564}
   2565
   2566/* Set tx and rx clock internal delay modes */
   2567static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
   2568{
   2569	struct ravb_private *priv = netdev_priv(ndev);
   2570	bool explicit_delay = false;
   2571	u32 delay;
   2572
   2573	if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
   2574		/* Valid values are 0 and 1800, according to DT bindings */
   2575		priv->rxcidm = !!delay;
   2576		explicit_delay = true;
   2577	}
   2578	if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
   2579		/* Valid values are 0 and 2000, according to DT bindings */
   2580		priv->txcidm = !!delay;
   2581		explicit_delay = true;
   2582	}
   2583
   2584	if (explicit_delay)
   2585		return;
   2586
   2587	/* Fall back to legacy rgmii-*id behavior */
   2588	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
   2589	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
   2590		priv->rxcidm = 1;
   2591		priv->rgmii_override = 1;
   2592	}
   2593
   2594	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
   2595	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
   2596		priv->txcidm = 1;
   2597		priv->rgmii_override = 1;
   2598	}
   2599}
   2600
   2601static void ravb_set_delay_mode(struct net_device *ndev)
   2602{
   2603	struct ravb_private *priv = netdev_priv(ndev);
   2604	u32 set = 0;
   2605
   2606	if (priv->rxcidm)
   2607		set |= APSR_RDM;
   2608	if (priv->txcidm)
   2609		set |= APSR_TDM;
   2610	ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
   2611}
   2612
   2613static int ravb_probe(struct platform_device *pdev)
   2614{
   2615	struct device_node *np = pdev->dev.of_node;
   2616	const struct ravb_hw_info *info;
   2617	struct reset_control *rstc;
   2618	struct ravb_private *priv;
   2619	struct net_device *ndev;
   2620	int error, irq, q;
   2621	struct resource *res;
   2622	int i;
   2623
   2624	if (!np) {
   2625		dev_err(&pdev->dev,
   2626			"this driver is required to be instantiated from device tree\n");
   2627		return -EINVAL;
   2628	}
   2629
   2630	rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
   2631	if (IS_ERR(rstc))
   2632		return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
   2633				     "failed to get cpg reset\n");
   2634
   2635	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
   2636				  NUM_TX_QUEUE, NUM_RX_QUEUE);
   2637	if (!ndev)
   2638		return -ENOMEM;
   2639
   2640	info = of_device_get_match_data(&pdev->dev);
   2641
   2642	ndev->features = info->net_features;
   2643	ndev->hw_features = info->net_hw_features;
   2644
   2645	reset_control_deassert(rstc);
   2646	pm_runtime_enable(&pdev->dev);
   2647	pm_runtime_get_sync(&pdev->dev);
   2648
   2649	if (info->multi_irqs) {
   2650		if (info->err_mgmt_irqs)
   2651			irq = platform_get_irq_byname(pdev, "dia");
   2652		else
   2653			irq = platform_get_irq_byname(pdev, "ch22");
   2654	} else {
   2655		irq = platform_get_irq(pdev, 0);
   2656	}
   2657	if (irq < 0) {
   2658		error = irq;
   2659		goto out_release;
   2660	}
   2661	ndev->irq = irq;
   2662
   2663	SET_NETDEV_DEV(ndev, &pdev->dev);
   2664
   2665	priv = netdev_priv(ndev);
   2666	priv->info = info;
   2667	priv->rstc = rstc;
   2668	priv->ndev = ndev;
   2669	priv->pdev = pdev;
   2670	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
   2671	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
   2672	if (info->nc_queues) {
   2673		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
   2674		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
   2675	}
   2676
   2677	priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
   2678	if (IS_ERR(priv->addr)) {
   2679		error = PTR_ERR(priv->addr);
   2680		goto out_release;
   2681	}
   2682
   2683	/* The Ether-specific entries in the device structure. */
   2684	ndev->base_addr = res->start;
   2685
   2686	spin_lock_init(&priv->lock);
   2687	INIT_WORK(&priv->work, ravb_tx_timeout_work);
   2688
   2689	error = of_get_phy_mode(np, &priv->phy_interface);
   2690	if (error && error != -ENODEV)
   2691		goto out_release;
   2692
   2693	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
   2694	priv->avb_link_active_low =
   2695		of_property_read_bool(np, "renesas,ether-link-active-low");
   2696
   2697	if (info->multi_irqs) {
   2698		if (info->err_mgmt_irqs)
   2699			irq = platform_get_irq_byname(pdev, "line3");
   2700		else
   2701			irq = platform_get_irq_byname(pdev, "ch24");
   2702		if (irq < 0) {
   2703			error = irq;
   2704			goto out_release;
   2705		}
   2706		priv->emac_irq = irq;
   2707		for (i = 0; i < NUM_RX_QUEUE; i++) {
   2708			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
   2709			if (irq < 0) {
   2710				error = irq;
   2711				goto out_release;
   2712			}
   2713			priv->rx_irqs[i] = irq;
   2714		}
   2715		for (i = 0; i < NUM_TX_QUEUE; i++) {
   2716			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
   2717			if (irq < 0) {
   2718				error = irq;
   2719				goto out_release;
   2720			}
   2721			priv->tx_irqs[i] = irq;
   2722		}
   2723
   2724		if (info->err_mgmt_irqs) {
   2725			irq = platform_get_irq_byname(pdev, "err_a");
   2726			if (irq < 0) {
   2727				error = irq;
   2728				goto out_release;
   2729			}
   2730			priv->erra_irq = irq;
   2731
   2732			irq = platform_get_irq_byname(pdev, "mgmt_a");
   2733			if (irq < 0) {
   2734				error = irq;
   2735				goto out_release;
   2736			}
   2737			priv->mgmta_irq = irq;
   2738		}
   2739	}
   2740
   2741	priv->clk = devm_clk_get(&pdev->dev, NULL);
   2742	if (IS_ERR(priv->clk)) {
   2743		error = PTR_ERR(priv->clk);
   2744		goto out_release;
   2745	}
   2746
   2747	priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
   2748	if (IS_ERR(priv->refclk)) {
   2749		error = PTR_ERR(priv->refclk);
   2750		goto out_release;
   2751	}
   2752	clk_prepare_enable(priv->refclk);
   2753
   2754	if (info->gptp_ref_clk) {
   2755		priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
   2756		if (IS_ERR(priv->gptp_clk)) {
   2757			error = PTR_ERR(priv->gptp_clk);
   2758			goto out_disable_refclk;
   2759		}
   2760		clk_prepare_enable(priv->gptp_clk);
   2761	}
   2762
   2763	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
   2764	ndev->min_mtu = ETH_MIN_MTU;
   2765
    2766		/* FIXME: R-Car Gen2 has a 4-byte alignment restriction for the TX
    2767		 * buffer. Use two descriptors to handle this situation: the first
    2768		 * descriptor handles the aligned data buffer and the second handles
    2769		 * the overflow data caused by the alignment.
    2770		 */
   2771	priv->num_tx_desc = info->aligned_tx ? 2 : 1;
   2772
   2773	/* Set function */
   2774	ndev->netdev_ops = &ravb_netdev_ops;
   2775	ndev->ethtool_ops = &ravb_ethtool_ops;
   2776
   2777	/* Set AVB config mode */
   2778	ravb_set_config_mode(ndev);
   2779
   2780	if (info->gptp || info->ccc_gac) {
   2781		/* Set GTI value */
   2782		error = ravb_set_gti(ndev);
   2783		if (error)
   2784			goto out_disable_gptp_clk;
   2785
   2786		/* Request GTI loading */
   2787		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
   2788	}
   2789
   2790	if (info->internal_delay) {
   2791		ravb_parse_delay_mode(np, ndev);
   2792		ravb_set_delay_mode(ndev);
   2793	}
   2794
   2795	/* Allocate descriptor base address table */
   2796	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
   2797	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
   2798					    &priv->desc_bat_dma, GFP_KERNEL);
   2799	if (!priv->desc_bat) {
   2800		dev_err(&pdev->dev,
   2801			"Cannot allocate desc base address table (size %d bytes)\n",
   2802			priv->desc_bat_size);
   2803		error = -ENOMEM;
   2804		goto out_disable_gptp_clk;
   2805	}
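	/* Initialise every descriptor base address table entry to DT_EOS
	 * before programming the table address into DBAT.
	 */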
   2806	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
   2807		priv->desc_bat[q].die_dt = DT_EOS;
   2808	ravb_write(ndev, priv->desc_bat_dma, DBAT);
   2809
   2810	/* Initialise HW timestamp list */
   2811	INIT_LIST_HEAD(&priv->ts_skb_list);
   2812
   2813	/* Initialise PTP Clock driver */
   2814	if (info->ccc_gac)
   2815		ravb_ptp_init(ndev, pdev);
   2816
   2817	/* Debug message level */
   2818	priv->msg_enable = RAVB_DEF_MSG_ENABLE;
   2819
   2820	/* Read and set MAC address */
   2821	ravb_read_mac_address(np, ndev);
   2822	if (!is_valid_ether_addr(ndev->dev_addr)) {
   2823		dev_warn(&pdev->dev,
   2824			 "no valid MAC address supplied, using a random one\n");
   2825		eth_hw_addr_random(ndev);
   2826	}
   2827
   2828	/* MDIO bus init */
   2829	error = ravb_mdio_init(priv);
   2830	if (error) {
   2831		dev_err(&pdev->dev, "failed to initialize MDIO\n");
   2832		goto out_dma_free;
   2833	}
   2834
   2835	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
   2836	if (info->nc_queues)
   2837		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
   2838
   2839	/* Network device register */
   2840	error = register_netdev(ndev);
   2841	if (error)
   2842		goto out_napi_del;
   2843
   2844	device_set_wakeup_capable(&pdev->dev, 1);
   2845
   2846	/* Print device information */
   2847	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
   2848		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
   2849
   2850	platform_set_drvdata(pdev, ndev);
   2851
   2852	return 0;
   2853
   2854out_napi_del:
   2855	if (info->nc_queues)
   2856		netif_napi_del(&priv->napi[RAVB_NC]);
   2857
   2858	netif_napi_del(&priv->napi[RAVB_BE]);
   2859	ravb_mdio_release(priv);
   2860out_dma_free:
   2861	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
   2862			  priv->desc_bat_dma);
   2863
   2864	/* Stop PTP Clock driver */
   2865	if (info->ccc_gac)
   2866		ravb_ptp_stop(ndev);
   2867out_disable_gptp_clk:
   2868	clk_disable_unprepare(priv->gptp_clk);
   2869out_disable_refclk:
   2870	clk_disable_unprepare(priv->refclk);
   2871out_release:
   2872	free_netdev(ndev);
   2873
   2874	pm_runtime_put(&pdev->dev);
   2875	pm_runtime_disable(&pdev->dev);
   2876	reset_control_assert(rstc);
   2877	return error;
   2878}
   2879
   2880static int ravb_remove(struct platform_device *pdev)
   2881{
   2882	struct net_device *ndev = platform_get_drvdata(pdev);
   2883	struct ravb_private *priv = netdev_priv(ndev);
   2884	const struct ravb_hw_info *info = priv->info;
   2885
   2886	/* Stop PTP Clock driver */
   2887	if (info->ccc_gac)
   2888		ravb_ptp_stop(ndev);
   2889
   2890	clk_disable_unprepare(priv->gptp_clk);
   2891	clk_disable_unprepare(priv->refclk);
   2892
   2893	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
   2894			  priv->desc_bat_dma);
   2895	/* Set reset mode */
   2896	ravb_write(ndev, CCC_OPC_RESET, CCC);
   2897	pm_runtime_put_sync(&pdev->dev);
   2898	unregister_netdev(ndev);
   2899	if (info->nc_queues)
   2900		netif_napi_del(&priv->napi[RAVB_NC]);
   2901	netif_napi_del(&priv->napi[RAVB_BE]);
   2902	ravb_mdio_release(priv);
   2903	pm_runtime_disable(&pdev->dev);
   2904	reset_control_assert(priv->rstc);
   2905	free_netdev(ndev);
   2906	platform_set_drvdata(pdev, NULL);
   2907
   2908	return 0;
   2909}
   2910
   2911static int ravb_wol_setup(struct net_device *ndev)
   2912{
   2913	struct ravb_private *priv = netdev_priv(ndev);
   2914	const struct ravb_hw_info *info = priv->info;
   2915
   2916	/* Disable interrupts by clearing the interrupt masks. */
   2917	ravb_write(ndev, 0, RIC0);
   2918	ravb_write(ndev, 0, RIC2);
   2919	ravb_write(ndev, 0, TIC);
   2920
   2921	/* Only allow ECI interrupts */
   2922	synchronize_irq(priv->emac_irq);
   2923	if (info->nc_queues)
   2924		napi_disable(&priv->napi[RAVB_NC]);
   2925	napi_disable(&priv->napi[RAVB_BE]);
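	/* Permit only the magic packet detection interrupt (ECSIPR_MPDIP)
	 * so that it can wake the system.
	 */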
   2926	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
   2927
   2928	/* Enable MagicPacket */
   2929	ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
   2930
   2931	return enable_irq_wake(priv->emac_irq);
   2932}
   2933
   2934static int ravb_wol_restore(struct net_device *ndev)
   2935{
   2936	struct ravb_private *priv = netdev_priv(ndev);
   2937	const struct ravb_hw_info *info = priv->info;
   2938
   2939	if (info->nc_queues)
   2940		napi_enable(&priv->napi[RAVB_NC]);
   2941	napi_enable(&priv->napi[RAVB_BE]);
   2942
   2943	/* Disable MagicPacket */
   2944	ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
   2945
   2946	ravb_close(ndev);
   2947
   2948	return disable_irq_wake(priv->emac_irq);
   2949}
   2950
   2951static int __maybe_unused ravb_suspend(struct device *dev)
   2952{
   2953	struct net_device *ndev = dev_get_drvdata(dev);
   2954	struct ravb_private *priv = netdev_priv(ndev);
   2955	int ret;
   2956
   2957	if (!netif_running(ndev))
   2958		return 0;
   2959
   2960	netif_device_detach(ndev);
   2961
   2962	if (priv->wol_enabled)
   2963		ret = ravb_wol_setup(ndev);
   2964	else
   2965		ret = ravb_close(ndev);
   2966
   2967	return ret;
   2968}
   2969
   2970static int __maybe_unused ravb_resume(struct device *dev)
   2971{
   2972	struct net_device *ndev = dev_get_drvdata(dev);
   2973	struct ravb_private *priv = netdev_priv(ndev);
   2974	const struct ravb_hw_info *info = priv->info;
   2975	int ret = 0;
   2976
   2977	/* If WoL is enabled set reset mode to rearm the WoL logic */
   2978	if (priv->wol_enabled)
   2979		ravb_write(ndev, CCC_OPC_RESET, CCC);
   2980
    2981		/* All registers have been reset to their default values.
    2982		 * Restore all registers that were set up at probe time and
    2983		 * reopen the device if it was running before the system was suspended.
    2984		 */
   2985
   2986	/* Set AVB config mode */
   2987	ravb_set_config_mode(ndev);
   2988
   2989	if (info->gptp || info->ccc_gac) {
   2990		/* Set GTI value */
   2991		ret = ravb_set_gti(ndev);
   2992		if (ret)
   2993			return ret;
   2994
   2995		/* Request GTI loading */
   2996		ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
   2997	}
   2998
   2999	if (info->internal_delay)
   3000		ravb_set_delay_mode(ndev);
   3001
   3002	/* Restore descriptor base address table */
   3003	ravb_write(ndev, priv->desc_bat_dma, DBAT);
   3004
   3005	if (netif_running(ndev)) {
   3006		if (priv->wol_enabled) {
   3007			ret = ravb_wol_restore(ndev);
   3008			if (ret)
   3009				return ret;
   3010		}
   3011		ret = ravb_open(ndev);
   3012		if (ret < 0)
   3013			return ret;
   3014		netif_device_attach(ndev);
   3015	}
   3016
   3017	return ret;
   3018}
   3019
   3020static int __maybe_unused ravb_runtime_nop(struct device *dev)
   3021{
   3022	/* Runtime PM callback shared between ->runtime_suspend()
   3023	 * and ->runtime_resume(). Simply returns success.
   3024	 *
   3025	 * This driver re-initializes all registers after
   3026	 * pm_runtime_get_sync() anyway so there is no need
   3027	 * to save and restore registers here.
   3028	 */
   3029	return 0;
   3030}
   3031
   3032static const struct dev_pm_ops ravb_dev_pm_ops = {
   3033	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
   3034	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
   3035};
   3036
   3037static struct platform_driver ravb_driver = {
   3038	.probe		= ravb_probe,
   3039	.remove		= ravb_remove,
   3040	.driver = {
   3041		.name	= "ravb",
   3042		.pm	= &ravb_dev_pm_ops,
   3043		.of_match_table = ravb_match_table,
   3044	},
   3045};
   3046
   3047module_platform_driver(ravb_driver);
   3048
   3049MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
   3050MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
   3051MODULE_LICENSE("GPL v2");