cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mvneta.c (160733B)


      1/*
      2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
      3 *
      4 * Copyright (C) 2012 Marvell
      5 *
      6 * Rami Rosen <rosenr@marvell.com>
      7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
      8 *
      9 * This file is licensed under the terms of the GNU General Public
     10 * License version 2. This program is licensed "as is" without any
     11 * warranty of any kind, whether express or implied.
     12 */
     13
     14#include <linux/clk.h>
     15#include <linux/cpu.h>
     16#include <linux/etherdevice.h>
     17#include <linux/if_vlan.h>
     18#include <linux/inetdevice.h>
     19#include <linux/interrupt.h>
     20#include <linux/io.h>
     21#include <linux/kernel.h>
     22#include <linux/mbus.h>
     23#include <linux/module.h>
     24#include <linux/netdevice.h>
     25#include <linux/of.h>
     26#include <linux/of_address.h>
     27#include <linux/of_irq.h>
     28#include <linux/of_mdio.h>
     29#include <linux/of_net.h>
     30#include <linux/phy/phy.h>
     31#include <linux/phy.h>
     32#include <linux/phylink.h>
     33#include <linux/platform_device.h>
     34#include <linux/skbuff.h>
     35#include <net/hwbm.h>
     36#include "mvneta_bm.h"
     37#include <net/ip.h>
     38#include <net/ipv6.h>
     39#include <net/tso.h>
     40#include <net/page_pool.h>
     41#include <net/pkt_cls.h>
     42#include <linux/bpf_trace.h>
     43
     44/* Registers */
     45#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
     46#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
     47#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
     48#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
     49#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
     50#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
     51#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
     52#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
     53#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
     54#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
     55#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
     56#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
     57#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
     58#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
     59#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
     60#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
     61#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
     62#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
     63#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
     64#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
     65#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
     66#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
     67#define MVNETA_PORT_RX_RESET                    0x1cc0
     68#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
     69#define MVNETA_PHY_ADDR                         0x2000
     70#define      MVNETA_PHY_ADDR_MASK               0x1f
     71#define MVNETA_MBUS_RETRY                       0x2010
     72#define MVNETA_UNIT_INTR_CAUSE                  0x2080
     73#define MVNETA_UNIT_CONTROL                     0x20B0
     74#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
     75#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
     76#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
     77#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
     78#define MVNETA_BASE_ADDR_ENABLE                 0x2290
     79#define      MVNETA_AC5_CNM_DDR_TARGET		0x2
     80#define      MVNETA_AC5_CNM_DDR_ATTR		0xb
     81#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
     82#define MVNETA_PORT_CONFIG                      0x2400
     83#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
     84#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
     85#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
     86#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
     87#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
     88#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
     89#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
     90#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
     91#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
     92						 MVNETA_DEF_RXQ_ARP(q)	 | \
     93						 MVNETA_DEF_RXQ_TCP(q)	 | \
     94						 MVNETA_DEF_RXQ_UDP(q)	 | \
     95						 MVNETA_DEF_RXQ_BPDU(q)	 | \
     96						 MVNETA_TX_UNSET_ERR_SUM | \
     97						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
     98#define MVNETA_PORT_CONFIG_EXTEND                0x2404
     99#define MVNETA_MAC_ADDR_LOW                      0x2414
    100#define MVNETA_MAC_ADDR_HIGH                     0x2418
    101#define MVNETA_SDMA_CONFIG                       0x241c
    102#define      MVNETA_SDMA_BRST_SIZE_16            4
    103#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
    104#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
    105#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
    106#define      MVNETA_DESC_SWAP                    BIT(6)
    107#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
    108#define	MVNETA_VLAN_PRIO_TO_RXQ			 0x2440
    109#define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
    110#define MVNETA_PORT_STATUS                       0x2444
    111#define      MVNETA_TX_IN_PRGRS                  BIT(0)
    112#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
    113#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
    114/* Only exists on Armada XP and Armada 370 */
    115#define MVNETA_SERDES_CFG			 0x24A0
    116#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
    117#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
    118#define      MVNETA_HSGMII_SERDES_PROTO		 0x1107
    119#define MVNETA_TYPE_PRIO                         0x24bc
    120#define      MVNETA_FORCE_UNI                    BIT(21)
    121#define MVNETA_TXQ_CMD_1                         0x24e4
    122#define MVNETA_TXQ_CMD                           0x2448
    123#define      MVNETA_TXQ_DISABLE_SHIFT            8
    124#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
    125#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
    126#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
    127#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
    128#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
    129#define MVNETA_ACC_MODE                          0x2500
    130#define MVNETA_BM_ADDRESS                        0x2504
    131#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
    132#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
    133#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
    134#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
    135#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
    136#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
    137
    138/* Exception Interrupt Port/Queue Cause register
    139 *
     140 * Their behavior depends on the mapping done using the PCPX2Q
     141 * registers. For a given CPU, if the bit associated with a queue is not
     142 * set, then a read of this register from that CPU will always return
     143 * 0 and a write will have no effect.
    144 */
    145
    146#define MVNETA_INTR_NEW_CAUSE                    0x25a0
    147#define MVNETA_INTR_NEW_MASK                     0x25a4
    148
    149/* bits  0..7  = TXQ SENT, one bit per queue.
    150 * bits  8..15 = RXQ OCCUP, one bit per queue.
    151 * bits 16..23 = RXQ FREE, one bit per queue.
    152 * bit  29 = OLD_REG_SUM, see old reg ?
    153 * bit  30 = TX_ERR_SUM, one bit for 4 ports
    154 * bit  31 = MISC_SUM,   one bit for 4 ports
    155 */
    156#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
    157#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
    158#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
    159#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
    160#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
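
/*
 * Worked example (for illustration only): with the default of eight TX and
 * eight RX queues,
 *
 *	MVNETA_TX_INTR_MASK(8) = ((1 << 8) - 1) << 0 = 0x000000ff
 *	MVNETA_RX_INTR_MASK(8) = ((1 << 8) - 1) << 8 = 0x0000ff00
 *
 * i.e. they equal MVNETA_TX_INTR_MASK_ALL and MVNETA_RX_INTR_MASK_ALL, and
 * OR-ing in MVNETA_MISCINTR_INTR_MASK (bit 31) also enables the misc summary
 * interrupt, as done in mvneta_percpu_unmask_interrupt() below.
 */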
    161
    162#define MVNETA_INTR_OLD_CAUSE                    0x25a8
    163#define MVNETA_INTR_OLD_MASK                     0x25ac
    164
    165/* Data Path Port/Queue Cause Register */
    166#define MVNETA_INTR_MISC_CAUSE                   0x25b0
    167#define MVNETA_INTR_MISC_MASK                    0x25b4
    168
    169#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
    170#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
    171#define      MVNETA_CAUSE_PTP                    BIT(4)
    172
    173#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
    174#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
    175#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
    176#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
    177#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
    178#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
    179#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
    180#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
    181
    182#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
    183#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
    184#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
    185
    186#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
    187#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
    188#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
    189
    190#define MVNETA_INTR_ENABLE                       0x25b8
    191#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
    192#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
    193
    194#define MVNETA_RXQ_CMD                           0x2680
    195#define      MVNETA_RXQ_DISABLE_SHIFT            8
    196#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
    197#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
    198#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
    199#define MVNETA_GMAC_CTRL_0                       0x2c00
    200#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
    201#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
    202#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
    203#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
    204#define MVNETA_GMAC_CTRL_2                       0x2c08
    205#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
    206#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
    207#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
    208#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
    209#define MVNETA_GMAC_STATUS                       0x2c10
    210#define      MVNETA_GMAC_LINK_UP                 BIT(0)
    211#define      MVNETA_GMAC_SPEED_1000              BIT(1)
    212#define      MVNETA_GMAC_SPEED_100               BIT(2)
    213#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
    214#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
    215#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
    216#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
    217#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
    218#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
    219#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
    220#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
    221#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
    222#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
    223#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
    224#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
    225#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
    226#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
    227#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
    228#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
    229#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
    230#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
    231#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
    232#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
    233#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
    234#define MVNETA_GMAC_CTRL_4                       0x2c90
    235#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
    236#define MVNETA_MIB_COUNTERS_BASE                 0x3000
    237#define      MVNETA_MIB_LATE_COLLISION           0x7c
    238#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
    239#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
    240#define MVNETA_DA_FILT_UCAST_BASE                0x3600
    241#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
    242#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
    243#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
    244#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
    245#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
    246#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
    247#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
    248#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
    249#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
    250#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
    251#define MVNETA_PORT_TX_RESET                     0x3cf0
    252#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
    253#define MVNETA_TXQ_CMD1_REG			 0x3e00
    254#define      MVNETA_TXQ_CMD1_BW_LIM_SEL_V1	 BIT(3)
    255#define      MVNETA_TXQ_CMD1_BW_LIM_EN		 BIT(0)
    256#define MVNETA_REFILL_NUM_CLK_REG		 0x3e08
    257#define      MVNETA_REFILL_MAX_NUM_CLK		 0x0000ffff
    258#define MVNETA_TX_MTU                            0x3e0c
    259#define MVNETA_TX_TOKEN_SIZE                     0x3e14
    260#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
    261#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		 (0x3e20 + ((q) << 2))
    262#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
    263#define      MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
    264#define      MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	 0x0007ffff
    265#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
    266#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
    267
     268/* The values of the bucket refill base period and refill period are taken from
     269 * the reference manual, and add up to a base resolution of 10Kbps. This allows
     270 * covering all rate-limit values from 10Kbps up to 5Gbps.
     271 */
    272
    273/* Base period for the rate limit algorithm */
    274#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100
    275
     276/* Number of Base Periods to wait between each bucket refill */
    277#define MVNETA_TXQ_BUCKET_REFILL_PERIOD	1000
    278
    279/* The base resolution for rate limiting, in bps. Any max_rate value should be
    280 * a multiple of that value.
    281 */
    282#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
    283					 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
    284					  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
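
/*
 * Worked example (for illustration only): with a 100ns base period and 1000
 * base periods between refills, a refill happens every 100us, i.e. 10000
 * times per second, so
 *
 *	MVNETA_TXQ_RATE_LIMIT_RESOLUTION = NSEC_PER_SEC / (100 * 1000)
 *					 = 1000000000 / 100000 = 10000 bps
 *
 * i.e. the 10Kbps base resolution mentioned above. A 5Gbps limit would then
 * correspond to a refill value of 5000000000 / 10000 = 500000, which still
 * fits below MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX (0x0007ffff = 524287).
 */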
    285
    286#define MVNETA_LPI_CTRL_0                        0x2cc0
    287#define MVNETA_LPI_CTRL_1                        0x2cc4
    288#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
    289#define MVNETA_LPI_CTRL_2                        0x2cc8
    290#define MVNETA_LPI_STATUS                        0x2ccc
    291
    292#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
    293
    294/* Descriptor ring Macros */
    295#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
    296	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
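
/*
 * Illustrative sketch of the ring-wrapping macro above, assuming
 * last_desc == size - 1: with a ring of 512 descriptors, last_desc is 511,
 * so the index advances 0, 1, ..., 510, 511 and then wraps back to 0:
 *
 *	MVNETA_QUEUE_NEXT_DESC(q, 510) == 511
 *	MVNETA_QUEUE_NEXT_DESC(q, 511) == 0
 */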
    297
    298/* Various constants */
    299
    300/* Coalescing */
    301#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
    302#define MVNETA_RX_COAL_PKTS		32
    303#define MVNETA_RX_COAL_USEC		100
    304
     305/* The two-byte Marvell header. It either contains a special value used
     306 * by Marvell switches when a specific hardware mode is enabled (not
     307 * supported by this driver) or is automatically filled with zeroes on
     308 * the RX side. Since those two bytes sit at the front of the Ethernet
     309 * header, they automatically keep the IP header aligned on a 4-byte
     310 * boundary: the hardware skips those two bytes on its
     311 * own.
     312 */
    313#define MVNETA_MH_SIZE			2
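
/*
 * Alignment example (for illustration only): the Ethernet header is
 * ETH_HLEN = 14 bytes, so with the 2-byte Marvell header in front the L2
 * headers span MVNETA_MH_SIZE + ETH_HLEN = 16 bytes, which leaves the IP
 * header aligned on a 4-byte boundary relative to the start of the buffer.
 */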
    314
    315#define MVNETA_VLAN_TAG_LEN             4
    316
    317#define MVNETA_TX_CSUM_DEF_SIZE		1600
    318#define MVNETA_TX_CSUM_MAX_SIZE		9800
    319#define MVNETA_ACC_MODE_EXT1		1
    320#define MVNETA_ACC_MODE_EXT2		2
    321
    322#define MVNETA_MAX_DECODE_WIN		6
    323
    324/* Timeout constants */
    325#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
    326#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
    327#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
    328
    329#define MVNETA_TX_MTU_MAX		0x3ffff
    330
     331/* The RSS lookup table actually has 256 entries, but we only use
     332 * the first one for now
    333 */
    334#define MVNETA_RSS_LU_TABLE_SIZE	1
    335
    336/* Max number of Rx descriptors */
    337#define MVNETA_MAX_RXD 512
    338
    339/* Max number of Tx descriptors */
    340#define MVNETA_MAX_TXD 1024
    341
    342/* Max number of allowed TCP segments for software TSO */
    343#define MVNETA_MAX_TSO_SEGS 100
    344
    345#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
    346
    347/* descriptor aligned size */
    348#define MVNETA_DESC_ALIGNED_SIZE	32
    349
    350/* Number of bytes to be taken into account by HW when putting incoming data
     351 * into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
     352 * offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
    353 */
    354#define MVNETA_RX_PKT_OFFSET_CORRECTION		64
    355
    356#define MVNETA_RX_PKT_SIZE(mtu) \
    357	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
    358	      ETH_HLEN + ETH_FCS_LEN,			     \
    359	      cache_line_size())
    360
    361/* Driver assumes that the last 3 bits are 0 */
    362#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
    363#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
    364			 MVNETA_SKB_HEADROOM))
    365#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
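
/*
 * Worked example (illustrative, assuming a 64-byte cache line): for the
 * standard 1500-byte MTU,
 *
 *	MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64)
 *				 = ALIGN(1524, 64) = 1536
 *
 * MVNETA_MAX_RX_BUF_SIZE caps the per-buffer payload so that a single page
 * can hold the headroom, the received data and the trailing
 * struct skb_shared_info.
 */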
    366
    367#define IS_TSO_HEADER(txq, addr) \
    368	((addr >= txq->tso_hdrs_phys) && \
    369	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
    370
    371#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
    372	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
    373
    374enum {
    375	ETHTOOL_STAT_EEE_WAKEUP,
    376	ETHTOOL_STAT_SKB_ALLOC_ERR,
    377	ETHTOOL_STAT_REFILL_ERR,
    378	ETHTOOL_XDP_REDIRECT,
    379	ETHTOOL_XDP_PASS,
    380	ETHTOOL_XDP_DROP,
    381	ETHTOOL_XDP_TX,
    382	ETHTOOL_XDP_TX_ERR,
    383	ETHTOOL_XDP_XMIT,
    384	ETHTOOL_XDP_XMIT_ERR,
    385	ETHTOOL_MAX_STATS,
    386};
    387
    388struct mvneta_statistic {
    389	unsigned short offset;
    390	unsigned short type;
    391	const char name[ETH_GSTRING_LEN];
    392};
    393
    394#define T_REG_32	32
    395#define T_REG_64	64
    396#define T_SW		1
    397
    398#define MVNETA_XDP_PASS		0
    399#define MVNETA_XDP_DROPPED	BIT(0)
    400#define MVNETA_XDP_TX		BIT(1)
    401#define MVNETA_XDP_REDIR	BIT(2)
    402
    403static const struct mvneta_statistic mvneta_statistics[] = {
    404	{ 0x3000, T_REG_64, "good_octets_received", },
    405	{ 0x3010, T_REG_32, "good_frames_received", },
    406	{ 0x3008, T_REG_32, "bad_octets_received", },
    407	{ 0x3014, T_REG_32, "bad_frames_received", },
    408	{ 0x3018, T_REG_32, "broadcast_frames_received", },
    409	{ 0x301c, T_REG_32, "multicast_frames_received", },
    410	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
    411	{ 0x3058, T_REG_32, "good_fc_received", },
    412	{ 0x305c, T_REG_32, "bad_fc_received", },
    413	{ 0x3060, T_REG_32, "undersize_received", },
    414	{ 0x3064, T_REG_32, "fragments_received", },
    415	{ 0x3068, T_REG_32, "oversize_received", },
    416	{ 0x306c, T_REG_32, "jabber_received", },
    417	{ 0x3070, T_REG_32, "mac_receive_error", },
    418	{ 0x3074, T_REG_32, "bad_crc_event", },
    419	{ 0x3078, T_REG_32, "collision", },
    420	{ 0x307c, T_REG_32, "late_collision", },
    421	{ 0x2484, T_REG_32, "rx_discard", },
    422	{ 0x2488, T_REG_32, "rx_overrun", },
    423	{ 0x3020, T_REG_32, "frames_64_octets", },
    424	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
    425	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
    426	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
    427	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
    428	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
    429	{ 0x3038, T_REG_64, "good_octets_sent", },
    430	{ 0x3040, T_REG_32, "good_frames_sent", },
    431	{ 0x3044, T_REG_32, "excessive_collision", },
    432	{ 0x3048, T_REG_32, "multicast_frames_sent", },
    433	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
    434	{ 0x3054, T_REG_32, "fc_sent", },
    435	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
    436	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
    437	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
    438	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
    439	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
    440	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
    441	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
    442	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
    443	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
    444	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
    445	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
    446};
    447
    448struct mvneta_stats {
    449	u64	rx_packets;
    450	u64	rx_bytes;
    451	u64	tx_packets;
    452	u64	tx_bytes;
    453	/* xdp */
    454	u64	xdp_redirect;
    455	u64	xdp_pass;
    456	u64	xdp_drop;
    457	u64	xdp_xmit;
    458	u64	xdp_xmit_err;
    459	u64	xdp_tx;
    460	u64	xdp_tx_err;
    461};
    462
    463struct mvneta_ethtool_stats {
    464	struct mvneta_stats ps;
    465	u64	skb_alloc_error;
    466	u64	refill_error;
    467};
    468
    469struct mvneta_pcpu_stats {
    470	struct u64_stats_sync syncp;
    471
    472	struct mvneta_ethtool_stats es;
    473	u64	rx_dropped;
    474	u64	rx_errors;
    475};
    476
    477struct mvneta_pcpu_port {
    478	/* Pointer to the shared port */
    479	struct mvneta_port	*pp;
    480
    481	/* Pointer to the CPU-local NAPI struct */
    482	struct napi_struct	napi;
    483
    484	/* Cause of the previous interrupt */
    485	u32			cause_rx_tx;
    486};
    487
    488enum {
    489	__MVNETA_DOWN,
    490};
    491
    492struct mvneta_port {
    493	u8 id;
    494	struct mvneta_pcpu_port __percpu	*ports;
    495	struct mvneta_pcpu_stats __percpu	*stats;
    496
    497	unsigned long state;
    498
    499	int pkt_size;
    500	void __iomem *base;
    501	struct mvneta_rx_queue *rxqs;
    502	struct mvneta_tx_queue *txqs;
    503	struct net_device *dev;
    504	struct hlist_node node_online;
    505	struct hlist_node node_dead;
    506	int rxq_def;
    507	/* Protect the access to the percpu interrupt registers,
    508	 * ensuring that the configuration remains coherent.
    509	 */
    510	spinlock_t lock;
    511	bool is_stopped;
    512
    513	u32 cause_rx_tx;
    514	struct napi_struct napi;
    515
    516	struct bpf_prog *xdp_prog;
    517
    518	/* Core clock */
    519	struct clk *clk;
    520	/* AXI clock */
    521	struct clk *clk_bus;
    522	u8 mcast_count[256];
    523	u16 tx_ring_size;
    524	u16 rx_ring_size;
    525
    526	phy_interface_t phy_interface;
    527	struct device_node *dn;
    528	unsigned int tx_csum_limit;
    529	struct phylink *phylink;
    530	struct phylink_config phylink_config;
    531	struct phylink_pcs phylink_pcs;
    532	struct phy *comphy;
    533
    534	struct mvneta_bm *bm_priv;
    535	struct mvneta_bm_pool *pool_long;
    536	struct mvneta_bm_pool *pool_short;
    537	int bm_win_id;
    538
    539	bool eee_enabled;
    540	bool eee_active;
    541	bool tx_lpi_enabled;
    542
    543	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
    544
    545	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
    546
    547	/* Flags for special SoC configurations */
    548	bool neta_armada3700;
    549	bool neta_ac5;
    550	u16 rx_offset_correction;
    551	const struct mbus_dram_target_info *dram_target_info;
    552};
    553
    554/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
     555 * layout of the transmit and receive DMA descriptors; their
     556 * layout is therefore dictated by the hardware design.
    557 */
    558
    559#define MVNETA_TX_L3_OFF_SHIFT	0
    560#define MVNETA_TX_IP_HLEN_SHIFT	8
    561#define MVNETA_TX_L4_UDP	BIT(16)
    562#define MVNETA_TX_L3_IP6	BIT(17)
    563#define MVNETA_TXD_IP_CSUM	BIT(18)
    564#define MVNETA_TXD_Z_PAD	BIT(19)
    565#define MVNETA_TXD_L_DESC	BIT(20)
    566#define MVNETA_TXD_F_DESC	BIT(21)
    567#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
    568				 MVNETA_TXD_L_DESC | \
    569				 MVNETA_TXD_F_DESC)
    570#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
    571#define MVNETA_TX_L4_CSUM_NOT	BIT(31)
    572
    573#define MVNETA_RXD_ERR_CRC		0x0
    574#define MVNETA_RXD_BM_POOL_SHIFT	13
    575#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
    576#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
    577#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
    578#define MVNETA_RXD_ERR_LEN		BIT(18)
    579#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
    580#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
    581#define MVNETA_RXD_L3_IP4		BIT(25)
    582#define MVNETA_RXD_LAST_DESC		BIT(26)
    583#define MVNETA_RXD_FIRST_DESC		BIT(27)
    584#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
    585					 MVNETA_RXD_LAST_DESC)
    586#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
    587
    588#if defined(__LITTLE_ENDIAN)
    589struct mvneta_tx_desc {
    590	u32  command;		/* Options used by HW for packet transmitting.*/
    591	u16  reserved1;		/* csum_l4 (for future use)		*/
    592	u16  data_size;		/* Data size of transmitted packet in bytes */
    593	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
    594	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
    595	u32  reserved3[4];	/* Reserved - (for future use)		*/
    596};
    597
    598struct mvneta_rx_desc {
    599	u32  status;		/* Info about received packet		*/
    600	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
    601	u16  data_size;		/* Size of received packet in bytes	*/
    602
    603	u32  buf_phys_addr;	/* Physical address of the buffer	*/
    604	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
    605
    606	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
    607	u16  reserved3;		/* prefetch_cmd, for future use		*/
    608	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
    609
    610	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
    611	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
    612};
    613#else
    614struct mvneta_tx_desc {
    615	u16  data_size;		/* Data size of transmitted packet in bytes */
    616	u16  reserved1;		/* csum_l4 (for future use)		*/
    617	u32  command;		/* Options used by HW for packet transmitting.*/
    618	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
    619	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
    620	u32  reserved3[4];	/* Reserved - (for future use)		*/
    621};
    622
    623struct mvneta_rx_desc {
    624	u16  data_size;		/* Size of received packet in bytes	*/
    625	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
    626	u32  status;		/* Info about received packet		*/
    627
    628	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
    629	u32  buf_phys_addr;	/* Physical address of the buffer	*/
    630
    631	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
    632	u16  reserved3;		/* prefetch_cmd, for future use		*/
    633	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
    634
    635	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
    636	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
    637};
    638#endif
    639
    640enum mvneta_tx_buf_type {
    641	MVNETA_TYPE_SKB,
    642	MVNETA_TYPE_XDP_TX,
    643	MVNETA_TYPE_XDP_NDO,
    644};
    645
    646struct mvneta_tx_buf {
    647	enum mvneta_tx_buf_type type;
    648	union {
    649		struct xdp_frame *xdpf;
    650		struct sk_buff *skb;
    651	};
    652};
    653
    654struct mvneta_tx_queue {
    655	/* Number of this TX queue, in the range 0-7 */
    656	u8 id;
    657
    658	/* Number of TX DMA descriptors in the descriptor ring */
    659	int size;
    660
     661	/* Number of currently used TX DMA descriptors in the
    662	 * descriptor ring
    663	 */
    664	int count;
    665	int pending;
    666	int tx_stop_threshold;
    667	int tx_wake_threshold;
    668
    669	/* Array of transmitted buffers */
    670	struct mvneta_tx_buf *buf;
    671
    672	/* Index of last TX DMA descriptor that was inserted */
    673	int txq_put_index;
    674
    675	/* Index of the TX DMA descriptor to be cleaned up */
    676	int txq_get_index;
    677
    678	u32 done_pkts_coal;
    679
    680	/* Virtual address of the TX DMA descriptors array */
    681	struct mvneta_tx_desc *descs;
    682
    683	/* DMA address of the TX DMA descriptors array */
    684	dma_addr_t descs_phys;
    685
    686	/* Index of the last TX DMA descriptor */
    687	int last_desc;
    688
    689	/* Index of the next TX DMA descriptor to process */
    690	int next_desc_to_proc;
    691
    692	/* DMA buffers for TSO headers */
    693	char *tso_hdrs;
    694
    695	/* DMA address of TSO headers */
    696	dma_addr_t tso_hdrs_phys;
    697
     698	/* Affinity mask for CPUs */
    699	cpumask_t affinity_mask;
    700};
    701
    702struct mvneta_rx_queue {
    703	/* rx queue number, in the range 0-7 */
    704	u8 id;
    705
    706	/* num of rx descriptors in the rx descriptor ring */
    707	int size;
    708
    709	u32 pkts_coal;
    710	u32 time_coal;
    711
    712	/* page_pool */
    713	struct page_pool *page_pool;
    714	struct xdp_rxq_info xdp_rxq;
    715
    716	/* Virtual address of the RX buffer */
    717	void  **buf_virt_addr;
    718
    719	/* Virtual address of the RX DMA descriptors array */
    720	struct mvneta_rx_desc *descs;
    721
    722	/* DMA address of the RX DMA descriptors array */
    723	dma_addr_t descs_phys;
    724
    725	/* Index of the last RX DMA descriptor */
    726	int last_desc;
    727
    728	/* Index of the next RX DMA descriptor to process */
    729	int next_desc_to_proc;
    730
    731	/* Index of first RX DMA descriptor to refill */
    732	int first_to_refill;
    733	u32 refill_num;
    734};
    735
    736static enum cpuhp_state online_hpstate;
     737/* The hardware supports eight (8) RX queues and eight (8) TX queues;
     738 * all of them are allocated by default.
     739 */
    740static int rxq_number = 8;
    741static int txq_number = 8;
    742
    743static int rxq_def;
    744
    745static int rx_copybreak __read_mostly = 256;
    746
     747/* HW BM requires that each port be identified by a unique ID */
    748static int global_port_id;
    749
    750#define MVNETA_DRIVER_NAME "mvneta"
    751#define MVNETA_DRIVER_VERSION "1.0"
    752
    753/* Utility/helper methods */
    754
    755/* Write helper method */
    756static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
    757{
    758	writel(data, pp->base + offset);
    759}
    760
    761/* Read helper method */
    762static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
    763{
    764	return readl(pp->base + offset);
    765}
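
/* Typical usage of the two helpers above is a read-modify-write sequence on
 * a port register, e.g. (illustrative sketch only):
 *
 *	u32 val;
 *
 *	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 *
 * which is exactly the pattern used by mvneta_port_enable() further down.
 */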
    766
    767/* Increment txq get counter */
    768static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
    769{
    770	txq->txq_get_index++;
    771	if (txq->txq_get_index == txq->size)
    772		txq->txq_get_index = 0;
    773}
    774
    775/* Increment txq put counter */
    776static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
    777{
    778	txq->txq_put_index++;
    779	if (txq->txq_put_index == txq->size)
    780		txq->txq_put_index = 0;
    781}
    782
    783
    784/* Clear all MIB counters */
    785static void mvneta_mib_counters_clear(struct mvneta_port *pp)
    786{
    787	int i;
    788
    789	/* Perform dummy reads from MIB counters */
    790	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
    791		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
    792	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
    793	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
    794}
    795
    796/* Get System Network Statistics */
    797static void
    798mvneta_get_stats64(struct net_device *dev,
    799		   struct rtnl_link_stats64 *stats)
    800{
    801	struct mvneta_port *pp = netdev_priv(dev);
    802	unsigned int start;
    803	int cpu;
    804
    805	for_each_possible_cpu(cpu) {
    806		struct mvneta_pcpu_stats *cpu_stats;
    807		u64 rx_packets;
    808		u64 rx_bytes;
    809		u64 rx_dropped;
    810		u64 rx_errors;
    811		u64 tx_packets;
    812		u64 tx_bytes;
    813
    814		cpu_stats = per_cpu_ptr(pp->stats, cpu);
    815		do {
    816			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
    817			rx_packets = cpu_stats->es.ps.rx_packets;
    818			rx_bytes   = cpu_stats->es.ps.rx_bytes;
    819			rx_dropped = cpu_stats->rx_dropped;
    820			rx_errors  = cpu_stats->rx_errors;
    821			tx_packets = cpu_stats->es.ps.tx_packets;
    822			tx_bytes   = cpu_stats->es.ps.tx_bytes;
    823		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
    824
    825		stats->rx_packets += rx_packets;
    826		stats->rx_bytes   += rx_bytes;
    827		stats->rx_dropped += rx_dropped;
    828		stats->rx_errors  += rx_errors;
    829		stats->tx_packets += tx_packets;
    830		stats->tx_bytes   += tx_bytes;
    831	}
    832
    833	stats->tx_dropped	= dev->stats.tx_dropped;
    834}
    835
    836/* Rx descriptors helper methods */
    837
     838/* Check whether the RX descriptor with this status is both the first
     839 * and the last descriptor of the RX packet. Each RX packet is currently
     840 * received through a single RX descriptor, so an RX descriptor without
     841 * both its first and last bits set indicates an error.
     842 */
    843static int mvneta_rxq_desc_is_first_last(u32 status)
    844{
    845	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
    846		MVNETA_RXD_FIRST_LAST_DESC;
    847}
    848
    849/* Add number of descriptors ready to receive new packets */
    850static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
    851					  struct mvneta_rx_queue *rxq,
    852					  int ndescs)
    853{
    854	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
    855	 * be added at once
    856	 */
    857	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
    858		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
    859			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
    860			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
    861		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
    862	}
    863
    864	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
    865		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
    866}
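
/* Chunking example for the helper above (illustrative only): asking to add
 * 300 non-occupied descriptors results in two register writes, one adding
 * 255 and a final one adding the remaining 45, since the ADD_NON_OCCUPIED
 * field of MVNETA_RXQ_STATUS_UPDATE_REG can only take values up to 255.
 */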
    867
    868/* Get number of RX descriptors occupied by received packets */
    869static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
    870					struct mvneta_rx_queue *rxq)
    871{
    872	u32 val;
    873
    874	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
    875	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
    876}
    877
     878/* Update the number of processed and refilled RX descriptors; called
     879 * upon return from the RX path or from mvneta_rxq_drop_pkts().
     880 */
    881static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
    882				       struct mvneta_rx_queue *rxq,
    883				       int rx_done, int rx_filled)
    884{
    885	u32 val;
    886
    887	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
    888		val = rx_done |
    889		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
    890		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
    891		return;
    892	}
    893
    894	/* Only 255 descriptors can be added at once */
    895	while ((rx_done > 0) || (rx_filled > 0)) {
    896		if (rx_done <= 0xff) {
    897			val = rx_done;
    898			rx_done = 0;
    899		} else {
    900			val = 0xff;
    901			rx_done -= 0xff;
    902		}
    903		if (rx_filled <= 0xff) {
    904			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
    905			rx_filled = 0;
    906		} else {
    907			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
    908			rx_filled -= 0xff;
    909		}
    910		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
    911	}
    912}
    913
    914/* Get pointer to next RX descriptor to be processed by SW */
    915static struct mvneta_rx_desc *
    916mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
    917{
    918	int rx_desc = rxq->next_desc_to_proc;
    919
    920	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
    921	prefetch(rxq->descs + rxq->next_desc_to_proc);
    922	return rxq->descs + rx_desc;
    923}
    924
    925/* Change maximum receive size of the port. */
    926static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
    927{
    928	u32 val;
    929
    930	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
    931	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
    932	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
    933		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
    934	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
    935}
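
/* Encoding example for the helper above (illustrative only): the hardware
 * field holds the maximum RX size in 2-byte units, excluding the Marvell
 * header, so e.g. max_rx_size = 1518 is written as (1518 - 2) / 2 = 758
 * into bits [14:2] of MVNETA_GMAC_CTRL_0.
 */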
    936
    937
    938/* Set rx queue offset */
    939static void mvneta_rxq_offset_set(struct mvneta_port *pp,
    940				  struct mvneta_rx_queue *rxq,
    941				  int offset)
    942{
    943	u32 val;
    944
    945	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
    946	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
    947
     948	/* Offset is programmed in units of 8 bytes */
    949	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
    950	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
    951}
    952
    953
    954/* Tx descriptors helper methods */
    955
    956/* Update HW with number of TX descriptors to be sent */
    957static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
    958				     struct mvneta_tx_queue *txq,
    959				     int pend_desc)
    960{
    961	u32 val;
    962
    963	pend_desc += txq->pending;
    964
    965	/* Only 255 Tx descriptors can be added at once */
    966	do {
    967		val = min(pend_desc, 255);
    968		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
    969		pend_desc -= val;
    970	} while (pend_desc > 0);
    971	txq->pending = 0;
    972}
    973
     974/* Get pointer to next TX descriptor to be processed (sent) by HW */
    975static struct mvneta_tx_desc *
    976mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
    977{
    978	int tx_desc = txq->next_desc_to_proc;
    979
    980	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
    981	return txq->descs + tx_desc;
    982}
    983
    984/* Release the last allocated TX descriptor. Useful to handle DMA
    985 * mapping failures in the TX path.
    986 */
    987static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
    988{
    989	if (txq->next_desc_to_proc == 0)
    990		txq->next_desc_to_proc = txq->last_desc - 1;
    991	else
    992		txq->next_desc_to_proc--;
    993}
    994
    995/* Set rxq buf size */
    996static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
    997				    struct mvneta_rx_queue *rxq,
    998				    int buf_size)
    999{
   1000	u32 val;
   1001
   1002	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
   1003
   1004	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
   1005	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
   1006
   1007	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
   1008}
   1009
   1010/* Disable buffer management (BM) */
   1011static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
   1012				  struct mvneta_rx_queue *rxq)
   1013{
   1014	u32 val;
   1015
   1016	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
   1017	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
   1018	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
   1019}
   1020
   1021/* Enable buffer management (BM) */
   1022static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
   1023				 struct mvneta_rx_queue *rxq)
   1024{
   1025	u32 val;
   1026
   1027	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
   1028	val |= MVNETA_RXQ_HW_BUF_ALLOC;
   1029	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
   1030}
   1031
   1032/* Notify HW about port's assignment of pool for bigger packets */
   1033static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
   1034				     struct mvneta_rx_queue *rxq)
   1035{
   1036	u32 val;
   1037
   1038	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
   1039	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
   1040	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
   1041
   1042	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
   1043}
   1044
   1045/* Notify HW about port's assignment of pool for smaller packets */
   1046static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
   1047				      struct mvneta_rx_queue *rxq)
   1048{
   1049	u32 val;
   1050
   1051	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
   1052	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
   1053	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
   1054
   1055	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
   1056}
   1057
   1058/* Set port's receive buffer size for assigned BM pool */
   1059static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
   1060					      int buf_size,
   1061					      u8 pool_id)
   1062{
   1063	u32 val;
   1064
   1065	if (!IS_ALIGNED(buf_size, 8)) {
   1066		dev_warn(pp->dev->dev.parent,
   1067			 "illegal buf_size value %d, round to %d\n",
   1068			 buf_size, ALIGN(buf_size, 8));
   1069		buf_size = ALIGN(buf_size, 8);
   1070	}
   1071
   1072	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
   1073	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
   1074	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
   1075}
   1076
    1077/* Configure an MBUS window in order to enable access to the BM internal SRAM */
   1078static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
   1079				  u8 target, u8 attr)
   1080{
   1081	u32 win_enable, win_protect;
   1082	int i;
   1083
   1084	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
   1085
   1086	if (pp->bm_win_id < 0) {
   1087		/* Find first not occupied window */
   1088		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
   1089			if (win_enable & (1 << i)) {
   1090				pp->bm_win_id = i;
   1091				break;
   1092			}
   1093		}
   1094		if (i == MVNETA_MAX_DECODE_WIN)
   1095			return -ENOMEM;
   1096	} else {
   1097		i = pp->bm_win_id;
   1098	}
   1099
   1100	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
   1101	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
   1102
   1103	if (i < 4)
   1104		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
   1105
   1106	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
   1107		    (attr << 8) | target);
   1108
   1109	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
   1110
   1111	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
   1112	win_protect |= 3 << (2 * i);
   1113	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
   1114
   1115	win_enable &= ~(1 << i);
   1116	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
   1117
   1118	return 0;
   1119}
   1120
   1121static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
   1122{
   1123	u32 wsize;
   1124	u8 target, attr;
   1125	int err;
   1126
   1127	/* Get BM window information */
   1128	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
   1129					 &target, &attr);
   1130	if (err < 0)
   1131		return err;
   1132
   1133	pp->bm_win_id = -1;
   1134
   1135	/* Open NETA -> BM window */
   1136	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
   1137				     target, attr);
   1138	if (err < 0) {
   1139		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
   1140		return err;
   1141	}
   1142	return 0;
   1143}
   1144
    1145/* Assign and initialize pools for the port. In case of failure, the
    1146 * buffer manager will remain disabled for the current port.
    1147 */
   1148static int mvneta_bm_port_init(struct platform_device *pdev,
   1149			       struct mvneta_port *pp)
   1150{
   1151	struct device_node *dn = pdev->dev.of_node;
   1152	u32 long_pool_id, short_pool_id;
   1153
   1154	if (!pp->neta_armada3700) {
   1155		int ret;
   1156
   1157		ret = mvneta_bm_port_mbus_init(pp);
   1158		if (ret)
   1159			return ret;
   1160	}
   1161
   1162	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
   1163		netdev_info(pp->dev, "missing long pool id\n");
   1164		return -EINVAL;
   1165	}
   1166
   1167	/* Create port's long pool depending on mtu */
   1168	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
   1169					   MVNETA_BM_LONG, pp->id,
   1170					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
   1171	if (!pp->pool_long) {
   1172		netdev_info(pp->dev, "fail to obtain long pool for port\n");
   1173		return -ENOMEM;
   1174	}
   1175
   1176	pp->pool_long->port_map |= 1 << pp->id;
   1177
   1178	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
   1179				   pp->pool_long->id);
   1180
    1181	/* If the short pool id is not defined, assume a single pool is used */
   1182	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
   1183		short_pool_id = long_pool_id;
   1184
   1185	/* Create port's short pool */
   1186	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
   1187					    MVNETA_BM_SHORT, pp->id,
   1188					    MVNETA_BM_SHORT_PKT_SIZE);
   1189	if (!pp->pool_short) {
   1190		netdev_info(pp->dev, "fail to obtain short pool for port\n");
   1191		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
   1192		return -ENOMEM;
   1193	}
   1194
   1195	if (short_pool_id != long_pool_id) {
   1196		pp->pool_short->port_map |= 1 << pp->id;
   1197		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
   1198					   pp->pool_short->id);
   1199	}
   1200
   1201	return 0;
   1202}
   1203
   1204/* Update settings of a pool for bigger packets */
   1205static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
   1206{
   1207	struct mvneta_bm_pool *bm_pool = pp->pool_long;
   1208	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
   1209	int num;
   1210
   1211	/* Release all buffers from long pool */
   1212	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
   1213	if (hwbm_pool->buf_num) {
   1214		WARN(1, "cannot free all buffers in pool %d\n",
   1215		     bm_pool->id);
   1216		goto bm_mtu_err;
   1217	}
   1218
   1219	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
   1220	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
   1221	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
   1222			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
   1223
   1224	/* Fill entire long pool */
   1225	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
   1226	if (num != hwbm_pool->size) {
   1227		WARN(1, "pool %d: %d of %d allocated\n",
   1228		     bm_pool->id, num, hwbm_pool->size);
   1229		goto bm_mtu_err;
   1230	}
   1231	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
   1232
   1233	return;
   1234
   1235bm_mtu_err:
   1236	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
   1237	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
   1238
   1239	pp->bm_priv = NULL;
   1240	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
   1241	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
   1242	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
   1243}
   1244
   1245/* Start the Ethernet port RX and TX activity */
   1246static void mvneta_port_up(struct mvneta_port *pp)
   1247{
   1248	int queue;
   1249	u32 q_map;
   1250
    1251	/* Enable all initialized TXQs. */
   1252	q_map = 0;
   1253	for (queue = 0; queue < txq_number; queue++) {
   1254		struct mvneta_tx_queue *txq = &pp->txqs[queue];
   1255		if (txq->descs)
   1256			q_map |= (1 << queue);
   1257	}
   1258	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
   1259
   1260	q_map = 0;
   1261	/* Enable all initialized RXQs. */
   1262	for (queue = 0; queue < rxq_number; queue++) {
   1263		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
   1264
   1265		if (rxq->descs)
   1266			q_map |= (1 << queue);
   1267	}
   1268	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
   1269}
   1270
   1271/* Stop the Ethernet port activity */
   1272static void mvneta_port_down(struct mvneta_port *pp)
   1273{
   1274	u32 val;
   1275	int count;
   1276
   1277	/* Stop Rx port activity. Check port Rx activity. */
   1278	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
   1279
   1280	/* Issue stop command for active channels only */
   1281	if (val != 0)
   1282		mvreg_write(pp, MVNETA_RXQ_CMD,
   1283			    val << MVNETA_RXQ_DISABLE_SHIFT);
   1284
   1285	/* Wait for all Rx activity to terminate. */
   1286	count = 0;
   1287	do {
   1288		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
   1289			netdev_warn(pp->dev,
   1290				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
   1291				    val);
   1292			break;
   1293		}
   1294		mdelay(1);
   1295
   1296		val = mvreg_read(pp, MVNETA_RXQ_CMD);
   1297	} while (val & MVNETA_RXQ_ENABLE_MASK);
   1298
   1299	/* Stop Tx port activity. Check port Tx activity. Issue stop
   1300	 * command for active channels only
   1301	 */
   1302	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
   1303
   1304	if (val != 0)
   1305		mvreg_write(pp, MVNETA_TXQ_CMD,
   1306			    (val << MVNETA_TXQ_DISABLE_SHIFT));
   1307
   1308	/* Wait for all Tx activity to terminate. */
   1309	count = 0;
   1310	do {
   1311		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
   1312			netdev_warn(pp->dev,
   1313				    "TIMEOUT for TX stopped status=0x%08x\n",
   1314				    val);
   1315			break;
   1316		}
   1317		mdelay(1);
   1318
   1319		/* Check TX Command reg that all Txqs are stopped */
   1320		val = mvreg_read(pp, MVNETA_TXQ_CMD);
   1321
   1322	} while (val & MVNETA_TXQ_ENABLE_MASK);
   1323
   1324	/* Double check to verify that TX FIFO is empty */
   1325	count = 0;
   1326	do {
   1327		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
   1328			netdev_warn(pp->dev,
   1329				    "TX FIFO empty timeout status=0x%08x\n",
   1330				    val);
   1331			break;
   1332		}
   1333		mdelay(1);
   1334
   1335		val = mvreg_read(pp, MVNETA_PORT_STATUS);
   1336	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
   1337		 (val & MVNETA_TX_IN_PRGRS));
   1338
   1339	udelay(200);
   1340}
   1341
   1342/* Enable the port by setting the port enable bit of the MAC control register */
   1343static void mvneta_port_enable(struct mvneta_port *pp)
   1344{
   1345	u32 val;
   1346
   1347	/* Enable port */
   1348	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
   1349	val |= MVNETA_GMAC0_PORT_ENABLE;
   1350	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
   1351}
   1352
    1353/* Disable the port and wait for about 200 usec before returning */
   1354static void mvneta_port_disable(struct mvneta_port *pp)
   1355{
   1356	u32 val;
   1357
   1358	/* Reset the Enable bit in the Serial Control Register */
   1359	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
   1360	val &= ~MVNETA_GMAC0_PORT_ENABLE;
   1361	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
   1362
   1363	udelay(200);
   1364}
   1365
   1366/* Multicast tables methods */
   1367
   1368/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
   1369static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
   1370{
   1371	int offset;
   1372	u32 val;
   1373
   1374	if (queue == -1) {
   1375		val = 0;
   1376	} else {
   1377		val = 0x1 | (queue << 1);
   1378		val |= (val << 24) | (val << 16) | (val << 8);
   1379	}
   1380
   1381	for (offset = 0; offset <= 0xc; offset += 4)
   1382		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
   1383}
   1384
   1385/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
   1386static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
   1387{
   1388	int offset;
   1389	u32 val;
   1390
   1391	if (queue == -1) {
   1392		val = 0;
   1393	} else {
   1394		val = 0x1 | (queue << 1);
   1395		val |= (val << 24) | (val << 16) | (val << 8);
   1396	}
   1397
   1398	for (offset = 0; offset <= 0xfc; offset += 4)
   1399		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
   1400
   1401}
   1402
   1403/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
   1404static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
   1405{
   1406	int offset;
   1407	u32 val;
   1408
   1409	if (queue == -1) {
   1410		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
   1411		val = 0;
   1412	} else {
   1413		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
   1414		val = 0x1 | (queue << 1);
   1415		val |= (val << 24) | (val << 16) | (val << 8);
   1416	}
   1417
   1418	for (offset = 0; offset <= 0xfc; offset += 4)
   1419		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
   1420}
   1421
   1422static void mvneta_percpu_unmask_interrupt(void *arg)
   1423{
   1424	struct mvneta_port *pp = arg;
   1425
    1426	/* All the queues are unmasked, but actually only the ones
   1427	 * mapped to this CPU will be unmasked
   1428	 */
   1429	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
   1430		    MVNETA_RX_INTR_MASK_ALL |
   1431		    MVNETA_TX_INTR_MASK_ALL |
   1432		    MVNETA_MISCINTR_INTR_MASK);
   1433}
   1434
   1435static void mvneta_percpu_mask_interrupt(void *arg)
   1436{
   1437	struct mvneta_port *pp = arg;
   1438
    1439	/* All the queues are masked, but actually only the ones
   1440	 * mapped to this CPU will be masked
   1441	 */
   1442	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
   1443	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
   1444	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
   1445}
   1446
   1447static void mvneta_percpu_clear_intr_cause(void *arg)
   1448{
   1449	struct mvneta_port *pp = arg;
   1450
    1451	/* All the queues are cleared, but actually only the ones
   1452	 * mapped to this CPU will be cleared
   1453	 */
   1454	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
   1455	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
   1456	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
   1457}
   1458
    1459/* This method sets defaults for the NETA port:
   1460 *	Clears interrupt Cause and Mask registers.
   1461 *	Clears all MAC tables.
   1462 *	Sets defaults to all registers.
   1463 *	Resets RX and TX descriptor rings.
   1464 *	Resets PHY.
   1465 * This method can be called after mvneta_port_down() to return the port
   1466 *	settings to defaults.
   1467 */
   1468static void mvneta_defaults_set(struct mvneta_port *pp)
   1469{
   1470	int cpu;
   1471	int queue;
   1472	u32 val;
   1473	int max_cpu = num_present_cpus();
   1474
   1475	/* Clear all Cause registers */
   1476	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
   1477
   1478	/* Mask all interrupts */
   1479	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
   1480	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
   1481
   1482	/* Enable MBUS Retry bit16 */
   1483	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
   1484
    1485	/* Set the CPU queue access map. CPUs are assigned to the RX and
    1486	 * TX queues modulo their number. If there is only one TX
    1487	 * queue, then it is assigned to the CPU associated with the
    1488	 * default RX queue.
   1489	 */
   1490	for_each_present_cpu(cpu) {
   1491		int rxq_map = 0, txq_map = 0;
   1492		int rxq, txq;
   1493		if (!pp->neta_armada3700) {
   1494			for (rxq = 0; rxq < rxq_number; rxq++)
   1495				if ((rxq % max_cpu) == cpu)
   1496					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
   1497
   1498			for (txq = 0; txq < txq_number; txq++)
   1499				if ((txq % max_cpu) == cpu)
   1500					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
   1501
   1502			/* With only one TX queue we configure a special case
   1503			 * which allows all the TX irqs to be handled on a
   1504			 * single CPU
   1505			 */
   1506			if (txq_number == 1)
   1507				txq_map = (cpu == pp->rxq_def) ?
   1508					MVNETA_CPU_TXQ_ACCESS(1) : 0;
   1509
   1510		} else {
   1511			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
   1512			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
   1513		}
   1514
   1515		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
   1516	}
   1517
   1518	/* Reset RX and TX DMAs */
   1519	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
   1520	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
   1521
   1522	/* Disable Legacy WRR, Disable EJP, Release from reset */
   1523	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
   1524	for (queue = 0; queue < txq_number; queue++) {
   1525		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
   1526		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
   1527	}
   1528
   1529	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
   1530	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
   1531
   1532	/* Set Port Acceleration Mode */
   1533	if (pp->bm_priv)
   1534		/* HW buffer management + legacy parser */
   1535		val = MVNETA_ACC_MODE_EXT2;
   1536	else
   1537		/* SW buffer management + legacy parser */
   1538		val = MVNETA_ACC_MODE_EXT1;
   1539	mvreg_write(pp, MVNETA_ACC_MODE, val);
   1540
   1541	if (pp->bm_priv)
   1542		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
   1543
   1544	/* Update the portCfg register value according to all RxQueue types */
   1545	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
   1546	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
   1547
   1548	val = 0;
   1549	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
   1550	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
   1551
   1552	/* Build PORT_SDMA_CONFIG_REG */
   1553	val = 0;
   1554
   1555	/* Default burst size */
   1556	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
   1557	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
   1558	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
   1559
   1560#if defined(__BIG_ENDIAN)
   1561	val |= MVNETA_DESC_SWAP;
   1562#endif
   1563
   1564	/* Assign port SDMA configuration */
   1565	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
   1566
   1567	/* Disable PHY polling in hardware, since we're using the
   1568	 * kernel phylib to do this.
   1569	 */
   1570	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
   1571	val &= ~MVNETA_PHY_POLLING_ENABLE;
   1572	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
   1573
   1574	mvneta_set_ucast_table(pp, -1);
   1575	mvneta_set_special_mcast_table(pp, -1);
   1576	mvneta_set_other_mcast_table(pp, -1);
   1577
   1578	/* Set port interrupt enable register - default enable all */
   1579	mvreg_write(pp, MVNETA_INTR_ENABLE,
   1580		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
   1581		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
   1582
   1583	mvneta_mib_counters_clear(pp);
   1584}
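/* Editor's note, a worked example of the CPU/queue mapping programmed just
 * above (illustrative only, not part of the driver): with max_cpu = 2 and
 * rxq_number = txq_number = 8, a queue is granted to the CPU whose index
 * equals (queue % max_cpu), so CPU 0 gets queues 0, 2, 4, 6 and CPU 1 gets
 * queues 1, 3, 5, 7. With txq_number == 1 only the CPU whose index equals
 * pp->rxq_def is granted TXQ access, so all TX interrupts land on that CPU;
 * on Armada 3700 every CPU is simply given access to all queues.
 */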
   1585
   1586/* Set max sizes for tx queues */
   1587static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
   1588
   1589{
   1590	u32 val, size, mtu;
   1591	int queue;
   1592
   1593	mtu = max_tx_size * 8;
   1594	if (mtu > MVNETA_TX_MTU_MAX)
   1595		mtu = MVNETA_TX_MTU_MAX;
   1596
   1597	/* Set MTU */
   1598	val = mvreg_read(pp, MVNETA_TX_MTU);
   1599	val &= ~MVNETA_TX_MTU_MAX;
   1600	val |= mtu;
   1601	mvreg_write(pp, MVNETA_TX_MTU, val);
   1602
   1603	/* TX token size and all TXQs token size must be larger than MTU */
   1604	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
   1605
   1606	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
   1607	if (size < mtu) {
   1608		size = mtu;
   1609		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
   1610		val |= size;
   1611		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
   1612	}
   1613	for (queue = 0; queue < txq_number; queue++) {
   1614		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
   1615
   1616		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
   1617		if (size < mtu) {
   1618			size = mtu;
   1619			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
   1620			val |= size;
   1621			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
   1622		}
   1623	}
   1624}
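/* Editor's worked example for the sizing above (illustrative only, with an
 * assumed frame size): for max_tx_size = 1518 bytes, mtu = 1518 * 8 = 12144,
 * which is written into the MVNETA_TX_MTU register (it would be clamped to
 * MVNETA_TX_MTU_MAX if larger); the port-wide TX token size and each per-TXQ
 * token size are then raised to at least 12144, satisfying the "must be
 * larger than MTU" rule above.
 */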
   1625
   1626/* Set unicast address */
   1627static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
   1628				  int queue)
   1629{
   1630	unsigned int unicast_reg;
   1631	unsigned int tbl_offset;
   1632	unsigned int reg_offset;
   1633
   1634	/* Locate the Unicast table entry */
   1635	last_nibble = (0xf & last_nibble);
   1636
   1637	/* offset from unicast tbl base */
   1638	tbl_offset = (last_nibble / 4) * 4;
   1639
   1640	/* offset within the above reg  */
   1641	reg_offset = last_nibble % 4;
   1642
   1643	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
   1644
   1645	if (queue == -1) {
   1646		/* Clear accepts frame bit at specified unicast DA tbl entry */
   1647		unicast_reg &= ~(0xff << (8 * reg_offset));
   1648	} else {
   1649		unicast_reg &= ~(0xff << (8 * reg_offset));
   1650		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
   1651	}
   1652
   1653	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
   1654}
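/* Editor's worked example for the lookup above (illustrative only, with an
 * assumed address): for a MAC address ending in 0x4b, last_nibble = 0xb, so
 * tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 11 % 4 = 3; the entry lives
 * in byte 3 of the register at MVNETA_DA_FILT_UCAST_BASE + 8, and enabling
 * it for queue 2 writes 0x01 | (2 << 1) = 0x05 into that byte.
 */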
   1655
   1656/* Set mac address */
   1657static void mvneta_mac_addr_set(struct mvneta_port *pp,
   1658				const unsigned char *addr, int queue)
   1659{
   1660	unsigned int mac_h;
   1661	unsigned int mac_l;
   1662
   1663	if (queue != -1) {
   1664		mac_l = (addr[4] << 8) | (addr[5]);
   1665		mac_h = (addr[0] << 24) | (addr[1] << 16) |
   1666			(addr[2] << 8) | (addr[3] << 0);
   1667
   1668		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
   1669		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
   1670	}
   1671
   1672	/* Accept frames of this address */
   1673	mvneta_set_ucast_addr(pp, addr[5], queue);
   1674}
   1675
   1676/* Set the number of packets that will be received before an RX
   1677 * interrupt is generated by the HW.
   1678 */
   1679static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
   1680				    struct mvneta_rx_queue *rxq, u32 value)
   1681{
   1682	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
   1683		    value | MVNETA_RXQ_NON_OCCUPIED(0));
   1684}
   1685
   1686/* Set the time delay in usec before an RX interrupt is generated by
   1687 * the HW.
   1688 */
   1689static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
   1690				    struct mvneta_rx_queue *rxq, u32 value)
   1691{
   1692	u32 val;
   1693	unsigned long clk_rate;
   1694
   1695	clk_rate = clk_get_rate(pp->clk);
   1696	val = (clk_rate / 1000000) * value;
   1697
   1698	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
   1699}
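/* Editor's worked example for the conversion above (illustrative only; the
 * clock rate is an assumption, not something the driver requires): with a
 * 250 MHz core clock, clk_rate / 1000000 = 250 ticks per usec, so requesting
 * a 100 usec delay programs 250 * 100 = 25000 clock ticks into
 * MVNETA_RXQ_TIME_COAL_REG.
 */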
   1700
   1701/* Set threshold for TX_DONE pkts coalescing */
   1702static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
   1703					 struct mvneta_tx_queue *txq, u32 value)
   1704{
   1705	u32 val;
   1706
   1707	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
   1708
   1709	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
   1710	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
   1711
   1712	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
   1713}
   1714
   1715/* Handle rx descriptor fill by setting buf_phys_addr and buf_virt_addr */
   1716static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
   1717				u32 phys_addr, void *virt_addr,
   1718				struct mvneta_rx_queue *rxq)
   1719{
   1720	int i;
   1721
   1722	rx_desc->buf_phys_addr = phys_addr;
   1723	i = rx_desc - rxq->descs;
   1724	rxq->buf_virt_addr[i] = virt_addr;
   1725}
   1726
   1727/* Decrement sent descriptors counter */
   1728static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
   1729				     struct mvneta_tx_queue *txq,
   1730				     int sent_desc)
   1731{
   1732	u32 val;
   1733
   1734	/* Only 255 TX descriptors can be updated at once */
   1735	while (sent_desc > 0xff) {
   1736		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
   1737		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
   1738		sent_desc = sent_desc - 0xff;
   1739	}
   1740
   1741	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
   1742	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
   1743}
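/* Editor's worked example for the loop above (illustrative only): since the
 * DEC_SENT field takes at most 255 per write, decrementing 600 sent
 * descriptors is split into three writes of 255, 255 and 90.
 */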
   1744
   1745/* Get number of TX descriptors already sent by HW */
   1746static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
   1747					struct mvneta_tx_queue *txq)
   1748{
   1749	u32 val;
   1750	int sent_desc;
   1751
   1752	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
   1753	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
   1754		MVNETA_TXQ_SENT_DESC_SHIFT;
   1755
   1756	return sent_desc;
   1757}
   1758
   1759/* Get number of sent descriptors and decrement counter.
   1760 *  The number of sent descriptors is returned.
   1761 */
   1762static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
   1763				     struct mvneta_tx_queue *txq)
   1764{
   1765	int sent_desc;
   1766
   1767	/* Get number of sent descriptors */
   1768	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
   1769
   1770	/* Decrement sent descriptors counter */
   1771	if (sent_desc)
   1772		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
   1773
   1774	return sent_desc;
   1775}
   1776
   1777/* Set TXQ descriptors fields relevant for CSUM calculation */
   1778static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
   1779				int ip_hdr_len, int l4_proto)
   1780{
   1781	u32 command;
   1782
   1783	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
   1784	 * G_L4_chk, L4_type; required only for checksum
   1785	 * calculation
   1786	 */
   1787	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
   1788	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
   1789
   1790	if (l3_proto == htons(ETH_P_IP))
   1791		command |= MVNETA_TXD_IP_CSUM;
   1792	else
   1793		command |= MVNETA_TX_L3_IP6;
   1794
   1795	if (l4_proto == IPPROTO_TCP)
   1796		command |=  MVNETA_TX_L4_CSUM_FULL;
   1797	else if (l4_proto == IPPROTO_UDP)
   1798		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
   1799	else
   1800		command |= MVNETA_TX_L4_CSUM_NOT;
   1801
   1802	return command;
   1803}
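/* Editor's illustrative sketch (not part of the driver; the helper name is
 * made up). It shows the command built for a plain TCP-over-IPv4 frame with
 * a standard 14-byte Ethernet header and a 20-byte IP header, the same way
 * mvneta_skb_tx_csum() below calls this function: the L3 offset is in bytes
 * and the IP header length is in 32-bit words (ihl), hence 5 here.
 */
static inline u32 mvneta_txq_desc_csum_tcp4_sketch(void)
{
	/* 14 << L3_OFF | 5 << IP_HLEN | IP_CSUM | L4_CSUM_FULL */
	return mvneta_txq_desc_csum(ETH_HLEN, htons(ETH_P_IP), 5, IPPROTO_TCP);
}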
   1804
   1805
   1806/* Display more error info */
   1807static void mvneta_rx_error(struct mvneta_port *pp,
   1808			    struct mvneta_rx_desc *rx_desc)
   1809{
   1810	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   1811	u32 status = rx_desc->status;
   1812
   1813	/* update per-cpu counter */
   1814	u64_stats_update_begin(&stats->syncp);
   1815	stats->rx_errors++;
   1816	u64_stats_update_end(&stats->syncp);
   1817
   1818	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
   1819	case MVNETA_RXD_ERR_CRC:
   1820		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
   1821			   status, rx_desc->data_size);
   1822		break;
   1823	case MVNETA_RXD_ERR_OVERRUN:
   1824		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
   1825			   status, rx_desc->data_size);
   1826		break;
   1827	case MVNETA_RXD_ERR_LEN:
   1828		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
   1829			   status, rx_desc->data_size);
   1830		break;
   1831	case MVNETA_RXD_ERR_RESOURCE:
   1832		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
   1833			   status, rx_desc->data_size);
   1834		break;
   1835	}
   1836}
   1837
   1838/* Handle RX checksum offload based on the descriptor's status */
   1839static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
   1840{
   1841	if ((pp->dev->features & NETIF_F_RXCSUM) &&
   1842	    (status & MVNETA_RXD_L3_IP4) &&
   1843	    (status & MVNETA_RXD_L4_CSUM_OK))
   1844		return CHECKSUM_UNNECESSARY;
   1845
   1846	return CHECKSUM_NONE;
   1847}
   1848
   1849/* Return tx queue pointer (find last set bit) according to <cause> returned
   1850 * from the tx_done reg. <cause> must not be null. The return value is always
   1851 * a valid queue, matching the last (highest) set bit found in <cause>.
   1852 */
   1853static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
   1854						     u32 cause)
   1855{
   1856	int queue = fls(cause) - 1;
   1857
   1858	return &pp->txqs[queue];
   1859}
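/* Editor's worked example (illustrative only): if <cause> has bits 1 and 3
 * set, i.e. TXQs 1 and 3 both have completed descriptors, fls(cause) - 1 = 3,
 * so TXQ 3 is returned first; mvneta_tx_done_gbe() below then clears bit 3
 * from the cause and loops until no bits remain.
 */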
   1860
   1861/* Free tx queue skbuffs */
   1862static void mvneta_txq_bufs_free(struct mvneta_port *pp,
   1863				 struct mvneta_tx_queue *txq, int num,
   1864				 struct netdev_queue *nq, bool napi)
   1865{
   1866	unsigned int bytes_compl = 0, pkts_compl = 0;
   1867	struct xdp_frame_bulk bq;
   1868	int i;
   1869
   1870	xdp_frame_bulk_init(&bq);
   1871
   1872	rcu_read_lock(); /* need for xdp_return_frame_bulk */
   1873
   1874	for (i = 0; i < num; i++) {
   1875		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
   1876		struct mvneta_tx_desc *tx_desc = txq->descs +
   1877			txq->txq_get_index;
   1878
   1879		mvneta_txq_inc_get(txq);
   1880
   1881		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
   1882		    buf->type != MVNETA_TYPE_XDP_TX)
   1883			dma_unmap_single(pp->dev->dev.parent,
   1884					 tx_desc->buf_phys_addr,
   1885					 tx_desc->data_size, DMA_TO_DEVICE);
   1886		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
   1887			bytes_compl += buf->skb->len;
   1888			pkts_compl++;
   1889			dev_kfree_skb_any(buf->skb);
   1890		} else if ((buf->type == MVNETA_TYPE_XDP_TX ||
   1891			    buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
   1892			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
   1893				xdp_return_frame_rx_napi(buf->xdpf);
   1894			else
   1895				xdp_return_frame_bulk(buf->xdpf, &bq);
   1896		}
   1897	}
   1898	xdp_flush_frame_bulk(&bq);
   1899
   1900	rcu_read_unlock();
   1901
   1902	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
   1903}
   1904
   1905/* Handle end of transmission */
   1906static void mvneta_txq_done(struct mvneta_port *pp,
   1907			   struct mvneta_tx_queue *txq)
   1908{
   1909	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
   1910	int tx_done;
   1911
   1912	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
   1913	if (!tx_done)
   1914		return;
   1915
   1916	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
   1917
   1918	txq->count -= tx_done;
   1919
   1920	if (netif_tx_queue_stopped(nq)) {
   1921		if (txq->count <= txq->tx_wake_threshold)
   1922			netif_tx_wake_queue(nq);
   1923	}
   1924}
   1925
   1926/* Refill processing for SW buffer management */
   1927/* Allocate page per descriptor */
   1928static int mvneta_rx_refill(struct mvneta_port *pp,
   1929			    struct mvneta_rx_desc *rx_desc,
   1930			    struct mvneta_rx_queue *rxq,
   1931			    gfp_t gfp_mask)
   1932{
   1933	dma_addr_t phys_addr;
   1934	struct page *page;
   1935
   1936	page = page_pool_alloc_pages(rxq->page_pool,
   1937				     gfp_mask | __GFP_NOWARN);
   1938	if (!page)
   1939		return -ENOMEM;
   1940
   1941	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
   1942	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
   1943
   1944	return 0;
   1945}
   1946
   1947/* Handle tx checksum */
   1948static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
   1949{
   1950	if (skb->ip_summed == CHECKSUM_PARTIAL) {
   1951		int ip_hdr_len = 0;
   1952		__be16 l3_proto = vlan_get_protocol(skb);
   1953		u8 l4_proto;
   1954
   1955		if (l3_proto == htons(ETH_P_IP)) {
   1956			struct iphdr *ip4h = ip_hdr(skb);
   1957
   1958			/* Calculate IPv4 checksum and L4 checksum */
   1959			ip_hdr_len = ip4h->ihl;
   1960			l4_proto = ip4h->protocol;
   1961		} else if (l3_proto == htons(ETH_P_IPV6)) {
   1962			struct ipv6hdr *ip6h = ipv6_hdr(skb);
   1963
   1964			/* Read l4_protocol from one of IPv6 extra headers */
   1965			if (skb_network_header_len(skb) > 0)
   1966				ip_hdr_len = (skb_network_header_len(skb) >> 2);
   1967			l4_proto = ip6h->nexthdr;
   1968		} else
   1969			return MVNETA_TX_L4_CSUM_NOT;
   1970
   1971		return mvneta_txq_desc_csum(skb_network_offset(skb),
   1972					    l3_proto, ip_hdr_len, l4_proto);
   1973	}
   1974
   1975	return MVNETA_TX_L4_CSUM_NOT;
   1976}
   1977
   1978/* Drop packets received by the RXQ and free buffers */
   1979static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
   1980				 struct mvneta_rx_queue *rxq)
   1981{
   1982	int rx_done, i;
   1983
   1984	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
   1985	if (rx_done)
   1986		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
   1987
   1988	if (pp->bm_priv) {
   1989		for (i = 0; i < rx_done; i++) {
   1990			struct mvneta_rx_desc *rx_desc =
   1991						  mvneta_rxq_next_desc_get(rxq);
   1992			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
   1993			struct mvneta_bm_pool *bm_pool;
   1994
   1995			bm_pool = &pp->bm_priv->bm_pools[pool_id];
   1996			/* Return dropped buffer to the pool */
   1997			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
   1998					      rx_desc->buf_phys_addr);
   1999		}
   2000		return;
   2001	}
   2002
   2003	for (i = 0; i < rxq->size; i++) {
   2004		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
   2005		void *data = rxq->buf_virt_addr[i];
   2006		if (!data || !(rx_desc->buf_phys_addr))
   2007			continue;
   2008
   2009		page_pool_put_full_page(rxq->page_pool, data, false);
   2010	}
   2011	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
   2012		xdp_rxq_info_unreg(&rxq->xdp_rxq);
   2013	page_pool_destroy(rxq->page_pool);
   2014	rxq->page_pool = NULL;
   2015}
   2016
   2017static void
   2018mvneta_update_stats(struct mvneta_port *pp,
   2019		    struct mvneta_stats *ps)
   2020{
   2021	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2022
   2023	u64_stats_update_begin(&stats->syncp);
   2024	stats->es.ps.rx_packets += ps->rx_packets;
   2025	stats->es.ps.rx_bytes += ps->rx_bytes;
   2026	/* xdp */
   2027	stats->es.ps.xdp_redirect += ps->xdp_redirect;
   2028	stats->es.ps.xdp_pass += ps->xdp_pass;
   2029	stats->es.ps.xdp_drop += ps->xdp_drop;
   2030	u64_stats_update_end(&stats->syncp);
   2031}
   2032
   2033static inline
   2034int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
   2035{
   2036	struct mvneta_rx_desc *rx_desc;
   2037	int curr_desc = rxq->first_to_refill;
   2038	int i;
   2039
   2040	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
   2041		rx_desc = rxq->descs + curr_desc;
   2042		if (!(rx_desc->buf_phys_addr)) {
   2043			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
   2044				struct mvneta_pcpu_stats *stats;
   2045
   2046				pr_err("Can't refill queue %d. Done %d from %d\n",
   2047				       rxq->id, i, rxq->refill_num);
   2048
   2049				stats = this_cpu_ptr(pp->stats);
   2050				u64_stats_update_begin(&stats->syncp);
   2051				stats->es.refill_error++;
   2052				u64_stats_update_end(&stats->syncp);
   2053				break;
   2054			}
   2055		}
   2056		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
   2057	}
   2058	rxq->refill_num -= i;
   2059	rxq->first_to_refill = curr_desc;
   2060
   2061	return i;
   2062}
   2063
   2064static void
   2065mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
   2066		    struct xdp_buff *xdp, int sync_len)
   2067{
   2068	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
   2069	int i;
   2070
   2071	if (likely(!xdp_buff_has_frags(xdp)))
   2072		goto out;
   2073
   2074	for (i = 0; i < sinfo->nr_frags; i++)
   2075		page_pool_put_full_page(rxq->page_pool,
   2076					skb_frag_page(&sinfo->frags[i]), true);
   2077
   2078out:
   2079	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
   2080			   sync_len, true);
   2081}
   2082
   2083static int
   2084mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
   2085			struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
   2086{
   2087	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
   2088	struct device *dev = pp->dev->dev.parent;
   2089	struct mvneta_tx_desc *tx_desc;
   2090	int i, num_frames = 1;
   2091	struct page *page;
   2092
   2093	if (unlikely(xdp_frame_has_frags(xdpf)))
   2094		num_frames += sinfo->nr_frags;
   2095
   2096	if (txq->count + num_frames >= txq->size)
   2097		return MVNETA_XDP_DROPPED;
   2098
   2099	for (i = 0; i < num_frames; i++) {
   2100		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
   2101		skb_frag_t *frag = NULL;
   2102		int len = xdpf->len;
   2103		dma_addr_t dma_addr;
   2104
   2105		if (unlikely(i)) { /* paged area */
   2106			frag = &sinfo->frags[i - 1];
   2107			len = skb_frag_size(frag);
   2108		}
   2109
   2110		tx_desc = mvneta_txq_next_desc_get(txq);
   2111		if (dma_map) {
   2112			/* ndo_xdp_xmit */
   2113			void *data;
   2114
   2115			data = unlikely(frag) ? skb_frag_address(frag)
   2116					      : xdpf->data;
   2117			dma_addr = dma_map_single(dev, data, len,
   2118						  DMA_TO_DEVICE);
   2119			if (dma_mapping_error(dev, dma_addr)) {
   2120				mvneta_txq_desc_put(txq);
   2121				goto unmap;
   2122			}
   2123
   2124			buf->type = MVNETA_TYPE_XDP_NDO;
   2125		} else {
   2126			page = unlikely(frag) ? skb_frag_page(frag)
   2127					      : virt_to_page(xdpf->data);
   2128			dma_addr = page_pool_get_dma_addr(page);
   2129			if (unlikely(frag))
   2130				dma_addr += skb_frag_off(frag);
   2131			else
   2132				dma_addr += sizeof(*xdpf) + xdpf->headroom;
   2133			dma_sync_single_for_device(dev, dma_addr, len,
   2134						   DMA_BIDIRECTIONAL);
   2135			buf->type = MVNETA_TYPE_XDP_TX;
   2136		}
   2137		buf->xdpf = unlikely(i) ? NULL : xdpf;
   2138
   2139		tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
   2140		tx_desc->buf_phys_addr = dma_addr;
   2141		tx_desc->data_size = len;
   2142		*nxmit_byte += len;
   2143
   2144		mvneta_txq_inc_put(txq);
   2145	}
   2146	/* last descriptor */
   2147	tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
   2148
   2149	txq->pending += num_frames;
   2150	txq->count += num_frames;
   2151
   2152	return MVNETA_XDP_TX;
   2153
   2154unmap:
   2155	for (i--; i >= 0; i--) {
   2156		mvneta_txq_desc_put(txq);
   2157		tx_desc = txq->descs + txq->next_desc_to_proc;
   2158		dma_unmap_single(dev, tx_desc->buf_phys_addr,
   2159				 tx_desc->data_size,
   2160				 DMA_TO_DEVICE);
   2161	}
   2162
   2163	return MVNETA_XDP_DROPPED;
   2164}
   2165
   2166static int
   2167mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
   2168{
   2169	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2170	struct mvneta_tx_queue *txq;
   2171	struct netdev_queue *nq;
   2172	int cpu, nxmit_byte = 0;
   2173	struct xdp_frame *xdpf;
   2174	u32 ret;
   2175
   2176	xdpf = xdp_convert_buff_to_frame(xdp);
   2177	if (unlikely(!xdpf))
   2178		return MVNETA_XDP_DROPPED;
   2179
   2180	cpu = smp_processor_id();
   2181	txq = &pp->txqs[cpu % txq_number];
   2182	nq = netdev_get_tx_queue(pp->dev, txq->id);
   2183
   2184	__netif_tx_lock(nq, cpu);
   2185	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
   2186	if (ret == MVNETA_XDP_TX) {
   2187		u64_stats_update_begin(&stats->syncp);
   2188		stats->es.ps.tx_bytes += nxmit_byte;
   2189		stats->es.ps.tx_packets++;
   2190		stats->es.ps.xdp_tx++;
   2191		u64_stats_update_end(&stats->syncp);
   2192
   2193		mvneta_txq_pend_desc_add(pp, txq, 0);
   2194	} else {
   2195		u64_stats_update_begin(&stats->syncp);
   2196		stats->es.ps.xdp_tx_err++;
   2197		u64_stats_update_end(&stats->syncp);
   2198	}
   2199	__netif_tx_unlock(nq);
   2200
   2201	return ret;
   2202}
   2203
   2204static int
   2205mvneta_xdp_xmit(struct net_device *dev, int num_frame,
   2206		struct xdp_frame **frames, u32 flags)
   2207{
   2208	struct mvneta_port *pp = netdev_priv(dev);
   2209	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2210	int i, nxmit_byte = 0, nxmit = 0;
   2211	int cpu = smp_processor_id();
   2212	struct mvneta_tx_queue *txq;
   2213	struct netdev_queue *nq;
   2214	u32 ret;
   2215
   2216	if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
   2217		return -ENETDOWN;
   2218
   2219	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
   2220		return -EINVAL;
   2221
   2222	txq = &pp->txqs[cpu % txq_number];
   2223	nq = netdev_get_tx_queue(pp->dev, txq->id);
   2224
   2225	__netif_tx_lock(nq, cpu);
   2226	for (i = 0; i < num_frame; i++) {
   2227		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
   2228					      true);
   2229		if (ret != MVNETA_XDP_TX)
   2230			break;
   2231
   2232		nxmit++;
   2233	}
   2234
   2235	if (unlikely(flags & XDP_XMIT_FLUSH))
   2236		mvneta_txq_pend_desc_add(pp, txq, 0);
   2237	__netif_tx_unlock(nq);
   2238
   2239	u64_stats_update_begin(&stats->syncp);
   2240	stats->es.ps.tx_bytes += nxmit_byte;
   2241	stats->es.ps.tx_packets += nxmit;
   2242	stats->es.ps.xdp_xmit += nxmit;
   2243	stats->es.ps.xdp_xmit_err += num_frame - nxmit;
   2244	u64_stats_update_end(&stats->syncp);
   2245
   2246	return nxmit;
   2247}
   2248
   2249static int
   2250mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
   2251	       struct bpf_prog *prog, struct xdp_buff *xdp,
   2252	       u32 frame_sz, struct mvneta_stats *stats)
   2253{
   2254	unsigned int len, data_len, sync;
   2255	u32 ret, act;
   2256
   2257	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
   2258	data_len = xdp->data_end - xdp->data;
   2259	act = bpf_prog_run_xdp(prog, xdp);
   2260
   2261	/* Due to xdp_adjust_tail: DMA sync for_device must cover max len CPU touched */
   2262	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
   2263	sync = max(sync, len);
   2264
   2265	switch (act) {
   2266	case XDP_PASS:
   2267		stats->xdp_pass++;
   2268		return MVNETA_XDP_PASS;
   2269	case XDP_REDIRECT: {
   2270		int err;
   2271
   2272		err = xdp_do_redirect(pp->dev, xdp, prog);
   2273		if (unlikely(err)) {
   2274			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
   2275			ret = MVNETA_XDP_DROPPED;
   2276		} else {
   2277			ret = MVNETA_XDP_REDIR;
   2278			stats->xdp_redirect++;
   2279		}
   2280		break;
   2281	}
   2282	case XDP_TX:
   2283		ret = mvneta_xdp_xmit_back(pp, xdp);
   2284		if (ret != MVNETA_XDP_TX)
   2285			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
   2286		break;
   2287	default:
   2288		bpf_warn_invalid_xdp_action(pp->dev, prog, act);
   2289		fallthrough;
   2290	case XDP_ABORTED:
   2291		trace_xdp_exception(pp->dev, prog, act);
   2292		fallthrough;
   2293	case XDP_DROP:
   2294		mvneta_xdp_put_buff(pp, rxq, xdp, sync);
   2295		ret = MVNETA_XDP_DROPPED;
   2296		stats->xdp_drop++;
   2297		break;
   2298	}
   2299
   2300	stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
   2301	stats->rx_packets++;
   2302
   2303	return ret;
   2304}
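/* Editor's note on the accounting above (illustrative only): "sync" is the
 * larger of the span from the (offset-corrected) start of the buffer to
 * data_end measured before and after the program ran, so a program that
 * shrank the frame with bpf_xdp_adjust_tail() still has everything it may
 * have touched synced back for the device; rx_bytes is credited with
 * frame_sz adjusted by the length delta the program introduced (the post-run
 * data_end - data minus the pre-run data_len).
 */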
   2305
   2306static void
   2307mvneta_swbm_rx_frame(struct mvneta_port *pp,
   2308		     struct mvneta_rx_desc *rx_desc,
   2309		     struct mvneta_rx_queue *rxq,
   2310		     struct xdp_buff *xdp, int *size,
   2311		     struct page *page)
   2312{
   2313	unsigned char *data = page_address(page);
   2314	int data_len = -MVNETA_MH_SIZE, len;
   2315	struct net_device *dev = pp->dev;
   2316	enum dma_data_direction dma_dir;
   2317
   2318	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
   2319		len = MVNETA_MAX_RX_BUF_SIZE;
   2320		data_len += len;
   2321	} else {
   2322		len = *size;
   2323		data_len += len - ETH_FCS_LEN;
   2324	}
   2325	*size = *size - len;
   2326
   2327	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
   2328	dma_sync_single_for_cpu(dev->dev.parent,
   2329				rx_desc->buf_phys_addr,
   2330				len, dma_dir);
   2331
   2332	rx_desc->buf_phys_addr = 0;
   2333
   2334	/* Prefetch header */
   2335	prefetch(data);
   2336	xdp_buff_clear_frags_flag(xdp);
   2337	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
   2338			 data_len, false);
   2339}
   2340
   2341static void
   2342mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
   2343			    struct mvneta_rx_desc *rx_desc,
   2344			    struct mvneta_rx_queue *rxq,
   2345			    struct xdp_buff *xdp, int *size,
   2346			    struct page *page)
   2347{
   2348	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
   2349	struct net_device *dev = pp->dev;
   2350	enum dma_data_direction dma_dir;
   2351	int data_len, len;
   2352
   2353	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
   2354		len = MVNETA_MAX_RX_BUF_SIZE;
   2355		data_len = len;
   2356	} else {
   2357		len = *size;
   2358		data_len = len - ETH_FCS_LEN;
   2359	}
   2360	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
   2361	dma_sync_single_for_cpu(dev->dev.parent,
   2362				rx_desc->buf_phys_addr,
   2363				len, dma_dir);
   2364	rx_desc->buf_phys_addr = 0;
   2365
   2366	if (!xdp_buff_has_frags(xdp))
   2367		sinfo->nr_frags = 0;
   2368
   2369	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
   2370		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
   2371
   2372		skb_frag_off_set(frag, pp->rx_offset_correction);
   2373		skb_frag_size_set(frag, data_len);
   2374		__skb_frag_set_page(frag, page);
   2375
   2376		if (!xdp_buff_has_frags(xdp)) {
   2377			sinfo->xdp_frags_size = *size;
   2378			xdp_buff_set_frags_flag(xdp);
   2379		}
   2380		if (page_is_pfmemalloc(page))
   2381			xdp_buff_set_frag_pfmemalloc(xdp);
   2382	} else {
   2383		page_pool_put_full_page(rxq->page_pool, page, true);
   2384	}
   2385	*size -= len;
   2386}
   2387
   2388static struct sk_buff *
   2389mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
   2390		      struct xdp_buff *xdp, u32 desc_status)
   2391{
   2392	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
   2393	struct sk_buff *skb;
   2394	u8 num_frags;
   2395
   2396	if (unlikely(xdp_buff_has_frags(xdp)))
   2397		num_frags = sinfo->nr_frags;
   2398
   2399	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
   2400	if (!skb)
   2401		return ERR_PTR(-ENOMEM);
   2402
   2403	skb_mark_for_recycle(skb);
   2404
   2405	skb_reserve(skb, xdp->data - xdp->data_hard_start);
   2406	skb_put(skb, xdp->data_end - xdp->data);
   2407	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
   2408
   2409	if (unlikely(xdp_buff_has_frags(xdp)))
   2410		xdp_update_skb_shared_info(skb, num_frags,
   2411					   sinfo->xdp_frags_size,
   2412					   num_frags * xdp->frame_sz,
   2413					   xdp_buff_is_frag_pfmemalloc(xdp));
   2414
   2415	return skb;
   2416}
   2417
   2418/* Main rx processing when using software buffer management */
   2419static int mvneta_rx_swbm(struct napi_struct *napi,
   2420			  struct mvneta_port *pp, int budget,
   2421			  struct mvneta_rx_queue *rxq)
   2422{
   2423	int rx_proc = 0, rx_todo, refill, size = 0;
   2424	struct net_device *dev = pp->dev;
   2425	struct mvneta_stats ps = {};
   2426	struct bpf_prog *xdp_prog;
   2427	u32 desc_status, frame_sz;
   2428	struct xdp_buff xdp_buf;
   2429
   2430	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
   2431	xdp_buf.data_hard_start = NULL;
   2432
   2433	/* Get number of received packets */
   2434	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
   2435
   2436	xdp_prog = READ_ONCE(pp->xdp_prog);
   2437
   2438	/* Fairness NAPI loop */
   2439	while (rx_proc < budget && rx_proc < rx_todo) {
   2440		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
   2441		u32 rx_status, index;
   2442		struct sk_buff *skb;
   2443		struct page *page;
   2444
   2445		index = rx_desc - rxq->descs;
   2446		page = (struct page *)rxq->buf_virt_addr[index];
   2447
   2448		rx_status = rx_desc->status;
   2449		rx_proc++;
   2450		rxq->refill_num++;
   2451
   2452		if (rx_status & MVNETA_RXD_FIRST_DESC) {
   2453			/* Check errors only for FIRST descriptor */
   2454			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
   2455				mvneta_rx_error(pp, rx_desc);
   2456				goto next;
   2457			}
   2458
   2459			size = rx_desc->data_size;
   2460			frame_sz = size - ETH_FCS_LEN;
   2461			desc_status = rx_status;
   2462
   2463			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
   2464					     &size, page);
   2465		} else {
   2466			if (unlikely(!xdp_buf.data_hard_start)) {
   2467				rx_desc->buf_phys_addr = 0;
   2468				page_pool_put_full_page(rxq->page_pool, page,
   2469							true);
   2470				goto next;
   2471			}
   2472
   2473			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
   2474						    &size, page);
   2475		} /* Middle or Last descriptor */
   2476
   2477		if (!(rx_status & MVNETA_RXD_LAST_DESC))
   2478			/* no last descriptor this time */
   2479			continue;
   2480
   2481		if (size) {
   2482			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
   2483			goto next;
   2484		}
   2485
   2486		if (xdp_prog &&
   2487		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
   2488			goto next;
   2489
   2490		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
   2491		if (IS_ERR(skb)) {
   2492			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2493
   2494			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
   2495
   2496			u64_stats_update_begin(&stats->syncp);
   2497			stats->es.skb_alloc_error++;
   2498			stats->rx_dropped++;
   2499			u64_stats_update_end(&stats->syncp);
   2500
   2501			goto next;
   2502		}
   2503
   2504		ps.rx_bytes += skb->len;
   2505		ps.rx_packets++;
   2506
   2507		skb->protocol = eth_type_trans(skb, dev);
   2508		napi_gro_receive(napi, skb);
   2509next:
   2510		xdp_buf.data_hard_start = NULL;
   2511	}
   2512
   2513	if (xdp_buf.data_hard_start)
   2514		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
   2515
   2516	if (ps.xdp_redirect)
   2517		xdp_do_flush_map();
   2518
   2519	if (ps.rx_packets)
   2520		mvneta_update_stats(pp, &ps);
   2521
   2522	/* return some buffers to the hardware queue; one at a time is too slow */
   2523	refill = mvneta_rx_refill_queue(pp, rxq);
   2524
   2525	/* Update rxq management counters */
   2526	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
   2527
   2528	return ps.rx_packets;
   2529}
   2530
   2531/* Main rx processing when using hardware buffer management */
   2532static int mvneta_rx_hwbm(struct napi_struct *napi,
   2533			  struct mvneta_port *pp, int rx_todo,
   2534			  struct mvneta_rx_queue *rxq)
   2535{
   2536	struct net_device *dev = pp->dev;
   2537	int rx_done;
   2538	u32 rcvd_pkts = 0;
   2539	u32 rcvd_bytes = 0;
   2540
   2541	/* Get number of received packets */
   2542	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
   2543
   2544	if (rx_todo > rx_done)
   2545		rx_todo = rx_done;
   2546
   2547	rx_done = 0;
   2548
   2549	/* Fairness NAPI loop */
   2550	while (rx_done < rx_todo) {
   2551		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
   2552		struct mvneta_bm_pool *bm_pool = NULL;
   2553		struct sk_buff *skb;
   2554		unsigned char *data;
   2555		dma_addr_t phys_addr;
   2556		u32 rx_status, frag_size;
   2557		int rx_bytes, err;
   2558		u8 pool_id;
   2559
   2560		rx_done++;
   2561		rx_status = rx_desc->status;
   2562		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
   2563		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
   2564		phys_addr = rx_desc->buf_phys_addr;
   2565		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
   2566		bm_pool = &pp->bm_priv->bm_pools[pool_id];
   2567
   2568		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
   2569		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
   2570err_drop_frame_ret_pool:
   2571			/* Return the buffer to the pool */
   2572			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
   2573					      rx_desc->buf_phys_addr);
   2574err_drop_frame:
   2575			mvneta_rx_error(pp, rx_desc);
   2576			/* leave the descriptor untouched */
   2577			continue;
   2578		}
   2579
   2580		if (rx_bytes <= rx_copybreak) {
   2581			/* better copy a small frame and not unmap the DMA region */
   2582			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
   2583			if (unlikely(!skb))
   2584				goto err_drop_frame_ret_pool;
   2585
   2586			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
   2587			                              rx_desc->buf_phys_addr,
   2588			                              MVNETA_MH_SIZE + NET_SKB_PAD,
   2589			                              rx_bytes,
   2590			                              DMA_FROM_DEVICE);
   2591			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
   2592				     rx_bytes);
   2593
   2594			skb->protocol = eth_type_trans(skb, dev);
   2595			skb->ip_summed = mvneta_rx_csum(pp, rx_status);
   2596			napi_gro_receive(napi, skb);
   2597
   2598			rcvd_pkts++;
   2599			rcvd_bytes += rx_bytes;
   2600
   2601			/* Return the buffer to the pool */
   2602			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
   2603					      rx_desc->buf_phys_addr);
   2604
   2605			/* leave the descriptor and buffer untouched */
   2606			continue;
   2607		}
   2608
   2609		/* Refill processing */
   2610		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
   2611		if (err) {
   2612			struct mvneta_pcpu_stats *stats;
   2613
   2614			netdev_err(dev, "Linux processing - Can't refill\n");
   2615
   2616			stats = this_cpu_ptr(pp->stats);
   2617			u64_stats_update_begin(&stats->syncp);
   2618			stats->es.refill_error++;
   2619			u64_stats_update_end(&stats->syncp);
   2620
   2621			goto err_drop_frame_ret_pool;
   2622		}
   2623
   2624		frag_size = bm_pool->hwbm_pool.frag_size;
   2625
   2626		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
   2627
   2628		/* After refill the old buffer has to be unmapped regardless
   2629		 * of whether the skb was successfully built or not.
   2630		 */
   2631		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
   2632				 bm_pool->buf_size, DMA_FROM_DEVICE);
   2633		if (!skb)
   2634			goto err_drop_frame;
   2635
   2636		rcvd_pkts++;
   2637		rcvd_bytes += rx_bytes;
   2638
   2639		/* Linux processing */
   2640		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
   2641		skb_put(skb, rx_bytes);
   2642
   2643		skb->protocol = eth_type_trans(skb, dev);
   2644		skb->ip_summed = mvneta_rx_csum(pp, rx_status);
   2645
   2646		napi_gro_receive(napi, skb);
   2647	}
   2648
   2649	if (rcvd_pkts) {
   2650		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2651
   2652		u64_stats_update_begin(&stats->syncp);
   2653		stats->es.ps.rx_packets += rcvd_pkts;
   2654		stats->es.ps.rx_bytes += rcvd_bytes;
   2655		u64_stats_update_end(&stats->syncp);
   2656	}
   2657
   2658	/* Update rxq management counters */
   2659	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
   2660
   2661	return rx_done;
   2662}
   2663
   2664static inline void
   2665mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
   2666{
   2667	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
   2668	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
   2669	struct mvneta_tx_desc *tx_desc;
   2670
   2671	tx_desc = mvneta_txq_next_desc_get(txq);
   2672	tx_desc->data_size = hdr_len;
   2673	tx_desc->command = mvneta_skb_tx_csum(skb);
   2674	tx_desc->command |= MVNETA_TXD_F_DESC;
   2675	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
   2676				 txq->txq_put_index * TSO_HEADER_SIZE;
   2677	buf->type = MVNETA_TYPE_SKB;
   2678	buf->skb = NULL;
   2679
   2680	mvneta_txq_inc_put(txq);
   2681}
   2682
   2683static inline int
   2684mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
   2685		    struct sk_buff *skb, char *data, int size,
   2686		    bool last_tcp, bool is_last)
   2687{
   2688	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
   2689	struct mvneta_tx_desc *tx_desc;
   2690
   2691	tx_desc = mvneta_txq_next_desc_get(txq);
   2692	tx_desc->data_size = size;
   2693	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
   2694						size, DMA_TO_DEVICE);
   2695	if (unlikely(dma_mapping_error(dev->dev.parent,
   2696		     tx_desc->buf_phys_addr))) {
   2697		mvneta_txq_desc_put(txq);
   2698		return -ENOMEM;
   2699	}
   2700
   2701	tx_desc->command = 0;
   2702	buf->type = MVNETA_TYPE_SKB;
   2703	buf->skb = NULL;
   2704
   2705	if (last_tcp) {
   2706		/* last descriptor in the TCP packet */
   2707		tx_desc->command = MVNETA_TXD_L_DESC;
   2708
   2709		/* last descriptor in SKB */
   2710		if (is_last)
   2711			buf->skb = skb;
   2712	}
   2713	mvneta_txq_inc_put(txq);
   2714	return 0;
   2715}
   2716
   2717static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
   2718			 struct mvneta_tx_queue *txq)
   2719{
   2720	int hdr_len, total_len, data_left;
   2721	int desc_count = 0;
   2722	struct mvneta_port *pp = netdev_priv(dev);
   2723	struct tso_t tso;
   2724	int i;
   2725
   2726	/* Count needed descriptors */
   2727	if ((txq->count + tso_count_descs(skb)) >= txq->size)
   2728		return 0;
   2729
   2730	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
   2731		pr_info("*** Is this even possible?\n");
   2732		return 0;
   2733	}
   2734
   2735	/* Initialize the TSO handler, and prepare the first payload */
   2736	hdr_len = tso_start(skb, &tso);
   2737
   2738	total_len = skb->len - hdr_len;
   2739	while (total_len > 0) {
   2740		char *hdr;
   2741
   2742		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
   2743		total_len -= data_left;
   2744		desc_count++;
   2745
   2746		/* prepare packet headers: MAC + IP + TCP */
   2747		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
   2748		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
   2749
   2750		mvneta_tso_put_hdr(skb, txq);
   2751
   2752		while (data_left > 0) {
   2753			int size;
   2754			desc_count++;
   2755
   2756			size = min_t(int, tso.size, data_left);
   2757
   2758			if (mvneta_tso_put_data(dev, txq, skb,
   2759						 tso.data, size,
   2760						 size == data_left,
   2761						 total_len == 0))
   2762				goto err_release;
   2763			data_left -= size;
   2764
   2765			tso_build_data(skb, &tso, size);
   2766		}
   2767	}
   2768
   2769	return desc_count;
   2770
   2771err_release:
   2772	/* Release all used data descriptors; header descriptors must not
   2773	 * be DMA-unmapped.
   2774	 */
   2775	for (i = desc_count - 1; i >= 0; i--) {
   2776		struct mvneta_tx_desc *tx_desc = txq->descs + i;
   2777		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
   2778			dma_unmap_single(pp->dev->dev.parent,
   2779					 tx_desc->buf_phys_addr,
   2780					 tx_desc->data_size,
   2781					 DMA_TO_DEVICE);
   2782		mvneta_txq_desc_put(txq);
   2783	}
   2784	return 0;
   2785}
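/* Editor's worked example for the TSO loop above (illustrative only, with
 * assumed sizes): a GSO skb carrying 4200 bytes of TCP payload with
 * gso_size = 1400 is cut into three segments; each segment consumes one
 * header descriptor (taken from the tso_hdrs area, so never DMA-unmapped)
 * plus one data descriptor when the whole payload sits in the linear area,
 * giving desc_count = 3 + 3 = 6.
 */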
   2786
   2787/* Handle tx fragmentation processing */
   2788static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
   2789				  struct mvneta_tx_queue *txq)
   2790{
   2791	struct mvneta_tx_desc *tx_desc;
   2792	int i, nr_frags = skb_shinfo(skb)->nr_frags;
   2793
   2794	for (i = 0; i < nr_frags; i++) {
   2795		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
   2796		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   2797		void *addr = skb_frag_address(frag);
   2798
   2799		tx_desc = mvneta_txq_next_desc_get(txq);
   2800		tx_desc->data_size = skb_frag_size(frag);
   2801
   2802		tx_desc->buf_phys_addr =
   2803			dma_map_single(pp->dev->dev.parent, addr,
   2804				       tx_desc->data_size, DMA_TO_DEVICE);
   2805
   2806		if (dma_mapping_error(pp->dev->dev.parent,
   2807				      tx_desc->buf_phys_addr)) {
   2808			mvneta_txq_desc_put(txq);
   2809			goto error;
   2810		}
   2811
   2812		if (i == nr_frags - 1) {
   2813			/* Last descriptor */
   2814			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
   2815			buf->skb = skb;
   2816		} else {
   2817			/* Descriptor in the middle: Not First, Not Last */
   2818			tx_desc->command = 0;
   2819			buf->skb = NULL;
   2820		}
   2821		buf->type = MVNETA_TYPE_SKB;
   2822		mvneta_txq_inc_put(txq);
   2823	}
   2824
   2825	return 0;
   2826
   2827error:
   2828	/* Release all descriptors that were used to map fragments of
   2829	 * this packet, as well as the corresponding DMA mappings
   2830	 */
   2831	for (i = i - 1; i >= 0; i--) {
   2832		tx_desc = txq->descs + i;
   2833		dma_unmap_single(pp->dev->dev.parent,
   2834				 tx_desc->buf_phys_addr,
   2835				 tx_desc->data_size,
   2836				 DMA_TO_DEVICE);
   2837		mvneta_txq_desc_put(txq);
   2838	}
   2839
   2840	return -ENOMEM;
   2841}
   2842
   2843/* Main tx processing */
   2844static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
   2845{
   2846	struct mvneta_port *pp = netdev_priv(dev);
   2847	u16 txq_id = skb_get_queue_mapping(skb);
   2848	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
   2849	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
   2850	struct mvneta_tx_desc *tx_desc;
   2851	int len = skb->len;
   2852	int frags = 0;
   2853	u32 tx_cmd;
   2854
   2855	if (!netif_running(dev))
   2856		goto out;
   2857
   2858	if (skb_is_gso(skb)) {
   2859		frags = mvneta_tx_tso(skb, dev, txq);
   2860		goto out;
   2861	}
   2862
   2863	frags = skb_shinfo(skb)->nr_frags + 1;
   2864
   2865	/* Get a descriptor for the first part of the packet */
   2866	tx_desc = mvneta_txq_next_desc_get(txq);
   2867
   2868	tx_cmd = mvneta_skb_tx_csum(skb);
   2869
   2870	tx_desc->data_size = skb_headlen(skb);
   2871
   2872	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
   2873						tx_desc->data_size,
   2874						DMA_TO_DEVICE);
   2875	if (unlikely(dma_mapping_error(dev->dev.parent,
   2876				       tx_desc->buf_phys_addr))) {
   2877		mvneta_txq_desc_put(txq);
   2878		frags = 0;
   2879		goto out;
   2880	}
   2881
   2882	buf->type = MVNETA_TYPE_SKB;
   2883	if (frags == 1) {
   2884		/* First and Last descriptor */
   2885		tx_cmd |= MVNETA_TXD_FLZ_DESC;
   2886		tx_desc->command = tx_cmd;
   2887		buf->skb = skb;
   2888		mvneta_txq_inc_put(txq);
   2889	} else {
   2890		/* First but not Last */
   2891		tx_cmd |= MVNETA_TXD_F_DESC;
   2892		buf->skb = NULL;
   2893		mvneta_txq_inc_put(txq);
   2894		tx_desc->command = tx_cmd;
   2895		/* Continue with other skb fragments */
   2896		if (mvneta_tx_frag_process(pp, skb, txq)) {
   2897			dma_unmap_single(dev->dev.parent,
   2898					 tx_desc->buf_phys_addr,
   2899					 tx_desc->data_size,
   2900					 DMA_TO_DEVICE);
   2901			mvneta_txq_desc_put(txq);
   2902			frags = 0;
   2903			goto out;
   2904		}
   2905	}
   2906
   2907out:
   2908	if (frags > 0) {
   2909		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
   2910		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
   2911
   2912		netdev_tx_sent_queue(nq, len);
   2913
   2914		txq->count += frags;
   2915		if (txq->count >= txq->tx_stop_threshold)
   2916			netif_tx_stop_queue(nq);
   2917
   2918		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
   2919		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
   2920			mvneta_txq_pend_desc_add(pp, txq, frags);
   2921		else
   2922			txq->pending += frags;
   2923
   2924		u64_stats_update_begin(&stats->syncp);
   2925		stats->es.ps.tx_bytes += len;
   2926		stats->es.ps.tx_packets++;
   2927		u64_stats_update_end(&stats->syncp);
   2928	} else {
   2929		dev->stats.tx_dropped++;
   2930		dev_kfree_skb_any(skb);
   2931	}
   2932
   2933	return NETDEV_TX_OK;
   2934}
   2935
   2936
   2937/* Free tx resources, when resetting a port */
   2938static void mvneta_txq_done_force(struct mvneta_port *pp,
   2939				  struct mvneta_tx_queue *txq)
   2940
   2941{
   2942	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
   2943	int tx_done = txq->count;
   2944
   2945	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
   2946
   2947	/* reset txq */
   2948	txq->count = 0;
   2949	txq->txq_put_index = 0;
   2950	txq->txq_get_index = 0;
   2951}
   2952
   2953/* Handle tx done - called in softirq context. The <cause_tx_done> argument
   2954 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
   2955 */
   2956static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
   2957{
   2958	struct mvneta_tx_queue *txq;
   2959	struct netdev_queue *nq;
   2960	int cpu = smp_processor_id();
   2961
   2962	while (cause_tx_done) {
   2963		txq = mvneta_tx_done_policy(pp, cause_tx_done);
   2964
   2965		nq = netdev_get_tx_queue(pp->dev, txq->id);
   2966		__netif_tx_lock(nq, cpu);
   2967
   2968		if (txq->count)
   2969			mvneta_txq_done(pp, txq);
   2970
   2971		__netif_tx_unlock(nq);
   2972		cause_tx_done &= ~((1 << txq->id));
   2973	}
   2974}
   2975
   2976/* Compute the crc8 of the specified address, using an algorithm that is
   2977 * unique to the hw spec and different from the generic crc8 algorithm.
   2978 */
   2979static int mvneta_addr_crc(unsigned char *addr)
   2980{
   2981	int crc = 0;
   2982	int i;
   2983
   2984	for (i = 0; i < ETH_ALEN; i++) {
   2985		int j;
   2986
   2987		crc = (crc ^ addr[i]) << 8;
   2988		for (j = 7; j >= 0; j--) {
   2989			if (crc & (0x100 << j))
   2990				crc ^= 0x107 << j;
   2991		}
   2992	}
   2993
   2994	return crc;
   2995}
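/* Editor's illustrative sketch (not part of the driver; the helper name is
 * made up). The loop above divides the six address bytes, MSB first, by the
 * polynomial 0x107 (x^8 + x^2 + x + 1) and keeps the 8-bit remainder, which
 * indexes the 256-entry Other Multicast table. The same computation written
 * as a conventional 8-bit shift register:
 */
static inline int mvneta_addr_crc_sketch(const unsigned char *addr)
{
	unsigned int crc = 0;
	int i, j;

	for (i = 0; i < ETH_ALEN; i++) {
		crc ^= addr[i];
		for (j = 0; j < 8; j++)
			crc = (crc & 0x80) ? ((crc << 1) ^ 0x07) & 0xff
					   : (crc << 1) & 0xff;
	}

	return crc;
}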
   2996
   2997/* This method controls the net device special MAC multicast support.
   2998 * The Special Multicast Table for MAC addresses supports MAC of the form
   2999 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
   3000 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
   3001 * Table entries in the DA-Filter table. This method sets the appropriate
   3002 * entry in the Special Multicast Table.
   3003 */
   3004static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
   3005					  unsigned char last_byte,
   3006					  int queue)
   3007{
   3008	unsigned int smc_table_reg;
   3009	unsigned int tbl_offset;
   3010	unsigned int reg_offset;
   3011
   3012	/* Register offset from SMC table base    */
   3013	tbl_offset = (last_byte / 4);
   3014	/* Entry offset within the above reg */
   3015	reg_offset = last_byte % 4;
   3016
   3017	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
   3018					+ tbl_offset * 4));
   3019
   3020	if (queue == -1)
   3021		smc_table_reg &= ~(0xff << (8 * reg_offset));
   3022	else {
   3023		smc_table_reg &= ~(0xff << (8 * reg_offset));
   3024		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
   3025	}
   3026
   3027	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
   3028		    smc_table_reg);
   3029}
   3030
   3031/* This method controls the network device Other MAC multicast support.
   3032 * The Other Multicast Table is used for multicast of another type.
   3033 * A CRC-8 is used as an index to the Other Multicast Table entries
   3034 * in the DA-Filter table.
   3035 * The method gets the CRC-8 value from the calling routine and
   3036 * sets the appropriate Other Multicast Table entry according to the
   3037 * specified CRC-8.
   3038 */
   3039static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
   3040					unsigned char crc8,
   3041					int queue)
   3042{
   3043	unsigned int omc_table_reg;
   3044	unsigned int tbl_offset;
   3045	unsigned int reg_offset;
   3046
   3047	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
   3048	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
   3049
   3050	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
   3051
   3052	if (queue == -1) {
   3053		/* Clear accepts frame bit at specified Other DA table entry */
   3054		omc_table_reg &= ~(0xff << (8 * reg_offset));
   3055	} else {
   3056		omc_table_reg &= ~(0xff << (8 * reg_offset));
   3057		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
   3058	}
   3059
   3060	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
   3061}
   3062
   3063/* The network device supports multicast using two tables:
   3064 *    1) Special Multicast Table for MAC addresses of the form
   3065 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
   3066 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
   3067 *       Table entries in the DA-Filter table.
   3068 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
   3069 *       is used as an index to the Other Multicast Table entries in the
   3070 *       DA-Filter table.
   3071 */
   3072static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
   3073				 int queue)
   3074{
   3075	unsigned char crc_result = 0;
   3076
   3077	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
   3078		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
   3079		return 0;
   3080	}
   3081
   3082	crc_result = mvneta_addr_crc(p_addr);
   3083	if (queue == -1) {
   3084		if (pp->mcast_count[crc_result] == 0) {
   3085			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
   3086				    crc_result);
   3087			return -EINVAL;
   3088		}
   3089
   3090		pp->mcast_count[crc_result]--;
   3091		if (pp->mcast_count[crc_result] != 0) {
   3092			netdev_info(pp->dev,
   3093				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
   3094				    pp->mcast_count[crc_result], crc_result);
   3095			return -EINVAL;
   3096		}
   3097	} else
   3098		pp->mcast_count[crc_result]++;
   3099
   3100	mvneta_set_other_mcast_addr(pp, crc_result, queue);
   3101
   3102	return 0;
   3103}
   3104
   3105/* Configure the filtering mode of the Ethernet port */
   3106static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
   3107					  int is_promisc)
   3108{
   3109	u32 port_cfg_reg, val;
   3110
   3111	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
   3112
   3113	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
   3114
   3115	/* Set / Clear UPM bit in port configuration register */
   3116	if (is_promisc) {
   3117		/* Accept all Unicast addresses */
   3118		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
   3119		val |= MVNETA_FORCE_UNI;
   3120		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
   3121		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
   3122	} else {
   3123		/* Reject all Unicast addresses */
   3124		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
   3125		val &= ~MVNETA_FORCE_UNI;
   3126	}
   3127
   3128	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
   3129	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
   3130}
   3131
   3132/* register unicast and multicast addresses */
   3133static void mvneta_set_rx_mode(struct net_device *dev)
   3134{
   3135	struct mvneta_port *pp = netdev_priv(dev);
   3136	struct netdev_hw_addr *ha;
   3137
   3138	if (dev->flags & IFF_PROMISC) {
   3139		/* Accept all: Multicast + Unicast */
   3140		mvneta_rx_unicast_promisc_set(pp, 1);
   3141		mvneta_set_ucast_table(pp, pp->rxq_def);
   3142		mvneta_set_special_mcast_table(pp, pp->rxq_def);
   3143		mvneta_set_other_mcast_table(pp, pp->rxq_def);
   3144	} else {
   3145		/* Accept single Unicast */
   3146		mvneta_rx_unicast_promisc_set(pp, 0);
   3147		mvneta_set_ucast_table(pp, -1);
   3148		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
   3149
   3150		if (dev->flags & IFF_ALLMULTI) {
   3151			/* Accept all multicast */
   3152			mvneta_set_special_mcast_table(pp, pp->rxq_def);
   3153			mvneta_set_other_mcast_table(pp, pp->rxq_def);
   3154		} else {
   3155			/* Accept only initialized multicast */
   3156			mvneta_set_special_mcast_table(pp, -1);
   3157			mvneta_set_other_mcast_table(pp, -1);
   3158
   3159			if (!netdev_mc_empty(dev)) {
   3160				netdev_for_each_mc_addr(ha, dev) {
   3161					mvneta_mcast_addr_set(pp, ha->addr,
   3162							      pp->rxq_def);
   3163				}
   3164			}
   3165		}
   3166	}
   3167}
   3168
   3169/* Interrupt handling - the callback for request_irq() */
   3170static irqreturn_t mvneta_isr(int irq, void *dev_id)
   3171{
   3172	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
   3173
   3174	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
   3175	napi_schedule(&pp->napi);
   3176
   3177	return IRQ_HANDLED;
   3178}
   3179
   3180/* Interrupt handling - the callback for request_percpu_irq() */
   3181static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
   3182{
   3183	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
   3184
   3185	disable_percpu_irq(port->pp->dev->irq);
   3186	napi_schedule(&port->napi);
   3187
   3188	return IRQ_HANDLED;
   3189}
   3190
   3191static void mvneta_link_change(struct mvneta_port *pp)
   3192{
   3193	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
   3194
   3195	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
   3196}
   3197
    3198/* NAPI handler
    3199 * Bits 0 - 7 of the causeRxTx register indicate that packets were
    3200 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
    3201 * Bits 8 - 15 of the causeRxTx register indicate that packets were
    3202 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
    3203 * Each CPU has its own causeRxTx register.
    3204 */
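        /* Example: a causeRxTx value of 0x0300 means RX queues 0 and 1 have
         * pending packets; fls(0x03) - 1 = 1, so mvneta_poll() services the
         * highest-numbered pending RX queue (queue 1 here).
         */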
   3205static int mvneta_poll(struct napi_struct *napi, int budget)
   3206{
   3207	int rx_done = 0;
   3208	u32 cause_rx_tx;
   3209	int rx_queue;
   3210	struct mvneta_port *pp = netdev_priv(napi->dev);
   3211	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
   3212
   3213	if (!netif_running(pp->dev)) {
   3214		napi_complete(napi);
   3215		return rx_done;
   3216	}
   3217
   3218	/* Read cause register */
   3219	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
   3220	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
   3221		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
   3222
   3223		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
   3224
   3225		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
   3226				  MVNETA_CAUSE_LINK_CHANGE))
   3227			mvneta_link_change(pp);
   3228	}
   3229
   3230	/* Release Tx descriptors */
   3231	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
   3232		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
   3233		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
   3234	}
   3235
   3236	/* For the case where the last mvneta_poll did not process all
   3237	 * RX packets
   3238	 */
   3239	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
   3240		port->cause_rx_tx;
   3241
   3242	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
   3243	if (rx_queue) {
   3244		rx_queue = rx_queue - 1;
   3245		if (pp->bm_priv)
   3246			rx_done = mvneta_rx_hwbm(napi, pp, budget,
   3247						 &pp->rxqs[rx_queue]);
   3248		else
   3249			rx_done = mvneta_rx_swbm(napi, pp, budget,
   3250						 &pp->rxqs[rx_queue]);
   3251	}
   3252
   3253	if (rx_done < budget) {
   3254		cause_rx_tx = 0;
   3255		napi_complete_done(napi, rx_done);
   3256
   3257		if (pp->neta_armada3700) {
   3258			unsigned long flags;
   3259
   3260			local_irq_save(flags);
   3261			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
   3262				    MVNETA_RX_INTR_MASK(rxq_number) |
   3263				    MVNETA_TX_INTR_MASK(txq_number) |
   3264				    MVNETA_MISCINTR_INTR_MASK);
   3265			local_irq_restore(flags);
   3266		} else {
   3267			enable_percpu_irq(pp->dev->irq, 0);
   3268		}
   3269	}
   3270
   3271	if (pp->neta_armada3700)
   3272		pp->cause_rx_tx = cause_rx_tx;
   3273	else
   3274		port->cause_rx_tx = cause_rx_tx;
   3275
   3276	return rx_done;
   3277}
   3278
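        /* Create the page_pool backing an RX queue and register the queue's
         * xdp_rxq_info with the pool as its memory model. Buffers are
         * DMA-mapped bidirectionally when an XDP program is attached so that
         * XDP_TX can transmit straight from the pool pages.
         */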
   3279static int mvneta_create_page_pool(struct mvneta_port *pp,
   3280				   struct mvneta_rx_queue *rxq, int size)
   3281{
   3282	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
   3283	struct page_pool_params pp_params = {
   3284		.order = 0,
   3285		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
   3286		.pool_size = size,
   3287		.nid = NUMA_NO_NODE,
   3288		.dev = pp->dev->dev.parent,
   3289		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
   3290		.offset = pp->rx_offset_correction,
   3291		.max_len = MVNETA_MAX_RX_BUF_SIZE,
   3292	};
   3293	int err;
   3294
   3295	rxq->page_pool = page_pool_create(&pp_params);
   3296	if (IS_ERR(rxq->page_pool)) {
   3297		err = PTR_ERR(rxq->page_pool);
   3298		rxq->page_pool = NULL;
   3299		return err;
   3300	}
   3301
   3302	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
   3303				 PAGE_SIZE);
   3304	if (err < 0)
   3305		goto err_free_pp;
   3306
   3307	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
   3308					 rxq->page_pool);
   3309	if (err)
   3310		goto err_unregister_rxq;
   3311
   3312	return 0;
   3313
   3314err_unregister_rxq:
   3315	xdp_rxq_info_unreg(&rxq->xdp_rxq);
   3316err_free_pp:
   3317	page_pool_destroy(rxq->page_pool);
   3318	rxq->page_pool = NULL;
   3319	return err;
   3320}
   3321
    3322/* Handle rxq fill: allocates rxq buffers; called when initializing a port */
   3323static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
   3324			   int num)
   3325{
   3326	int i, err;
   3327
   3328	err = mvneta_create_page_pool(pp, rxq, num);
   3329	if (err < 0)
   3330		return err;
   3331
   3332	for (i = 0; i < num; i++) {
   3333		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
   3334		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
   3335				     GFP_KERNEL) != 0) {
   3336			netdev_err(pp->dev,
    3337				   "%s: rxq %d, %d of %d buffs filled\n",
   3338				   __func__, rxq->id, i, num);
   3339			break;
   3340		}
   3341	}
   3342
   3343	/* Add this number of RX descriptors as non occupied (ready to
   3344	 * get packets)
   3345	 */
   3346	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
   3347
   3348	return i;
   3349}
   3350
   3351/* Free all packets pending transmit from all TXQs and reset TX port */
   3352static void mvneta_tx_reset(struct mvneta_port *pp)
   3353{
   3354	int queue;
   3355
   3356	/* free the skb's in the tx ring */
   3357	for (queue = 0; queue < txq_number; queue++)
   3358		mvneta_txq_done_force(pp, &pp->txqs[queue]);
   3359
   3360	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
   3361	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
   3362}
   3363
   3364static void mvneta_rx_reset(struct mvneta_port *pp)
   3365{
   3366	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
   3367	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
   3368}
   3369
   3370/* Rx/Tx queue initialization/cleanup methods */
   3371
   3372static int mvneta_rxq_sw_init(struct mvneta_port *pp,
   3373			      struct mvneta_rx_queue *rxq)
   3374{
   3375	rxq->size = pp->rx_ring_size;
   3376
   3377	/* Allocate memory for RX descriptors */
   3378	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
   3379					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
   3380					&rxq->descs_phys, GFP_KERNEL);
   3381	if (!rxq->descs)
   3382		return -ENOMEM;
   3383
   3384	rxq->last_desc = rxq->size - 1;
   3385
   3386	return 0;
   3387}
   3388
   3389static void mvneta_rxq_hw_init(struct mvneta_port *pp,
   3390			       struct mvneta_rx_queue *rxq)
   3391{
   3392	/* Set Rx descriptors queue starting address */
   3393	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
   3394	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
   3395
   3396	/* Set coalescing pkts and time */
   3397	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
   3398	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
   3399
   3400	if (!pp->bm_priv) {
   3401		/* Set Offset */
   3402		mvneta_rxq_offset_set(pp, rxq, 0);
   3403		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
   3404					MVNETA_MAX_RX_BUF_SIZE :
   3405					MVNETA_RX_BUF_SIZE(pp->pkt_size));
   3406		mvneta_rxq_bm_disable(pp, rxq);
   3407		mvneta_rxq_fill(pp, rxq, rxq->size);
   3408	} else {
   3409		/* Set Offset */
   3410		mvneta_rxq_offset_set(pp, rxq,
   3411				      NET_SKB_PAD - pp->rx_offset_correction);
   3412
   3413		mvneta_rxq_bm_enable(pp, rxq);
   3414		/* Fill RXQ with buffers from RX pool */
   3415		mvneta_rxq_long_pool_set(pp, rxq);
   3416		mvneta_rxq_short_pool_set(pp, rxq);
   3417		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
   3418	}
   3419}
   3420
   3421/* Create a specified RX queue */
   3422static int mvneta_rxq_init(struct mvneta_port *pp,
   3423			   struct mvneta_rx_queue *rxq)
   3424
   3425{
   3426	int ret;
   3427
   3428	ret = mvneta_rxq_sw_init(pp, rxq);
   3429	if (ret < 0)
   3430		return ret;
   3431
   3432	mvneta_rxq_hw_init(pp, rxq);
   3433
   3434	return 0;
   3435}
   3436
   3437/* Cleanup Rx queue */
   3438static void mvneta_rxq_deinit(struct mvneta_port *pp,
   3439			      struct mvneta_rx_queue *rxq)
   3440{
   3441	mvneta_rxq_drop_pkts(pp, rxq);
   3442
   3443	if (rxq->descs)
   3444		dma_free_coherent(pp->dev->dev.parent,
   3445				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
   3446				  rxq->descs,
   3447				  rxq->descs_phys);
   3448
   3449	rxq->descs             = NULL;
   3450	rxq->last_desc         = 0;
   3451	rxq->next_desc_to_proc = 0;
   3452	rxq->descs_phys        = 0;
   3453	rxq->first_to_refill   = 0;
   3454	rxq->refill_num        = 0;
   3455}
   3456
   3457static int mvneta_txq_sw_init(struct mvneta_port *pp,
   3458			      struct mvneta_tx_queue *txq)
   3459{
   3460	int cpu;
   3461
   3462	txq->size = pp->tx_ring_size;
   3463
    3464	/* A queue must always have room for at least one skb.
    3465	 * Therefore, stop the queue when the number of free entries
    3466	 * drops to the maximum number of descriptors per skb.
    3467	 */
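        	/* The wake threshold is half the stop threshold to add
        	 * hysteresis, so the queue does not bounce between stopped and
        	 * started on every completed frame.
        	 */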
   3468	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
   3469	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
   3470
   3471	/* Allocate memory for TX descriptors */
   3472	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
   3473					txq->size * MVNETA_DESC_ALIGNED_SIZE,
   3474					&txq->descs_phys, GFP_KERNEL);
   3475	if (!txq->descs)
   3476		return -ENOMEM;
   3477
   3478	txq->last_desc = txq->size - 1;
   3479
   3480	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
   3481	if (!txq->buf)
   3482		return -ENOMEM;
   3483
   3484	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
   3485	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
   3486					   txq->size * TSO_HEADER_SIZE,
   3487					   &txq->tso_hdrs_phys, GFP_KERNEL);
   3488	if (!txq->tso_hdrs)
   3489		return -ENOMEM;
   3490
   3491	/* Setup XPS mapping */
   3492	if (pp->neta_armada3700)
   3493		cpu = 0;
   3494	else if (txq_number > 1)
   3495		cpu = txq->id % num_present_cpus();
   3496	else
   3497		cpu = pp->rxq_def % num_present_cpus();
   3498	cpumask_set_cpu(cpu, &txq->affinity_mask);
   3499	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
   3500
   3501	return 0;
   3502}
   3503
   3504static void mvneta_txq_hw_init(struct mvneta_port *pp,
   3505			       struct mvneta_tx_queue *txq)
   3506{
   3507	/* Set maximum bandwidth for enabled TXQs */
   3508	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
   3509	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
   3510
   3511	/* Set Tx descriptors queue starting address */
   3512	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
   3513	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
   3514
   3515	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
   3516}
   3517
   3518/* Create and initialize a tx queue */
   3519static int mvneta_txq_init(struct mvneta_port *pp,
   3520			   struct mvneta_tx_queue *txq)
   3521{
   3522	int ret;
   3523
   3524	ret = mvneta_txq_sw_init(pp, txq);
   3525	if (ret < 0)
   3526		return ret;
   3527
   3528	mvneta_txq_hw_init(pp, txq);
   3529
   3530	return 0;
   3531}
   3532
    3533/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
   3534static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
   3535				 struct mvneta_tx_queue *txq)
   3536{
   3537	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
   3538
   3539	kfree(txq->buf);
   3540
   3541	if (txq->tso_hdrs)
   3542		dma_free_coherent(pp->dev->dev.parent,
   3543				  txq->size * TSO_HEADER_SIZE,
   3544				  txq->tso_hdrs, txq->tso_hdrs_phys);
   3545	if (txq->descs)
   3546		dma_free_coherent(pp->dev->dev.parent,
   3547				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
   3548				  txq->descs, txq->descs_phys);
   3549
   3550	netdev_tx_reset_queue(nq);
   3551
   3552	txq->descs             = NULL;
   3553	txq->last_desc         = 0;
   3554	txq->next_desc_to_proc = 0;
   3555	txq->descs_phys        = 0;
   3556}
   3557
   3558static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
   3559				 struct mvneta_tx_queue *txq)
   3560{
   3561	/* Set minimum bandwidth for disabled TXQs */
   3562	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
   3563	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
   3564
   3565	/* Set Tx descriptors queue starting address and size */
   3566	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
   3567	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
   3568}
   3569
   3570static void mvneta_txq_deinit(struct mvneta_port *pp,
   3571			      struct mvneta_tx_queue *txq)
   3572{
   3573	mvneta_txq_sw_deinit(pp, txq);
   3574	mvneta_txq_hw_deinit(pp, txq);
   3575}
   3576
   3577/* Cleanup all Tx queues */
   3578static void mvneta_cleanup_txqs(struct mvneta_port *pp)
   3579{
   3580	int queue;
   3581
   3582	for (queue = 0; queue < txq_number; queue++)
   3583		mvneta_txq_deinit(pp, &pp->txqs[queue]);
   3584}
   3585
   3586/* Cleanup all Rx queues */
   3587static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
   3588{
   3589	int queue;
   3590
   3591	for (queue = 0; queue < rxq_number; queue++)
   3592		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
   3593}
   3594
   3595
   3596/* Init all Rx queues */
   3597static int mvneta_setup_rxqs(struct mvneta_port *pp)
   3598{
   3599	int queue;
   3600
   3601	for (queue = 0; queue < rxq_number; queue++) {
   3602		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
   3603
   3604		if (err) {
   3605			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
   3606				   __func__, queue);
   3607			mvneta_cleanup_rxqs(pp);
   3608			return err;
   3609		}
   3610	}
   3611
   3612	return 0;
   3613}
   3614
   3615/* Init all tx queues */
   3616static int mvneta_setup_txqs(struct mvneta_port *pp)
   3617{
   3618	int queue;
   3619
   3620	for (queue = 0; queue < txq_number; queue++) {
   3621		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
   3622		if (err) {
   3623			netdev_err(pp->dev, "%s: can't create txq=%d\n",
   3624				   __func__, queue);
   3625			mvneta_cleanup_txqs(pp);
   3626			return err;
   3627		}
   3628	}
   3629
   3630	return 0;
   3631}
   3632
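        /* Set the common PHY (comphy) to Ethernet mode for the requested
         * interface and power it on.
         */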
   3633static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
   3634{
   3635	int ret;
   3636
   3637	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
   3638	if (ret)
   3639		return ret;
   3640
   3641	return phy_power_on(pp->comphy);
   3642}
   3643
   3644static int mvneta_config_interface(struct mvneta_port *pp,
   3645				   phy_interface_t interface)
   3646{
   3647	int ret = 0;
   3648
   3649	if (pp->comphy) {
   3650		if (interface == PHY_INTERFACE_MODE_SGMII ||
   3651		    interface == PHY_INTERFACE_MODE_1000BASEX ||
   3652		    interface == PHY_INTERFACE_MODE_2500BASEX) {
   3653			ret = mvneta_comphy_init(pp, interface);
   3654		}
   3655	} else {
   3656		switch (interface) {
   3657		case PHY_INTERFACE_MODE_QSGMII:
   3658			mvreg_write(pp, MVNETA_SERDES_CFG,
   3659				    MVNETA_QSGMII_SERDES_PROTO);
   3660			break;
   3661
   3662		case PHY_INTERFACE_MODE_SGMII:
   3663		case PHY_INTERFACE_MODE_1000BASEX:
   3664			mvreg_write(pp, MVNETA_SERDES_CFG,
   3665				    MVNETA_SGMII_SERDES_PROTO);
   3666			break;
   3667
   3668		case PHY_INTERFACE_MODE_2500BASEX:
   3669			mvreg_write(pp, MVNETA_SERDES_CFG,
   3670				    MVNETA_HSGMII_SERDES_PROTO);
   3671			break;
   3672		default:
   3673			break;
   3674		}
   3675	}
   3676
   3677	pp->phy_interface = interface;
   3678
   3679	return ret;
   3680}
   3681
   3682static void mvneta_start_dev(struct mvneta_port *pp)
   3683{
   3684	int cpu;
   3685
   3686	WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
   3687
   3688	mvneta_max_rx_size_set(pp, pp->pkt_size);
   3689	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
   3690
   3691	/* start the Rx/Tx activity */
   3692	mvneta_port_enable(pp);
   3693
   3694	if (!pp->neta_armada3700) {
   3695		/* Enable polling on the port */
   3696		for_each_online_cpu(cpu) {
   3697			struct mvneta_pcpu_port *port =
   3698				per_cpu_ptr(pp->ports, cpu);
   3699
   3700			napi_enable(&port->napi);
   3701		}
   3702	} else {
   3703		napi_enable(&pp->napi);
   3704	}
   3705
   3706	/* Unmask interrupts. It has to be done from each CPU */
   3707	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
   3708
   3709	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
   3710		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
   3711		    MVNETA_CAUSE_LINK_CHANGE);
   3712
   3713	phylink_start(pp->phylink);
   3714
   3715	/* We may have called phylink_speed_down before */
   3716	phylink_speed_up(pp->phylink);
   3717
   3718	netif_tx_start_all_queues(pp->dev);
   3719
   3720	clear_bit(__MVNETA_DOWN, &pp->state);
   3721}
   3722
   3723static void mvneta_stop_dev(struct mvneta_port *pp)
   3724{
   3725	unsigned int cpu;
   3726
   3727	set_bit(__MVNETA_DOWN, &pp->state);
   3728
   3729	if (device_may_wakeup(&pp->dev->dev))
   3730		phylink_speed_down(pp->phylink, false);
   3731
   3732	phylink_stop(pp->phylink);
   3733
   3734	if (!pp->neta_armada3700) {
   3735		for_each_online_cpu(cpu) {
   3736			struct mvneta_pcpu_port *port =
   3737				per_cpu_ptr(pp->ports, cpu);
   3738
   3739			napi_disable(&port->napi);
   3740		}
   3741	} else {
   3742		napi_disable(&pp->napi);
   3743	}
   3744
   3745	netif_carrier_off(pp->dev);
   3746
   3747	mvneta_port_down(pp);
   3748	netif_tx_stop_all_queues(pp->dev);
   3749
   3750	/* Stop the port activity */
   3751	mvneta_port_disable(pp);
   3752
   3753	/* Clear all ethernet port interrupts */
   3754	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
   3755
   3756	/* Mask all ethernet port interrupts */
   3757	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
   3758
   3759	mvneta_tx_reset(pp);
   3760	mvneta_rx_reset(pp);
   3761
   3762	WARN_ON(phy_power_off(pp->comphy));
   3763}
   3764
   3765static void mvneta_percpu_enable(void *arg)
   3766{
   3767	struct mvneta_port *pp = arg;
   3768
   3769	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
   3770}
   3771
   3772static void mvneta_percpu_disable(void *arg)
   3773{
   3774	struct mvneta_port *pp = arg;
   3775
   3776	disable_percpu_irq(pp->dev->irq);
   3777}
   3778
   3779/* Change the device mtu */
   3780static int mvneta_change_mtu(struct net_device *dev, int mtu)
   3781{
   3782	struct mvneta_port *pp = netdev_priv(dev);
   3783	struct bpf_prog *prog = pp->xdp_prog;
   3784	int ret;
   3785
   3786	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
   3787		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
   3788			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
   3789		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
   3790	}
   3791
   3792	if (prog && !prog->aux->xdp_has_frags &&
   3793	    mtu > MVNETA_MAX_RX_BUF_SIZE) {
   3794		netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
   3795			    mtu);
   3796
   3797		return -EINVAL;
   3798	}
   3799
   3800	dev->mtu = mtu;
   3801
   3802	if (!netif_running(dev)) {
   3803		if (pp->bm_priv)
   3804			mvneta_bm_update_mtu(pp, mtu);
   3805
   3806		netdev_update_features(dev);
   3807		return 0;
   3808	}
   3809
   3810	/* The interface is running, so we have to force a
   3811	 * reallocation of the queues
   3812	 */
   3813	mvneta_stop_dev(pp);
   3814	on_each_cpu(mvneta_percpu_disable, pp, true);
   3815
   3816	mvneta_cleanup_txqs(pp);
   3817	mvneta_cleanup_rxqs(pp);
   3818
   3819	if (pp->bm_priv)
   3820		mvneta_bm_update_mtu(pp, mtu);
   3821
   3822	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
   3823
   3824	ret = mvneta_setup_rxqs(pp);
   3825	if (ret) {
   3826		netdev_err(dev, "unable to setup rxqs after MTU change\n");
   3827		return ret;
   3828	}
   3829
   3830	ret = mvneta_setup_txqs(pp);
   3831	if (ret) {
   3832		netdev_err(dev, "unable to setup txqs after MTU change\n");
   3833		return ret;
   3834	}
   3835
   3836	on_each_cpu(mvneta_percpu_enable, pp, true);
   3837	mvneta_start_dev(pp);
   3838
   3839	netdev_update_features(dev);
   3840
   3841	return 0;
   3842}
   3843
   3844static netdev_features_t mvneta_fix_features(struct net_device *dev,
   3845					     netdev_features_t features)
   3846{
   3847	struct mvneta_port *pp = netdev_priv(dev);
   3848
   3849	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
   3850		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
   3851		netdev_info(dev,
   3852			    "Disable IP checksum for MTU greater than %dB\n",
   3853			    pp->tx_csum_limit);
   3854	}
   3855
   3856	return features;
   3857}
   3858
   3859/* Get mac address */
   3860static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
   3861{
   3862	u32 mac_addr_l, mac_addr_h;
   3863
   3864	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
   3865	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
   3866	addr[0] = (mac_addr_h >> 24) & 0xFF;
   3867	addr[1] = (mac_addr_h >> 16) & 0xFF;
   3868	addr[2] = (mac_addr_h >> 8) & 0xFF;
   3869	addr[3] = mac_addr_h & 0xFF;
   3870	addr[4] = (mac_addr_l >> 8) & 0xFF;
   3871	addr[5] = mac_addr_l & 0xFF;
   3872}
   3873
   3874/* Handle setting mac address */
   3875static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
   3876{
   3877	struct mvneta_port *pp = netdev_priv(dev);
   3878	struct sockaddr *sockaddr = addr;
   3879	int ret;
   3880
   3881	ret = eth_prepare_mac_addr_change(dev, addr);
   3882	if (ret < 0)
   3883		return ret;
   3884	/* Remove previous address table entry */
   3885	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
   3886
   3887	/* Set new addr in hw */
   3888	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
   3889
   3890	eth_commit_mac_addr_change(dev, addr);
   3891	return 0;
   3892}
   3893
   3894static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
   3895{
   3896	return container_of(pcs, struct mvneta_port, phylink_pcs);
   3897}
   3898
   3899static int mvneta_pcs_validate(struct phylink_pcs *pcs,
   3900			       unsigned long *supported,
   3901			       const struct phylink_link_state *state)
   3902{
   3903	/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
   3904	 * When in 802.3z mode, we must have AN enabled:
   3905	 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
   3906	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
   3907	 */
   3908	if (phy_interface_mode_is_8023z(state->interface) &&
   3909	    !phylink_test(state->advertising, Autoneg))
   3910		return -EINVAL;
   3911
   3912	return 0;
   3913}
   3914
   3915static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
   3916				 struct phylink_link_state *state)
   3917{
   3918	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
   3919	u32 gmac_stat;
   3920
   3921	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
   3922
   3923	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
   3924		state->speed =
   3925			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
   3926			SPEED_2500 : SPEED_1000;
   3927	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
   3928		state->speed = SPEED_100;
   3929	else
   3930		state->speed = SPEED_10;
   3931
   3932	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
   3933	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
   3934	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
   3935
   3936	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
   3937		state->pause |= MLO_PAUSE_RX;
   3938	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
   3939		state->pause |= MLO_PAUSE_TX;
   3940}
   3941
   3942static int mvneta_pcs_config(struct phylink_pcs *pcs,
   3943			     unsigned int mode, phy_interface_t interface,
   3944			     const unsigned long *advertising,
   3945			     bool permit_pause_to_mac)
   3946{
   3947	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
   3948	u32 mask, val, an, old_an, changed;
   3949
   3950	mask = MVNETA_GMAC_INBAND_AN_ENABLE |
   3951	       MVNETA_GMAC_INBAND_RESTART_AN |
   3952	       MVNETA_GMAC_AN_SPEED_EN |
   3953	       MVNETA_GMAC_AN_FLOW_CTRL_EN |
   3954	       MVNETA_GMAC_AN_DUPLEX_EN;
   3955
   3956	if (phylink_autoneg_inband(mode)) {
   3957		mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
   3958			MVNETA_GMAC_CONFIG_GMII_SPEED |
   3959			MVNETA_GMAC_CONFIG_FULL_DUPLEX;
   3960		val = MVNETA_GMAC_INBAND_AN_ENABLE;
   3961
   3962		if (interface == PHY_INTERFACE_MODE_SGMII) {
   3963			/* SGMII mode receives the speed and duplex from PHY */
   3964			val |= MVNETA_GMAC_AN_SPEED_EN |
   3965			       MVNETA_GMAC_AN_DUPLEX_EN;
   3966		} else {
   3967			/* 802.3z mode has fixed speed and duplex */
   3968			val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
   3969			       MVNETA_GMAC_CONFIG_FULL_DUPLEX;
   3970
    3971			/* The FLOW_CTRL_EN bit selects whether the GMAC pause
    3972			 * mode is controlled automatically by the hardware or
    3973			 * manually via the CONFIG_FLOW_CTRL bit.
    3974			 */
   3975			if (permit_pause_to_mac)
   3976				val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
   3977
   3978			/* Update the advertisement bits */
   3979			mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
   3980			if (phylink_test(advertising, Pause))
   3981				val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
   3982		}
   3983	} else {
   3984		/* Phy or fixed speed - disable in-band AN modes */
   3985		val = 0;
   3986	}
   3987
   3988	old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   3989	an = (an & ~mask) | val;
   3990	changed = old_an ^ an;
   3991	if (changed)
   3992		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
   3993
    3994	/* We are only interested in the advertisement bits changing; a non-zero return tells phylink to restart in-band AN */
   3995	return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
   3996}
   3997
   3998static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
   3999{
   4000	struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
   4001	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4002
   4003	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
   4004		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
   4005	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
   4006		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
   4007}
   4008
   4009static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
   4010	.pcs_validate = mvneta_pcs_validate,
   4011	.pcs_get_state = mvneta_pcs_get_state,
   4012	.pcs_config = mvneta_pcs_config,
   4013	.pcs_an_restart = mvneta_pcs_an_restart,
   4014};
   4015
   4016static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
   4017						 phy_interface_t interface)
   4018{
   4019	struct net_device *ndev = to_net_dev(config->dev);
   4020	struct mvneta_port *pp = netdev_priv(ndev);
   4021
   4022	return &pp->phylink_pcs;
   4023}
   4024
   4025static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
   4026			      phy_interface_t interface)
   4027{
   4028	struct net_device *ndev = to_net_dev(config->dev);
   4029	struct mvneta_port *pp = netdev_priv(ndev);
   4030	u32 val;
   4031
   4032	if (pp->phy_interface != interface ||
   4033	    phylink_autoneg_inband(mode)) {
   4034		/* Force the link down when changing the interface or if in
   4035		 * in-band mode. According to Armada 370 documentation, we
   4036		 * can only change the port mode and in-band enable when the
   4037		 * link is down.
   4038		 */
   4039		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4040		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
   4041		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
   4042		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
   4043	}
   4044
   4045	if (pp->phy_interface != interface)
   4046		WARN_ON(phy_power_off(pp->comphy));
   4047
   4048	/* Enable the 1ms clock */
   4049	if (phylink_autoneg_inband(mode)) {
   4050		unsigned long rate = clk_get_rate(pp->clk);
   4051
   4052		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
   4053			    MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
   4054	}
   4055
   4056	return 0;
   4057}
   4058
   4059static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
   4060			      const struct phylink_link_state *state)
   4061{
   4062	struct net_device *ndev = to_net_dev(config->dev);
   4063	struct mvneta_port *pp = netdev_priv(ndev);
   4064	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
   4065	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
   4066	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
   4067
   4068	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
   4069	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
   4070				   MVNETA_GMAC2_PORT_RESET);
   4071	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
   4072
   4073	/* Even though it might look weird, when we're configured in
   4074	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
   4075	 */
   4076	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
   4077
   4078	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
   4079	    state->interface == PHY_INTERFACE_MODE_SGMII ||
   4080	    phy_interface_mode_is_8023z(state->interface))
   4081		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
   4082
   4083	if (!phylink_autoneg_inband(mode)) {
   4084		/* Phy or fixed speed - nothing to do, leave the
   4085		 * configured speed, duplex and flow control as-is.
   4086		 */
   4087	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
   4088		/* SGMII mode receives the state from the PHY */
   4089		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
   4090	} else {
   4091		/* 802.3z negotiation - only 1000base-X */
   4092		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
   4093	}
   4094
   4095	/* When at 2.5G, the link partner can send frames with shortened
   4096	 * preambles.
   4097	 */
   4098	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
   4099		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
   4100
   4101	if (new_ctrl0 != gmac_ctrl0)
   4102		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
   4103	if (new_ctrl2 != gmac_ctrl2)
   4104		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
   4105	if (new_ctrl4 != gmac_ctrl4)
   4106		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
   4107
   4108	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
   4109		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
   4110			MVNETA_GMAC2_PORT_RESET) != 0)
   4111			continue;
   4112	}
   4113}
   4114
   4115static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
   4116			     phy_interface_t interface)
   4117{
   4118	struct net_device *ndev = to_net_dev(config->dev);
   4119	struct mvneta_port *pp = netdev_priv(ndev);
   4120	u32 val, clk;
   4121
   4122	/* Disable 1ms clock if not in in-band mode */
   4123	if (!phylink_autoneg_inband(mode)) {
   4124		clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
   4125		clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
   4126		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
   4127	}
   4128
   4129	if (pp->phy_interface != interface)
   4130		/* Enable the Serdes PHY */
   4131		WARN_ON(mvneta_config_interface(pp, interface));
   4132
    4133	/* Allow the link to come up if in in-band mode; otherwise the
    4134	 * link is forced via mac_link_down()/mac_link_up().
    4135	 */
   4136	if (phylink_autoneg_inband(mode)) {
   4137		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4138		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
   4139		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
   4140	}
   4141
   4142	return 0;
   4143}
   4144
   4145static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
   4146{
   4147	u32 lpi_ctl1;
   4148
   4149	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
   4150	if (enable)
   4151		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
   4152	else
   4153		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
   4154	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
   4155}
   4156
   4157static void mvneta_mac_link_down(struct phylink_config *config,
   4158				 unsigned int mode, phy_interface_t interface)
   4159{
   4160	struct net_device *ndev = to_net_dev(config->dev);
   4161	struct mvneta_port *pp = netdev_priv(ndev);
   4162	u32 val;
   4163
   4164	mvneta_port_down(pp);
   4165
   4166	if (!phylink_autoneg_inband(mode)) {
   4167		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4168		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
   4169		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
   4170		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
   4171	}
   4172
   4173	pp->eee_active = false;
   4174	mvneta_set_eee(pp, false);
   4175}
   4176
   4177static void mvneta_mac_link_up(struct phylink_config *config,
   4178			       struct phy_device *phy,
   4179			       unsigned int mode, phy_interface_t interface,
   4180			       int speed, int duplex,
   4181			       bool tx_pause, bool rx_pause)
   4182{
   4183	struct net_device *ndev = to_net_dev(config->dev);
   4184	struct mvneta_port *pp = netdev_priv(ndev);
   4185	u32 val;
   4186
   4187	if (!phylink_autoneg_inband(mode)) {
   4188		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4189		val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
   4190			 MVNETA_GMAC_CONFIG_MII_SPEED |
   4191			 MVNETA_GMAC_CONFIG_GMII_SPEED |
   4192			 MVNETA_GMAC_CONFIG_FLOW_CTRL |
   4193			 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
   4194		val |= MVNETA_GMAC_FORCE_LINK_PASS;
   4195
   4196		if (speed == SPEED_1000 || speed == SPEED_2500)
   4197			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
   4198		else if (speed == SPEED_100)
   4199			val |= MVNETA_GMAC_CONFIG_MII_SPEED;
   4200
   4201		if (duplex == DUPLEX_FULL)
   4202			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
   4203
   4204		if (tx_pause || rx_pause)
   4205			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
   4206
   4207		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
   4208	} else {
    4209		/* When in-band AN doesn't cover flow control, or flow control
    4210		 * is disabled, we need to configure it manually. This bit only
    4211		 * takes effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
    4212		 */
   4213		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
   4214		val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
   4215
   4216		if (tx_pause || rx_pause)
   4217			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
   4218
   4219		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
   4220	}
   4221
   4222	mvneta_port_up(pp);
   4223
   4224	if (phy && pp->eee_enabled) {
   4225		pp->eee_active = phy_init_eee(phy, false) >= 0;
   4226		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
   4227	}
   4228}
   4229
   4230static const struct phylink_mac_ops mvneta_phylink_ops = {
   4231	.validate = phylink_generic_validate,
   4232	.mac_select_pcs = mvneta_mac_select_pcs,
   4233	.mac_prepare = mvneta_mac_prepare,
   4234	.mac_config = mvneta_mac_config,
   4235	.mac_finish = mvneta_mac_finish,
   4236	.mac_link_down = mvneta_mac_link_down,
   4237	.mac_link_up = mvneta_mac_link_up,
   4238};
   4239
   4240static int mvneta_mdio_probe(struct mvneta_port *pp)
   4241{
   4242	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
   4243	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
   4244
   4245	if (err)
   4246		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
   4247
   4248	phylink_ethtool_get_wol(pp->phylink, &wol);
   4249	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
   4250
   4251	/* PHY WoL may be enabled but device wakeup disabled */
   4252	if (wol.supported)
   4253		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
   4254
   4255	return err;
   4256}
   4257
   4258static void mvneta_mdio_remove(struct mvneta_port *pp)
   4259{
   4260	phylink_disconnect_phy(pp->phylink);
   4261}
   4262
    4263/* Electing a CPU must be done in an atomic way: it should be done
    4264 * either before or after the removal/insertion of a CPU, and this
    4265 * function is not reentrant.
    4266 */
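        /* RX queues are spread across the present CPUs (rxq % nr_cpus): e.g.
         * with 8 RX queues and 4 CPUs, CPU 1 is granted access to queues 1
         * and 5, and the default RX queue is additionally mapped to the
         * elected CPU.
         */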
   4267static void mvneta_percpu_elect(struct mvneta_port *pp)
   4268{
   4269	int elected_cpu = 0, max_cpu, cpu, i = 0;
   4270
    4271	/* Use the CPU associated with the default rxq when it is online;
    4272	 * in all other cases, use CPU 0, which can't be offline.
    4273	 */
   4274	if (cpu_online(pp->rxq_def))
   4275		elected_cpu = pp->rxq_def;
   4276
   4277	max_cpu = num_present_cpus();
   4278
   4279	for_each_online_cpu(cpu) {
   4280		int rxq_map = 0, txq_map = 0;
   4281		int rxq;
   4282
   4283		for (rxq = 0; rxq < rxq_number; rxq++)
   4284			if ((rxq % max_cpu) == cpu)
   4285				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
   4286
   4287		if (cpu == elected_cpu)
   4288			/* Map the default receive queue to the elected CPU */
   4289			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
   4290
   4291		/* We update the TX queue map only if we have one
   4292		 * queue. In this case we associate the TX queue to
   4293		 * the CPU bound to the default RX queue
   4294		 */
   4295		if (txq_number == 1)
   4296			txq_map = (cpu == elected_cpu) ?
   4297				MVNETA_CPU_TXQ_ACCESS(1) : 0;
   4298		else
   4299			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
   4300				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
   4301
   4302		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
   4303
    4304		/* Update the interrupt mask on each CPU according to the
    4305		 * new mapping.
    4306		 */
   4307		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
   4308					 pp, true);
   4309		i++;
   4310
   4311	}
    4312}
   4313
   4314static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
   4315{
   4316	int other_cpu;
   4317	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
   4318						  node_online);
   4319	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
   4320
    4321	/* Armada 3700's per-cpu interrupt for mvneta is broken: all interrupts
    4322	 * are routed to CPU 0, so we don't need the cpu-hotplug support.
    4323	 */
   4324	if (pp->neta_armada3700)
   4325		return 0;
   4326
   4327	spin_lock(&pp->lock);
   4328	/*
   4329	 * Configuring the driver for a new CPU while the driver is
   4330	 * stopping is racy, so just avoid it.
   4331	 */
   4332	if (pp->is_stopped) {
   4333		spin_unlock(&pp->lock);
   4334		return 0;
   4335	}
   4336	netif_tx_stop_all_queues(pp->dev);
   4337
    4338	/*
    4339	 * We have to synchronise on the napi of each CPU except the one
    4340	 * just being woken up.
    4341	 */
   4342	for_each_online_cpu(other_cpu) {
   4343		if (other_cpu != cpu) {
   4344			struct mvneta_pcpu_port *other_port =
   4345				per_cpu_ptr(pp->ports, other_cpu);
   4346
   4347			napi_synchronize(&other_port->napi);
   4348		}
   4349	}
   4350
   4351	/* Mask all ethernet port interrupts */
   4352	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
   4353	napi_enable(&port->napi);
   4354
   4355	/*
   4356	 * Enable per-CPU interrupts on the CPU that is
   4357	 * brought up.
   4358	 */
   4359	mvneta_percpu_enable(pp);
   4360
    4361	/*
    4362	 * Elect the CPU handling the default RX queue and update
    4363	 * the per-CPU queue mapping accordingly.
    4364	 */
   4365	mvneta_percpu_elect(pp);
   4366
   4367	/* Unmask all ethernet port interrupts */
   4368	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
   4369	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
   4370		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
   4371		    MVNETA_CAUSE_LINK_CHANGE);
   4372	netif_tx_start_all_queues(pp->dev);
   4373	spin_unlock(&pp->lock);
   4374	return 0;
   4375}
   4376
   4377static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
   4378{
   4379	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
   4380						  node_online);
   4381	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
   4382
   4383	/*
   4384	 * Thanks to this lock we are sure that any pending cpu election is
   4385	 * done.
   4386	 */
   4387	spin_lock(&pp->lock);
   4388	/* Mask all ethernet port interrupts */
   4389	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
   4390	spin_unlock(&pp->lock);
   4391
   4392	napi_synchronize(&port->napi);
   4393	napi_disable(&port->napi);
   4394	/* Disable per-CPU interrupts on the CPU that is brought down. */
   4395	mvneta_percpu_disable(pp);
   4396	return 0;
   4397}
   4398
   4399static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
   4400{
   4401	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
   4402						  node_dead);
   4403
    4404	/* Check if a new CPU must be elected now that this one is down */
   4405	spin_lock(&pp->lock);
   4406	mvneta_percpu_elect(pp);
   4407	spin_unlock(&pp->lock);
   4408	/* Unmask all ethernet port interrupts */
   4409	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
   4410	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
   4411		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
   4412		    MVNETA_CAUSE_LINK_CHANGE);
   4413	netif_tx_start_all_queues(pp->dev);
   4414	return 0;
   4415}
   4416
   4417static int mvneta_open(struct net_device *dev)
   4418{
   4419	struct mvneta_port *pp = netdev_priv(dev);
   4420	int ret;
   4421
   4422	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
   4423
   4424	ret = mvneta_setup_rxqs(pp);
   4425	if (ret)
   4426		return ret;
   4427
   4428	ret = mvneta_setup_txqs(pp);
   4429	if (ret)
   4430		goto err_cleanup_rxqs;
   4431
   4432	/* Connect to port interrupt line */
   4433	if (pp->neta_armada3700)
   4434		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
   4435				  dev->name, pp);
   4436	else
   4437		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
   4438					 dev->name, pp->ports);
   4439	if (ret) {
   4440		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
   4441		goto err_cleanup_txqs;
   4442	}
   4443
   4444	if (!pp->neta_armada3700) {
    4445		/* Enable per-CPU interrupts on all the CPUs to handle our RX
    4446		 * queue interrupts.
    4447		 */
   4448		on_each_cpu(mvneta_percpu_enable, pp, true);
   4449
   4450		pp->is_stopped = false;
   4451		/* Register a CPU notifier to handle the case where our CPU
   4452		 * might be taken offline.
   4453		 */
   4454		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
   4455						       &pp->node_online);
   4456		if (ret)
   4457			goto err_free_irq;
   4458
   4459		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
   4460						       &pp->node_dead);
   4461		if (ret)
   4462			goto err_free_online_hp;
   4463	}
   4464
   4465	ret = mvneta_mdio_probe(pp);
   4466	if (ret < 0) {
   4467		netdev_err(dev, "cannot probe MDIO bus\n");
   4468		goto err_free_dead_hp;
   4469	}
   4470
   4471	mvneta_start_dev(pp);
   4472
   4473	return 0;
   4474
   4475err_free_dead_hp:
   4476	if (!pp->neta_armada3700)
   4477		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
   4478						    &pp->node_dead);
   4479err_free_online_hp:
   4480	if (!pp->neta_armada3700)
   4481		cpuhp_state_remove_instance_nocalls(online_hpstate,
   4482						    &pp->node_online);
   4483err_free_irq:
   4484	if (pp->neta_armada3700) {
   4485		free_irq(pp->dev->irq, pp);
   4486	} else {
   4487		on_each_cpu(mvneta_percpu_disable, pp, true);
   4488		free_percpu_irq(pp->dev->irq, pp->ports);
   4489	}
   4490err_cleanup_txqs:
   4491	mvneta_cleanup_txqs(pp);
   4492err_cleanup_rxqs:
   4493	mvneta_cleanup_rxqs(pp);
   4494	return ret;
   4495}
   4496
   4497/* Stop the port, free port interrupt line */
   4498static int mvneta_stop(struct net_device *dev)
   4499{
   4500	struct mvneta_port *pp = netdev_priv(dev);
   4501
   4502	if (!pp->neta_armada3700) {
    4503		/* Mark that we are stopping so that we don't set up the
    4504		 * driver for new CPUs in the notifiers. The code of the
    4505		 * CPU online notifier is protected by the same spinlock,
    4506		 * so once we hold the lock, the notifier work is done.
    4507		 */
   4508		spin_lock(&pp->lock);
   4509		pp->is_stopped = true;
   4510		spin_unlock(&pp->lock);
   4511
   4512		mvneta_stop_dev(pp);
   4513		mvneta_mdio_remove(pp);
   4514
   4515		cpuhp_state_remove_instance_nocalls(online_hpstate,
   4516						    &pp->node_online);
   4517		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
   4518						    &pp->node_dead);
   4519		on_each_cpu(mvneta_percpu_disable, pp, true);
   4520		free_percpu_irq(dev->irq, pp->ports);
   4521	} else {
   4522		mvneta_stop_dev(pp);
   4523		mvneta_mdio_remove(pp);
   4524		free_irq(dev->irq, pp);
   4525	}
   4526
   4527	mvneta_cleanup_rxqs(pp);
   4528	mvneta_cleanup_txqs(pp);
   4529
   4530	return 0;
   4531}
   4532
   4533static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
   4534{
   4535	struct mvneta_port *pp = netdev_priv(dev);
   4536
   4537	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
   4538}
   4539
   4540static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
   4541			    struct netlink_ext_ack *extack)
   4542{
   4543	bool need_update, running = netif_running(dev);
   4544	struct mvneta_port *pp = netdev_priv(dev);
   4545	struct bpf_prog *old_prog;
   4546
   4547	if (prog && !prog->aux->xdp_has_frags &&
   4548	    dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
   4549		NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
   4550		return -EOPNOTSUPP;
   4551	}
   4552
   4553	if (pp->bm_priv) {
   4554		NL_SET_ERR_MSG_MOD(extack,
   4555				   "Hardware Buffer Management not supported on XDP");
   4556		return -EOPNOTSUPP;
   4557	}
   4558
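        	/* A full stop/start cycle is only needed when a program is
        	 * attached or detached; swapping one program for another can be
        	 * done while the port keeps running.
        	 */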
   4559	need_update = !!pp->xdp_prog != !!prog;
   4560	if (running && need_update)
   4561		mvneta_stop(dev);
   4562
   4563	old_prog = xchg(&pp->xdp_prog, prog);
   4564	if (old_prog)
   4565		bpf_prog_put(old_prog);
   4566
   4567	if (running && need_update)
   4568		return mvneta_open(dev);
   4569
   4570	return 0;
   4571}
   4572
   4573static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
   4574{
   4575	switch (xdp->command) {
   4576	case XDP_SETUP_PROG:
   4577		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
   4578	default:
   4579		return -EINVAL;
   4580	}
   4581}
   4582
   4583/* Ethtool methods */
   4584
    4585/* Set link ksettings (phy address, speed) for ethtool */
   4586static int
   4587mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
   4588				  const struct ethtool_link_ksettings *cmd)
   4589{
   4590	struct mvneta_port *pp = netdev_priv(ndev);
   4591
   4592	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
   4593}
   4594
    4595/* Get link ksettings for ethtool */
   4596static int
   4597mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
   4598				  struct ethtool_link_ksettings *cmd)
   4599{
   4600	struct mvneta_port *pp = netdev_priv(ndev);
   4601
   4602	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
   4603}
   4604
   4605static int mvneta_ethtool_nway_reset(struct net_device *dev)
   4606{
   4607	struct mvneta_port *pp = netdev_priv(dev);
   4608
   4609	return phylink_ethtool_nway_reset(pp->phylink);
   4610}
   4611
    4612/* Set interrupt coalescing for ethtool */
   4613static int
   4614mvneta_ethtool_set_coalesce(struct net_device *dev,
   4615			    struct ethtool_coalesce *c,
   4616			    struct kernel_ethtool_coalesce *kernel_coal,
   4617			    struct netlink_ext_ack *extack)
   4618{
   4619	struct mvneta_port *pp = netdev_priv(dev);
   4620	int queue;
   4621
   4622	for (queue = 0; queue < rxq_number; queue++) {
   4623		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
   4624		rxq->time_coal = c->rx_coalesce_usecs;
   4625		rxq->pkts_coal = c->rx_max_coalesced_frames;
   4626		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
   4627		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
   4628	}
   4629
   4630	for (queue = 0; queue < txq_number; queue++) {
   4631		struct mvneta_tx_queue *txq = &pp->txqs[queue];
   4632		txq->done_pkts_coal = c->tx_max_coalesced_frames;
   4633		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
   4634	}
   4635
   4636	return 0;
   4637}
   4638
    4639/* Get interrupt coalescing for ethtool */
   4640static int
   4641mvneta_ethtool_get_coalesce(struct net_device *dev,
   4642			    struct ethtool_coalesce *c,
   4643			    struct kernel_ethtool_coalesce *kernel_coal,
   4644			    struct netlink_ext_ack *extack)
   4645{
   4646	struct mvneta_port *pp = netdev_priv(dev);
   4647
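        	/* All queues are programmed with the same values in
        	 * mvneta_ethtool_set_coalesce(), so reporting queue 0 is
        	 * sufficient.
        	 */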
   4648	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
   4649	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
   4650
   4651	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
   4652	return 0;
   4653}
   4654
   4655
   4656static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
   4657				    struct ethtool_drvinfo *drvinfo)
   4658{
   4659	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
   4660		sizeof(drvinfo->driver));
   4661	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
   4662		sizeof(drvinfo->version));
   4663	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
   4664		sizeof(drvinfo->bus_info));
   4665}
   4666
   4667
   4668static void
   4669mvneta_ethtool_get_ringparam(struct net_device *netdev,
   4670			     struct ethtool_ringparam *ring,
   4671			     struct kernel_ethtool_ringparam *kernel_ring,
   4672			     struct netlink_ext_ack *extack)
   4673{
   4674	struct mvneta_port *pp = netdev_priv(netdev);
   4675
   4676	ring->rx_max_pending = MVNETA_MAX_RXD;
   4677	ring->tx_max_pending = MVNETA_MAX_TXD;
   4678	ring->rx_pending = pp->rx_ring_size;
   4679	ring->tx_pending = pp->tx_ring_size;
   4680}
   4681
   4682static int
   4683mvneta_ethtool_set_ringparam(struct net_device *dev,
   4684			     struct ethtool_ringparam *ring,
   4685			     struct kernel_ethtool_ringparam *kernel_ring,
   4686			     struct netlink_ext_ack *extack)
   4687{
   4688	struct mvneta_port *pp = netdev_priv(dev);
   4689
   4690	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
   4691		return -EINVAL;
   4692	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
   4693		ring->rx_pending : MVNETA_MAX_RXD;
   4694
   4695	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
   4696				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
   4697	if (pp->tx_ring_size != ring->tx_pending)
   4698		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
   4699			    pp->tx_ring_size, ring->tx_pending);
   4700
   4701	if (netif_running(dev)) {
   4702		mvneta_stop(dev);
   4703		if (mvneta_open(dev)) {
   4704			netdev_err(dev,
   4705				   "error on opening device after ring param change\n");
   4706			return -ENOMEM;
   4707		}
   4708	}
   4709
   4710	return 0;
   4711}
   4712
   4713static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
   4714					  struct ethtool_pauseparam *pause)
   4715{
   4716	struct mvneta_port *pp = netdev_priv(dev);
   4717
   4718	phylink_ethtool_get_pauseparam(pp->phylink, pause);
   4719}
   4720
   4721static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
   4722					 struct ethtool_pauseparam *pause)
   4723{
   4724	struct mvneta_port *pp = netdev_priv(dev);
   4725
   4726	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
   4727}
   4728
   4729static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
   4730				       u8 *data)
   4731{
   4732	if (sset == ETH_SS_STATS) {
   4733		int i;
   4734
   4735		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
   4736			memcpy(data + i * ETH_GSTRING_LEN,
   4737			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
   4738
   4739		data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
   4740		page_pool_ethtool_stats_get_strings(data);
   4741	}
   4742}
   4743
   4744static void
   4745mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
   4746				 struct mvneta_ethtool_stats *es)
   4747{
   4748	unsigned int start;
   4749	int cpu;
   4750
   4751	for_each_possible_cpu(cpu) {
   4752		struct mvneta_pcpu_stats *stats;
   4753		u64 skb_alloc_error;
   4754		u64 refill_error;
   4755		u64 xdp_redirect;
   4756		u64 xdp_xmit_err;
   4757		u64 xdp_tx_err;
   4758		u64 xdp_pass;
   4759		u64 xdp_drop;
   4760		u64 xdp_xmit;
   4761		u64 xdp_tx;
   4762
   4763		stats = per_cpu_ptr(pp->stats, cpu);
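        		/* Snapshot the per-CPU counters consistently; retry if a
        		 * writer updated them while we were reading.
        		 */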
   4764		do {
   4765			start = u64_stats_fetch_begin_irq(&stats->syncp);
   4766			skb_alloc_error = stats->es.skb_alloc_error;
   4767			refill_error = stats->es.refill_error;
   4768			xdp_redirect = stats->es.ps.xdp_redirect;
   4769			xdp_pass = stats->es.ps.xdp_pass;
   4770			xdp_drop = stats->es.ps.xdp_drop;
   4771			xdp_xmit = stats->es.ps.xdp_xmit;
   4772			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
   4773			xdp_tx = stats->es.ps.xdp_tx;
   4774			xdp_tx_err = stats->es.ps.xdp_tx_err;
   4775		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
   4776
   4777		es->skb_alloc_error += skb_alloc_error;
   4778		es->refill_error += refill_error;
   4779		es->ps.xdp_redirect += xdp_redirect;
   4780		es->ps.xdp_pass += xdp_pass;
   4781		es->ps.xdp_drop += xdp_drop;
   4782		es->ps.xdp_xmit += xdp_xmit;
   4783		es->ps.xdp_xmit_err += xdp_xmit_err;
   4784		es->ps.xdp_tx += xdp_tx;
   4785		es->ps.xdp_tx_err += xdp_tx_err;
   4786	}
   4787}
   4788
   4789static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
   4790{
   4791	struct mvneta_ethtool_stats stats = {};
   4792	const struct mvneta_statistic *s;
   4793	void __iomem *base = pp->base;
   4794	u32 high, low;
   4795	u64 val;
   4796	int i;
   4797
   4798	mvneta_ethtool_update_pcpu_stats(pp, &stats);
   4799	for (i = 0, s = mvneta_statistics;
   4800	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
   4801	     s++, i++) {
   4802		switch (s->type) {
   4803		case T_REG_32:
   4804			val = readl_relaxed(base + s->offset);
   4805			pp->ethtool_stats[i] += val;
   4806			break;
   4807		case T_REG_64:
   4808			/* Docs say to read low 32-bit then high */
   4809			low = readl_relaxed(base + s->offset);
   4810			high = readl_relaxed(base + s->offset + 4);
   4811			val = (u64)high << 32 | low;
   4812			pp->ethtool_stats[i] += val;
   4813			break;
   4814		case T_SW:
   4815			switch (s->offset) {
   4816			case ETHTOOL_STAT_EEE_WAKEUP:
   4817				val = phylink_get_eee_err(pp->phylink);
   4818				pp->ethtool_stats[i] += val;
   4819				break;
   4820			case ETHTOOL_STAT_SKB_ALLOC_ERR:
   4821				pp->ethtool_stats[i] = stats.skb_alloc_error;
   4822				break;
   4823			case ETHTOOL_STAT_REFILL_ERR:
   4824				pp->ethtool_stats[i] = stats.refill_error;
   4825				break;
   4826			case ETHTOOL_XDP_REDIRECT:
   4827				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
   4828				break;
   4829			case ETHTOOL_XDP_PASS:
   4830				pp->ethtool_stats[i] = stats.ps.xdp_pass;
   4831				break;
   4832			case ETHTOOL_XDP_DROP:
   4833				pp->ethtool_stats[i] = stats.ps.xdp_drop;
   4834				break;
   4835			case ETHTOOL_XDP_TX:
   4836				pp->ethtool_stats[i] = stats.ps.xdp_tx;
   4837				break;
   4838			case ETHTOOL_XDP_TX_ERR:
   4839				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
   4840				break;
   4841			case ETHTOOL_XDP_XMIT:
   4842				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
   4843				break;
   4844			case ETHTOOL_XDP_XMIT_ERR:
   4845				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
   4846				break;
   4847			}
   4848			break;
   4849		}
   4850	}
   4851}
   4852
   4853static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
   4854{
   4855	struct page_pool_stats stats = {};
   4856	int i;
   4857
   4858	for (i = 0; i < rxq_number; i++)
   4859		page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
   4860
   4861	page_pool_ethtool_stats_get(data, &stats);
   4862}
   4863
   4864static void mvneta_ethtool_get_stats(struct net_device *dev,
   4865				     struct ethtool_stats *stats, u64 *data)
   4866{
   4867	struct mvneta_port *pp = netdev_priv(dev);
   4868	int i;
   4869
   4870	mvneta_ethtool_update_stats(pp);
   4871
   4872	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
   4873		*data++ = pp->ethtool_stats[i];
   4874
   4875	mvneta_ethtool_pp_stats(pp, data);
   4876}
   4877
   4878static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
   4879{
   4880	if (sset == ETH_SS_STATS)
   4881		return ARRAY_SIZE(mvneta_statistics) +
   4882		       page_pool_ethtool_stats_get_count();
   4883
   4884	return -EOPNOTSUPP;
   4885}
   4886
   4887static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
   4888{
   4889	return MVNETA_RSS_LU_TABLE_SIZE;
   4890}
   4891
   4892static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
   4893				    struct ethtool_rxnfc *info,
   4894				    u32 *rules __always_unused)
   4895{
   4896	switch (info->cmd) {
   4897	case ETHTOOL_GRXRINGS:
    4898		info->data = rxq_number;
   4899		return 0;
   4900	case ETHTOOL_GRXFH:
   4901		return -EOPNOTSUPP;
   4902	default:
   4903		return -EOPNOTSUPP;
   4904	}
   4905}
   4906
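        /* Apply a new RSS indirection table: quiesce TX and the NAPI contexts
         * on all CPUs, switch the default RX queue to indir[0], re-elect the
         * CPU that owns it, then bring everything back up.
         */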
    4907static int mvneta_config_rss(struct mvneta_port *pp)
   4908{
   4909	int cpu;
   4910	u32 val;
   4911
   4912	netif_tx_stop_all_queues(pp->dev);
   4913
   4914	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
   4915
   4916	if (!pp->neta_armada3700) {
   4917		/* We have to synchronise on the napi of each CPU */
   4918		for_each_online_cpu(cpu) {
   4919			struct mvneta_pcpu_port *pcpu_port =
   4920				per_cpu_ptr(pp->ports, cpu);
   4921
   4922			napi_synchronize(&pcpu_port->napi);
   4923			napi_disable(&pcpu_port->napi);
   4924		}
   4925	} else {
   4926		napi_synchronize(&pp->napi);
   4927		napi_disable(&pp->napi);
   4928	}
   4929
   4930	pp->rxq_def = pp->indir[0];
   4931
   4932	/* Update unicast mapping */
   4933	mvneta_set_rx_mode(pp->dev);
   4934
    4935	/* Update port config so all RX queue types use the new default queue */
   4936	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
   4937	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
   4938
   4939	/* Update the elected CPU matching the new rxq_def */
   4940	spin_lock(&pp->lock);
   4941	mvneta_percpu_elect(pp);
   4942	spin_unlock(&pp->lock);
   4943
   4944	if (!pp->neta_armada3700) {
    4945		/* Re-enable the per-CPU NAPI contexts */
   4946		for_each_online_cpu(cpu) {
   4947			struct mvneta_pcpu_port *pcpu_port =
   4948				per_cpu_ptr(pp->ports, cpu);
   4949
   4950			napi_enable(&pcpu_port->napi);
   4951		}
   4952	} else {
   4953		napi_enable(&pp->napi);
   4954	}
   4955
   4956	netif_tx_start_all_queues(pp->dev);
   4957
   4958	return 0;
   4959}
   4960
   4961static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
   4962				   const u8 *key, const u8 hfunc)
   4963{
   4964	struct mvneta_port *pp = netdev_priv(dev);
   4965
   4966	/* Current code for Armada 3700 doesn't support RSS features yet */
   4967	if (pp->neta_armada3700)
   4968		return -EOPNOTSUPP;
   4969
    4970	/* Only the indirection table can be changed: reject a hash key or
    4971	 * any hash function other than the default Toeplitz hash.
    4972	 */
   4973	if (key ||
   4974	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
   4975		return -EOPNOTSUPP;
   4976
   4977	if (!indir)
   4978		return 0;
   4979
   4980	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
   4981
   4982	return mvneta_config_rss(pp);
   4983}
   4984
   4985static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
   4986				   u8 *hfunc)
   4987{
   4988	struct mvneta_port *pp = netdev_priv(dev);
   4989
   4990	/* Current code for Armada 3700 doesn't support RSS features yet */
   4991	if (pp->neta_armada3700)
   4992		return -EOPNOTSUPP;
   4993
   4994	if (hfunc)
   4995		*hfunc = ETH_RSS_HASH_TOP;
   4996
   4997	if (!indir)
   4998		return 0;
   4999
   5000	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
   5001
   5002	return 0;
   5003}
   5004
   5005static void mvneta_ethtool_get_wol(struct net_device *dev,
   5006				   struct ethtool_wolinfo *wol)
   5007{
   5008	struct mvneta_port *pp = netdev_priv(dev);
   5009
   5010	phylink_ethtool_get_wol(pp->phylink, wol);
   5011}
   5012
   5013static int mvneta_ethtool_set_wol(struct net_device *dev,
   5014				  struct ethtool_wolinfo *wol)
   5015{
   5016	struct mvneta_port *pp = netdev_priv(dev);
   5017	int ret;
   5018
   5019	ret = phylink_ethtool_set_wol(pp->phylink, wol);
   5020	if (!ret)
   5021		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
   5022
   5023	return ret;
   5024}
   5025
   5026static int mvneta_ethtool_get_eee(struct net_device *dev,
   5027				  struct ethtool_eee *eee)
   5028{
   5029	struct mvneta_port *pp = netdev_priv(dev);
   5030	u32 lpi_ctl0;
   5031
   5032	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
   5033
   5034	eee->eee_enabled = pp->eee_enabled;
   5035	eee->eee_active = pp->eee_active;
   5036	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
    5037	eee->tx_lpi_timer = lpi_ctl0 >> 8;	/* timer field (bits 15:8), no scale factor applied */
   5038
   5039	return phylink_ethtool_get_eee(pp->phylink, eee);
   5040}
   5041
   5042static int mvneta_ethtool_set_eee(struct net_device *dev,
   5043				  struct ethtool_eee *eee)
   5044{
   5045	struct mvneta_port *pp = netdev_priv(dev);
   5046	u32 lpi_ctl0;
   5047
   5048	/* The Armada 37x documents do not give limits for this other than
   5049	 * it being an 8-bit register.
   5050	 */
   5051	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
   5052		return -EINVAL;
   5053
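        	/* The LPI timer occupies bits 15:8 of the LPI_CTRL_0 register */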
   5054	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
   5055	lpi_ctl0 &= ~(0xff << 8);
   5056	lpi_ctl0 |= eee->tx_lpi_timer << 8;
   5057	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
   5058
   5059	pp->eee_enabled = eee->eee_enabled;
   5060	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
   5061
   5062	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
   5063
   5064	return phylink_ethtool_set_eee(pp->phylink, eee);
   5065}
   5066
   5067static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
   5068{
   5069	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
   5070}
   5071
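        /* Steer frames with VLAN priority @pri to RX queue @rxq by rewriting
         * the 3-bit field for that priority in the VLAN_PRIO_TO_RXQ register.
         */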
   5072static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
   5073{
   5074	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
   5075
   5076	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
   5077	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
   5078
   5079	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
   5080}
   5081
   5082static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
   5083{
   5084	unsigned long core_clk_rate;
   5085	u32 refill_cycles;
   5086	u32 val;
   5087
   5088	core_clk_rate = clk_get_rate(pp->clk);
   5089	if (!core_clk_rate)
   5090		return -EINVAL;
   5091
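        	/* Convert the base token-bucket refill period from nanoseconds
        	 * into core clock cycles: NSEC_PER_SEC / core_clk_rate is the
        	 * duration of one clock cycle in ns.
        	 */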
   5092	refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
   5093			(NSEC_PER_SEC / core_clk_rate);
   5094
   5095	if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
   5096		return -EINVAL;
   5097
   5098	/* Enable bw limit algorithm version 3 */
   5099	val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
   5100	val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
   5101	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
   5102
   5103	/* Set the base refill rate */
   5104	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
   5105
   5106	return 0;
   5107}
   5108
   5109static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
   5110{
   5111	u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
   5112
   5113	val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
   5114	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
   5115}
   5116
   5117static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
   5118				    u64 min_rate, u64 max_rate)
   5119{
   5120	u32 refill_val, rem;
   5121	u32 val = 0;
   5122
    5123	/* Convert from Bps (bytes per second) to bps (bits per second) */
   5124	max_rate *= 8;
   5125
   5126	if (min_rate)
   5127		return -EINVAL;
   5128
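        	/* Express the requested rate in units of the hardware rate-limit
        	 * resolution; the rate must be an exact, non-zero multiple that
        	 * fits in the bucket refill field.
        	 */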
   5129	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
   5130				 &rem);
   5131
   5132	if (rem || !refill_val ||
   5133	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
   5134		return -EINVAL;
   5135
   5136	val = refill_val;
   5137	val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
   5138		MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
   5139
   5140	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
   5141
   5142	return 0;
   5143}
   5144
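        /* mqprio offload: map each traffic class to a contiguous range of RX
         * queues via VLAN-priority steering and, when a bandwidth shaper is
         * requested, program a per-TXQ rate limit.
         */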
   5145static int mvneta_setup_mqprio(struct net_device *dev,
   5146			       struct tc_mqprio_qopt_offload *mqprio)
   5147{
   5148	struct mvneta_port *pp = netdev_priv(dev);
   5149	int rxq, txq, tc, ret;
   5150	u8 num_tc;
   5151
   5152	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
   5153		return 0;
   5154
   5155	num_tc = mqprio->qopt.num_tc;
   5156
   5157	if (num_tc > rxq_number)
   5158		return -EINVAL;
   5159
   5160	mvneta_clear_rx_prio_map(pp);
   5161
   5162	if (!num_tc) {
   5163		mvneta_disable_per_queue_rate_limit(pp);
   5164		netdev_reset_tc(dev);
   5165		return 0;
   5166	}
   5167
   5168	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
   5169
   5170	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
   5171		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
   5172				    mqprio->qopt.offset[tc]);
   5173
   5174		for (rxq = mqprio->qopt.offset[tc];
   5175		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
   5176		     rxq++) {
   5177			if (rxq >= rxq_number)
   5178				return -EINVAL;
   5179
   5180			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
   5181		}
   5182	}
   5183
   5184	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
   5185		mvneta_disable_per_queue_rate_limit(pp);
   5186		return 0;
   5187	}
   5188
   5189	if (mqprio->qopt.num_tc > txq_number)
   5190		return -EINVAL;
   5191
   5192	ret = mvneta_enable_per_queue_rate_limit(pp);
   5193	if (ret)
   5194		return ret;
   5195
   5196	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
   5197		for (txq = mqprio->qopt.offset[tc];
   5198		     txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
   5199		     txq++) {
   5200			if (txq >= txq_number)
   5201				return -EINVAL;
   5202
   5203			ret = mvneta_setup_queue_rates(pp, txq,
   5204						       mqprio->min_rate[tc],
   5205						       mqprio->max_rate[tc]);
   5206			if (ret)
   5207				return ret;
   5208		}
   5209	}
   5210
   5211	return 0;
   5212}
   5213
   5214static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
   5215			   void *type_data)
   5216{
   5217	switch (type) {
   5218	case TC_SETUP_QDISC_MQPRIO:
   5219		return mvneta_setup_mqprio(dev, type_data);
   5220	default:
   5221		return -EOPNOTSUPP;
   5222	}
   5223}
   5224
   5225static const struct net_device_ops mvneta_netdev_ops = {
   5226	.ndo_open            = mvneta_open,
   5227	.ndo_stop            = mvneta_stop,
   5228	.ndo_start_xmit      = mvneta_tx,
   5229	.ndo_set_rx_mode     = mvneta_set_rx_mode,
   5230	.ndo_set_mac_address = mvneta_set_mac_addr,
   5231	.ndo_change_mtu      = mvneta_change_mtu,
   5232	.ndo_fix_features    = mvneta_fix_features,
   5233	.ndo_get_stats64     = mvneta_get_stats64,
    5234	.ndo_eth_ioctl       = mvneta_ioctl,
   5235	.ndo_bpf	     = mvneta_xdp,
   5236	.ndo_xdp_xmit        = mvneta_xdp_xmit,
   5237	.ndo_setup_tc	     = mvneta_setup_tc,
   5238};
   5239
   5240static const struct ethtool_ops mvneta_eth_tool_ops = {
   5241	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
   5242				     ETHTOOL_COALESCE_MAX_FRAMES,
   5243	.nway_reset	= mvneta_ethtool_nway_reset,
   5244	.get_link       = ethtool_op_get_link,
   5245	.set_coalesce   = mvneta_ethtool_set_coalesce,
   5246	.get_coalesce   = mvneta_ethtool_get_coalesce,
   5247	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
   5248	.get_ringparam  = mvneta_ethtool_get_ringparam,
   5249	.set_ringparam	= mvneta_ethtool_set_ringparam,
   5250	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
   5251	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
   5252	.get_strings	= mvneta_ethtool_get_strings,
   5253	.get_ethtool_stats = mvneta_ethtool_get_stats,
   5254	.get_sset_count	= mvneta_ethtool_get_sset_count,
   5255	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
   5256	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
   5257	.get_rxfh	= mvneta_ethtool_get_rxfh,
   5258	.set_rxfh	= mvneta_ethtool_set_rxfh,
   5259	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
   5260	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
   5261	.get_wol        = mvneta_ethtool_get_wol,
   5262	.set_wol        = mvneta_ethtool_set_wol,
   5263	.get_eee	= mvneta_ethtool_get_eee,
   5264	.set_eee	= mvneta_ethtool_set_eee,
   5265};
   5266
   5267/* Initialize hw */
   5268static int mvneta_init(struct device *dev, struct mvneta_port *pp)
   5269{
   5270	int queue;
   5271
   5272	/* Disable port */
   5273	mvneta_port_disable(pp);
   5274
   5275	/* Set port default values */
   5276	mvneta_defaults_set(pp);
   5277
   5278	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
   5279	if (!pp->txqs)
   5280		return -ENOMEM;
   5281
   5282	/* Initialize TX descriptor rings */
   5283	for (queue = 0; queue < txq_number; queue++) {
   5284		struct mvneta_tx_queue *txq = &pp->txqs[queue];
   5285		txq->id = queue;
   5286		txq->size = pp->tx_ring_size;
   5287		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
   5288	}
   5289
   5290	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
   5291	if (!pp->rxqs)
   5292		return -ENOMEM;
   5293
   5294	/* Create Rx descriptor rings */
   5295	for (queue = 0; queue < rxq_number; queue++) {
   5296		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
   5297		rxq->id = queue;
   5298		rxq->size = pp->rx_ring_size;
   5299		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
   5300		rxq->time_coal = MVNETA_RX_COAL_USEC;
   5301		rxq->buf_virt_addr
   5302			= devm_kmalloc_array(pp->dev->dev.parent,
   5303					     rxq->size,
   5304					     sizeof(*rxq->buf_virt_addr),
   5305					     GFP_KERNEL);
   5306		if (!rxq->buf_virt_addr)
   5307			return -ENOMEM;
   5308	}
   5309
   5310	return 0;
   5311}
   5312
    5313/* Platform glue: initialize decoding windows */
   5314static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
   5315				     const struct mbus_dram_target_info *dram)
   5316{
   5317	u32 win_enable;
   5318	u32 win_protect;
   5319	int i;
   5320
   5321	for (i = 0; i < 6; i++) {
   5322		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
   5323		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
   5324
   5325		if (i < 4)
   5326			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
   5327	}
   5328
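        	/* Start with all six windows disabled (win_enable bits are set
        	 * for unused windows) and no access protection; each window
        	 * configured below clears its bit and gets full (0x3) access
        	 * rights.
        	 */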
   5329	win_enable = 0x3f;
   5330	win_protect = 0;
   5331
   5332	if (dram) {
   5333		for (i = 0; i < dram->num_cs; i++) {
   5334			const struct mbus_dram_window *cs = dram->cs + i;
   5335
   5336			mvreg_write(pp, MVNETA_WIN_BASE(i),
   5337				    (cs->base & 0xffff0000) |
   5338				    (cs->mbus_attr << 8) |
   5339				    dram->mbus_dram_target_id);
   5340
   5341			mvreg_write(pp, MVNETA_WIN_SIZE(i),
   5342				    (cs->size - 1) & 0xffff0000);
   5343
   5344			win_enable &= ~(1 << i);
   5345			win_protect |= 3 << (2 * i);
   5346		}
   5347	} else {
   5348		if (pp->neta_ac5)
   5349			mvreg_write(pp, MVNETA_WIN_BASE(0),
   5350				    (MVNETA_AC5_CNM_DDR_ATTR << 8) |
   5351				    MVNETA_AC5_CNM_DDR_TARGET);
    5352		/* For Armada 3700, open a default 4GB MBus window, leaving
    5353		 * arbitration of target/attribute to a different layer of
    5354		 * configuration.
    5355		 */
   5356		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
   5357		win_enable &= ~BIT(0);
   5358		win_protect = 3;
   5359	}
   5360
   5361	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
   5362	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
   5363}
   5364
   5365/* Power up the port */
   5366static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
   5367{
   5368	/* MAC Cause register should be cleared */
   5369	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
   5370
   5371	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
   5372	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
   5373	    !phy_interface_mode_is_8023z(phy_mode) &&
   5374	    !phy_interface_mode_is_rgmii(phy_mode))
   5375		return -EINVAL;
   5376
   5377	return 0;
   5378}
   5379
   5380/* Device initialization routine */
   5381static int mvneta_probe(struct platform_device *pdev)
   5382{
   5383	struct device_node *dn = pdev->dev.of_node;
   5384	struct device_node *bm_node;
   5385	struct mvneta_port *pp;
   5386	struct net_device *dev;
   5387	struct phylink *phylink;
   5388	struct phy *comphy;
   5389	char hw_mac_addr[ETH_ALEN];
   5390	phy_interface_t phy_mode;
   5391	const char *mac_from;
   5392	int tx_csum_limit;
   5393	int err;
   5394	int cpu;
   5395
   5396	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
   5397				      txq_number, rxq_number);
   5398	if (!dev)
   5399		return -ENOMEM;
   5400
   5401	dev->tx_queue_len = MVNETA_MAX_TXD;
   5402	dev->watchdog_timeo = 5 * HZ;
   5403	dev->netdev_ops = &mvneta_netdev_ops;
   5404	dev->ethtool_ops = &mvneta_eth_tool_ops;
   5405
   5406	pp = netdev_priv(dev);
   5407	spin_lock_init(&pp->lock);
   5408	pp->dn = dn;
   5409
   5410	pp->rxq_def = rxq_def;
   5411	pp->indir[0] = rxq_def;
   5412
   5413	err = of_get_phy_mode(dn, &phy_mode);
   5414	if (err) {
   5415		dev_err(&pdev->dev, "incorrect phy-mode\n");
   5416		return err;
   5417	}
   5418
   5419	pp->phy_interface = phy_mode;
   5420
   5421	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
   5422	if (comphy == ERR_PTR(-EPROBE_DEFER))
   5423		return -EPROBE_DEFER;
   5424
   5425	if (IS_ERR(comphy))
   5426		comphy = NULL;
   5427
   5428	pp->comphy = comphy;
   5429
   5430	pp->base = devm_platform_ioremap_resource(pdev, 0);
   5431	if (IS_ERR(pp->base))
   5432		return PTR_ERR(pp->base);
   5433
   5434	/* Get special SoC configurations */
   5435	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
   5436		pp->neta_armada3700 = true;
   5437	if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
   5438		pp->neta_armada3700 = true;
   5439		pp->neta_ac5 = true;
   5440	}
   5441
   5442	dev->irq = irq_of_parse_and_map(dn, 0);
   5443	if (dev->irq == 0)
   5444		return -EINVAL;
   5445
   5446	pp->clk = devm_clk_get(&pdev->dev, "core");
   5447	if (IS_ERR(pp->clk))
   5448		pp->clk = devm_clk_get(&pdev->dev, NULL);
   5449	if (IS_ERR(pp->clk)) {
   5450		err = PTR_ERR(pp->clk);
   5451		goto err_free_irq;
   5452	}
   5453
   5454	clk_prepare_enable(pp->clk);
   5455
   5456	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
   5457	if (!IS_ERR(pp->clk_bus))
   5458		clk_prepare_enable(pp->clk_bus);
   5459
   5460	pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
   5461
   5462	pp->phylink_config.dev = &dev->dev;
   5463	pp->phylink_config.type = PHYLINK_NETDEV;
   5464	pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
   5465		MAC_100 | MAC_1000FD | MAC_2500FD;
   5466
   5467	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
   5468	__set_bit(PHY_INTERFACE_MODE_QSGMII,
   5469		  pp->phylink_config.supported_interfaces);
   5470	if (comphy) {
   5471		/* If a COMPHY is present, we can support any of the serdes
   5472		 * modes and switch between them.
   5473		 */
   5474		__set_bit(PHY_INTERFACE_MODE_SGMII,
   5475			  pp->phylink_config.supported_interfaces);
   5476		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
   5477			  pp->phylink_config.supported_interfaces);
   5478		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
   5479			  pp->phylink_config.supported_interfaces);
   5480	} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
   5481		/* No COMPHY, with only 2500BASE-X mode supported */
   5482		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
   5483			  pp->phylink_config.supported_interfaces);
   5484	} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
   5485		   phy_mode == PHY_INTERFACE_MODE_SGMII) {
   5486		/* No COMPHY, we can switch between 1000BASE-X and SGMII */
   5487		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
   5488			  pp->phylink_config.supported_interfaces);
   5489		__set_bit(PHY_INTERFACE_MODE_SGMII,
   5490			  pp->phylink_config.supported_interfaces);
   5491	}
   5492
   5493	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
   5494				 phy_mode, &mvneta_phylink_ops);
   5495	if (IS_ERR(phylink)) {
   5496		err = PTR_ERR(phylink);
   5497		goto err_clk;
   5498	}
   5499
   5500	pp->phylink = phylink;
   5501
   5502	/* Alloc per-cpu port structure */
   5503	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
   5504	if (!pp->ports) {
   5505		err = -ENOMEM;
   5506		goto err_free_phylink;
   5507	}
   5508
   5509	/* Alloc per-cpu stats */
   5510	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
   5511	if (!pp->stats) {
   5512		err = -ENOMEM;
   5513		goto err_free_ports;
   5514	}
   5515
   5516	err = of_get_ethdev_address(dn, dev);
   5517	if (!err) {
   5518		mac_from = "device tree";
   5519	} else {
   5520		mvneta_get_mac_addr(pp, hw_mac_addr);
   5521		if (is_valid_ether_addr(hw_mac_addr)) {
   5522			mac_from = "hardware";
   5523			eth_hw_addr_set(dev, hw_mac_addr);
   5524		} else {
   5525			mac_from = "random";
   5526			eth_hw_addr_random(dev);
   5527		}
   5528	}
   5529
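        	/* TX checksum offload only works up to a SoC-dependent frame
        	 * size. Honour a "tx-csum-limit" override from DT (falling back
        	 * to the default if it is out of range), otherwise use the
        	 * conservative default on Armada 370 and the maximum elsewhere.
        	 */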
   5530	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
   5531		if (tx_csum_limit < 0 ||
   5532		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
   5533			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
   5534			dev_info(&pdev->dev,
   5535				 "Wrong TX csum limit in DT, set to %dB\n",
   5536				 MVNETA_TX_CSUM_DEF_SIZE);
   5537		}
   5538	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
   5539		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
   5540	} else {
   5541		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
   5542	}
   5543
   5544	pp->tx_csum_limit = tx_csum_limit;
   5545
   5546	pp->dram_target_info = mv_mbus_dram_info();
    5547	/* Armada 3700 requires a default MBus window configuration to be
    5548	 * set, but without using a filled mbus_dram_target_info
    5549	 * structure.
    5550	 */
   5551	if (pp->dram_target_info || pp->neta_armada3700)
   5552		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
   5553
   5554	pp->tx_ring_size = MVNETA_MAX_TXD;
   5555	pp->rx_ring_size = MVNETA_MAX_RXD;
   5556
   5557	pp->dev = dev;
   5558	SET_NETDEV_DEV(dev, &pdev->dev);
   5559
   5560	pp->id = global_port_id++;
   5561
   5562	/* Obtain access to BM resources if enabled and already initialized */
   5563	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
   5564	if (bm_node) {
   5565		pp->bm_priv = mvneta_bm_get(bm_node);
   5566		if (pp->bm_priv) {
   5567			err = mvneta_bm_port_init(pdev, pp);
   5568			if (err < 0) {
   5569				dev_info(&pdev->dev,
   5570					 "use SW buffer management\n");
   5571				mvneta_bm_put(pp->bm_priv);
   5572				pp->bm_priv = NULL;
   5573			}
   5574		}
    5575		/* Set the RX packet offset correction for platforms whose
    5576		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
    5577		 * platforms and 0B for 32-bit ones.
    5578		 */
   5579		pp->rx_offset_correction = max(0,
   5580					       NET_SKB_PAD -
   5581					       MVNETA_RX_PKT_OFFSET_CORRECTION);
   5582	}
   5583	of_node_put(bm_node);
   5584
   5585	/* sw buffer management */
   5586	if (!pp->bm_priv)
   5587		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
   5588
   5589	err = mvneta_init(&pdev->dev, pp);
   5590	if (err < 0)
   5591		goto err_netdev;
   5592
   5593	err = mvneta_port_power_up(pp, pp->phy_interface);
   5594	if (err < 0) {
   5595		dev_err(&pdev->dev, "can't power up port\n");
   5596		goto err_netdev;
   5597	}
   5598
    5599	/* The Armada 3700 network controller does not support per-CPU
    5600	 * operation, so only a single NAPI context is initialized.
    5601	 */
   5602	if (pp->neta_armada3700) {
   5603		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
   5604	} else {
   5605		for_each_present_cpu(cpu) {
   5606			struct mvneta_pcpu_port *port =
   5607				per_cpu_ptr(pp->ports, cpu);
   5608
   5609			netif_napi_add(dev, &port->napi, mvneta_poll,
   5610				       NAPI_POLL_WEIGHT);
   5611			port->pp = pp;
   5612		}
   5613	}
   5614
   5615	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   5616			NETIF_F_TSO | NETIF_F_RXCSUM;
   5617	dev->hw_features |= dev->features;
   5618	dev->vlan_features |= dev->features;
   5619	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
   5620	netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
   5621
   5622	/* MTU range: 68 - 9676 */
   5623	dev->min_mtu = ETH_MIN_MTU;
   5624	/* 9676 == 9700 - 20 and rounding to 8 */
   5625	dev->max_mtu = 9676;
   5626
   5627	err = register_netdev(dev);
   5628	if (err < 0) {
   5629		dev_err(&pdev->dev, "failed to register\n");
   5630		goto err_netdev;
   5631	}
   5632
   5633	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
   5634		    dev->dev_addr);
   5635
   5636	platform_set_drvdata(pdev, pp->dev);
   5637
   5638	return 0;
   5639
   5640err_netdev:
   5641	if (pp->bm_priv) {
   5642		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
   5643		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
   5644				       1 << pp->id);
   5645		mvneta_bm_put(pp->bm_priv);
   5646	}
   5647	free_percpu(pp->stats);
   5648err_free_ports:
   5649	free_percpu(pp->ports);
   5650err_free_phylink:
   5651	if (pp->phylink)
   5652		phylink_destroy(pp->phylink);
   5653err_clk:
   5654	clk_disable_unprepare(pp->clk_bus);
   5655	clk_disable_unprepare(pp->clk);
   5656err_free_irq:
   5657	irq_dispose_mapping(dev->irq);
   5658	return err;
   5659}
   5660
   5661/* Device removal routine */
   5662static int mvneta_remove(struct platform_device *pdev)
   5663{
   5664	struct net_device  *dev = platform_get_drvdata(pdev);
   5665	struct mvneta_port *pp = netdev_priv(dev);
   5666
   5667	unregister_netdev(dev);
   5668	clk_disable_unprepare(pp->clk_bus);
   5669	clk_disable_unprepare(pp->clk);
   5670	free_percpu(pp->ports);
   5671	free_percpu(pp->stats);
   5672	irq_dispose_mapping(dev->irq);
   5673	phylink_destroy(pp->phylink);
   5674
   5675	if (pp->bm_priv) {
   5676		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
   5677		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
   5678				       1 << pp->id);
   5679		mvneta_bm_put(pp->bm_priv);
   5680	}
   5681
   5682	return 0;
   5683}
   5684
   5685#ifdef CONFIG_PM_SLEEP
   5686static int mvneta_suspend(struct device *device)
   5687{
   5688	int queue;
   5689	struct net_device *dev = dev_get_drvdata(device);
   5690	struct mvneta_port *pp = netdev_priv(dev);
   5691
   5692	if (!netif_running(dev))
   5693		goto clean_exit;
   5694
   5695	if (!pp->neta_armada3700) {
   5696		spin_lock(&pp->lock);
   5697		pp->is_stopped = true;
   5698		spin_unlock(&pp->lock);
   5699
   5700		cpuhp_state_remove_instance_nocalls(online_hpstate,
   5701						    &pp->node_online);
   5702		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
   5703						    &pp->node_dead);
   5704	}
   5705
   5706	rtnl_lock();
   5707	mvneta_stop_dev(pp);
   5708	rtnl_unlock();
   5709
   5710	for (queue = 0; queue < rxq_number; queue++) {
   5711		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
   5712
   5713		mvneta_rxq_drop_pkts(pp, rxq);
   5714	}
   5715
   5716	for (queue = 0; queue < txq_number; queue++) {
   5717		struct mvneta_tx_queue *txq = &pp->txqs[queue];
   5718
   5719		mvneta_txq_hw_deinit(pp, txq);
   5720	}
   5721
   5722clean_exit:
   5723	netif_device_detach(dev);
   5724	clk_disable_unprepare(pp->clk_bus);
   5725	clk_disable_unprepare(pp->clk);
   5726
   5727	return 0;
   5728}
   5729
   5730static int mvneta_resume(struct device *device)
   5731{
   5732	struct platform_device *pdev = to_platform_device(device);
   5733	struct net_device *dev = dev_get_drvdata(device);
   5734	struct mvneta_port *pp = netdev_priv(dev);
   5735	int err, queue;
   5736
   5737	clk_prepare_enable(pp->clk);
   5738	if (!IS_ERR(pp->clk_bus))
   5739		clk_prepare_enable(pp->clk_bus);
   5740	if (pp->dram_target_info || pp->neta_armada3700)
   5741		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
   5742	if (pp->bm_priv) {
   5743		err = mvneta_bm_port_init(pdev, pp);
   5744		if (err < 0) {
   5745			dev_info(&pdev->dev, "use SW buffer management\n");
   5746			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
   5747			pp->bm_priv = NULL;
   5748		}
   5749	}
   5750	mvneta_defaults_set(pp);
   5751	err = mvneta_port_power_up(pp, pp->phy_interface);
   5752	if (err < 0) {
   5753		dev_err(device, "can't power up port\n");
   5754		return err;
   5755	}
   5756
   5757	netif_device_attach(dev);
   5758
   5759	if (!netif_running(dev))
   5760		return 0;
   5761
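        	/* The descriptor rings themselves were kept across suspend, but
        	 * the controller state was lost: rewind the software indices and
        	 * reprogram each queue in hardware.
        	 */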
   5762	for (queue = 0; queue < rxq_number; queue++) {
   5763		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
   5764
   5765		rxq->next_desc_to_proc = 0;
   5766		mvneta_rxq_hw_init(pp, rxq);
   5767	}
   5768
   5769	for (queue = 0; queue < txq_number; queue++) {
   5770		struct mvneta_tx_queue *txq = &pp->txqs[queue];
   5771
   5772		txq->next_desc_to_proc = 0;
   5773		mvneta_txq_hw_init(pp, txq);
   5774	}
   5775
   5776	if (!pp->neta_armada3700) {
   5777		spin_lock(&pp->lock);
   5778		pp->is_stopped = false;
   5779		spin_unlock(&pp->lock);
   5780		cpuhp_state_add_instance_nocalls(online_hpstate,
   5781						 &pp->node_online);
   5782		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
   5783						 &pp->node_dead);
   5784	}
   5785
   5786	rtnl_lock();
   5787	mvneta_start_dev(pp);
   5788	rtnl_unlock();
   5789	mvneta_set_rx_mode(dev);
   5790
   5791	return 0;
   5792}
   5793#endif
   5794
   5795static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
   5796
   5797static const struct of_device_id mvneta_match[] = {
   5798	{ .compatible = "marvell,armada-370-neta" },
   5799	{ .compatible = "marvell,armada-xp-neta" },
   5800	{ .compatible = "marvell,armada-3700-neta" },
   5801	{ .compatible = "marvell,armada-ac5-neta" },
   5802	{ }
   5803};
   5804MODULE_DEVICE_TABLE(of, mvneta_match);
   5805
   5806static struct platform_driver mvneta_driver = {
   5807	.probe = mvneta_probe,
   5808	.remove = mvneta_remove,
   5809	.driver = {
   5810		.name = MVNETA_DRIVER_NAME,
   5811		.of_match_table = mvneta_match,
   5812		.pm = &mvneta_pm_ops,
   5813	},
   5814};
   5815
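        /* Register the CPU hotplug callbacks (a dynamic "online" state and the
         * static CPUHP_NET_MVNETA_DEAD state) before the platform driver, so
         * the driver's ports can later add their per-CPU instances.
         */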
   5816static int __init mvneta_driver_init(void)
   5817{
   5818	int ret;
   5819
   5820	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
   5821				      mvneta_cpu_online,
   5822				      mvneta_cpu_down_prepare);
   5823	if (ret < 0)
   5824		goto out;
   5825	online_hpstate = ret;
   5826	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
   5827				      NULL, mvneta_cpu_dead);
   5828	if (ret)
   5829		goto err_dead;
   5830
   5831	ret = platform_driver_register(&mvneta_driver);
   5832	if (ret)
   5833		goto err;
   5834	return 0;
   5835
   5836err:
   5837	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
   5838err_dead:
   5839	cpuhp_remove_multi_state(online_hpstate);
   5840out:
   5841	return ret;
   5842}
   5843module_init(mvneta_driver_init);
   5844
   5845static void __exit mvneta_driver_exit(void)
   5846{
   5847	platform_driver_unregister(&mvneta_driver);
   5848	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
   5849	cpuhp_remove_multi_state(online_hpstate);
   5850}
   5851module_exit(mvneta_driver_exit);
   5852
   5853MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
   5854MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
   5855MODULE_LICENSE("GPL");
   5856
   5857module_param(rxq_number, int, 0444);
   5858module_param(txq_number, int, 0444);
   5859
   5860module_param(rxq_def, int, 0444);
   5861module_param(rx_copybreak, int, 0644);