cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

stmmac_main.c (203986B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*******************************************************************************
      3  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
      4  ST Ethernet IPs are built around a Synopsys IP Core.
      5
      6	Copyright(C) 2007-2011 STMicroelectronics Ltd
      7
      8
      9  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
     10
     11  Documentation available at:
     12	http://www.stlinux.com
     13  Support available at:
     14	https://bugzilla.stlinux.com/
     15*******************************************************************************/
     16
     17#include <linux/clk.h>
     18#include <linux/kernel.h>
     19#include <linux/interrupt.h>
     20#include <linux/ip.h>
     21#include <linux/tcp.h>
     22#include <linux/skbuff.h>
     23#include <linux/ethtool.h>
     24#include <linux/if_ether.h>
     25#include <linux/crc32.h>
     26#include <linux/mii.h>
     27#include <linux/if.h>
     28#include <linux/if_vlan.h>
     29#include <linux/dma-mapping.h>
     30#include <linux/slab.h>
     31#include <linux/pm_runtime.h>
     32#include <linux/prefetch.h>
     33#include <linux/pinctrl/consumer.h>
     34#ifdef CONFIG_DEBUG_FS
     35#include <linux/debugfs.h>
     36#include <linux/seq_file.h>
     37#endif /* CONFIG_DEBUG_FS */
     38#include <linux/net_tstamp.h>
     39#include <linux/phylink.h>
     40#include <linux/udp.h>
     41#include <linux/bpf_trace.h>
     42#include <net/pkt_cls.h>
     43#include <net/xdp_sock_drv.h>
     44#include "stmmac_ptp.h"
     45#include "stmmac.h"
     46#include "stmmac_xdp.h"
     47#include <linux/reset.h>
     48#include <linux/of_mdio.h>
     49#include "dwmac1000.h"
     50#include "dwxgmac2.h"
     51#include "hwif.h"
     52
     53/* As long as the interface is active, we keep the timestamping counter enabled
     54 * with fine resolution and binary rollover. This avoids non-monotonic behavior
     55 * (clock jumps) when changing timestamping settings at runtime.
     56 */
     57#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
     58				 PTP_TCR_TSCTRLSSR)
     59
     60#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
     61#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
     62
     63/* Module parameters */
     64#define TX_TIMEO	5000
     65static int watchdog = TX_TIMEO;
     66module_param(watchdog, int, 0644);
     67MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
     68
     69static int debug = -1;
     70module_param(debug, int, 0644);
     71MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
     72
     73static int phyaddr = -1;
     74module_param(phyaddr, int, 0444);
     75MODULE_PARM_DESC(phyaddr, "Physical device address");
     76
     77#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
     78#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
     79
     80/* Limit to make sure XDP TX and slow path can coexist */
     81#define STMMAC_XSK_TX_BUDGET_MAX	256
     82#define STMMAC_TX_XSK_AVAIL		16
     83#define STMMAC_RX_FILL_BATCH		16
     84
     85#define STMMAC_XDP_PASS		0
     86#define STMMAC_XDP_CONSUMED	BIT(0)
     87#define STMMAC_XDP_TX		BIT(1)
     88#define STMMAC_XDP_REDIRECT	BIT(2)
     89
     90static int flow_ctrl = FLOW_AUTO;
     91module_param(flow_ctrl, int, 0644);
     92MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
     93
     94static int pause = PAUSE_TIME;
     95module_param(pause, int, 0644);
     96MODULE_PARM_DESC(pause, "Flow Control Pause Time");
     97
     98#define TC_DEFAULT 64
     99static int tc = TC_DEFAULT;
    100module_param(tc, int, 0644);
    101MODULE_PARM_DESC(tc, "DMA threshold control value");
    102
    103#define	DEFAULT_BUFSIZE	1536
    104static int buf_sz = DEFAULT_BUFSIZE;
    105module_param(buf_sz, int, 0644);
    106MODULE_PARM_DESC(buf_sz, "DMA buffer size");
    107
    108#define	STMMAC_RX_COPYBREAK	256
    109
    110static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
    111				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
    112				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
    113
    114#define STMMAC_DEFAULT_LPI_TIMER	1000
    115static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
    116module_param(eee_timer, int, 0644);
    117MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
    118#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
    119
    120/* By default the driver will use the ring mode to manage tx and rx descriptors,
    121 * but allows the user to force the use of chain mode instead of ring mode
    122 */
    123static int chain_mode;
    124module_param(chain_mode, int, 0444);
    125MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
    126
    127static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
    128/* For MSI interrupts handling */
    129static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
    130static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
    131static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
    132static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
    133static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
    134static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
    135static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
    136					  u32 rxmode, u32 chan);
    137
    138#ifdef CONFIG_DEBUG_FS
    139static const struct net_device_ops stmmac_netdev_ops;
    140static void stmmac_init_fs(struct net_device *dev);
    141static void stmmac_exit_fs(struct net_device *dev);
    142#endif
    143
    144#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
    145
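       /**
        * stmmac_bus_clks_config - enable or disable the bus clocks
        * @priv: driver private structure
        * @enabled: true to enable the clocks, false to disable them
        * Description: prepare/enable or disable/unprepare the stmmac and pclk bus
        * clocks, and invoke the optional platform clks_config() callback.
        */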
    146int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
    147{
    148	int ret = 0;
    149
    150	if (enabled) {
    151		ret = clk_prepare_enable(priv->plat->stmmac_clk);
    152		if (ret)
    153			return ret;
    154		ret = clk_prepare_enable(priv->plat->pclk);
    155		if (ret) {
    156			clk_disable_unprepare(priv->plat->stmmac_clk);
    157			return ret;
    158		}
    159		if (priv->plat->clks_config) {
    160			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
    161			if (ret) {
    162				clk_disable_unprepare(priv->plat->stmmac_clk);
    163				clk_disable_unprepare(priv->plat->pclk);
    164				return ret;
    165			}
    166		}
    167	} else {
    168		clk_disable_unprepare(priv->plat->stmmac_clk);
    169		clk_disable_unprepare(priv->plat->pclk);
    170		if (priv->plat->clks_config)
    171			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
    172	}
    173
    174	return ret;
    175}
    176EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
    177
    178/**
    179 * stmmac_verify_args - verify the driver parameters.
    180 * Description: it checks the driver parameters and sets a default in case of
    181 * errors.
    182 */
    183static void stmmac_verify_args(void)
    184{
    185	if (unlikely(watchdog < 0))
    186		watchdog = TX_TIMEO;
    187	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
    188		buf_sz = DEFAULT_BUFSIZE;
    189	if (unlikely(flow_ctrl > 1))
    190		flow_ctrl = FLOW_AUTO;
    191	else if (likely(flow_ctrl < 0))
    192		flow_ctrl = FLOW_OFF;
    193	if (unlikely((pause < 0) || (pause > 0xffff)))
    194		pause = PAUSE_TIME;
    195	if (eee_timer < 0)
    196		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
    197}
    198
    199static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
    200{
    201	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
    202	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
    203	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
    204	u32 queue;
    205
    206	for (queue = 0; queue < maxq; queue++) {
    207		struct stmmac_channel *ch = &priv->channel[queue];
    208
    209		if (stmmac_xdp_is_enabled(priv) &&
    210		    test_bit(queue, priv->af_xdp_zc_qps)) {
    211			napi_disable(&ch->rxtx_napi);
    212			continue;
    213		}
    214
    215		if (queue < rx_queues_cnt)
    216			napi_disable(&ch->rx_napi);
    217		if (queue < tx_queues_cnt)
    218			napi_disable(&ch->tx_napi);
    219	}
    220}
    221
    222/**
    223 * stmmac_disable_all_queues - Disable all queues
    224 * @priv: driver private structure
    225 */
    226static void stmmac_disable_all_queues(struct stmmac_priv *priv)
    227{
    228	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
    229	struct stmmac_rx_queue *rx_q;
    230	u32 queue;
    231
    232	/* synchronize_rcu() needed for pending XDP buffers to drain */
    233	for (queue = 0; queue < rx_queues_cnt; queue++) {
    234		rx_q = &priv->rx_queue[queue];
    235		if (rx_q->xsk_pool) {
    236			synchronize_rcu();
    237			break;
    238		}
    239	}
    240
    241	__stmmac_disable_all_queues(priv);
    242}
    243
    244/**
    245 * stmmac_enable_all_queues - Enable all queues
    246 * @priv: driver private structure
    247 */
    248static void stmmac_enable_all_queues(struct stmmac_priv *priv)
    249{
    250	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
    251	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
    252	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
    253	u32 queue;
    254
    255	for (queue = 0; queue < maxq; queue++) {
    256		struct stmmac_channel *ch = &priv->channel[queue];
    257
    258		if (stmmac_xdp_is_enabled(priv) &&
    259		    test_bit(queue, priv->af_xdp_zc_qps)) {
    260			napi_enable(&ch->rxtx_napi);
    261			continue;
    262		}
    263
    264		if (queue < rx_queues_cnt)
    265			napi_enable(&ch->rx_napi);
    266		if (queue < tx_queues_cnt)
    267			napi_enable(&ch->tx_napi);
    268	}
    269}
    270
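       /* Schedule the service task unless the interface is going down or a
        * run is already pending.
        */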
    271static void stmmac_service_event_schedule(struct stmmac_priv *priv)
    272{
    273	if (!test_bit(STMMAC_DOWN, &priv->state) &&
    274	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
    275		queue_work(priv->wq, &priv->service_task);
    276}
    277
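       /* Report a global fault: take the carrier down and ask the service
        * task to reset the device.
        */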
    278static void stmmac_global_err(struct stmmac_priv *priv)
    279{
    280	netif_carrier_off(priv->dev);
    281	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
    282	stmmac_service_event_schedule(priv);
    283}
    284
    285/**
    286 * stmmac_clk_csr_set - dynamically set the MDC clock
    287 * @priv: driver private structure
    288 * Description: this is to dynamically set the MDC clock according to the csr
    289 * clock input.
    290 * Note:
    291 *	If a specific clk_csr value is passed from the platform
    292 *	this means that the CSR Clock Range selection cannot be
    293 *	changed at run-time and it is fixed (as reported in the driver
    294 *	documentation). Otherwise, the driver will try to set the MDC
    295 *	clock dynamically according to the actual clock input.
    296 */
    297static void stmmac_clk_csr_set(struct stmmac_priv *priv)
    298{
    299	u32 clk_rate;
    300
    301	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
    302
    303	/* The platform-provided default clk_csr is assumed valid for all
    304	 * cases except the ones handled below. For values higher than the
    305	 * IEEE 802.3 specified frequency we cannot estimate the proper
    306	 * divider as the frequency of clk_csr_i is not known, so we do
    307	 * not change the default
    308	 * divider.
    309	 */
    310	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
    311		if (clk_rate < CSR_F_35M)
    312			priv->clk_csr = STMMAC_CSR_20_35M;
    313		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
    314			priv->clk_csr = STMMAC_CSR_35_60M;
    315		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
    316			priv->clk_csr = STMMAC_CSR_60_100M;
    317		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
    318			priv->clk_csr = STMMAC_CSR_100_150M;
    319		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
    320			priv->clk_csr = STMMAC_CSR_150_250M;
    321		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
    322			priv->clk_csr = STMMAC_CSR_250_300M;
    323	}
    324
    325	if (priv->plat->has_sun8i) {
    326		if (clk_rate > 160000000)
    327			priv->clk_csr = 0x03;
    328		else if (clk_rate > 80000000)
    329			priv->clk_csr = 0x02;
    330		else if (clk_rate > 40000000)
    331			priv->clk_csr = 0x01;
    332		else
    333			priv->clk_csr = 0;
    334	}
    335
    336	if (priv->plat->has_xgmac) {
    337		if (clk_rate > 400000000)
    338			priv->clk_csr = 0x5;
    339		else if (clk_rate > 350000000)
    340			priv->clk_csr = 0x4;
    341		else if (clk_rate > 300000000)
    342			priv->clk_csr = 0x3;
    343		else if (clk_rate > 250000000)
    344			priv->clk_csr = 0x2;
    345		else if (clk_rate > 150000000)
    346			priv->clk_csr = 0x1;
    347		else
    348			priv->clk_csr = 0x0;
    349	}
    350}
    351
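       /* Dump a packet buffer as hex for debugging purposes */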
    352static void print_pkt(unsigned char *buf, int len)
    353{
    354	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
    355	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
    356}
    357
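       /**
        * stmmac_tx_avail - get the number of available TX descriptors
        * @priv: driver private structure
        * @queue: TX queue index
        */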
    358static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
    359{
    360	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
    361	u32 avail;
    362
    363	if (tx_q->dirty_tx > tx_q->cur_tx)
    364		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
    365	else
    366		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
    367
    368	return avail;
    369}
    370
    371/**
    372 * stmmac_rx_dirty - Get RX queue dirty
    373 * @priv: driver private structure
    374 * @queue: RX queue index
    375 */
    376static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
    377{
    378	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
    379	u32 dirty;
    380
    381	if (rx_q->dirty_rx <= rx_q->cur_rx)
    382		dirty = rx_q->cur_rx - rx_q->dirty_rx;
    383	else
    384		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
    385
    386	return dirty;
    387}
    388
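       /* Enable or disable the hardware LPI entry timer and keep the SW EEE
        * timer flag in sync.
        */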
    389static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
    390{
    391	int tx_lpi_timer;
    392
    393	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
    394	priv->eee_sw_timer_en = en ? 0 : 1;
    395	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
    396	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
    397}
    398
    399/**
    400 * stmmac_enable_eee_mode - check and enter in LPI mode
    401 * @priv: driver private structure
    402 * Description: this function checks whether all TX queues are idle and, if
    403 * so, enters LPI mode when EEE is enabled.
    404 */
    405static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
    406{
    407	u32 tx_cnt = priv->plat->tx_queues_to_use;
    408	u32 queue;
    409
    410	/* check if all TX queues have the work finished */
    411	for (queue = 0; queue < tx_cnt; queue++) {
    412		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
    413
    414		if (tx_q->dirty_tx != tx_q->cur_tx)
    415			return -EBUSY; /* still unfinished work */
    416	}
    417
    418	/* Check and enter in LPI mode */
    419	if (!priv->tx_path_in_lpi_mode)
    420		stmmac_set_eee_mode(priv, priv->hw,
    421				priv->plat->en_tx_lpi_clockgating);
    422	return 0;
    423}
    424
    425/**
    426 * stmmac_disable_eee_mode - disable and exit from LPI mode
    427 * @priv: driver private structure
    428 * Description: this function exits and disables EEE when the LPI state
    429 * is active. It is called from the xmit path.
    430 */
    431void stmmac_disable_eee_mode(struct stmmac_priv *priv)
    432{
    433	if (!priv->eee_sw_timer_en) {
    434		stmmac_lpi_entry_timer_config(priv, 0);
    435		return;
    436	}
    437
    438	stmmac_reset_eee_mode(priv, priv->hw);
    439	del_timer_sync(&priv->eee_ctrl_timer);
    440	priv->tx_path_in_lpi_mode = false;
    441}
    442
    443/**
    444 * stmmac_eee_ctrl_timer - EEE TX SW timer.
    445 * @t:  timer_list struct containing private info
    446 * Description:
    447 *  if there is no data transfer and we are not already in the LPI state,
    448 *  then the MAC transmitter can be moved to the LPI state.
    449 */
    450static void stmmac_eee_ctrl_timer(struct timer_list *t)
    451{
    452	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
    453
    454	if (stmmac_enable_eee_mode(priv))
    455		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
    456}
    457
    458/**
    459 * stmmac_eee_init - init EEE
    460 * @priv: driver private structure
    461 * Description:
    462 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
    463 *  can also manage EEE, this function enables the LPI state and starts
    464 *  the related timer.
    465 */
    466bool stmmac_eee_init(struct stmmac_priv *priv)
    467{
    468	int eee_tw_timer = priv->eee_tw_timer;
    469
    470	/* When using a PCS we cannot deal with the PHY registers at this stage,
    471	 * so we do not support extra features like EEE.
    472	 */
    473	if (priv->hw->pcs == STMMAC_PCS_TBI ||
    474	    priv->hw->pcs == STMMAC_PCS_RTBI)
    475		return false;
    476
    477	/* Check if MAC core supports the EEE feature. */
    478	if (!priv->dma_cap.eee)
    479		return false;
    480
    481	mutex_lock(&priv->lock);
    482
    483	/* Check if it needs to be deactivated */
    484	if (!priv->eee_active) {
    485		if (priv->eee_enabled) {
    486			netdev_dbg(priv->dev, "disable EEE\n");
    487			stmmac_lpi_entry_timer_config(priv, 0);
    488			del_timer_sync(&priv->eee_ctrl_timer);
    489			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
    490			if (priv->hw->xpcs)
    491				xpcs_config_eee(priv->hw->xpcs,
    492						priv->plat->mult_fact_100ns,
    493						false);
    494		}
    495		mutex_unlock(&priv->lock);
    496		return false;
    497	}
    498
    499	if (priv->eee_active && !priv->eee_enabled) {
    500		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
    501		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
    502				     eee_tw_timer);
    503		if (priv->hw->xpcs)
    504			xpcs_config_eee(priv->hw->xpcs,
    505					priv->plat->mult_fact_100ns,
    506					true);
    507	}
    508
    509	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
    510		del_timer_sync(&priv->eee_ctrl_timer);
    511		priv->tx_path_in_lpi_mode = false;
    512		stmmac_lpi_entry_timer_config(priv, 1);
    513	} else {
    514		stmmac_lpi_entry_timer_config(priv, 0);
    515		mod_timer(&priv->eee_ctrl_timer,
    516			  STMMAC_LPI_T(priv->tx_lpi_timer));
    517	}
    518
    519	mutex_unlock(&priv->lock);
    520	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
    521	return true;
    522}
    523
    524/* stmmac_get_tx_hwtstamp - get HW TX timestamps
    525 * @priv: driver private structure
    526 * @p : descriptor pointer
    527 * @skb : the socket buffer
    528 * Description :
    529 * This function reads the timestamp from the descriptor and passes it to
    530 * the stack. It also performs some sanity checks.
    531 */
    532static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
    533				   struct dma_desc *p, struct sk_buff *skb)
    534{
    535	struct skb_shared_hwtstamps shhwtstamp;
    536	bool found = false;
    537	u64 ns = 0;
    538
    539	if (!priv->hwts_tx_en)
    540		return;
    541
    542	/* exit if skb doesn't support hw tstamp */
    543	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
    544		return;
    545
    546	/* check tx tstamp status */
    547	if (stmmac_get_tx_timestamp_status(priv, p)) {
    548		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
    549		found = true;
    550	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
    551		found = true;
    552	}
    553
    554	if (found) {
    555		ns -= priv->plat->cdc_error_adj;
    556
    557		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
    558		shhwtstamp.hwtstamp = ns_to_ktime(ns);
    559
    560		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
    561		/* pass tstamp to stack */
    562		skb_tstamp_tx(skb, &shhwtstamp);
    563	}
    564}
    565
    566/* stmmac_get_rx_hwtstamp - get HW RX timestamps
    567 * @priv: driver private structure
    568 * @p : descriptor pointer
    569 * @np : next descriptor pointer
    570 * @skb : the socket buffer
    571 * Description :
    572 * This function reads the received packet's timestamp from the descriptor
    573 * and passes it to the stack. It also performs some sanity checks.
    574 */
    575static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
    576				   struct dma_desc *np, struct sk_buff *skb)
    577{
    578	struct skb_shared_hwtstamps *shhwtstamp = NULL;
    579	struct dma_desc *desc = p;
    580	u64 ns = 0;
    581
    582	if (!priv->hwts_rx_en)
    583		return;
    584	/* For GMAC4, the valid timestamp is from CTX next desc. */
    585	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
    586		desc = np;
    587
    588	/* Check if timestamp is available */
    589	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
    590		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
    591
    592		ns -= priv->plat->cdc_error_adj;
    593
    594		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
    595		shhwtstamp = skb_hwtstamps(skb);
    596		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
    597		shhwtstamp->hwtstamp = ns_to_ktime(ns);
    598	} else  {
    599		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
    600	}
    601}
    602
    603/**
    604 *  stmmac_hwtstamp_set - control hardware timestamping.
    605 *  @dev: device pointer.
    606 *  @ifr: An IOCTL-specific structure that can contain a pointer to
    607 *  a proprietary structure used to pass information to the driver.
    608 *  Description:
    609 *  This function configures the MAC to enable/disable both outgoing (TX)
    610 *  and incoming (RX) packet timestamping based on user input.
    611 *  Return Value:
    612 *  0 on success and an appropriate -ve integer on failure.
    613 */
    614static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
    615{
    616	struct stmmac_priv *priv = netdev_priv(dev);
    617	struct hwtstamp_config config;
    618	u32 ptp_v2 = 0;
    619	u32 tstamp_all = 0;
    620	u32 ptp_over_ipv4_udp = 0;
    621	u32 ptp_over_ipv6_udp = 0;
    622	u32 ptp_over_ethernet = 0;
    623	u32 snap_type_sel = 0;
    624	u32 ts_master_en = 0;
    625	u32 ts_event_en = 0;
    626
    627	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
    628		netdev_alert(priv->dev, "No support for HW time stamping\n");
    629		priv->hwts_tx_en = 0;
    630		priv->hwts_rx_en = 0;
    631
    632		return -EOPNOTSUPP;
    633	}
    634
    635	if (copy_from_user(&config, ifr->ifr_data,
    636			   sizeof(config)))
    637		return -EFAULT;
    638
    639	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
    640		   __func__, config.flags, config.tx_type, config.rx_filter);
    641
    642	if (config.tx_type != HWTSTAMP_TX_OFF &&
    643	    config.tx_type != HWTSTAMP_TX_ON)
    644		return -ERANGE;
    645
    646	if (priv->adv_ts) {
    647		switch (config.rx_filter) {
    648		case HWTSTAMP_FILTER_NONE:
    649			/* time stamp no incoming packet at all */
    650			config.rx_filter = HWTSTAMP_FILTER_NONE;
    651			break;
    652
    653		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
    654			/* PTP v1, UDP, any kind of event packet */
    655			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
    656			/* 'xmac' hardware can support Sync, Pdelay_Req and
    657			 * Pdelay_resp by setting bit14 and bits17/16 to 01
    658			 * This leaves Delay_Req timestamps out.
    659			 * Enable all events *and* general purpose message
    660			 * timestamping
    661			 */
    662			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
    663			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    664			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    665			break;
    666
    667		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
    668			/* PTP v1, UDP, Sync packet */
    669			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
    670			/* take time stamp for SYNC messages only */
    671			ts_event_en = PTP_TCR_TSEVNTENA;
    672
    673			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    674			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    675			break;
    676
    677		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
    678			/* PTP v1, UDP, Delay_req packet */
    679			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
    680			/* take time stamp for Delay_Req messages only */
    681			ts_master_en = PTP_TCR_TSMSTRENA;
    682			ts_event_en = PTP_TCR_TSEVNTENA;
    683
    684			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    685			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    686			break;
    687
    688		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
    689			/* PTP v2, UDP, any kind of event packet */
    690			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
    691			ptp_v2 = PTP_TCR_TSVER2ENA;
    692			/* take time stamp for all event messages */
    693			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
    694
    695			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    696			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    697			break;
    698
    699		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
    700			/* PTP v2, UDP, Sync packet */
    701			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
    702			ptp_v2 = PTP_TCR_TSVER2ENA;
    703			/* take time stamp for SYNC messages only */
    704			ts_event_en = PTP_TCR_TSEVNTENA;
    705
    706			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    707			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    708			break;
    709
    710		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
    711			/* PTP v2, UDP, Delay_req packet */
    712			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
    713			ptp_v2 = PTP_TCR_TSVER2ENA;
    714			/* take time stamp for Delay_Req messages only */
    715			ts_master_en = PTP_TCR_TSMSTRENA;
    716			ts_event_en = PTP_TCR_TSEVNTENA;
    717
    718			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    719			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    720			break;
    721
    722		case HWTSTAMP_FILTER_PTP_V2_EVENT:
    723			/* PTP v2/802.AS1 any layer, any kind of event packet */
    724			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
    725			ptp_v2 = PTP_TCR_TSVER2ENA;
    726			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
    727			if (priv->synopsys_id < DWMAC_CORE_4_10)
    728				ts_event_en = PTP_TCR_TSEVNTENA;
    729			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    730			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    731			ptp_over_ethernet = PTP_TCR_TSIPENA;
    732			break;
    733
    734		case HWTSTAMP_FILTER_PTP_V2_SYNC:
    735			/* PTP v2/802.AS1, any layer, Sync packet */
    736			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
    737			ptp_v2 = PTP_TCR_TSVER2ENA;
    738			/* take time stamp for SYNC messages only */
    739			ts_event_en = PTP_TCR_TSEVNTENA;
    740
    741			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    742			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    743			ptp_over_ethernet = PTP_TCR_TSIPENA;
    744			break;
    745
    746		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
    747			/* PTP v2/802.AS1, any layer, Delay_req packet */
    748			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
    749			ptp_v2 = PTP_TCR_TSVER2ENA;
    750			/* take time stamp for Delay_Req messages only */
    751			ts_master_en = PTP_TCR_TSMSTRENA;
    752			ts_event_en = PTP_TCR_TSEVNTENA;
    753
    754			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
    755			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
    756			ptp_over_ethernet = PTP_TCR_TSIPENA;
    757			break;
    758
    759		case HWTSTAMP_FILTER_NTP_ALL:
    760		case HWTSTAMP_FILTER_ALL:
    761			/* time stamp any incoming packet */
    762			config.rx_filter = HWTSTAMP_FILTER_ALL;
    763			tstamp_all = PTP_TCR_TSENALL;
    764			break;
    765
    766		default:
    767			return -ERANGE;
    768		}
    769	} else {
    770		switch (config.rx_filter) {
    771		case HWTSTAMP_FILTER_NONE:
    772			config.rx_filter = HWTSTAMP_FILTER_NONE;
    773			break;
    774		default:
    775			/* PTP v1, UDP, any kind of event packet */
    776			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
    777			break;
    778		}
    779	}
    780	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
    781	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
    782
    783	priv->systime_flags = STMMAC_HWTS_ACTIVE;
    784
    785	if (priv->hwts_tx_en || priv->hwts_rx_en) {
    786		priv->systime_flags |= tstamp_all | ptp_v2 |
    787				       ptp_over_ethernet | ptp_over_ipv6_udp |
    788				       ptp_over_ipv4_udp | ts_event_en |
    789				       ts_master_en | snap_type_sel;
    790	}
    791
    792	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
    793
    794	memcpy(&priv->tstamp_config, &config, sizeof(config));
    795
    796	return copy_to_user(ifr->ifr_data, &config,
    797			    sizeof(config)) ? -EFAULT : 0;
    798}
    799
    800/**
    801 *  stmmac_hwtstamp_get - read hardware timestamping.
    802 *  @dev: device pointer.
    803 *  @ifr: An IOCTL-specific structure that can contain a pointer to
    804 *  a proprietary structure used to pass information to the driver.
    805 *  Description:
    806 *  This function obtains the current hardware timestamping settings
    807 *  as requested.
    808 */
    809static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
    810{
    811	struct stmmac_priv *priv = netdev_priv(dev);
    812	struct hwtstamp_config *config = &priv->tstamp_config;
    813
    814	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
    815		return -EOPNOTSUPP;
    816
    817	return copy_to_user(ifr->ifr_data, config,
    818			    sizeof(*config)) ? -EFAULT : 0;
    819}
    820
    821/**
    822 * stmmac_init_tstamp_counter - init hardware timestamping counter
    823 * @priv: driver private structure
    824 * @systime_flags: timestamping flags
    825 * Description:
    826 * Initialize hardware counter for packet timestamping.
    827 * This is valid as long as the interface is open and not suspended.
    829 * It will be rerun after resuming from suspend, in which case the timestamping
    829 * flags updated by stmmac_hwtstamp_set() also need to be restored.
    830 */
    831int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
    832{
    833	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
    834	struct timespec64 now;
    835	u32 sec_inc = 0;
    836	u64 temp = 0;
    837	int ret;
    838
    839	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
    840		return -EOPNOTSUPP;
    841
    842	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
    843	if (ret < 0) {
    844		netdev_warn(priv->dev,
    845			    "failed to enable PTP reference clock: %pe\n",
    846			    ERR_PTR(ret));
    847		return ret;
    848	}
    849
    850	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
    851	priv->systime_flags = systime_flags;
    852
    853	/* program Sub Second Increment reg */
    854	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
    855					   priv->plat->clk_ptp_rate,
    856					   xmac, &sec_inc);
    857	temp = div_u64(1000000000ULL, sec_inc);
    858
    859	/* Store sub second increment for later use */
    860	priv->sub_second_inc = sec_inc;
    861
    862	/* calculate the default addend value:
    863	 * the formula is:
    864	 *   addend = (2^32) / freq_div_ratio;
    865	 * where freq_div_ratio = 1e9ns / sec_inc
    866	 */
    867	temp = (u64)(temp << 32);
    868	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
    869	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
    870
    871	/* initialize system time */
    872	ktime_get_real_ts64(&now);
    873
    874	/* lower 32 bits of tv_sec are safe until y2106 */
    875	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
    876
    877	return 0;
    878}
    879EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
    880
    881/**
    882 * stmmac_init_ptp - init PTP
    883 * @priv: driver private structure
    884 * Description: this verifies whether the HW supports PTPv1 or PTPv2.
    885 * This is done by looking at the HW cap. register.
    886 * This function also registers the ptp driver.
    887 */
    888static int stmmac_init_ptp(struct stmmac_priv *priv)
    889{
    890	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
    891	int ret;
    892
    893	if (priv->plat->ptp_clk_freq_config)
    894		priv->plat->ptp_clk_freq_config(priv);
    895
    896	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
    897	if (ret)
    898		return ret;
    899
    900	priv->adv_ts = 0;
    901	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
    902	if (xmac && priv->dma_cap.atime_stamp)
    903		priv->adv_ts = 1;
    904	/* Dwmac 3.x core with extend_desc can support adv_ts */
    905	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
    906		priv->adv_ts = 1;
    907
    908	if (priv->dma_cap.time_stamp)
    909		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
    910
    911	if (priv->adv_ts)
    912		netdev_info(priv->dev,
    913			    "IEEE 1588-2008 Advanced Timestamp supported\n");
    914
    915	priv->hwts_tx_en = 0;
    916	priv->hwts_rx_en = 0;
    917
    918	return 0;
    919}
    920
    921static void stmmac_release_ptp(struct stmmac_priv *priv)
    922{
    923	clk_disable_unprepare(priv->plat->clk_ptp_ref);
    924	stmmac_ptp_unregister(priv);
    925}
    926
    927/**
    928 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
    929 *  @priv: driver private structure
    930 *  @duplex: duplex passed to the next function
    931 *  Description: It is used for configuring the flow control in all queues
    932 */
    933static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
    934{
    935	u32 tx_cnt = priv->plat->tx_queues_to_use;
    936
    937	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
    938			priv->pause, tx_cnt);
    939}
    940
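       /* phylink callback: return the XPCS instance for this interface, if any */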
    941static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
    942						 phy_interface_t interface)
    943{
    944	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
    945
    946	if (!priv->hw->xpcs)
    947		return NULL;
    948
    949	return &priv->hw->xpcs->pcs;
    950}
    951
    952static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
    953			      const struct phylink_link_state *state)
    954{
    955	/* Nothing to do, xpcs_config() handles everything */
    956}
    957
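       /* On link up, start the FPE verification handshake if it is enabled;
        * on link down, reset the local and link-partner FPE states.
        */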
    958static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
    959{
    960	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
    961	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
    962	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
    963	bool *hs_enable = &fpe_cfg->hs_enable;
    964
    965	if (is_up && *hs_enable) {
    966		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
    967	} else {
    968		*lo_state = FPE_STATE_OFF;
    969		*lp_state = FPE_STATE_OFF;
    970	}
    971}
    972
    973static void stmmac_mac_link_down(struct phylink_config *config,
    974				 unsigned int mode, phy_interface_t interface)
    975{
    976	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
    977
    978	stmmac_mac_set(priv, priv->ioaddr, false);
    979	priv->eee_active = false;
    980	priv->tx_lpi_enabled = false;
    981	priv->eee_enabled = stmmac_eee_init(priv);
    982	stmmac_set_eee_pls(priv, priv->hw, false);
    983
    984	if (priv->dma_cap.fpesel)
    985		stmmac_fpe_link_state_handle(priv, false);
    986}
    987
    988static void stmmac_mac_link_up(struct phylink_config *config,
    989			       struct phy_device *phy,
    990			       unsigned int mode, phy_interface_t interface,
    991			       int speed, int duplex,
    992			       bool tx_pause, bool rx_pause)
    993{
    994	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
    995	u32 ctrl;
    996
    997	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
    998	ctrl &= ~priv->hw->link.speed_mask;
    999
   1000	if (interface == PHY_INTERFACE_MODE_USXGMII) {
   1001		switch (speed) {
   1002		case SPEED_10000:
   1003			ctrl |= priv->hw->link.xgmii.speed10000;
   1004			break;
   1005		case SPEED_5000:
   1006			ctrl |= priv->hw->link.xgmii.speed5000;
   1007			break;
   1008		case SPEED_2500:
   1009			ctrl |= priv->hw->link.xgmii.speed2500;
   1010			break;
   1011		default:
   1012			return;
   1013		}
   1014	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
   1015		switch (speed) {
   1016		case SPEED_100000:
   1017			ctrl |= priv->hw->link.xlgmii.speed100000;
   1018			break;
   1019		case SPEED_50000:
   1020			ctrl |= priv->hw->link.xlgmii.speed50000;
   1021			break;
   1022		case SPEED_40000:
   1023			ctrl |= priv->hw->link.xlgmii.speed40000;
   1024			break;
   1025		case SPEED_25000:
   1026			ctrl |= priv->hw->link.xlgmii.speed25000;
   1027			break;
   1028		case SPEED_10000:
   1029			ctrl |= priv->hw->link.xgmii.speed10000;
   1030			break;
   1031		case SPEED_2500:
   1032			ctrl |= priv->hw->link.speed2500;
   1033			break;
   1034		case SPEED_1000:
   1035			ctrl |= priv->hw->link.speed1000;
   1036			break;
   1037		default:
   1038			return;
   1039		}
   1040	} else {
   1041		switch (speed) {
   1042		case SPEED_2500:
   1043			ctrl |= priv->hw->link.speed2500;
   1044			break;
   1045		case SPEED_1000:
   1046			ctrl |= priv->hw->link.speed1000;
   1047			break;
   1048		case SPEED_100:
   1049			ctrl |= priv->hw->link.speed100;
   1050			break;
   1051		case SPEED_10:
   1052			ctrl |= priv->hw->link.speed10;
   1053			break;
   1054		default:
   1055			return;
   1056		}
   1057	}
   1058
   1059	priv->speed = speed;
   1060
   1061	if (priv->plat->fix_mac_speed)
   1062		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
   1063
   1064	if (!duplex)
   1065		ctrl &= ~priv->hw->link.duplex;
   1066	else
   1067		ctrl |= priv->hw->link.duplex;
   1068
   1069	/* Flow Control operation */
   1070	if (tx_pause && rx_pause)
   1071		stmmac_mac_flow_ctrl(priv, duplex);
   1072
   1073	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
   1074
   1075	stmmac_mac_set(priv, priv->ioaddr, true);
   1076	if (phy && priv->dma_cap.eee) {
   1077		priv->eee_active = phy_init_eee(phy, 1) >= 0;
   1078		priv->eee_enabled = stmmac_eee_init(priv);
   1079		priv->tx_lpi_enabled = priv->eee_enabled;
   1080		stmmac_set_eee_pls(priv, priv->hw, true);
   1081	}
   1082
   1083	if (priv->dma_cap.fpesel)
   1084		stmmac_fpe_link_state_handle(priv, true);
   1085}
   1086
   1087static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
   1088	.validate = phylink_generic_validate,
   1089	.mac_select_pcs = stmmac_mac_select_pcs,
   1090	.mac_config = stmmac_mac_config,
   1091	.mac_link_down = stmmac_mac_link_down,
   1092	.mac_link_up = stmmac_mac_link_up,
   1093};
   1094
   1095/**
   1096 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
   1097 * @priv: driver private structure
   1098 * Description: this verifies whether the HW supports the PCS, i.e. the
   1099 * Physical Coding Sublayer interface that can be used when the MAC is
   1100 * configured for the TBI, RTBI, or SGMII PHY interface.
   1101 */
   1102static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
   1103{
   1104	int interface = priv->plat->interface;
   1105
   1106	if (priv->dma_cap.pcs) {
   1107		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
   1108		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
   1109		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
   1110		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
   1111			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
   1112			priv->hw->pcs = STMMAC_PCS_RGMII;
   1113		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
   1114			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
   1115			priv->hw->pcs = STMMAC_PCS_SGMII;
   1116		}
   1117	}
   1118}
   1119
   1120/**
   1121 * stmmac_init_phy - PHY initialization
   1122 * @dev: net device structure
   1123 * Description: it initializes the driver's PHY state, and attaches the PHY
   1124 * to the mac driver.
   1125 *  Return value:
   1126 *  0 on success
   1127 */
   1128static int stmmac_init_phy(struct net_device *dev)
   1129{
   1130	struct stmmac_priv *priv = netdev_priv(dev);
   1131	struct device_node *node;
   1132	int ret;
   1133
   1134	node = priv->plat->phylink_node;
   1135
   1136	if (node)
   1137		ret = phylink_of_phy_connect(priv->phylink, node, 0);
   1138
   1139	/* Some DT bindings do not set up the PHY handle. Let's try to
   1140	 * manually parse it
   1141	 */
   1142	if (!node || ret) {
   1143		int addr = priv->plat->phy_addr;
   1144		struct phy_device *phydev;
   1145
   1146		phydev = mdiobus_get_phy(priv->mii, addr);
   1147		if (!phydev) {
   1148			netdev_err(priv->dev, "no phy at addr %d\n", addr);
   1149			return -ENODEV;
   1150		}
   1151
   1152		ret = phylink_connect_phy(priv->phylink, phydev);
   1153	}
   1154
   1155	if (!priv->plat->pmt) {
   1156		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
   1157
   1158		phylink_ethtool_get_wol(priv->phylink, &wol);
   1159		device_set_wakeup_capable(priv->device, !!wol.supported);
   1160	}
   1161
   1162	return ret;
   1163}
   1164
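       /* Create the phylink instance: fill in the MAC capabilities and the
        * supported interfaces before calling phylink_create().
        */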
   1165static int stmmac_phy_setup(struct stmmac_priv *priv)
   1166{
   1167	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
   1168	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
   1169	int max_speed = priv->plat->max_speed;
   1170	int mode = priv->plat->phy_interface;
   1171	struct phylink *phylink;
   1172
   1173	priv->phylink_config.dev = &priv->dev->dev;
   1174	priv->phylink_config.type = PHYLINK_NETDEV;
   1175	if (priv->plat->mdio_bus_data)
   1176		priv->phylink_config.ovr_an_inband =
   1177			mdio_bus_data->xpcs_an_inband;
   1178
   1179	if (!fwnode)
   1180		fwnode = dev_fwnode(priv->device);
   1181
   1182	/* Set the platform/firmware specified interface mode */
   1183	__set_bit(mode, priv->phylink_config.supported_interfaces);
   1184
   1185	/* If we have an xpcs, it defines which PHY interfaces are supported. */
   1186	if (priv->hw->xpcs)
   1187		xpcs_get_interfaces(priv->hw->xpcs,
   1188				    priv->phylink_config.supported_interfaces);
   1189
   1190	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
   1191		MAC_10 | MAC_100;
   1192
   1193	if (!max_speed || max_speed >= 1000)
   1194		priv->phylink_config.mac_capabilities |= MAC_1000;
   1195
   1196	if (priv->plat->has_gmac4) {
   1197		if (!max_speed || max_speed >= 2500)
   1198			priv->phylink_config.mac_capabilities |= MAC_2500FD;
   1199	} else if (priv->plat->has_xgmac) {
   1200		if (!max_speed || max_speed >= 2500)
   1201			priv->phylink_config.mac_capabilities |= MAC_2500FD;
   1202		if (!max_speed || max_speed >= 5000)
   1203			priv->phylink_config.mac_capabilities |= MAC_5000FD;
   1204		if (!max_speed || max_speed >= 10000)
   1205			priv->phylink_config.mac_capabilities |= MAC_10000FD;
   1206		if (!max_speed || max_speed >= 25000)
   1207			priv->phylink_config.mac_capabilities |= MAC_25000FD;
   1208		if (!max_speed || max_speed >= 40000)
   1209			priv->phylink_config.mac_capabilities |= MAC_40000FD;
   1210		if (!max_speed || max_speed >= 50000)
   1211			priv->phylink_config.mac_capabilities |= MAC_50000FD;
   1212		if (!max_speed || max_speed >= 100000)
   1213			priv->phylink_config.mac_capabilities |= MAC_100000FD;
   1214	}
   1215
   1216	/* Half-Duplex can only work with single queue */
   1217	if (priv->plat->tx_queues_to_use > 1)
   1218		priv->phylink_config.mac_capabilities &=
   1219			~(MAC_10HD | MAC_100HD | MAC_1000HD);
   1220
   1221	phylink = phylink_create(&priv->phylink_config, fwnode,
   1222				 mode, &stmmac_phylink_mac_ops);
   1223	if (IS_ERR(phylink))
   1224		return PTR_ERR(phylink);
   1225
   1226	priv->phylink = phylink;
   1227	return 0;
   1228}
   1229
   1230static void stmmac_display_rx_rings(struct stmmac_priv *priv)
   1231{
   1232	u32 rx_cnt = priv->plat->rx_queues_to_use;
   1233	unsigned int desc_size;
   1234	void *head_rx;
   1235	u32 queue;
   1236
   1237	/* Display RX rings */
   1238	for (queue = 0; queue < rx_cnt; queue++) {
   1239		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1240
   1241		pr_info("\tRX Queue %u rings\n", queue);
   1242
   1243		if (priv->extend_desc) {
   1244			head_rx = (void *)rx_q->dma_erx;
   1245			desc_size = sizeof(struct dma_extended_desc);
   1246		} else {
   1247			head_rx = (void *)rx_q->dma_rx;
   1248			desc_size = sizeof(struct dma_desc);
   1249		}
   1250
   1251		/* Display RX ring */
   1252		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
   1253				    rx_q->dma_rx_phy, desc_size);
   1254	}
   1255}
   1256
   1257static void stmmac_display_tx_rings(struct stmmac_priv *priv)
   1258{
   1259	u32 tx_cnt = priv->plat->tx_queues_to_use;
   1260	unsigned int desc_size;
   1261	void *head_tx;
   1262	u32 queue;
   1263
   1264	/* Display TX rings */
   1265	for (queue = 0; queue < tx_cnt; queue++) {
   1266		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1267
   1268		pr_info("\tTX Queue %d rings\n", queue);
   1269
   1270		if (priv->extend_desc) {
   1271			head_tx = (void *)tx_q->dma_etx;
   1272			desc_size = sizeof(struct dma_extended_desc);
   1273		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
   1274			head_tx = (void *)tx_q->dma_entx;
   1275			desc_size = sizeof(struct dma_edesc);
   1276		} else {
   1277			head_tx = (void *)tx_q->dma_tx;
   1278			desc_size = sizeof(struct dma_desc);
   1279		}
   1280
   1281		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
   1282				    tx_q->dma_tx_phy, desc_size);
   1283	}
   1284}
   1285
   1286static void stmmac_display_rings(struct stmmac_priv *priv)
   1287{
   1288	/* Display RX ring */
   1289	stmmac_display_rx_rings(priv);
   1290
   1291	/* Display TX ring */
   1292	stmmac_display_tx_rings(priv);
   1293}
   1294
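       /* Pick the DMA buffer size needed to hold frames of the given MTU */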
   1295static int stmmac_set_bfsize(int mtu, int bufsize)
   1296{
   1297	int ret = bufsize;
   1298
   1299	if (mtu >= BUF_SIZE_8KiB)
   1300		ret = BUF_SIZE_16KiB;
   1301	else if (mtu >= BUF_SIZE_4KiB)
   1302		ret = BUF_SIZE_8KiB;
   1303	else if (mtu >= BUF_SIZE_2KiB)
   1304		ret = BUF_SIZE_4KiB;
   1305	else if (mtu > DEFAULT_BUFSIZE)
   1306		ret = BUF_SIZE_2KiB;
   1307	else
   1308		ret = DEFAULT_BUFSIZE;
   1309
   1310	return ret;
   1311}
   1312
   1313/**
   1314 * stmmac_clear_rx_descriptors - clear RX descriptors
   1315 * @priv: driver private structure
   1316 * @queue: RX queue index
   1317 * Description: this function is called to clear the RX descriptors;
   1318 * it works whether basic or extended descriptors are used.
   1319 */
   1320static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
   1321{
   1322	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1323	int i;
   1324
   1325	/* Clear the RX descriptors */
   1326	for (i = 0; i < priv->dma_rx_size; i++)
   1327		if (priv->extend_desc)
   1328			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
   1329					priv->use_riwt, priv->mode,
   1330					(i == priv->dma_rx_size - 1),
   1331					priv->dma_buf_sz);
   1332		else
   1333			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
   1334					priv->use_riwt, priv->mode,
   1335					(i == priv->dma_rx_size - 1),
   1336					priv->dma_buf_sz);
   1337}
   1338
   1339/**
   1340 * stmmac_clear_tx_descriptors - clear tx descriptors
   1341 * @priv: driver private structure
   1342 * @queue: TX queue index.
   1343 * Description: this function is called to clear the TX descriptors;
   1344 * it works whether basic or extended descriptors are used.
   1345 */
   1346static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
   1347{
   1348	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1349	int i;
   1350
   1351	/* Clear the TX descriptors */
   1352	for (i = 0; i < priv->dma_tx_size; i++) {
   1353		int last = (i == (priv->dma_tx_size - 1));
   1354		struct dma_desc *p;
   1355
   1356		if (priv->extend_desc)
   1357			p = &tx_q->dma_etx[i].basic;
   1358		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   1359			p = &tx_q->dma_entx[i].basic;
   1360		else
   1361			p = &tx_q->dma_tx[i];
   1362
   1363		stmmac_init_tx_desc(priv, p, priv->mode, last);
   1364	}
   1365}
   1366
   1367/**
   1368 * stmmac_clear_descriptors - clear descriptors
   1369 * @priv: driver private structure
   1370 * Description: this function is called to clear the TX and RX descriptors;
   1371 * it works whether basic or extended descriptors are used.
   1372 */
   1373static void stmmac_clear_descriptors(struct stmmac_priv *priv)
   1374{
   1375	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
   1376	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
   1377	u32 queue;
   1378
   1379	/* Clear the RX descriptors */
   1380	for (queue = 0; queue < rx_queue_cnt; queue++)
   1381		stmmac_clear_rx_descriptors(priv, queue);
   1382
   1383	/* Clear the TX descriptors */
   1384	for (queue = 0; queue < tx_queue_cnt; queue++)
   1385		stmmac_clear_tx_descriptors(priv, queue);
   1386}
   1387
   1388/**
   1389 * stmmac_init_rx_buffers - init the RX descriptor buffer.
   1390 * @priv: driver private structure
   1391 * @p: descriptor pointer
   1392 * @i: descriptor index
   1393 * @flags: gfp flag
   1394 * @queue: RX queue index
   1395 * Description: this function is called to allocate a receive buffer, perform
   1396 * the DMA mapping and init the descriptor.
   1397 */
   1398static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
   1399				  int i, gfp_t flags, u32 queue)
   1400{
   1401	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1402	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
   1403	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
   1404
   1405	if (priv->dma_cap.addr64 <= 32)
   1406		gfp |= GFP_DMA32;
   1407
   1408	if (!buf->page) {
   1409		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
   1410		if (!buf->page)
   1411			return -ENOMEM;
   1412		buf->page_offset = stmmac_rx_offset(priv);
   1413	}
   1414
   1415	if (priv->sph && !buf->sec_page) {
   1416		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
   1417		if (!buf->sec_page)
   1418			return -ENOMEM;
   1419
   1420		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
   1421		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
   1422	} else {
   1423		buf->sec_page = NULL;
   1424		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
   1425	}
   1426
   1427	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
   1428
   1429	stmmac_set_desc_addr(priv, p, buf->addr);
   1430	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
   1431		stmmac_init_desc3(priv, p);
   1432
   1433	return 0;
   1434}
   1435
   1436/**
   1437 * stmmac_free_rx_buffer - free an RX dma buffer
   1438 * @priv: private structure
   1439 * @queue: RX queue index
   1440 * @i: buffer index.
   1441 */
   1442static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
   1443{
   1444	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1445	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
   1446
   1447	if (buf->page)
   1448		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
   1449	buf->page = NULL;
   1450
   1451	if (buf->sec_page)
   1452		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
   1453	buf->sec_page = NULL;
   1454}
   1455
   1456/**
   1457 * stmmac_free_tx_buffer - free a TX dma buffer
   1458 * @priv: private structure
   1459 * @queue: TX queue index
   1460 * @i: buffer index.
   1461 */
   1462static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
   1463{
   1464	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1465
   1466	if (tx_q->tx_skbuff_dma[i].buf &&
   1467	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
   1468		if (tx_q->tx_skbuff_dma[i].map_as_page)
   1469			dma_unmap_page(priv->device,
   1470				       tx_q->tx_skbuff_dma[i].buf,
   1471				       tx_q->tx_skbuff_dma[i].len,
   1472				       DMA_TO_DEVICE);
   1473		else
   1474			dma_unmap_single(priv->device,
   1475					 tx_q->tx_skbuff_dma[i].buf,
   1476					 tx_q->tx_skbuff_dma[i].len,
   1477					 DMA_TO_DEVICE);
   1478	}
   1479
   1480	if (tx_q->xdpf[i] &&
   1481	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
   1482	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
   1483		xdp_return_frame(tx_q->xdpf[i]);
   1484		tx_q->xdpf[i] = NULL;
   1485	}
   1486
   1487	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
   1488		tx_q->xsk_frames_done++;
   1489
   1490	if (tx_q->tx_skbuff[i] &&
   1491	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
   1492		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
   1493		tx_q->tx_skbuff[i] = NULL;
   1494	}
   1495
   1496	tx_q->tx_skbuff_dma[i].buf = 0;
   1497	tx_q->tx_skbuff_dma[i].map_as_page = false;
   1498}
   1499
   1500/**
   1501 * dma_free_rx_skbufs - free RX dma buffers
   1502 * @priv: private structure
   1503 * @queue: RX queue index
   1504 */
   1505static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
   1506{
   1507	int i;
   1508
   1509	for (i = 0; i < priv->dma_rx_size; i++)
   1510		stmmac_free_rx_buffer(priv, queue, i);
   1511}
   1512
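       /**
        * stmmac_alloc_rx_buffers - allocate page pool buffers for a whole RX ring
        * @priv: driver private structure
        * @queue: RX queue index
        * @flags: gfp flag
        */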
   1513static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
   1514				   gfp_t flags)
   1515{
   1516	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1517	int i;
   1518
   1519	for (i = 0; i < priv->dma_rx_size; i++) {
   1520		struct dma_desc *p;
   1521		int ret;
   1522
   1523		if (priv->extend_desc)
   1524			p = &((rx_q->dma_erx + i)->basic);
   1525		else
   1526			p = rx_q->dma_rx + i;
   1527
   1528		ret = stmmac_init_rx_buffers(priv, p, i, flags,
   1529					     queue);
   1530		if (ret)
   1531			return ret;
   1532
   1533		rx_q->buf_alloc_num++;
   1534	}
   1535
   1536	return 0;
   1537}
   1538
   1539/**
   1540 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
   1541 * @priv: private structure
   1542 * @queue: RX queue index
   1543 */
   1544static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
   1545{
   1546	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1547	int i;
   1548
   1549	for (i = 0; i < priv->dma_rx_size; i++) {
   1550		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
   1551
   1552		if (!buf->xdp)
   1553			continue;
   1554
   1555		xsk_buff_free(buf->xdp);
   1556		buf->xdp = NULL;
   1557	}
   1558}
   1559
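       /**
        * stmmac_alloc_rx_buffers_zc - allocate XSK (zero-copy) buffers for a
        * whole RX ring
        * @priv: driver private structure
        * @queue: RX queue index
        */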
   1560static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
   1561{
   1562	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1563	int i;
   1564
   1565	for (i = 0; i < priv->dma_rx_size; i++) {
   1566		struct stmmac_rx_buffer *buf;
   1567		dma_addr_t dma_addr;
   1568		struct dma_desc *p;
   1569
   1570		if (priv->extend_desc)
   1571			p = (struct dma_desc *)(rx_q->dma_erx + i);
   1572		else
   1573			p = rx_q->dma_rx + i;
   1574
   1575		buf = &rx_q->buf_pool[i];
   1576
   1577		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
   1578		if (!buf->xdp)
   1579			return -ENOMEM;
   1580
   1581		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
   1582		stmmac_set_desc_addr(priv, p, dma_addr);
   1583		rx_q->buf_alloc_num++;
   1584	}
   1585
   1586	return 0;
   1587}
   1588
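       /* Return the XSK buffer pool bound to @queue, or NULL if XDP zero-copy
        * is not enabled for it.
        */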
   1589static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
   1590{
   1591	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
   1592		return NULL;
   1593
   1594	return xsk_get_pool_from_qid(priv->dev, queue);
   1595}
   1596
   1597/**
   1598 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
   1599 * @priv: driver private structure
   1600 * @queue: RX queue index
   1601 * @flags: gfp flag.
   1602 * Description: this function initializes the DMA RX descriptors
   1603 * and allocates the socket buffers. It supports the chained and ring
   1604 * modes.
   1605 */
   1606static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
   1607{
   1608	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1609	int ret;
   1610
   1611	netif_dbg(priv, probe, priv->dev,
   1612		  "(%s) dma_rx_phy=0x%08x\n", __func__,
   1613		  (u32)rx_q->dma_rx_phy);
   1614
   1615	stmmac_clear_rx_descriptors(priv, queue);
   1616
   1617	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
   1618
   1619	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
   1620
   1621	if (rx_q->xsk_pool) {
   1622		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
   1623						   MEM_TYPE_XSK_BUFF_POOL,
   1624						   NULL));
   1625		netdev_info(priv->dev,
   1626			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
   1627			    rx_q->queue_index);
   1628		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
   1629	} else {
   1630		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
   1631						   MEM_TYPE_PAGE_POOL,
   1632						   rx_q->page_pool));
   1633		netdev_info(priv->dev,
   1634			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
   1635			    rx_q->queue_index);
   1636	}
   1637
   1638	if (rx_q->xsk_pool) {
    1639		/* The RX XDP ZC buffer pool may not be populated yet, e.g. when
    1640		 * the xdpsock application is TX-only, so the return value is ignored.
    1641		 */
   1642		stmmac_alloc_rx_buffers_zc(priv, queue);
   1643	} else {
   1644		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
   1645		if (ret < 0)
   1646			return -ENOMEM;
   1647	}
   1648
   1649	rx_q->cur_rx = 0;
   1650	rx_q->dirty_rx = 0;
   1651
   1652	/* Setup the chained descriptor addresses */
   1653	if (priv->mode == STMMAC_CHAIN_MODE) {
   1654		if (priv->extend_desc)
   1655			stmmac_mode_init(priv, rx_q->dma_erx,
   1656					 rx_q->dma_rx_phy,
   1657					 priv->dma_rx_size, 1);
   1658		else
   1659			stmmac_mode_init(priv, rx_q->dma_rx,
   1660					 rx_q->dma_rx_phy,
   1661					 priv->dma_rx_size, 0);
   1662	}
   1663
   1664	return 0;
   1665}
   1666
   1667static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
   1668{
   1669	struct stmmac_priv *priv = netdev_priv(dev);
   1670	u32 rx_count = priv->plat->rx_queues_to_use;
   1671	int queue;
   1672	int ret;
   1673
   1674	/* RX INITIALIZATION */
   1675	netif_dbg(priv, probe, priv->dev,
   1676		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
   1677
   1678	for (queue = 0; queue < rx_count; queue++) {
   1679		ret = __init_dma_rx_desc_rings(priv, queue, flags);
   1680		if (ret)
   1681			goto err_init_rx_buffers;
   1682	}
   1683
   1684	return 0;
   1685
   1686err_init_rx_buffers:
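        	/* Unwind in reverse order: release the buffers of the queue that
        	 * failed and of every queue initialized before it.
        	 */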
   1687	while (queue >= 0) {
   1688		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1689
   1690		if (rx_q->xsk_pool)
   1691			dma_free_rx_xskbufs(priv, queue);
   1692		else
   1693			dma_free_rx_skbufs(priv, queue);
   1694
   1695		rx_q->buf_alloc_num = 0;
   1696		rx_q->xsk_pool = NULL;
   1697
   1698		queue--;
   1699	}
   1700
   1701	return ret;
   1702}
   1703
   1704/**
   1705 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
   1706 * @priv: driver private structure
   1707 * @queue : TX queue index
   1708 * Description: this function initializes the DMA TX descriptors
    1709 * and resets the per-entry state. It supports the chained and ring
   1710 * modes.
   1711 */
   1712static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
   1713{
   1714	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1715	int i;
   1716
   1717	netif_dbg(priv, probe, priv->dev,
   1718		  "(%s) dma_tx_phy=0x%08x\n", __func__,
   1719		  (u32)tx_q->dma_tx_phy);
   1720
   1721	/* Setup the chained descriptor addresses */
   1722	if (priv->mode == STMMAC_CHAIN_MODE) {
   1723		if (priv->extend_desc)
   1724			stmmac_mode_init(priv, tx_q->dma_etx,
   1725					 tx_q->dma_tx_phy,
   1726					 priv->dma_tx_size, 1);
   1727		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
   1728			stmmac_mode_init(priv, tx_q->dma_tx,
   1729					 tx_q->dma_tx_phy,
   1730					 priv->dma_tx_size, 0);
   1731	}
   1732
   1733	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
   1734
   1735	for (i = 0; i < priv->dma_tx_size; i++) {
   1736		struct dma_desc *p;
   1737
   1738		if (priv->extend_desc)
   1739			p = &((tx_q->dma_etx + i)->basic);
   1740		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   1741			p = &((tx_q->dma_entx + i)->basic);
   1742		else
   1743			p = tx_q->dma_tx + i;
   1744
   1745		stmmac_clear_desc(priv, p);
   1746
   1747		tx_q->tx_skbuff_dma[i].buf = 0;
   1748		tx_q->tx_skbuff_dma[i].map_as_page = false;
   1749		tx_q->tx_skbuff_dma[i].len = 0;
   1750		tx_q->tx_skbuff_dma[i].last_segment = false;
   1751		tx_q->tx_skbuff[i] = NULL;
   1752	}
   1753
   1754	tx_q->dirty_tx = 0;
   1755	tx_q->cur_tx = 0;
   1756	tx_q->mss = 0;
   1757
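        	/* Reset the BQL (byte queue limit) state for this TX queue */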
   1758	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
   1759
   1760	return 0;
   1761}
   1762
   1763static int init_dma_tx_desc_rings(struct net_device *dev)
   1764{
   1765	struct stmmac_priv *priv = netdev_priv(dev);
   1766	u32 tx_queue_cnt;
   1767	u32 queue;
   1768
   1769	tx_queue_cnt = priv->plat->tx_queues_to_use;
   1770
   1771	for (queue = 0; queue < tx_queue_cnt; queue++)
   1772		__init_dma_tx_desc_rings(priv, queue);
   1773
   1774	return 0;
   1775}
   1776
   1777/**
   1778 * init_dma_desc_rings - init the RX/TX descriptor rings
   1779 * @dev: net device structure
   1780 * @flags: gfp flag.
   1781 * Description: this function initializes the DMA RX/TX descriptors
   1782 * and allocates the socket buffers. It supports the chained and ring
   1783 * modes.
   1784 */
   1785static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
   1786{
   1787	struct stmmac_priv *priv = netdev_priv(dev);
   1788	int ret;
   1789
   1790	ret = init_dma_rx_desc_rings(dev, flags);
   1791	if (ret)
   1792		return ret;
   1793
   1794	ret = init_dma_tx_desc_rings(dev);
   1795
   1796	stmmac_clear_descriptors(priv);
   1797
   1798	if (netif_msg_hw(priv))
   1799		stmmac_display_rings(priv);
   1800
   1801	return ret;
   1802}
   1803
   1804/**
   1805 * dma_free_tx_skbufs - free TX dma buffers
   1806 * @priv: private structure
   1807 * @queue: TX queue index
   1808 */
   1809static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
   1810{
   1811	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1812	int i;
   1813
   1814	tx_q->xsk_frames_done = 0;
   1815
   1816	for (i = 0; i < priv->dma_tx_size; i++)
   1817		stmmac_free_tx_buffer(priv, queue, i);
   1818
   1819	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
   1820		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
   1821		tx_q->xsk_frames_done = 0;
   1822		tx_q->xsk_pool = NULL;
   1823	}
   1824}
   1825
   1826/**
   1827 * stmmac_free_tx_skbufs - free TX skb buffers
   1828 * @priv: private structure
   1829 */
   1830static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
   1831{
   1832	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
   1833	u32 queue;
   1834
   1835	for (queue = 0; queue < tx_queue_cnt; queue++)
   1836		dma_free_tx_skbufs(priv, queue);
   1837}
   1838
   1839/**
   1840 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
   1841 * @priv: private structure
   1842 * @queue: RX queue index
   1843 */
   1844static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
   1845{
   1846	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1847
   1848	/* Release the DMA RX socket buffers */
   1849	if (rx_q->xsk_pool)
   1850		dma_free_rx_xskbufs(priv, queue);
   1851	else
   1852		dma_free_rx_skbufs(priv, queue);
   1853
   1854	rx_q->buf_alloc_num = 0;
   1855	rx_q->xsk_pool = NULL;
   1856
   1857	/* Free DMA regions of consistent memory previously allocated */
   1858	if (!priv->extend_desc)
   1859		dma_free_coherent(priv->device, priv->dma_rx_size *
   1860				  sizeof(struct dma_desc),
   1861				  rx_q->dma_rx, rx_q->dma_rx_phy);
   1862	else
   1863		dma_free_coherent(priv->device, priv->dma_rx_size *
   1864				  sizeof(struct dma_extended_desc),
   1865				  rx_q->dma_erx, rx_q->dma_rx_phy);
   1866
   1867	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
   1868		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
   1869
   1870	kfree(rx_q->buf_pool);
   1871	if (rx_q->page_pool)
   1872		page_pool_destroy(rx_q->page_pool);
   1873}
   1874
   1875static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
   1876{
   1877	u32 rx_count = priv->plat->rx_queues_to_use;
   1878	u32 queue;
   1879
   1880	/* Free RX queue resources */
   1881	for (queue = 0; queue < rx_count; queue++)
   1882		__free_dma_rx_desc_resources(priv, queue);
   1883}
   1884
   1885/**
   1886 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
   1887 * @priv: private structure
   1888 * @queue: TX queue index
   1889 */
   1890static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
   1891{
   1892	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   1893	size_t size;
   1894	void *addr;
   1895
   1896	/* Release the DMA TX socket buffers */
   1897	dma_free_tx_skbufs(priv, queue);
   1898
   1899	if (priv->extend_desc) {
   1900		size = sizeof(struct dma_extended_desc);
   1901		addr = tx_q->dma_etx;
   1902	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
   1903		size = sizeof(struct dma_edesc);
   1904		addr = tx_q->dma_entx;
   1905	} else {
   1906		size = sizeof(struct dma_desc);
   1907		addr = tx_q->dma_tx;
   1908	}
   1909
   1910	size *= priv->dma_tx_size;
   1911
   1912	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
   1913
   1914	kfree(tx_q->tx_skbuff_dma);
   1915	kfree(tx_q->tx_skbuff);
   1916}
   1917
   1918static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
   1919{
   1920	u32 tx_count = priv->plat->tx_queues_to_use;
   1921	u32 queue;
   1922
   1923	/* Free TX queue resources */
   1924	for (queue = 0; queue < tx_count; queue++)
   1925		__free_dma_tx_desc_resources(priv, queue);
   1926}
   1927
   1928/**
   1929 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
   1930 * @priv: private structure
   1931 * @queue: RX queue index
    1932 * Description: according to which descriptor can be used (extended or basic)
    1933 * this function allocates the RX resources for the given queue: the page
    1934 * pool backing the receive buffers, the buffer bookkeeping array and the
    1935 * descriptor ring, and it registers the XDP RX queue info.
   1936 */
   1937static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
   1938{
   1939	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   1940	struct stmmac_channel *ch = &priv->channel[queue];
   1941	bool xdp_prog = stmmac_xdp_is_enabled(priv);
   1942	struct page_pool_params pp_params = { 0 };
   1943	unsigned int num_pages;
   1944	unsigned int napi_id;
   1945	int ret;
   1946
   1947	rx_q->queue_index = queue;
   1948	rx_q->priv_data = priv;
   1949
   1950	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
   1951	pp_params.pool_size = priv->dma_rx_size;
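        	/* Derive the page-pool page order from the RX buffer size */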
   1952	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
   1953	pp_params.order = ilog2(num_pages);
   1954	pp_params.nid = dev_to_node(priv->device);
   1955	pp_params.dev = priv->device;
   1956	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
   1957	pp_params.offset = stmmac_rx_offset(priv);
   1958	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
   1959
   1960	rx_q->page_pool = page_pool_create(&pp_params);
   1961	if (IS_ERR(rx_q->page_pool)) {
   1962		ret = PTR_ERR(rx_q->page_pool);
   1963		rx_q->page_pool = NULL;
   1964		return ret;
   1965	}
   1966
   1967	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
   1968				 sizeof(*rx_q->buf_pool),
   1969				 GFP_KERNEL);
   1970	if (!rx_q->buf_pool)
   1971		return -ENOMEM;
   1972
   1973	if (priv->extend_desc) {
   1974		rx_q->dma_erx = dma_alloc_coherent(priv->device,
   1975						   priv->dma_rx_size *
   1976						   sizeof(struct dma_extended_desc),
   1977						   &rx_q->dma_rx_phy,
   1978						   GFP_KERNEL);
   1979		if (!rx_q->dma_erx)
   1980			return -ENOMEM;
   1981
   1982	} else {
   1983		rx_q->dma_rx = dma_alloc_coherent(priv->device,
   1984						  priv->dma_rx_size *
   1985						  sizeof(struct dma_desc),
   1986						  &rx_q->dma_rx_phy,
   1987						  GFP_KERNEL);
   1988		if (!rx_q->dma_rx)
   1989			return -ENOMEM;
   1990	}
   1991
   1992	if (stmmac_xdp_is_enabled(priv) &&
   1993	    test_bit(queue, priv->af_xdp_zc_qps))
   1994		napi_id = ch->rxtx_napi.napi_id;
   1995	else
   1996		napi_id = ch->rx_napi.napi_id;
   1997
   1998	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
   1999			       rx_q->queue_index,
   2000			       napi_id);
   2001	if (ret) {
   2002		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
   2003		return -EINVAL;
   2004	}
   2005
   2006	return 0;
   2007}
   2008
   2009static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
   2010{
   2011	u32 rx_count = priv->plat->rx_queues_to_use;
   2012	u32 queue;
   2013	int ret;
   2014
   2015	/* RX queues buffers and DMA */
   2016	for (queue = 0; queue < rx_count; queue++) {
   2017		ret = __alloc_dma_rx_desc_resources(priv, queue);
   2018		if (ret)
   2019			goto err_dma;
   2020	}
   2021
   2022	return 0;
   2023
   2024err_dma:
   2025	free_dma_rx_desc_resources(priv);
   2026
   2027	return ret;
   2028}
   2029
   2030/**
   2031 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
   2032 * @priv: private structure
   2033 * @queue: TX queue index
    2034 * Description: according to which descriptor can be used (extended or basic)
    2035 * this function allocates the resources for the TX path of the given
    2036 * queue: the skb and DMA bookkeeping arrays and the coherent memory for
    2037 * the descriptor ring.
   2038 */
   2039static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
   2040{
   2041	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   2042	size_t size;
   2043	void *addr;
   2044
   2045	tx_q->queue_index = queue;
   2046	tx_q->priv_data = priv;
   2047
   2048	tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
   2049				      sizeof(*tx_q->tx_skbuff_dma),
   2050				      GFP_KERNEL);
   2051	if (!tx_q->tx_skbuff_dma)
   2052		return -ENOMEM;
   2053
   2054	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
   2055				  sizeof(struct sk_buff *),
   2056				  GFP_KERNEL);
   2057	if (!tx_q->tx_skbuff)
   2058		return -ENOMEM;
   2059
   2060	if (priv->extend_desc)
   2061		size = sizeof(struct dma_extended_desc);
   2062	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   2063		size = sizeof(struct dma_edesc);
   2064	else
   2065		size = sizeof(struct dma_desc);
   2066
   2067	size *= priv->dma_tx_size;
   2068
   2069	addr = dma_alloc_coherent(priv->device, size,
   2070				  &tx_q->dma_tx_phy, GFP_KERNEL);
   2071	if (!addr)
   2072		return -ENOMEM;
   2073
   2074	if (priv->extend_desc)
   2075		tx_q->dma_etx = addr;
   2076	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   2077		tx_q->dma_entx = addr;
   2078	else
   2079		tx_q->dma_tx = addr;
   2080
   2081	return 0;
   2082}
   2083
   2084static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
   2085{
   2086	u32 tx_count = priv->plat->tx_queues_to_use;
   2087	u32 queue;
   2088	int ret;
   2089
   2090	/* TX queues buffers and DMA */
   2091	for (queue = 0; queue < tx_count; queue++) {
   2092		ret = __alloc_dma_tx_desc_resources(priv, queue);
   2093		if (ret)
   2094			goto err_dma;
   2095	}
   2096
   2097	return 0;
   2098
   2099err_dma:
   2100	free_dma_tx_desc_resources(priv);
   2101	return ret;
   2102}
   2103
   2104/**
   2105 * alloc_dma_desc_resources - alloc TX/RX resources.
   2106 * @priv: private structure
    2107 * Description: according to which descriptor can be used (extended or basic)
    2108 * this function allocates the resources for the TX and RX paths. In case of
    2109 * reception, for example, it pre-allocates the RX buffers in order to
    2110 * allow a zero-copy mechanism.
   2111 */
   2112static int alloc_dma_desc_resources(struct stmmac_priv *priv)
   2113{
   2114	/* RX Allocation */
   2115	int ret = alloc_dma_rx_desc_resources(priv);
   2116
   2117	if (ret)
   2118		return ret;
   2119
   2120	ret = alloc_dma_tx_desc_resources(priv);
   2121
   2122	return ret;
   2123}
   2124
   2125/**
   2126 * free_dma_desc_resources - free dma desc resources
   2127 * @priv: private structure
   2128 */
   2129static void free_dma_desc_resources(struct stmmac_priv *priv)
   2130{
   2131	/* Release the DMA TX socket buffers */
   2132	free_dma_tx_desc_resources(priv);
   2133
   2134	/* Release the DMA RX socket buffers later
   2135	 * to ensure all pending XDP_TX buffers are returned.
   2136	 */
   2137	free_dma_rx_desc_resources(priv);
   2138}
   2139
   2140/**
   2141 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
   2142 *  @priv: driver private structure
   2143 *  Description: It is used for enabling the rx queues in the MAC
   2144 */
   2145static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
   2146{
   2147	u32 rx_queues_count = priv->plat->rx_queues_to_use;
   2148	int queue;
   2149	u8 mode;
   2150
   2151	for (queue = 0; queue < rx_queues_count; queue++) {
   2152		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
   2153		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
   2154	}
   2155}
   2156
   2157/**
   2158 * stmmac_start_rx_dma - start RX DMA channel
   2159 * @priv: driver private structure
   2160 * @chan: RX channel index
   2161 * Description:
   2162 * This starts a RX DMA channel
   2163 */
   2164static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
   2165{
   2166	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
   2167	stmmac_start_rx(priv, priv->ioaddr, chan);
   2168}
   2169
   2170/**
   2171 * stmmac_start_tx_dma - start TX DMA channel
   2172 * @priv: driver private structure
   2173 * @chan: TX channel index
   2174 * Description:
   2175 * This starts a TX DMA channel
   2176 */
   2177static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
   2178{
   2179	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
   2180	stmmac_start_tx(priv, priv->ioaddr, chan);
   2181}
   2182
   2183/**
   2184 * stmmac_stop_rx_dma - stop RX DMA channel
   2185 * @priv: driver private structure
   2186 * @chan: RX channel index
   2187 * Description:
   2188 * This stops a RX DMA channel
   2189 */
   2190static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
   2191{
   2192	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
   2193	stmmac_stop_rx(priv, priv->ioaddr, chan);
   2194}
   2195
   2196/**
   2197 * stmmac_stop_tx_dma - stop TX DMA channel
   2198 * @priv: driver private structure
   2199 * @chan: TX channel index
   2200 * Description:
   2201 * This stops a TX DMA channel
   2202 */
   2203static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
   2204{
   2205	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
   2206	stmmac_stop_tx(priv, priv->ioaddr, chan);
   2207}
   2208
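        /* Enable RX and TX DMA interrupts on every DMA channel, under the channel lock */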
   2209static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
   2210{
   2211	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2212	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2213	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
   2214	u32 chan;
   2215
   2216	for (chan = 0; chan < dma_csr_ch; chan++) {
   2217		struct stmmac_channel *ch = &priv->channel[chan];
   2218		unsigned long flags;
   2219
   2220		spin_lock_irqsave(&ch->lock, flags);
   2221		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
   2222		spin_unlock_irqrestore(&ch->lock, flags);
   2223	}
   2224}
   2225
   2226/**
   2227 * stmmac_start_all_dma - start all RX and TX DMA channels
   2228 * @priv: driver private structure
   2229 * Description:
   2230 * This starts all the RX and TX DMA channels
   2231 */
   2232static void stmmac_start_all_dma(struct stmmac_priv *priv)
   2233{
   2234	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2235	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2236	u32 chan = 0;
   2237
   2238	for (chan = 0; chan < rx_channels_count; chan++)
   2239		stmmac_start_rx_dma(priv, chan);
   2240
   2241	for (chan = 0; chan < tx_channels_count; chan++)
   2242		stmmac_start_tx_dma(priv, chan);
   2243}
   2244
   2245/**
   2246 * stmmac_stop_all_dma - stop all RX and TX DMA channels
   2247 * @priv: driver private structure
   2248 * Description:
   2249 * This stops the RX and TX DMA channels
   2250 */
   2251static void stmmac_stop_all_dma(struct stmmac_priv *priv)
   2252{
   2253	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2254	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2255	u32 chan = 0;
   2256
   2257	for (chan = 0; chan < rx_channels_count; chan++)
   2258		stmmac_stop_rx_dma(priv, chan);
   2259
   2260	for (chan = 0; chan < tx_channels_count; chan++)
   2261		stmmac_stop_tx_dma(priv, chan);
   2262}
   2263
   2264/**
   2265 *  stmmac_dma_operation_mode - HW DMA operation mode
   2266 *  @priv: driver private structure
   2267 *  Description: it is used for configuring the DMA operation mode register in
   2268 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
   2269 */
   2270static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
   2271{
   2272	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2273	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2274	int rxfifosz = priv->plat->rx_fifo_size;
   2275	int txfifosz = priv->plat->tx_fifo_size;
   2276	u32 txmode = 0;
   2277	u32 rxmode = 0;
   2278	u32 chan = 0;
   2279	u8 qmode = 0;
   2280
   2281	if (rxfifosz == 0)
   2282		rxfifosz = priv->dma_cap.rx_fifo_size;
   2283	if (txfifosz == 0)
   2284		txfifosz = priv->dma_cap.tx_fifo_size;
   2285
   2286	/* Adjust for real per queue fifo size */
   2287	rxfifosz /= rx_channels_count;
   2288	txfifosz /= tx_channels_count;
   2289
   2290	if (priv->plat->force_thresh_dma_mode) {
   2291		txmode = tc;
   2292		rxmode = tc;
   2293	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
    2294		/*
    2295		 * On GMAC, Store-and-Forward (SF) mode can be enabled
    2296		 * to perform the TX COE in HW. This requires:
    2297		 * 1) TX COE to actually be supported, and
    2298		 * 2) no buggy Jumbo frame support that forces the
    2299		 *    checksum not to be inserted in the TDES.
    2300		 */
   2301		txmode = SF_DMA_MODE;
   2302		rxmode = SF_DMA_MODE;
   2303		priv->xstats.threshold = SF_DMA_MODE;
   2304	} else {
   2305		txmode = tc;
   2306		rxmode = SF_DMA_MODE;
   2307	}
   2308
   2309	/* configure all channels */
   2310	for (chan = 0; chan < rx_channels_count; chan++) {
   2311		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
   2312		u32 buf_size;
   2313
   2314		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
   2315
   2316		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
   2317				rxfifosz, qmode);
   2318
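        		/* For XSK zero-copy queues, program the DMA buffer size from
        		 * the pool's frame size; otherwise use the default dma_buf_sz.
        		 */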
   2319		if (rx_q->xsk_pool) {
   2320			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
   2321			stmmac_set_dma_bfsize(priv, priv->ioaddr,
   2322					      buf_size,
   2323					      chan);
   2324		} else {
   2325			stmmac_set_dma_bfsize(priv, priv->ioaddr,
   2326					      priv->dma_buf_sz,
   2327					      chan);
   2328		}
   2329	}
   2330
   2331	for (chan = 0; chan < tx_channels_count; chan++) {
   2332		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
   2333
   2334		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
   2335				txfifosz, qmode);
   2336	}
   2337}
   2338
   2339static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
   2340{
   2341	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
   2342	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   2343	struct xsk_buff_pool *pool = tx_q->xsk_pool;
   2344	unsigned int entry = tx_q->cur_tx;
   2345	struct dma_desc *tx_desc = NULL;
   2346	struct xdp_desc xdp_desc;
   2347	bool work_done = true;
   2348
   2349	/* Avoids TX time-out as we are sharing with slow path */
   2350	txq_trans_cond_update(nq);
   2351
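        	/* Never submit more frames than there are free TX descriptors */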
   2352	budget = min(budget, stmmac_tx_avail(priv, queue));
   2353
   2354	while (budget-- > 0) {
   2355		dma_addr_t dma_addr;
   2356		bool set_ic;
   2357
    2358		/* Since the ring is shared with the slow path, stop XSK TX desc
    2359		 * submission when the available TX ring space drops below the threshold.
    2360		 */
   2361		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
   2362		    !netif_carrier_ok(priv->dev)) {
   2363			work_done = false;
   2364			break;
   2365		}
   2366
   2367		if (!xsk_tx_peek_desc(pool, &xdp_desc))
   2368			break;
   2369
   2370		if (likely(priv->extend_desc))
   2371			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
   2372		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   2373			tx_desc = &tx_q->dma_entx[entry].basic;
   2374		else
   2375			tx_desc = tx_q->dma_tx + entry;
   2376
   2377		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
   2378		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
   2379
   2380		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
   2381
    2382		/* To return an XDP buffer to the XSK pool, we simply call
    2383		 * xsk_tx_completed(), so we don't need to fill in
    2384		 * 'buf' and 'xdpf'.
    2385		 */
   2386		tx_q->tx_skbuff_dma[entry].buf = 0;
   2387		tx_q->xdpf[entry] = NULL;
   2388
   2389		tx_q->tx_skbuff_dma[entry].map_as_page = false;
   2390		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
   2391		tx_q->tx_skbuff_dma[entry].last_segment = true;
   2392		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
   2393
   2394		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
   2395
   2396		tx_q->tx_count_frames++;
   2397
   2398		if (!priv->tx_coal_frames[queue])
   2399			set_ic = false;
   2400		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
   2401			set_ic = true;
   2402		else
   2403			set_ic = false;
   2404
   2405		if (set_ic) {
   2406			tx_q->tx_count_frames = 0;
   2407			stmmac_set_tx_ic(priv, tx_desc);
   2408			priv->xstats.tx_set_ic_bit++;
   2409		}
   2410
   2411		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
   2412				       true, priv->mode, true, true,
   2413				       xdp_desc.len);
   2414
   2415		stmmac_enable_dma_transmission(priv, priv->ioaddr);
   2416
   2417		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
   2418		entry = tx_q->cur_tx;
   2419	}
   2420
   2421	if (tx_desc) {
   2422		stmmac_flush_tx_descriptors(priv, queue);
   2423		xsk_tx_release(pool);
   2424	}
   2425
    2426	/* Return true only if both conditions are met:
    2427	 *  a) TX budget is still available, and
    2428	 *  b) work_done == true, i.e. the XSK TX desc peek came up empty
    2429	 *     (no more pending XSK TX frames for transmission)
    2430	 */
   2431	return !!budget && work_done;
   2432}
   2433
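        /**
         * stmmac_bump_dma_threshold - bump the TX DMA threshold after an error
         * @priv: driver private structure
         * @chan: channel index
         * Description: when a threshold-related TX error is reported and
         * Store-and-Forward mode is not in use, raise the threshold (tc) by 64
         * (while tc <= 256) and reprogram the DMA operation mode for @chan.
         */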
   2434static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
   2435{
   2436	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
   2437		tc += 64;
   2438
   2439		if (priv->plat->force_thresh_dma_mode)
   2440			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
   2441		else
   2442			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
   2443						      chan);
   2444
   2445		priv->xstats.threshold = tc;
   2446	}
   2447}
   2448
   2449/**
   2450 * stmmac_tx_clean - to manage the transmission completion
   2451 * @priv: driver private structure
    2452 * @budget: napi budget limiting this function's packet handling
   2453 * @queue: TX queue index
   2454 * Description: it reclaims the transmit resources after transmission completes.
   2455 */
   2456static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
   2457{
   2458	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   2459	unsigned int bytes_compl = 0, pkts_compl = 0;
   2460	unsigned int entry, xmits = 0, count = 0;
   2461
   2462	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
   2463
   2464	priv->xstats.tx_clean++;
   2465
   2466	tx_q->xsk_frames_done = 0;
   2467
   2468	entry = tx_q->dirty_tx;
   2469
    2470	/* Try to clean all completed TX frames in one shot */
   2471	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
   2472		struct xdp_frame *xdpf;
   2473		struct sk_buff *skb;
   2474		struct dma_desc *p;
   2475		int status;
   2476
   2477		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
   2478		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
   2479			xdpf = tx_q->xdpf[entry];
   2480			skb = NULL;
   2481		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
   2482			xdpf = NULL;
   2483			skb = tx_q->tx_skbuff[entry];
   2484		} else {
   2485			xdpf = NULL;
   2486			skb = NULL;
   2487		}
   2488
   2489		if (priv->extend_desc)
   2490			p = (struct dma_desc *)(tx_q->dma_etx + entry);
   2491		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   2492			p = &tx_q->dma_entx[entry].basic;
   2493		else
   2494			p = tx_q->dma_tx + entry;
   2495
   2496		status = stmmac_tx_status(priv, &priv->dev->stats,
   2497				&priv->xstats, p, priv->ioaddr);
   2498		/* Check if the descriptor is owned by the DMA */
   2499		if (unlikely(status & tx_dma_own))
   2500			break;
   2501
   2502		count++;
   2503
   2504		/* Make sure descriptor fields are read after reading
   2505		 * the own bit.
   2506		 */
   2507		dma_rmb();
   2508
   2509		/* Just consider the last segment and ...*/
   2510		if (likely(!(status & tx_not_ls))) {
   2511			/* ... verify the status error condition */
   2512			if (unlikely(status & tx_err)) {
   2513				priv->dev->stats.tx_errors++;
   2514				if (unlikely(status & tx_err_bump_tc))
   2515					stmmac_bump_dma_threshold(priv, queue);
   2516			} else {
   2517				priv->dev->stats.tx_packets++;
   2518				priv->xstats.tx_pkt_n++;
   2519				priv->xstats.txq_stats[queue].tx_pkt_n++;
   2520			}
   2521			if (skb)
   2522				stmmac_get_tx_hwtstamp(priv, p, skb);
   2523		}
   2524
   2525		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
   2526			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
   2527			if (tx_q->tx_skbuff_dma[entry].map_as_page)
   2528				dma_unmap_page(priv->device,
   2529					       tx_q->tx_skbuff_dma[entry].buf,
   2530					       tx_q->tx_skbuff_dma[entry].len,
   2531					       DMA_TO_DEVICE);
   2532			else
   2533				dma_unmap_single(priv->device,
   2534						 tx_q->tx_skbuff_dma[entry].buf,
   2535						 tx_q->tx_skbuff_dma[entry].len,
   2536						 DMA_TO_DEVICE);
   2537			tx_q->tx_skbuff_dma[entry].buf = 0;
   2538			tx_q->tx_skbuff_dma[entry].len = 0;
   2539			tx_q->tx_skbuff_dma[entry].map_as_page = false;
   2540		}
   2541
   2542		stmmac_clean_desc3(priv, tx_q, p);
   2543
   2544		tx_q->tx_skbuff_dma[entry].last_segment = false;
   2545		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
   2546
   2547		if (xdpf &&
   2548		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
   2549			xdp_return_frame_rx_napi(xdpf);
   2550			tx_q->xdpf[entry] = NULL;
   2551		}
   2552
   2553		if (xdpf &&
   2554		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
   2555			xdp_return_frame(xdpf);
   2556			tx_q->xdpf[entry] = NULL;
   2557		}
   2558
   2559		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
   2560			tx_q->xsk_frames_done++;
   2561
   2562		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
   2563			if (likely(skb)) {
   2564				pkts_compl++;
   2565				bytes_compl += skb->len;
   2566				dev_consume_skb_any(skb);
   2567				tx_q->tx_skbuff[entry] = NULL;
   2568			}
   2569		}
   2570
   2571		stmmac_release_tx_desc(priv, p, priv->mode);
   2572
   2573		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
   2574	}
   2575	tx_q->dirty_tx = entry;
   2576
   2577	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
   2578				  pkts_compl, bytes_compl);
   2579
   2580	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
   2581								queue))) &&
   2582	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
   2583
   2584		netif_dbg(priv, tx_done, priv->dev,
   2585			  "%s: restart transmit\n", __func__);
   2586		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
   2587	}
   2588
   2589	if (tx_q->xsk_pool) {
   2590		bool work_done;
   2591
   2592		if (tx_q->xsk_frames_done)
   2593			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
   2594
   2595		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
   2596			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
   2597
   2598		/* For XSK TX, we try to send as many as possible.
   2599		 * If XSK work done (XSK TX desc empty and budget still
   2600		 * available), return "budget - 1" to reenable TX IRQ.
   2601		 * Else, return "budget" to make NAPI continue polling.
   2602		 */
   2603		work_done = stmmac_xdp_xmit_zc(priv, queue,
   2604					       STMMAC_XSK_TX_BUDGET_MAX);
   2605		if (work_done)
   2606			xmits = budget - 1;
   2607		else
   2608			xmits = budget;
   2609	}
   2610
   2611	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
   2612	    priv->eee_sw_timer_en) {
   2613		if (stmmac_enable_eee_mode(priv))
   2614			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
   2615	}
   2616
   2617	/* We still have pending packets, let's call for a new scheduling */
   2618	if (tx_q->dirty_tx != tx_q->cur_tx)
   2619		hrtimer_start(&tx_q->txtimer,
   2620			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
   2621			      HRTIMER_MODE_REL);
   2622
   2623	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
   2624
   2625	/* Combine decisions from TX clean and XSK TX */
   2626	return max(count, xmits);
   2627}
   2628
   2629/**
   2630 * stmmac_tx_err - to manage the tx error
   2631 * @priv: driver private structure
   2632 * @chan: channel index
   2633 * Description: it cleans the descriptors and restarts the transmission
   2634 * in case of transmission errors.
   2635 */
   2636static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
   2637{
   2638	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   2639
   2640	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
   2641
   2642	stmmac_stop_tx_dma(priv, chan);
   2643	dma_free_tx_skbufs(priv, chan);
   2644	stmmac_clear_tx_descriptors(priv, chan);
   2645	tx_q->dirty_tx = 0;
   2646	tx_q->cur_tx = 0;
   2647	tx_q->mss = 0;
   2648	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
   2649	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   2650			    tx_q->dma_tx_phy, chan);
   2651	stmmac_start_tx_dma(priv, chan);
   2652
   2653	priv->dev->stats.tx_errors++;
   2654	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
   2655}
   2656
   2657/**
   2658 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
   2659 *  @priv: driver private structure
   2660 *  @txmode: TX operating mode
   2661 *  @rxmode: RX operating mode
   2662 *  @chan: channel index
    2663 *  Description: it is used for configuring the DMA operation mode at
   2664 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
   2665 *  mode.
   2666 */
   2667static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
   2668					  u32 rxmode, u32 chan)
   2669{
   2670	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
   2671	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
   2672	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2673	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2674	int rxfifosz = priv->plat->rx_fifo_size;
   2675	int txfifosz = priv->plat->tx_fifo_size;
   2676
   2677	if (rxfifosz == 0)
   2678		rxfifosz = priv->dma_cap.rx_fifo_size;
   2679	if (txfifosz == 0)
   2680		txfifosz = priv->dma_cap.tx_fifo_size;
   2681
   2682	/* Adjust for real per queue fifo size */
   2683	rxfifosz /= rx_channels_count;
   2684	txfifosz /= tx_channels_count;
   2685
   2686	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
   2687	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
   2688}
   2689
   2690static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
   2691{
   2692	int ret;
   2693
   2694	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
   2695			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
   2696	if (ret && (ret != -EINVAL)) {
   2697		stmmac_global_err(priv);
   2698		return true;
   2699	}
   2700
   2701	return false;
   2702}
   2703
   2704static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
   2705{
   2706	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
   2707						 &priv->xstats, chan, dir);
   2708	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
   2709	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   2710	struct stmmac_channel *ch = &priv->channel[chan];
   2711	struct napi_struct *rx_napi;
   2712	struct napi_struct *tx_napi;
   2713	unsigned long flags;
   2714
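        	/* Queues backed by an XSK pool are serviced by the combined rx/tx NAPI */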
   2715	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
   2716	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
   2717
   2718	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
   2719		if (napi_schedule_prep(rx_napi)) {
   2720			spin_lock_irqsave(&ch->lock, flags);
   2721			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
   2722			spin_unlock_irqrestore(&ch->lock, flags);
   2723			__napi_schedule(rx_napi);
   2724		}
   2725	}
   2726
   2727	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
   2728		if (napi_schedule_prep(tx_napi)) {
   2729			spin_lock_irqsave(&ch->lock, flags);
   2730			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
   2731			spin_unlock_irqrestore(&ch->lock, flags);
   2732			__napi_schedule(tx_napi);
   2733		}
   2734	}
   2735
   2736	return status;
   2737}
   2738
   2739/**
   2740 * stmmac_dma_interrupt - DMA ISR
   2741 * @priv: driver private structure
   2742 * Description: this is the DMA ISR. It is called by the main ISR.
    2743 * It calls the dwmac DMA routine and schedules the poll method in case
    2744 * some work can be done.
   2745 */
   2746static void stmmac_dma_interrupt(struct stmmac_priv *priv)
   2747{
   2748	u32 tx_channel_count = priv->plat->tx_queues_to_use;
   2749	u32 rx_channel_count = priv->plat->rx_queues_to_use;
   2750	u32 channels_to_check = tx_channel_count > rx_channel_count ?
   2751				tx_channel_count : rx_channel_count;
   2752	u32 chan;
   2753	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
   2754
   2755	/* Make sure we never check beyond our status buffer. */
   2756	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
   2757		channels_to_check = ARRAY_SIZE(status);
   2758
   2759	for (chan = 0; chan < channels_to_check; chan++)
   2760		status[chan] = stmmac_napi_check(priv, chan,
   2761						 DMA_DIR_RXTX);
   2762
   2763	for (chan = 0; chan < tx_channel_count; chan++) {
   2764		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
   2765			/* Try to bump up the dma threshold on this failure */
   2766			stmmac_bump_dma_threshold(priv, chan);
   2767		} else if (unlikely(status[chan] == tx_hard_error)) {
   2768			stmmac_tx_err(priv, chan);
   2769		}
   2770	}
   2771}
   2772
   2773/**
   2774 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
   2775 * @priv: driver private structure
   2776 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
   2777 */
   2778static void stmmac_mmc_setup(struct stmmac_priv *priv)
   2779{
   2780	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
   2781			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
   2782
   2783	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
   2784
   2785	if (priv->dma_cap.rmon) {
   2786		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
   2787		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
   2788	} else
   2789		netdev_info(priv->dev, "No MAC Management Counters available\n");
   2790}
   2791
   2792/**
   2793 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
   2794 * @priv: driver private structure
   2795 * Description:
    2796 *  new GMAC chip generations have a dedicated register to indicate the
    2797 *  presence of optional features/functions.
    2798 *  This can also be used to override the values passed through the
    2799 *  platform, which is necessary for old MAC10/100 and GMAC chips.
   2800 */
   2801static int stmmac_get_hw_features(struct stmmac_priv *priv)
   2802{
   2803	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
   2804}
   2805
   2806/**
   2807 * stmmac_check_ether_addr - check if the MAC addr is valid
   2808 * @priv: driver private structure
   2809 * Description:
    2810 * it verifies that the MAC address is valid; if it is not, it
    2811 * generates a random MAC address
   2812 */
   2813static void stmmac_check_ether_addr(struct stmmac_priv *priv)
   2814{
   2815	u8 addr[ETH_ALEN];
   2816
   2817	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
   2818		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
   2819		if (is_valid_ether_addr(addr))
   2820			eth_hw_addr_set(priv->dev, addr);
   2821		else
   2822			eth_hw_addr_random(priv->dev);
   2823		dev_info(priv->device, "device MAC address %pM\n",
   2824			 priv->dev->dev_addr);
   2825	}
   2826}
   2827
   2828/**
   2829 * stmmac_init_dma_engine - DMA init.
   2830 * @priv: driver private structure
   2831 * Description:
    2832 * It initializes the DMA by invoking the specific MAC/GMAC callback.
    2833 * Some DMA parameters can be passed from the platform;
    2834 * if they are not passed, a default is kept for the MAC or GMAC.
   2835 */
   2836static int stmmac_init_dma_engine(struct stmmac_priv *priv)
   2837{
   2838	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2839	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2840	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
   2841	struct stmmac_rx_queue *rx_q;
   2842	struct stmmac_tx_queue *tx_q;
   2843	u32 chan = 0;
   2844	int atds = 0;
   2845	int ret = 0;
   2846
   2847	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
   2848		dev_err(priv->device, "Invalid DMA configuration\n");
   2849		return -EINVAL;
   2850	}
   2851
   2852	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
   2853		atds = 1;
   2854
   2855	ret = stmmac_reset(priv, priv->ioaddr);
   2856	if (ret) {
   2857		dev_err(priv->device, "Failed to reset the dma\n");
   2858		return ret;
   2859	}
   2860
   2861	/* DMA Configuration */
   2862	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
   2863
   2864	if (priv->plat->axi)
   2865		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
   2866
   2867	/* DMA CSR Channel configuration */
   2868	for (chan = 0; chan < dma_csr_ch; chan++) {
   2869		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
   2870		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
   2871	}
   2872
   2873	/* DMA RX Channel Configuration */
   2874	for (chan = 0; chan < rx_channels_count; chan++) {
   2875		rx_q = &priv->rx_queue[chan];
   2876
   2877		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   2878				    rx_q->dma_rx_phy, chan);
   2879
   2880		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
   2881				     (rx_q->buf_alloc_num *
   2882				      sizeof(struct dma_desc));
   2883		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
   2884				       rx_q->rx_tail_addr, chan);
   2885	}
   2886
   2887	/* DMA TX Channel Configuration */
   2888	for (chan = 0; chan < tx_channels_count; chan++) {
   2889		tx_q = &priv->tx_queue[chan];
   2890
   2891		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   2892				    tx_q->dma_tx_phy, chan);
   2893
   2894		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
   2895		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
   2896				       tx_q->tx_tail_addr, chan);
   2897	}
   2898
   2899	return ret;
   2900}
   2901
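        /* (Re)arm the per-queue TX coalescing timer with the configured timeout */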
   2902static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
   2903{
   2904	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   2905
   2906	hrtimer_start(&tx_q->txtimer,
   2907		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
   2908		      HRTIMER_MODE_REL);
   2909}
   2910
   2911/**
   2912 * stmmac_tx_timer - mitigation sw timer for tx.
   2913 * @t: data pointer
   2914 * Description:
    2915 * This is the timer handler; it schedules the TX NAPI, which invokes stmmac_tx_clean.
   2916 */
   2917static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
   2918{
   2919	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
   2920	struct stmmac_priv *priv = tx_q->priv_data;
   2921	struct stmmac_channel *ch;
   2922	struct napi_struct *napi;
   2923
   2924	ch = &priv->channel[tx_q->queue_index];
   2925	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
   2926
   2927	if (likely(napi_schedule_prep(napi))) {
   2928		unsigned long flags;
   2929
   2930		spin_lock_irqsave(&ch->lock, flags);
   2931		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
   2932		spin_unlock_irqrestore(&ch->lock, flags);
   2933		__napi_schedule(napi);
   2934	}
   2935
   2936	return HRTIMER_NORESTART;
   2937}
   2938
   2939/**
   2940 * stmmac_init_coalesce - init mitigation options.
   2941 * @priv: driver private structure
   2942 * Description:
    2943 * This initializes the coalescing parameters: i.e. timer rate,
    2944 * timer handler and the default frame threshold used for setting the
    2945 * interrupt-on-completion bit.
   2946 */
   2947static void stmmac_init_coalesce(struct stmmac_priv *priv)
   2948{
   2949	u32 tx_channel_count = priv->plat->tx_queues_to_use;
   2950	u32 rx_channel_count = priv->plat->rx_queues_to_use;
   2951	u32 chan;
   2952
   2953	for (chan = 0; chan < tx_channel_count; chan++) {
   2954		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   2955
   2956		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
   2957		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
   2958
   2959		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
   2960		tx_q->txtimer.function = stmmac_tx_timer;
   2961	}
   2962
   2963	for (chan = 0; chan < rx_channel_count; chan++)
   2964		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
   2965}
   2966
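        /* Program the TX and RX descriptor ring lengths into the DMA channels */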
   2967static void stmmac_set_rings_length(struct stmmac_priv *priv)
   2968{
   2969	u32 rx_channels_count = priv->plat->rx_queues_to_use;
   2970	u32 tx_channels_count = priv->plat->tx_queues_to_use;
   2971	u32 chan;
   2972
   2973	/* set TX ring length */
   2974	for (chan = 0; chan < tx_channels_count; chan++)
   2975		stmmac_set_tx_ring_len(priv, priv->ioaddr,
   2976				       (priv->dma_tx_size - 1), chan);
   2977
   2978	/* set RX ring length */
   2979	for (chan = 0; chan < rx_channels_count; chan++)
   2980		stmmac_set_rx_ring_len(priv, priv->ioaddr,
   2981				       (priv->dma_rx_size - 1), chan);
   2982}
   2983
   2984/**
   2985 *  stmmac_set_tx_queue_weight - Set TX queue weight
   2986 *  @priv: driver private structure
    2987 *  Description: It is used for setting the TX queue weights
   2988 */
   2989static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
   2990{
   2991	u32 tx_queues_count = priv->plat->tx_queues_to_use;
   2992	u32 weight;
   2993	u32 queue;
   2994
   2995	for (queue = 0; queue < tx_queues_count; queue++) {
   2996		weight = priv->plat->tx_queues_cfg[queue].weight;
   2997		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
   2998	}
   2999}
   3000
   3001/**
   3002 *  stmmac_configure_cbs - Configure CBS in TX queue
   3003 *  @priv: driver private structure
   3004 *  Description: It is used for configuring CBS in AVB TX queues
   3005 */
   3006static void stmmac_configure_cbs(struct stmmac_priv *priv)
   3007{
   3008	u32 tx_queues_count = priv->plat->tx_queues_to_use;
   3009	u32 mode_to_use;
   3010	u32 queue;
   3011
   3012	/* queue 0 is reserved for legacy traffic */
   3013	for (queue = 1; queue < tx_queues_count; queue++) {
   3014		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
   3015		if (mode_to_use == MTL_QUEUE_DCB)
   3016			continue;
   3017
   3018		stmmac_config_cbs(priv, priv->hw,
   3019				priv->plat->tx_queues_cfg[queue].send_slope,
   3020				priv->plat->tx_queues_cfg[queue].idle_slope,
   3021				priv->plat->tx_queues_cfg[queue].high_credit,
   3022				priv->plat->tx_queues_cfg[queue].low_credit,
   3023				queue);
   3024	}
   3025}
   3026
   3027/**
   3028 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
   3029 *  @priv: driver private structure
   3030 *  Description: It is used for mapping RX queues to RX dma channels
   3031 */
   3032static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
   3033{
   3034	u32 rx_queues_count = priv->plat->rx_queues_to_use;
   3035	u32 queue;
   3036	u32 chan;
   3037
   3038	for (queue = 0; queue < rx_queues_count; queue++) {
   3039		chan = priv->plat->rx_queues_cfg[queue].chan;
   3040		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
   3041	}
   3042}
   3043
   3044/**
   3045 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
   3046 *  @priv: driver private structure
   3047 *  Description: It is used for configuring the RX Queue Priority
   3048 */
   3049static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
   3050{
   3051	u32 rx_queues_count = priv->plat->rx_queues_to_use;
   3052	u32 queue;
   3053	u32 prio;
   3054
   3055	for (queue = 0; queue < rx_queues_count; queue++) {
   3056		if (!priv->plat->rx_queues_cfg[queue].use_prio)
   3057			continue;
   3058
   3059		prio = priv->plat->rx_queues_cfg[queue].prio;
   3060		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
   3061	}
   3062}
   3063
   3064/**
   3065 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
   3066 *  @priv: driver private structure
   3067 *  Description: It is used for configuring the TX Queue Priority
   3068 */
   3069static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
   3070{
   3071	u32 tx_queues_count = priv->plat->tx_queues_to_use;
   3072	u32 queue;
   3073	u32 prio;
   3074
   3075	for (queue = 0; queue < tx_queues_count; queue++) {
   3076		if (!priv->plat->tx_queues_cfg[queue].use_prio)
   3077			continue;
   3078
   3079		prio = priv->plat->tx_queues_cfg[queue].prio;
   3080		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
   3081	}
   3082}
   3083
   3084/**
   3085 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
   3086 *  @priv: driver private structure
   3087 *  Description: It is used for configuring the RX queue routing
   3088 */
   3089static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
   3090{
   3091	u32 rx_queues_count = priv->plat->rx_queues_to_use;
   3092	u32 queue;
   3093	u8 packet;
   3094
   3095	for (queue = 0; queue < rx_queues_count; queue++) {
   3096		/* no specific packet type routing specified for the queue */
   3097		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
   3098			continue;
   3099
   3100		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
   3101		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
   3102	}
   3103}
   3104
   3105static void stmmac_mac_config_rss(struct stmmac_priv *priv)
   3106{
   3107	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
   3108		priv->rss.enable = false;
   3109		return;
   3110	}
   3111
   3112	if (priv->dev->features & NETIF_F_RXHASH)
   3113		priv->rss.enable = true;
   3114	else
   3115		priv->rss.enable = false;
   3116
   3117	stmmac_rss_configure(priv, priv->hw, &priv->rss,
   3118			     priv->plat->rx_queues_to_use);
   3119}
   3120
   3121/**
   3122 *  stmmac_mtl_configuration - Configure MTL
   3123 *  @priv: driver private structure
    3124 *  Description: It is used for configuring the MTL
   3125 */
   3126static void stmmac_mtl_configuration(struct stmmac_priv *priv)
   3127{
   3128	u32 rx_queues_count = priv->plat->rx_queues_to_use;
   3129	u32 tx_queues_count = priv->plat->tx_queues_to_use;
   3130
   3131	if (tx_queues_count > 1)
   3132		stmmac_set_tx_queue_weight(priv);
   3133
   3134	/* Configure MTL RX algorithms */
   3135	if (rx_queues_count > 1)
   3136		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
   3137				priv->plat->rx_sched_algorithm);
   3138
   3139	/* Configure MTL TX algorithms */
   3140	if (tx_queues_count > 1)
   3141		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
   3142				priv->plat->tx_sched_algorithm);
   3143
   3144	/* Configure CBS in AVB TX queues */
   3145	if (tx_queues_count > 1)
   3146		stmmac_configure_cbs(priv);
   3147
   3148	/* Map RX MTL to DMA channels */
   3149	stmmac_rx_queue_dma_chan_map(priv);
   3150
   3151	/* Enable MAC RX Queues */
   3152	stmmac_mac_enable_rx_queues(priv);
   3153
   3154	/* Set RX priorities */
   3155	if (rx_queues_count > 1)
   3156		stmmac_mac_config_rx_queues_prio(priv);
   3157
   3158	/* Set TX priorities */
   3159	if (tx_queues_count > 1)
   3160		stmmac_mac_config_tx_queues_prio(priv);
   3161
   3162	/* Set RX routing */
   3163	if (rx_queues_count > 1)
   3164		stmmac_mac_config_rx_queues_routing(priv);
   3165
   3166	/* Receive Side Scaling */
   3167	if (rx_queues_count > 1)
   3168		stmmac_mac_config_rss(priv);
   3169}
   3170
   3171static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
   3172{
   3173	if (priv->dma_cap.asp) {
   3174		netdev_info(priv->dev, "Enabling Safety Features\n");
   3175		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
   3176					  priv->plat->safety_feat_cfg);
   3177	} else {
   3178		netdev_info(priv->dev, "No Safety Features support found\n");
   3179	}
   3180}
   3181
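        /* Create the single-threaded workqueue used for Frame Preemption (FPE) handling */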
   3182static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
   3183{
   3184	char *name;
   3185
   3186	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
   3187	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
   3188
   3189	name = priv->wq_name;
   3190	sprintf(name, "%s-fpe", priv->dev->name);
   3191
   3192	priv->fpe_wq = create_singlethread_workqueue(name);
   3193	if (!priv->fpe_wq) {
   3194		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
   3195
   3196		return -ENOMEM;
   3197	}
    3198	netdev_info(priv->dev, "FPE workqueue started\n");
   3199
   3200	return 0;
   3201}
   3202
   3203/**
   3204 * stmmac_hw_setup - setup mac in a usable state.
   3205 *  @dev : pointer to the device structure.
   3206 *  @ptp_register: register PTP if set
   3207 *  Description:
    3208 *  this is the main function to set up the HW in a usable state: the
    3209 *  DMA engine is reset, the core registers are configured (e.g. AXI,
    3210 *  checksum features, timers) and the DMA is made ready to start
    3211 *  receiving and transmitting.
    3212 *  Return value:
    3213 *  0 on success and an appropriate negative integer as defined in
    3214 *  errno.h on failure.
   3215 */
   3216static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
   3217{
   3218	struct stmmac_priv *priv = netdev_priv(dev);
   3219	u32 rx_cnt = priv->plat->rx_queues_to_use;
   3220	u32 tx_cnt = priv->plat->tx_queues_to_use;
   3221	bool sph_en;
   3222	u32 chan;
   3223	int ret;
   3224
   3225	/* DMA initialization and SW reset */
   3226	ret = stmmac_init_dma_engine(priv);
   3227	if (ret < 0) {
   3228		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
   3229			   __func__);
   3230		return ret;
   3231	}
   3232
   3233	/* Copy the MAC addr into the HW  */
   3234	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
   3235
   3236	/* PS and related bits will be programmed according to the speed */
   3237	if (priv->hw->pcs) {
   3238		int speed = priv->plat->mac_port_sel_speed;
   3239
   3240		if ((speed == SPEED_10) || (speed == SPEED_100) ||
   3241		    (speed == SPEED_1000)) {
   3242			priv->hw->ps = speed;
   3243		} else {
   3244			dev_warn(priv->device, "invalid port speed\n");
   3245			priv->hw->ps = 0;
   3246		}
   3247	}
   3248
   3249	/* Initialize the MAC Core */
   3250	stmmac_core_init(priv, priv->hw, dev);
   3251
   3252	/* Initialize MTL*/
   3253	stmmac_mtl_configuration(priv);
   3254
   3255	/* Initialize Safety Features */
   3256	stmmac_safety_feat_configuration(priv);
   3257
   3258	ret = stmmac_rx_ipc(priv, priv->hw);
   3259	if (!ret) {
   3260		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
   3261		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
   3262		priv->hw->rx_csum = 0;
   3263	}
   3264
   3265	/* Enable the MAC Rx/Tx */
   3266	stmmac_mac_set(priv, priv->ioaddr, true);
   3267
   3268	/* Set the HW DMA mode and the COE */
   3269	stmmac_dma_operation_mode(priv);
   3270
   3271	stmmac_mmc_setup(priv);
   3272
   3273	ret = stmmac_init_ptp(priv);
   3274	if (ret == -EOPNOTSUPP)
   3275		netdev_info(priv->dev, "PTP not supported by HW\n");
   3276	else if (ret)
   3277		netdev_warn(priv->dev, "PTP init failed\n");
   3278	else if (ptp_register)
   3279		stmmac_ptp_register(priv);
   3280
   3281	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
   3282
   3283	/* Convert the timer from msec to usec */
   3284	if (!priv->tx_lpi_timer)
   3285		priv->tx_lpi_timer = eee_timer * 1000;
   3286
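        	/* Per-queue RX interrupt watchdog (RIWT): queues with no value
        	 * configured yet fall back to DEF_DMA_RIWT.
        	 */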
   3287	if (priv->use_riwt) {
   3288		u32 queue;
   3289
   3290		for (queue = 0; queue < rx_cnt; queue++) {
   3291			if (!priv->rx_riwt[queue])
   3292				priv->rx_riwt[queue] = DEF_DMA_RIWT;
   3293
   3294			stmmac_rx_watchdog(priv, priv->ioaddr,
   3295					   priv->rx_riwt[queue], queue);
   3296		}
   3297	}
   3298
   3299	if (priv->hw->pcs)
   3300		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
   3301
    3302	/* Set TX and RX ring lengths */
   3303	stmmac_set_rings_length(priv);
   3304
   3305	/* Enable TSO */
   3306	if (priv->tso) {
   3307		for (chan = 0; chan < tx_cnt; chan++) {
   3308			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   3309
   3310			/* TSO and TBS cannot co-exist */
   3311			if (tx_q->tbs & STMMAC_TBS_AVAIL)
   3312				continue;
   3313
   3314			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
   3315		}
   3316	}
   3317
   3318	/* Enable Split Header */
   3319	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
   3320	for (chan = 0; chan < rx_cnt; chan++)
   3321		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
   3322
   3323
   3324	/* VLAN Tag Insertion */
   3325	if (priv->dma_cap.vlins)
   3326		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
   3327
   3328	/* TBS */
   3329	for (chan = 0; chan < tx_cnt; chan++) {
   3330		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   3331		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
   3332
   3333		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
   3334	}
   3335
   3336	/* Configure real RX and TX queues */
   3337	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
   3338	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
   3339
   3340	/* Start the ball rolling... */
   3341	stmmac_start_all_dma(priv);
   3342
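        	/* Frame Preemption (FPE): bring up the FPE workqueue and, when
        	 * preemption is enabled in the platform config, start the FPE
        	 * handshake with the link partner.
        	 */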
   3343	if (priv->dma_cap.fpesel) {
   3344		stmmac_fpe_start_wq(priv);
   3345
   3346		if (priv->plat->fpe_cfg->enable)
   3347			stmmac_fpe_handshake(priv, true);
   3348	}
   3349
   3350	return 0;
   3351}
   3352
   3353static void stmmac_hw_teardown(struct net_device *dev)
   3354{
   3355	struct stmmac_priv *priv = netdev_priv(dev);
   3356
   3357	clk_disable_unprepare(priv->plat->clk_ptp_ref);
   3358}
   3359
   3360static void stmmac_free_irq(struct net_device *dev,
   3361			    enum request_irq_err irq_err, int irq_idx)
   3362{
   3363	struct stmmac_priv *priv = netdev_priv(dev);
   3364	int j;
   3365
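        	/* Tear down in reverse order of the request path: every case falls
        	 * through, so a failure at any request stage frees exactly the IRQs
        	 * that were successfully requested before it.
        	 */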
   3366	switch (irq_err) {
   3367	case REQ_IRQ_ERR_ALL:
   3368		irq_idx = priv->plat->tx_queues_to_use;
   3369		fallthrough;
   3370	case REQ_IRQ_ERR_TX:
   3371		for (j = irq_idx - 1; j >= 0; j--) {
   3372			if (priv->tx_irq[j] > 0) {
   3373				irq_set_affinity_hint(priv->tx_irq[j], NULL);
   3374				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
   3375			}
   3376		}
   3377		irq_idx = priv->plat->rx_queues_to_use;
   3378		fallthrough;
   3379	case REQ_IRQ_ERR_RX:
   3380		for (j = irq_idx - 1; j >= 0; j--) {
   3381			if (priv->rx_irq[j] > 0) {
   3382				irq_set_affinity_hint(priv->rx_irq[j], NULL);
   3383				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
   3384			}
   3385		}
   3386
   3387		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
   3388			free_irq(priv->sfty_ue_irq, dev);
   3389		fallthrough;
   3390	case REQ_IRQ_ERR_SFTY_UE:
   3391		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
   3392			free_irq(priv->sfty_ce_irq, dev);
   3393		fallthrough;
   3394	case REQ_IRQ_ERR_SFTY_CE:
   3395		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
   3396			free_irq(priv->lpi_irq, dev);
   3397		fallthrough;
   3398	case REQ_IRQ_ERR_LPI:
   3399		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
   3400			free_irq(priv->wol_irq, dev);
   3401		fallthrough;
   3402	case REQ_IRQ_ERR_WOL:
   3403		free_irq(dev->irq, dev);
   3404		fallthrough;
   3405	case REQ_IRQ_ERR_MAC:
   3406	case REQ_IRQ_ERR_NO:
    3407		/* If the MAC IRQ request failed, there are no more IRQs to free */
   3408		break;
   3409	}
   3410}
   3411
   3412static int stmmac_request_irq_multi_msi(struct net_device *dev)
   3413{
   3414	struct stmmac_priv *priv = netdev_priv(dev);
   3415	enum request_irq_err irq_err;
   3416	cpumask_t cpu_mask;
   3417	int irq_idx = 0;
   3418	char *int_name;
   3419	int ret;
   3420	int i;
   3421
   3422	/* For common interrupt */
   3423	int_name = priv->int_name_mac;
   3424	sprintf(int_name, "%s:%s", dev->name, "mac");
   3425	ret = request_irq(dev->irq, stmmac_mac_interrupt,
   3426			  0, int_name, dev);
   3427	if (unlikely(ret < 0)) {
   3428		netdev_err(priv->dev,
   3429			   "%s: alloc mac MSI %d (error: %d)\n",
   3430			   __func__, dev->irq, ret);
   3431		irq_err = REQ_IRQ_ERR_MAC;
   3432		goto irq_error;
   3433	}
   3434
    3435	/* Request the Wake IRQ in case another line
   3436	 * is used for WoL
   3437	 */
   3438	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
   3439		int_name = priv->int_name_wol;
   3440		sprintf(int_name, "%s:%s", dev->name, "wol");
   3441		ret = request_irq(priv->wol_irq,
   3442				  stmmac_mac_interrupt,
   3443				  0, int_name, dev);
   3444		if (unlikely(ret < 0)) {
   3445			netdev_err(priv->dev,
   3446				   "%s: alloc wol MSI %d (error: %d)\n",
   3447				   __func__, priv->wol_irq, ret);
   3448			irq_err = REQ_IRQ_ERR_WOL;
   3449			goto irq_error;
   3450		}
   3451	}
   3452
    3453	/* Request the LPI IRQ in case another line
   3454	 * is used for LPI
   3455	 */
   3456	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
   3457		int_name = priv->int_name_lpi;
   3458		sprintf(int_name, "%s:%s", dev->name, "lpi");
   3459		ret = request_irq(priv->lpi_irq,
   3460				  stmmac_mac_interrupt,
   3461				  0, int_name, dev);
   3462		if (unlikely(ret < 0)) {
   3463			netdev_err(priv->dev,
   3464				   "%s: alloc lpi MSI %d (error: %d)\n",
   3465				   __func__, priv->lpi_irq, ret);
   3466			irq_err = REQ_IRQ_ERR_LPI;
   3467			goto irq_error;
   3468		}
   3469	}
   3470
    3471	/* Request the Safety Feature Correctable Error line in
    3472	 * case another line is used
   3473	 */
   3474	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
   3475		int_name = priv->int_name_sfty_ce;
   3476		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
   3477		ret = request_irq(priv->sfty_ce_irq,
   3478				  stmmac_safety_interrupt,
   3479				  0, int_name, dev);
   3480		if (unlikely(ret < 0)) {
   3481			netdev_err(priv->dev,
   3482				   "%s: alloc sfty ce MSI %d (error: %d)\n",
   3483				   __func__, priv->sfty_ce_irq, ret);
   3484			irq_err = REQ_IRQ_ERR_SFTY_CE;
   3485			goto irq_error;
   3486		}
   3487	}
   3488
    3489	/* Request the Safety Feature Uncorrectable Error line in
    3490	 * case another line is used
   3491	 */
   3492	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
   3493		int_name = priv->int_name_sfty_ue;
   3494		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
   3495		ret = request_irq(priv->sfty_ue_irq,
   3496				  stmmac_safety_interrupt,
   3497				  0, int_name, dev);
   3498		if (unlikely(ret < 0)) {
   3499			netdev_err(priv->dev,
   3500				   "%s: alloc sfty ue MSI %d (error: %d)\n",
   3501				   __func__, priv->sfty_ue_irq, ret);
   3502			irq_err = REQ_IRQ_ERR_SFTY_UE;
   3503			goto irq_error;
   3504		}
   3505	}
   3506
   3507	/* Request Rx MSI irq */
   3508	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
   3509		if (i >= MTL_MAX_RX_QUEUES)
   3510			break;
   3511		if (priv->rx_irq[i] == 0)
   3512			continue;
   3513
   3514		int_name = priv->int_name_rx_irq[i];
   3515		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
   3516		ret = request_irq(priv->rx_irq[i],
   3517				  stmmac_msi_intr_rx,
   3518				  0, int_name, &priv->rx_queue[i]);
   3519		if (unlikely(ret < 0)) {
   3520			netdev_err(priv->dev,
   3521				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
   3522				   __func__, i, priv->rx_irq[i], ret);
   3523			irq_err = REQ_IRQ_ERR_RX;
   3524			irq_idx = i;
   3525			goto irq_error;
   3526		}
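        		/* Spread per-queue RX interrupts across the online CPUs. This is
        		 * only an affinity hint, not a hard binding.
        		 */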
   3527		cpumask_clear(&cpu_mask);
   3528		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
   3529		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
   3530	}
   3531
   3532	/* Request Tx MSI irq */
   3533	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
   3534		if (i >= MTL_MAX_TX_QUEUES)
   3535			break;
   3536		if (priv->tx_irq[i] == 0)
   3537			continue;
   3538
   3539		int_name = priv->int_name_tx_irq[i];
   3540		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
   3541		ret = request_irq(priv->tx_irq[i],
   3542				  stmmac_msi_intr_tx,
   3543				  0, int_name, &priv->tx_queue[i]);
   3544		if (unlikely(ret < 0)) {
   3545			netdev_err(priv->dev,
   3546				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
   3547				   __func__, i, priv->tx_irq[i], ret);
   3548			irq_err = REQ_IRQ_ERR_TX;
   3549			irq_idx = i;
   3550			goto irq_error;
   3551		}
   3552		cpumask_clear(&cpu_mask);
   3553		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
   3554		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
   3555	}
   3556
   3557	return 0;
   3558
   3559irq_error:
   3560	stmmac_free_irq(dev, irq_err, irq_idx);
   3561	return ret;
   3562}
   3563
   3564static int stmmac_request_irq_single(struct net_device *dev)
   3565{
   3566	struct stmmac_priv *priv = netdev_priv(dev);
   3567	enum request_irq_err irq_err;
   3568	int ret;
   3569
   3570	ret = request_irq(dev->irq, stmmac_interrupt,
   3571			  IRQF_SHARED, dev->name, dev);
   3572	if (unlikely(ret < 0)) {
   3573		netdev_err(priv->dev,
   3574			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
   3575			   __func__, dev->irq, ret);
   3576		irq_err = REQ_IRQ_ERR_MAC;
   3577		goto irq_error;
   3578	}
   3579
    3580	/* Request the Wake IRQ in case another line
   3581	 * is used for WoL
   3582	 */
   3583	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
   3584		ret = request_irq(priv->wol_irq, stmmac_interrupt,
   3585				  IRQF_SHARED, dev->name, dev);
   3586		if (unlikely(ret < 0)) {
   3587			netdev_err(priv->dev,
   3588				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
   3589				   __func__, priv->wol_irq, ret);
   3590			irq_err = REQ_IRQ_ERR_WOL;
   3591			goto irq_error;
   3592		}
   3593	}
   3594
    3595	/* Request the LPI IRQ in case another line is used for LPI */
   3596	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
   3597		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
   3598				  IRQF_SHARED, dev->name, dev);
   3599		if (unlikely(ret < 0)) {
   3600			netdev_err(priv->dev,
   3601				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
   3602				   __func__, priv->lpi_irq, ret);
   3603			irq_err = REQ_IRQ_ERR_LPI;
   3604			goto irq_error;
   3605		}
   3606	}
   3607
   3608	return 0;
   3609
   3610irq_error:
   3611	stmmac_free_irq(dev, irq_err, 0);
   3612	return ret;
   3613}
   3614
   3615static int stmmac_request_irq(struct net_device *dev)
   3616{
   3617	struct stmmac_priv *priv = netdev_priv(dev);
   3618	int ret;
   3619
   3620	/* Request the IRQ lines */
   3621	if (priv->plat->multi_msi_en)
   3622		ret = stmmac_request_irq_multi_msi(dev);
   3623	else
   3624		ret = stmmac_request_irq_single(dev);
   3625
   3626	return ret;
   3627}
   3628
   3629/**
   3630 *  stmmac_open - open entry point of the driver
   3631 *  @dev : pointer to the device structure.
   3632 *  Description:
   3633 *  This function is the open entry point of the driver.
   3634 *  Return value:
   3635 *  0 on success and an appropriate (-)ve integer as defined in errno.h
   3636 *  file on failure.
   3637 */
   3638static int stmmac_open(struct net_device *dev)
   3639{
   3640	struct stmmac_priv *priv = netdev_priv(dev);
   3641	int mode = priv->plat->phy_interface;
   3642	int bfsize = 0;
   3643	u32 chan;
   3644	int ret;
   3645
   3646	ret = pm_runtime_resume_and_get(priv->device);
   3647	if (ret < 0)
   3648		return ret;
   3649
   3650	if (priv->hw->pcs != STMMAC_PCS_TBI &&
   3651	    priv->hw->pcs != STMMAC_PCS_RTBI &&
   3652	    (!priv->hw->xpcs ||
   3653	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
   3654		ret = stmmac_init_phy(dev);
   3655		if (ret) {
   3656			netdev_err(priv->dev,
   3657				   "%s: Cannot attach to PHY (error: %d)\n",
   3658				   __func__, ret);
   3659			goto init_phy_error;
   3660		}
   3661	}
   3662
   3663	/* Extra statistics */
   3664	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
   3665	priv->xstats.threshold = tc;
   3666
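        	/* Derive the DMA buffer size from the MTU: use the 16KiB buffer
        	 * size when the set_16kib_bfsize() callback requests it, otherwise
        	 * fall back to the standard stmmac_set_bfsize() computation.
        	 */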
   3667	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
   3668	if (bfsize < 0)
   3669		bfsize = 0;
   3670
   3671	if (bfsize < BUF_SIZE_16KiB)
   3672		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
   3673
   3674	priv->dma_buf_sz = bfsize;
   3675	buf_sz = bfsize;
   3676
   3677	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
   3678
   3679	if (!priv->dma_tx_size)
   3680		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
   3681	if (!priv->dma_rx_size)
   3682		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
   3683
    3684	/* Early check for TBS, needed before TX descriptor allocation */
   3685	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
   3686		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
   3687		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
   3688
   3689		/* Setup per-TXQ tbs flag before TX descriptor alloc */
   3690		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
   3691	}
   3692
   3693	ret = alloc_dma_desc_resources(priv);
   3694	if (ret < 0) {
   3695		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
   3696			   __func__);
   3697		goto dma_desc_error;
   3698	}
   3699
   3700	ret = init_dma_desc_rings(dev, GFP_KERNEL);
   3701	if (ret < 0) {
   3702		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
   3703			   __func__);
   3704		goto init_error;
   3705	}
   3706
   3707	ret = stmmac_hw_setup(dev, true);
   3708	if (ret < 0) {
   3709		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
   3710		goto init_error;
   3711	}
   3712
   3713	stmmac_init_coalesce(priv);
   3714
   3715	phylink_start(priv->phylink);
   3716	/* We may have called phylink_speed_down before */
   3717	phylink_speed_up(priv->phylink);
   3718
   3719	ret = stmmac_request_irq(dev);
   3720	if (ret)
   3721		goto irq_error;
   3722
   3723	stmmac_enable_all_queues(priv);
   3724	netif_tx_start_all_queues(priv->dev);
   3725	stmmac_enable_all_dma_irq(priv);
   3726
   3727	return 0;
   3728
   3729irq_error:
   3730	phylink_stop(priv->phylink);
   3731
   3732	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
   3733		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
   3734
   3735	stmmac_hw_teardown(dev);
   3736init_error:
   3737	free_dma_desc_resources(priv);
   3738dma_desc_error:
   3739	phylink_disconnect_phy(priv->phylink);
   3740init_phy_error:
   3741	pm_runtime_put(priv->device);
   3742	return ret;
   3743}
   3744
   3745static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
   3746{
   3747	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
   3748
   3749	if (priv->fpe_wq)
   3750		destroy_workqueue(priv->fpe_wq);
   3751
   3752	netdev_info(priv->dev, "FPE workqueue stop");
   3753}
   3754
   3755/**
   3756 *  stmmac_release - close entry point of the driver
   3757 *  @dev : device pointer.
   3758 *  Description:
   3759 *  This is the stop entry point of the driver.
   3760 */
   3761static int stmmac_release(struct net_device *dev)
   3762{
   3763	struct stmmac_priv *priv = netdev_priv(dev);
   3764	u32 chan;
   3765
   3766	netif_tx_disable(dev);
   3767
   3768	if (device_may_wakeup(priv->device))
   3769		phylink_speed_down(priv->phylink, false);
   3770	/* Stop and disconnect the PHY */
   3771	phylink_stop(priv->phylink);
   3772	phylink_disconnect_phy(priv->phylink);
   3773
   3774	stmmac_disable_all_queues(priv);
   3775
   3776	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
   3777		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
   3778
   3779	/* Free the IRQ lines */
   3780	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
   3781
   3782	if (priv->eee_enabled) {
   3783		priv->tx_path_in_lpi_mode = false;
   3784		del_timer_sync(&priv->eee_ctrl_timer);
   3785	}
   3786
   3787	/* Stop TX/RX DMA and clear the descriptors */
   3788	stmmac_stop_all_dma(priv);
   3789
   3790	/* Release and free the Rx/Tx resources */
   3791	free_dma_desc_resources(priv);
   3792
   3793	/* Disable the MAC Rx/Tx */
   3794	stmmac_mac_set(priv, priv->ioaddr, false);
   3795
   3796	netif_carrier_off(dev);
   3797
   3798	stmmac_release_ptp(priv);
   3799
   3800	pm_runtime_put(priv->device);
   3801
   3802	if (priv->dma_cap.fpesel)
   3803		stmmac_fpe_stop_wq(priv);
   3804
   3805	return 0;
   3806}
   3807
   3808static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
   3809			       struct stmmac_tx_queue *tx_q)
   3810{
   3811	u16 tag = 0x0, inner_tag = 0x0;
   3812	u32 inner_type = 0x0;
   3813	struct dma_desc *p;
   3814
   3815	if (!priv->dma_cap.vlins)
   3816		return false;
   3817	if (!skb_vlan_tag_present(skb))
   3818		return false;
   3819	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
   3820		inner_tag = skb_vlan_tag_get(skb);
   3821		inner_type = STMMAC_VLAN_INSERT;
   3822	}
   3823
   3824	tag = skb_vlan_tag_get(skb);
   3825
   3826	if (tx_q->tbs & STMMAC_TBS_AVAIL)
   3827		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
   3828	else
   3829		p = &tx_q->dma_tx[tx_q->cur_tx];
   3830
   3831	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
   3832		return false;
   3833
   3834	stmmac_set_tx_owner(priv, p);
   3835	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
   3836	return true;
   3837}
   3838
   3839/**
    3840 *  stmmac_tso_allocator - allocate and fill descriptors for TSO payload
   3841 *  @priv: driver private structure
   3842 *  @des: buffer start address
   3843 *  @total_len: total length to fill in descriptors
   3844 *  @last_segment: condition for the last descriptor
   3845 *  @queue: TX queue index
   3846 *  Description:
   3847 *  This function fills descriptor and request new descriptors according to
   3848 *  buffer length to fill
   3849 */
   3850static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
   3851				 int total_len, bool last_segment, u32 queue)
   3852{
   3853	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   3854	struct dma_desc *desc;
   3855	u32 buff_size;
   3856	int tmp_len;
   3857
   3858	tmp_len = total_len;
   3859
   3860	while (tmp_len > 0) {
   3861		dma_addr_t curr_addr;
   3862
   3863		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
   3864						priv->dma_tx_size);
   3865		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
   3866
   3867		if (tx_q->tbs & STMMAC_TBS_AVAIL)
   3868			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
   3869		else
   3870			desc = &tx_q->dma_tx[tx_q->cur_tx];
   3871
   3872		curr_addr = des + (total_len - tmp_len);
   3873		if (priv->dma_cap.addr64 <= 32)
   3874			desc->des0 = cpu_to_le32(curr_addr);
   3875		else
   3876			stmmac_set_desc_addr(priv, desc, curr_addr);
   3877
   3878		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
   3879			    TSO_MAX_BUFF_SIZE : tmp_len;
   3880
   3881		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
   3882				0, 1,
   3883				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
   3884				0, 0);
   3885
   3886		tmp_len -= TSO_MAX_BUFF_SIZE;
   3887	}
   3888}
   3889
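        /* Hand the newly prepared TX descriptors to the hardware: once the ring
         * has been fully written (see the barrier below), advance the queue's
         * tail pointer register so the DMA engine picks up the new work.
         */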
   3890static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
   3891{
   3892	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   3893	int desc_size;
   3894
   3895	if (likely(priv->extend_desc))
   3896		desc_size = sizeof(struct dma_extended_desc);
   3897	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   3898		desc_size = sizeof(struct dma_edesc);
   3899	else
   3900		desc_size = sizeof(struct dma_desc);
   3901
    3902	/* The own bit must be the last setting done when preparing the
    3903	 * descriptor, and a barrier is then needed to make sure that
    3904	 * everything is coherent before granting ownership to the DMA engine.
   3905	 */
   3906	wmb();
   3907
   3908	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
   3909	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
   3910}
   3911
   3912/**
   3913 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
   3914 *  @skb : the socket buffer
   3915 *  @dev : device pointer
   3916 *  Description: this is the transmit function that is called on TSO frames
   3917 *  (support available on GMAC4 and newer chips).
    3918 *  The diagram below shows the ring programming in the case of TSO frames:
   3919 *
   3920 *  First Descriptor
   3921 *   --------
   3922 *   | DES0 |---> buffer1 = L2/L3/L4 header
   3923 *   | DES1 |---> TCP Payload (can continue on next descr...)
   3924 *   | DES2 |---> buffer 1 and 2 len
   3925 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
   3926 *   --------
   3927 *	|
   3928 *     ...
   3929 *	|
   3930 *   --------
   3931 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
   3932 *   | DES1 | --|
   3933 *   | DES2 | --> buffer 1 and 2 len
   3934 *   | DES3 |
   3935 *   --------
   3936 *
    3937 * The MSS is fixed while TSO is enabled, so the TDES3 context field is only reprogrammed when it changes.
   3938 */
   3939static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
   3940{
   3941	struct dma_desc *desc, *first, *mss_desc = NULL;
   3942	struct stmmac_priv *priv = netdev_priv(dev);
   3943	int nfrags = skb_shinfo(skb)->nr_frags;
   3944	u32 queue = skb_get_queue_mapping(skb);
   3945	unsigned int first_entry, tx_packets;
   3946	int tmp_pay_len = 0, first_tx;
   3947	struct stmmac_tx_queue *tx_q;
   3948	bool has_vlan, set_ic;
   3949	u8 proto_hdr_len, hdr;
   3950	u32 pay_len, mss;
   3951	dma_addr_t des;
   3952	int i;
   3953
   3954	tx_q = &priv->tx_queue[queue];
   3955	first_tx = tx_q->cur_tx;
   3956
   3957	/* Compute header lengths */
   3958	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
   3959		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
   3960		hdr = sizeof(struct udphdr);
   3961	} else {
   3962		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
   3963		hdr = tcp_hdrlen(skb);
   3964	}
   3965
    3966	/* Desc availability based on threshold should be safe enough */
   3967	if (unlikely(stmmac_tx_avail(priv, queue) <
   3968		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
   3969		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
   3970			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
   3971								queue));
   3972			/* This is a hard error, log it. */
   3973			netdev_err(priv->dev,
   3974				   "%s: Tx Ring full when queue awake\n",
   3975				   __func__);
   3976		}
   3977		return NETDEV_TX_BUSY;
   3978	}
   3979
   3980	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
   3981
   3982	mss = skb_shinfo(skb)->gso_size;
   3983
   3984	/* set new MSS value if needed */
   3985	if (mss != tx_q->mss) {
   3986		if (tx_q->tbs & STMMAC_TBS_AVAIL)
   3987			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
   3988		else
   3989			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
   3990
   3991		stmmac_set_mss(priv, mss_desc, mss);
   3992		tx_q->mss = mss;
   3993		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
   3994						priv->dma_tx_size);
   3995		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
   3996	}
   3997
   3998	if (netif_msg_tx_queued(priv)) {
   3999		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
   4000			__func__, hdr, proto_hdr_len, pay_len, mss);
   4001		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
   4002			skb->data_len);
   4003	}
   4004
   4005	/* Check if VLAN can be inserted by HW */
   4006	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
   4007
   4008	first_entry = tx_q->cur_tx;
   4009	WARN_ON(tx_q->tx_skbuff[first_entry]);
   4010
   4011	if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4012		desc = &tx_q->dma_entx[first_entry].basic;
   4013	else
   4014		desc = &tx_q->dma_tx[first_entry];
   4015	first = desc;
   4016
   4017	if (has_vlan)
   4018		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
   4019
   4020	/* first descriptor: fill Headers on Buf1 */
   4021	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
   4022			     DMA_TO_DEVICE);
   4023	if (dma_mapping_error(priv->device, des))
   4024		goto dma_map_err;
   4025
   4026	tx_q->tx_skbuff_dma[first_entry].buf = des;
   4027	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
   4028	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
   4029	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
   4030
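        	/* With 32-bit descriptor addressing the first descriptor carries two
        	 * buffers: the headers in buffer 1 (DES0) and the start of the payload
        	 * in buffer 2 (DES1). With wider addressing only one buffer is used
        	 * here and the payload starts in the following descriptors.
        	 */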
   4031	if (priv->dma_cap.addr64 <= 32) {
   4032		first->des0 = cpu_to_le32(des);
   4033
   4034		/* Fill start of payload in buff2 of first descriptor */
   4035		if (pay_len)
   4036			first->des1 = cpu_to_le32(des + proto_hdr_len);
   4037
   4038		/* If needed take extra descriptors to fill the remaining payload */
   4039		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
   4040	} else {
   4041		stmmac_set_desc_addr(priv, first, des);
   4042		tmp_pay_len = pay_len;
   4043		des += proto_hdr_len;
   4044		pay_len = 0;
   4045	}
   4046
   4047	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
   4048
   4049	/* Prepare fragments */
   4050	for (i = 0; i < nfrags; i++) {
   4051		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   4052
   4053		des = skb_frag_dma_map(priv->device, frag, 0,
   4054				       skb_frag_size(frag),
   4055				       DMA_TO_DEVICE);
   4056		if (dma_mapping_error(priv->device, des))
   4057			goto dma_map_err;
   4058
   4059		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
   4060				     (i == nfrags - 1), queue);
   4061
   4062		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
   4063		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
   4064		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
   4065		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
   4066	}
   4067
   4068	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
   4069
   4070	/* Only the last descriptor gets to point to the skb. */
   4071	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
   4072	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
   4073
   4074	/* Manage tx mitigation */
   4075	tx_packets = (tx_q->cur_tx + 1) - first_tx;
   4076	tx_q->tx_count_frames += tx_packets;
   4077
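        	/* Decide whether to request a completion interrupt (IC bit) for this
        	 * frame: always for HW-timestamped frames, otherwise only when the
        	 * per-queue frame coalescing threshold is crossed; if not, the TX
        	 * timer takes care of cleaning the ring.
        	 */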
   4078	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
   4079		set_ic = true;
   4080	else if (!priv->tx_coal_frames[queue])
   4081		set_ic = false;
   4082	else if (tx_packets > priv->tx_coal_frames[queue])
   4083		set_ic = true;
   4084	else if ((tx_q->tx_count_frames %
   4085		  priv->tx_coal_frames[queue]) < tx_packets)
   4086		set_ic = true;
   4087	else
   4088		set_ic = false;
   4089
   4090	if (set_ic) {
   4091		if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4092			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
   4093		else
   4094			desc = &tx_q->dma_tx[tx_q->cur_tx];
   4095
   4096		tx_q->tx_count_frames = 0;
   4097		stmmac_set_tx_ic(priv, desc);
   4098		priv->xstats.tx_set_ic_bit++;
   4099	}
   4100
   4101	/* We've used all descriptors we need for this skb, however,
   4102	 * advance cur_tx so that it references a fresh descriptor.
   4103	 * ndo_start_xmit will fill this descriptor the next time it's
   4104	 * called and stmmac_tx_clean may clean up to this descriptor.
   4105	 */
   4106	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
   4107
   4108	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
   4109		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
   4110			  __func__);
   4111		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
   4112	}
   4113
   4114	dev->stats.tx_bytes += skb->len;
   4115	priv->xstats.tx_tso_frames++;
   4116	priv->xstats.tx_tso_nfrags += nfrags;
   4117
   4118	if (priv->sarc_type)
   4119		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
   4120
   4121	skb_tx_timestamp(skb);
   4122
   4123	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
   4124		     priv->hwts_tx_en)) {
   4125		/* declare that device is doing timestamping */
   4126		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
   4127		stmmac_enable_tx_timestamp(priv, first);
   4128	}
   4129
   4130	/* Complete the first descriptor before granting the DMA */
   4131	stmmac_prepare_tso_tx_desc(priv, first, 1,
   4132			proto_hdr_len,
   4133			pay_len,
   4134			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
   4135			hdr / 4, (skb->len - proto_hdr_len));
   4136
   4137	/* If context desc is used to change MSS */
   4138	if (mss_desc) {
   4139		/* Make sure that first descriptor has been completely
   4140		 * written, including its own bit. This is because MSS is
   4141		 * actually before first descriptor, so we need to make
   4142		 * sure that MSS's own bit is the last thing written.
   4143		 */
   4144		dma_wmb();
   4145		stmmac_set_tx_owner(priv, mss_desc);
   4146	}
   4147
   4148	if (netif_msg_pktdata(priv)) {
   4149		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
   4150			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
   4151			tx_q->cur_tx, first, nfrags);
   4152		pr_info(">>> frame to be transmitted: ");
   4153		print_pkt(skb->data, skb_headlen(skb));
   4154	}
   4155
   4156	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
   4157
   4158	stmmac_flush_tx_descriptors(priv, queue);
   4159	stmmac_tx_timer_arm(priv, queue);
   4160
   4161	return NETDEV_TX_OK;
   4162
   4163dma_map_err:
   4164	dev_err(priv->device, "Tx dma map failed\n");
   4165	dev_kfree_skb(skb);
   4166	priv->dev->stats.tx_dropped++;
   4167	return NETDEV_TX_OK;
   4168}
   4169
   4170/**
   4171 *  stmmac_xmit - Tx entry point of the driver
   4172 *  @skb : the socket buffer
   4173 *  @dev : device pointer
   4174 *  Description : this is the tx entry point of the driver.
   4175 *  It programs the chain or the ring and supports oversized frames
    4176 *  and the SG feature.
   4177 */
   4178static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
   4179{
   4180	unsigned int first_entry, tx_packets, enh_desc;
   4181	struct stmmac_priv *priv = netdev_priv(dev);
   4182	unsigned int nopaged_len = skb_headlen(skb);
   4183	int i, csum_insertion = 0, is_jumbo = 0;
   4184	u32 queue = skb_get_queue_mapping(skb);
   4185	int nfrags = skb_shinfo(skb)->nr_frags;
   4186	int gso = skb_shinfo(skb)->gso_type;
   4187	struct dma_edesc *tbs_desc = NULL;
   4188	struct dma_desc *desc, *first;
   4189	struct stmmac_tx_queue *tx_q;
   4190	bool has_vlan, set_ic;
   4191	int entry, first_tx;
   4192	dma_addr_t des;
   4193
   4194	tx_q = &priv->tx_queue[queue];
   4195	first_tx = tx_q->cur_tx;
   4196
   4197	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
   4198		stmmac_disable_eee_mode(priv);
   4199
   4200	/* Manage oversized TCP frames for GMAC4 device */
   4201	if (skb_is_gso(skb) && priv->tso) {
   4202		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
   4203			return stmmac_tso_xmit(skb, dev);
   4204		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
   4205			return stmmac_tso_xmit(skb, dev);
   4206	}
   4207
   4208	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
   4209		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
   4210			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
   4211								queue));
   4212			/* This is a hard error, log it. */
   4213			netdev_err(priv->dev,
   4214				   "%s: Tx Ring full when queue awake\n",
   4215				   __func__);
   4216		}
   4217		return NETDEV_TX_BUSY;
   4218	}
   4219
   4220	/* Check if VLAN can be inserted by HW */
   4221	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
   4222
   4223	entry = tx_q->cur_tx;
   4224	first_entry = entry;
   4225	WARN_ON(tx_q->tx_skbuff[first_entry]);
   4226
   4227	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
   4228
   4229	if (likely(priv->extend_desc))
   4230		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
   4231	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4232		desc = &tx_q->dma_entx[entry].basic;
   4233	else
   4234		desc = tx_q->dma_tx + entry;
   4235
   4236	first = desc;
   4237
   4238	if (has_vlan)
   4239		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
   4240
   4241	enh_desc = priv->plat->enh_desc;
   4242	/* To program the descriptors according to the size of the frame */
   4243	if (enh_desc)
   4244		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
   4245
   4246	if (unlikely(is_jumbo)) {
   4247		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
   4248		if (unlikely(entry < 0) && (entry != -EINVAL))
   4249			goto dma_map_err;
   4250	}
   4251
   4252	for (i = 0; i < nfrags; i++) {
   4253		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   4254		int len = skb_frag_size(frag);
   4255		bool last_segment = (i == (nfrags - 1));
   4256
   4257		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
   4258		WARN_ON(tx_q->tx_skbuff[entry]);
   4259
   4260		if (likely(priv->extend_desc))
   4261			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
   4262		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4263			desc = &tx_q->dma_entx[entry].basic;
   4264		else
   4265			desc = tx_q->dma_tx + entry;
   4266
   4267		des = skb_frag_dma_map(priv->device, frag, 0, len,
   4268				       DMA_TO_DEVICE);
   4269		if (dma_mapping_error(priv->device, des))
   4270			goto dma_map_err; /* should reuse desc w/o issues */
   4271
   4272		tx_q->tx_skbuff_dma[entry].buf = des;
   4273
   4274		stmmac_set_desc_addr(priv, desc, des);
   4275
   4276		tx_q->tx_skbuff_dma[entry].map_as_page = true;
   4277		tx_q->tx_skbuff_dma[entry].len = len;
   4278		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
   4279		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
   4280
   4281		/* Prepare the descriptor and set the own bit too */
   4282		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
   4283				priv->mode, 1, last_segment, skb->len);
   4284	}
   4285
   4286	/* Only the last descriptor gets to point to the skb. */
   4287	tx_q->tx_skbuff[entry] = skb;
   4288	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
   4289
    4290	/* According to the coalesce parameter, the IC bit for the latest
    4291	 * segment is reset and the timer is restarted to clean the tx status.
    4292	 * This approach takes care of the fragments: desc is the first
    4293	 * element in case of no SG.
   4294	 */
   4295	tx_packets = (entry + 1) - first_tx;
   4296	tx_q->tx_count_frames += tx_packets;
   4297
   4298	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
   4299		set_ic = true;
   4300	else if (!priv->tx_coal_frames[queue])
   4301		set_ic = false;
   4302	else if (tx_packets > priv->tx_coal_frames[queue])
   4303		set_ic = true;
   4304	else if ((tx_q->tx_count_frames %
   4305		  priv->tx_coal_frames[queue]) < tx_packets)
   4306		set_ic = true;
   4307	else
   4308		set_ic = false;
   4309
   4310	if (set_ic) {
   4311		if (likely(priv->extend_desc))
   4312			desc = &tx_q->dma_etx[entry].basic;
   4313		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4314			desc = &tx_q->dma_entx[entry].basic;
   4315		else
   4316			desc = &tx_q->dma_tx[entry];
   4317
   4318		tx_q->tx_count_frames = 0;
   4319		stmmac_set_tx_ic(priv, desc);
   4320		priv->xstats.tx_set_ic_bit++;
   4321	}
   4322
   4323	/* We've used all descriptors we need for this skb, however,
   4324	 * advance cur_tx so that it references a fresh descriptor.
   4325	 * ndo_start_xmit will fill this descriptor the next time it's
   4326	 * called and stmmac_tx_clean may clean up to this descriptor.
   4327	 */
   4328	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
   4329	tx_q->cur_tx = entry;
   4330
   4331	if (netif_msg_pktdata(priv)) {
   4332		netdev_dbg(priv->dev,
   4333			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
   4334			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
   4335			   entry, first, nfrags);
   4336
   4337		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
   4338		print_pkt(skb->data, skb->len);
   4339	}
   4340
   4341	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
   4342		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
   4343			  __func__);
   4344		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
   4345	}
   4346
   4347	dev->stats.tx_bytes += skb->len;
   4348
   4349	if (priv->sarc_type)
   4350		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
   4351
   4352	skb_tx_timestamp(skb);
   4353
   4354	/* Ready to fill the first descriptor and set the OWN bit w/o any
   4355	 * problems because all the descriptors are actually ready to be
   4356	 * passed to the DMA engine.
   4357	 */
   4358	if (likely(!is_jumbo)) {
   4359		bool last_segment = (nfrags == 0);
   4360
   4361		des = dma_map_single(priv->device, skb->data,
   4362				     nopaged_len, DMA_TO_DEVICE);
   4363		if (dma_mapping_error(priv->device, des))
   4364			goto dma_map_err;
   4365
   4366		tx_q->tx_skbuff_dma[first_entry].buf = des;
   4367		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
   4368		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
   4369
   4370		stmmac_set_desc_addr(priv, first, des);
   4371
   4372		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
   4373		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
   4374
   4375		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
   4376			     priv->hwts_tx_en)) {
   4377			/* declare that device is doing timestamping */
   4378			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
   4379			stmmac_enable_tx_timestamp(priv, first);
   4380		}
   4381
   4382		/* Prepare the first descriptor setting the OWN bit too */
   4383		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
   4384				csum_insertion, priv->mode, 0, last_segment,
   4385				skb->len);
   4386	}
   4387
   4388	if (tx_q->tbs & STMMAC_TBS_EN) {
   4389		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
   4390
   4391		tbs_desc = &tx_q->dma_entx[first_entry];
   4392		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
   4393	}
   4394
   4395	stmmac_set_tx_owner(priv, first);
   4396
   4397	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
   4398
   4399	stmmac_enable_dma_transmission(priv, priv->ioaddr);
   4400
   4401	stmmac_flush_tx_descriptors(priv, queue);
   4402	stmmac_tx_timer_arm(priv, queue);
   4403
   4404	return NETDEV_TX_OK;
   4405
   4406dma_map_err:
   4407	netdev_err(priv->dev, "Tx DMA map failed\n");
   4408	dev_kfree_skb(skb);
   4409	priv->dev->stats.tx_dropped++;
   4410	return NETDEV_TX_OK;
   4411}
   4412
   4413static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
   4414{
   4415	struct vlan_ethhdr *veth;
   4416	__be16 vlan_proto;
   4417	u16 vlanid;
   4418
   4419	veth = (struct vlan_ethhdr *)skb->data;
   4420	vlan_proto = veth->h_vlan_proto;
   4421
   4422	if ((vlan_proto == htons(ETH_P_8021Q) &&
   4423	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
   4424	    (vlan_proto == htons(ETH_P_8021AD) &&
   4425	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
   4426		/* pop the vlan tag */
   4427		vlanid = ntohs(veth->h_vlan_TCI);
   4428		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
   4429		skb_pull(skb, VLAN_HLEN);
   4430		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
   4431	}
   4432}
   4433
   4434/**
    4435 * stmmac_rx_refill - refill the used preallocated RX buffers
   4436 * @priv: driver private structure
   4437 * @queue: RX queue index
    4438 * Description : this reallocates the page_pool buffers used by the
    4439 * zero-copy reception process.
   4440 */
   4441static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
   4442{
   4443	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   4444	int dirty = stmmac_rx_dirty(priv, queue);
   4445	unsigned int entry = rx_q->dirty_rx;
   4446	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
   4447
   4448	if (priv->dma_cap.addr64 <= 32)
   4449		gfp |= GFP_DMA32;
   4450
   4451	while (dirty-- > 0) {
   4452		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
   4453		struct dma_desc *p;
   4454		bool use_rx_wd;
   4455
   4456		if (priv->extend_desc)
   4457			p = (struct dma_desc *)(rx_q->dma_erx + entry);
   4458		else
   4459			p = rx_q->dma_rx + entry;
   4460
   4461		if (!buf->page) {
   4462			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
   4463			if (!buf->page)
   4464				break;
   4465		}
   4466
   4467		if (priv->sph && !buf->sec_page) {
   4468			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
   4469			if (!buf->sec_page)
   4470				break;
   4471
   4472			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
   4473		}
   4474
   4475		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
   4476
   4477		stmmac_set_desc_addr(priv, p, buf->addr);
   4478		if (priv->sph)
   4479			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
   4480		else
   4481			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
   4482		stmmac_refill_desc3(priv, rx_q, p);
   4483
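        		/* use_rx_wd selects whether this descriptor relies on the RX
        		 * watchdog (RIWT) for interrupt mitigation instead of raising an
        		 * immediate completion interrupt.
        		 */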
   4484		rx_q->rx_count_frames++;
   4485		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
   4486		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
   4487			rx_q->rx_count_frames = 0;
   4488
   4489		use_rx_wd = !priv->rx_coal_frames[queue];
   4490		use_rx_wd |= rx_q->rx_count_frames > 0;
   4491		if (!priv->use_riwt)
   4492			use_rx_wd = false;
   4493
   4494		dma_wmb();
   4495		stmmac_set_rx_owner(priv, p, use_rx_wd);
   4496
   4497		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
   4498	}
   4499	rx_q->dirty_rx = entry;
   4500	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
   4501			    (rx_q->dirty_rx * sizeof(struct dma_desc));
   4502	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
   4503}
   4504
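        /* With Split Header (SPH) enabled, buffer 1 of the first descriptor holds
         * the packet headers and buffer 2 holds the payload; these helpers work
         * out how many bytes of the frame landed in each buffer.
         */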
   4505static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
   4506				       struct dma_desc *p,
   4507				       int status, unsigned int len)
   4508{
   4509	unsigned int plen = 0, hlen = 0;
   4510	int coe = priv->hw->rx_csum;
   4511
   4512	/* Not first descriptor, buffer is always zero */
   4513	if (priv->sph && len)
   4514		return 0;
   4515
   4516	/* First descriptor, get split header length */
   4517	stmmac_get_rx_header_len(priv, p, &hlen);
   4518	if (priv->sph && hlen) {
   4519		priv->xstats.rx_split_hdr_pkt_n++;
   4520		return hlen;
   4521	}
   4522
   4523	/* First descriptor, not last descriptor and not split header */
   4524	if (status & rx_not_ls)
   4525		return priv->dma_buf_sz;
   4526
   4527	plen = stmmac_get_rx_frame_len(priv, p, coe);
   4528
   4529	/* First descriptor and last descriptor and not split header */
   4530	return min_t(unsigned int, priv->dma_buf_sz, plen);
   4531}
   4532
   4533static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
   4534				       struct dma_desc *p,
   4535				       int status, unsigned int len)
   4536{
   4537	int coe = priv->hw->rx_csum;
   4538	unsigned int plen = 0;
   4539
   4540	/* Not split header, buffer is not available */
   4541	if (!priv->sph)
   4542		return 0;
   4543
   4544	/* Not last descriptor */
   4545	if (status & rx_not_ls)
   4546		return priv->dma_buf_sz;
   4547
   4548	plen = stmmac_get_rx_frame_len(priv, p, coe);
   4549
   4550	/* Last descriptor */
   4551	return plen - len;
   4552}
   4553
   4554static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
   4555				struct xdp_frame *xdpf, bool dma_map)
   4556{
   4557	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   4558	unsigned int entry = tx_q->cur_tx;
   4559	struct dma_desc *tx_desc;
   4560	dma_addr_t dma_addr;
   4561	bool set_ic;
   4562
   4563	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
   4564		return STMMAC_XDP_CONSUMED;
   4565
   4566	if (likely(priv->extend_desc))
   4567		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
   4568	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
   4569		tx_desc = &tx_q->dma_entx[entry].basic;
   4570	else
   4571		tx_desc = tx_q->dma_tx + entry;
   4572
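        	/* Redirected frames (ndo_xdp_xmit) need a fresh DMA mapping, while
        	 * XDP_TX frames already live in a page_pool page and only need a
        	 * dma_sync towards the device.
        	 */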
   4573	if (dma_map) {
   4574		dma_addr = dma_map_single(priv->device, xdpf->data,
   4575					  xdpf->len, DMA_TO_DEVICE);
   4576		if (dma_mapping_error(priv->device, dma_addr))
   4577			return STMMAC_XDP_CONSUMED;
   4578
   4579		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
   4580	} else {
   4581		struct page *page = virt_to_page(xdpf->data);
   4582
   4583		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
   4584			   xdpf->headroom;
   4585		dma_sync_single_for_device(priv->device, dma_addr,
   4586					   xdpf->len, DMA_BIDIRECTIONAL);
   4587
   4588		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
   4589	}
   4590
   4591	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
   4592	tx_q->tx_skbuff_dma[entry].map_as_page = false;
   4593	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
   4594	tx_q->tx_skbuff_dma[entry].last_segment = true;
   4595	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
   4596
   4597	tx_q->xdpf[entry] = xdpf;
   4598
   4599	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
   4600
   4601	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
   4602			       true, priv->mode, true, true,
   4603			       xdpf->len);
   4604
   4605	tx_q->tx_count_frames++;
   4606
   4607	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
   4608		set_ic = true;
   4609	else
   4610		set_ic = false;
   4611
   4612	if (set_ic) {
   4613		tx_q->tx_count_frames = 0;
   4614		stmmac_set_tx_ic(priv, tx_desc);
   4615		priv->xstats.tx_set_ic_bit++;
   4616	}
   4617
   4618	stmmac_enable_dma_transmission(priv, priv->ioaddr);
   4619
   4620	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
   4621	tx_q->cur_tx = entry;
   4622
   4623	return STMMAC_XDP_TX;
   4624}
   4625
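        /* Map the current CPU to one of the TX queues in use (simple modulo) so
         * XDP transmissions are spread across queues without extra bookkeeping.
         */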
   4626static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
   4627				   int cpu)
   4628{
   4629	int index = cpu;
   4630
   4631	if (unlikely(index < 0))
   4632		index = 0;
   4633
   4634	while (index >= priv->plat->tx_queues_to_use)
   4635		index -= priv->plat->tx_queues_to_use;
   4636
   4637	return index;
   4638}
   4639
   4640static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
   4641				struct xdp_buff *xdp)
   4642{
   4643	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
   4644	int cpu = smp_processor_id();
   4645	struct netdev_queue *nq;
   4646	int queue;
   4647	int res;
   4648
   4649	if (unlikely(!xdpf))
   4650		return STMMAC_XDP_CONSUMED;
   4651
   4652	queue = stmmac_xdp_get_tx_queue(priv, cpu);
   4653	nq = netdev_get_tx_queue(priv->dev, queue);
   4654
   4655	__netif_tx_lock(nq, cpu);
   4656	/* Avoids TX time-out as we are sharing with slow path */
   4657	txq_trans_cond_update(nq);
   4658
   4659	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
   4660	if (res == STMMAC_XDP_TX)
   4661		stmmac_flush_tx_descriptors(priv, queue);
   4662
   4663	__netif_tx_unlock(nq);
   4664
   4665	return res;
   4666}
   4667
   4668static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
   4669				 struct bpf_prog *prog,
   4670				 struct xdp_buff *xdp)
   4671{
   4672	u32 act;
   4673	int res;
   4674
   4675	act = bpf_prog_run_xdp(prog, xdp);
   4676	switch (act) {
   4677	case XDP_PASS:
   4678		res = STMMAC_XDP_PASS;
   4679		break;
   4680	case XDP_TX:
   4681		res = stmmac_xdp_xmit_back(priv, xdp);
   4682		break;
   4683	case XDP_REDIRECT:
   4684		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
   4685			res = STMMAC_XDP_CONSUMED;
   4686		else
   4687			res = STMMAC_XDP_REDIRECT;
   4688		break;
   4689	default:
   4690		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
   4691		fallthrough;
   4692	case XDP_ABORTED:
   4693		trace_xdp_exception(priv->dev, prog, act);
   4694		fallthrough;
   4695	case XDP_DROP:
   4696		res = STMMAC_XDP_CONSUMED;
   4697		break;
   4698	}
   4699
   4700	return res;
   4701}
   4702
   4703static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
   4704					   struct xdp_buff *xdp)
   4705{
   4706	struct bpf_prog *prog;
   4707	int res;
   4708
   4709	prog = READ_ONCE(priv->xdp_prog);
   4710	if (!prog) {
   4711		res = STMMAC_XDP_PASS;
   4712		goto out;
   4713	}
   4714
   4715	res = __stmmac_xdp_run_prog(priv, prog, xdp);
   4716out:
   4717	return ERR_PTR(-res);
   4718}
   4719
   4720static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
   4721				   int xdp_status)
   4722{
   4723	int cpu = smp_processor_id();
   4724	int queue;
   4725
   4726	queue = stmmac_xdp_get_tx_queue(priv, cpu);
   4727
   4728	if (xdp_status & STMMAC_XDP_TX)
   4729		stmmac_tx_timer_arm(priv, queue);
   4730
   4731	if (xdp_status & STMMAC_XDP_REDIRECT)
   4732		xdp_do_flush();
   4733}
   4734
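        /* XDP_PASS on a zero-copy (XSK) buffer: the umem buffer has to go back
         * to the pool, so the frame and its metadata are copied into a freshly
         * allocated skb before being passed up the stack.
         */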
   4735static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
   4736					       struct xdp_buff *xdp)
   4737{
   4738	unsigned int metasize = xdp->data - xdp->data_meta;
   4739	unsigned int datasize = xdp->data_end - xdp->data;
   4740	struct sk_buff *skb;
   4741
   4742	skb = __napi_alloc_skb(&ch->rxtx_napi,
   4743			       xdp->data_end - xdp->data_hard_start,
   4744			       GFP_ATOMIC | __GFP_NOWARN);
   4745	if (unlikely(!skb))
   4746		return NULL;
   4747
   4748	skb_reserve(skb, xdp->data - xdp->data_hard_start);
   4749	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
   4750	if (metasize)
   4751		skb_metadata_set(skb, metasize);
   4752
   4753	return skb;
   4754}
   4755
   4756static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
   4757				   struct dma_desc *p, struct dma_desc *np,
   4758				   struct xdp_buff *xdp)
   4759{
   4760	struct stmmac_channel *ch = &priv->channel[queue];
   4761	unsigned int len = xdp->data_end - xdp->data;
   4762	enum pkt_hash_types hash_type;
   4763	int coe = priv->hw->rx_csum;
   4764	struct sk_buff *skb;
   4765	u32 hash;
   4766
   4767	skb = stmmac_construct_skb_zc(ch, xdp);
   4768	if (!skb) {
   4769		priv->dev->stats.rx_dropped++;
   4770		return;
   4771	}
   4772
   4773	stmmac_get_rx_hwtstamp(priv, p, np, skb);
   4774	stmmac_rx_vlan(priv->dev, skb);
   4775	skb->protocol = eth_type_trans(skb, priv->dev);
   4776
   4777	if (unlikely(!coe))
   4778		skb_checksum_none_assert(skb);
   4779	else
   4780		skb->ip_summed = CHECKSUM_UNNECESSARY;
   4781
   4782	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
   4783		skb_set_hash(skb, hash, hash_type);
   4784
   4785	skb_record_rx_queue(skb, queue);
   4786	napi_gro_receive(&ch->rxtx_napi, skb);
   4787
   4788	priv->dev->stats.rx_packets++;
   4789	priv->dev->stats.rx_bytes += len;
   4790}
   4791
   4792static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
   4793{
   4794	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   4795	unsigned int entry = rx_q->dirty_rx;
   4796	struct dma_desc *rx_desc = NULL;
   4797	bool ret = true;
   4798
   4799	budget = min(budget, stmmac_rx_dirty(priv, queue));
   4800
   4801	while (budget-- > 0 && entry != rx_q->cur_rx) {
   4802		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
   4803		dma_addr_t dma_addr;
   4804		bool use_rx_wd;
   4805
   4806		if (!buf->xdp) {
   4807			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
   4808			if (!buf->xdp) {
   4809				ret = false;
   4810				break;
   4811			}
   4812		}
   4813
   4814		if (priv->extend_desc)
   4815			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
   4816		else
   4817			rx_desc = rx_q->dma_rx + entry;
   4818
   4819		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
   4820		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
   4821		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
   4822		stmmac_refill_desc3(priv, rx_q, rx_desc);
   4823
   4824		rx_q->rx_count_frames++;
   4825		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
   4826		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
   4827			rx_q->rx_count_frames = 0;
   4828
   4829		use_rx_wd = !priv->rx_coal_frames[queue];
   4830		use_rx_wd |= rx_q->rx_count_frames > 0;
   4831		if (!priv->use_riwt)
   4832			use_rx_wd = false;
   4833
   4834		dma_wmb();
   4835		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
   4836
   4837		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
   4838	}
   4839
   4840	if (rx_desc) {
   4841		rx_q->dirty_rx = entry;
   4842		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
   4843				     (rx_q->dirty_rx * sizeof(struct dma_desc));
   4844		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
   4845	}
   4846
   4847	return ret;
   4848}
   4849
   4850static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
   4851{
   4852	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   4853	unsigned int count = 0, error = 0, len = 0;
   4854	int dirty = stmmac_rx_dirty(priv, queue);
   4855	unsigned int next_entry = rx_q->cur_rx;
   4856	unsigned int desc_size;
   4857	struct bpf_prog *prog;
   4858	bool failure = false;
   4859	int xdp_status = 0;
   4860	int status = 0;
   4861
   4862	if (netif_msg_rx_status(priv)) {
   4863		void *rx_head;
   4864
   4865		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
   4866		if (priv->extend_desc) {
   4867			rx_head = (void *)rx_q->dma_erx;
   4868			desc_size = sizeof(struct dma_extended_desc);
   4869		} else {
   4870			rx_head = (void *)rx_q->dma_rx;
   4871			desc_size = sizeof(struct dma_desc);
   4872		}
   4873
   4874		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
   4875				    rx_q->dma_rx_phy, desc_size);
   4876	}
   4877	while (count < limit) {
   4878		struct stmmac_rx_buffer *buf;
   4879		unsigned int buf1_len = 0;
   4880		struct dma_desc *np, *p;
   4881		int entry;
   4882		int res;
   4883
   4884		if (!count && rx_q->state_saved) {
   4885			error = rx_q->state.error;
   4886			len = rx_q->state.len;
   4887		} else {
   4888			rx_q->state_saved = false;
   4889			error = 0;
   4890			len = 0;
   4891		}
   4892
   4893		if (count >= limit)
   4894			break;
   4895
   4896read_again:
   4897		buf1_len = 0;
   4898		entry = next_entry;
   4899		buf = &rx_q->buf_pool[entry];
   4900
   4901		if (dirty >= STMMAC_RX_FILL_BATCH) {
   4902			failure = failure ||
   4903				  !stmmac_rx_refill_zc(priv, queue, dirty);
   4904			dirty = 0;
   4905		}
   4906
   4907		if (priv->extend_desc)
   4908			p = (struct dma_desc *)(rx_q->dma_erx + entry);
   4909		else
   4910			p = rx_q->dma_rx + entry;
   4911
   4912		/* read the status of the incoming frame */
   4913		status = stmmac_rx_status(priv, &priv->dev->stats,
   4914					  &priv->xstats, p);
   4915		/* check if managed by the DMA otherwise go ahead */
   4916		if (unlikely(status & dma_own))
   4917			break;
   4918
   4919		/* Prefetch the next RX descriptor */
   4920		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
   4921						priv->dma_rx_size);
   4922		next_entry = rx_q->cur_rx;
   4923
   4924		if (priv->extend_desc)
   4925			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
   4926		else
   4927			np = rx_q->dma_rx + next_entry;
   4928
   4929		prefetch(np);
   4930
    4931		/* Ensure a valid XSK buffer before proceeding */
   4932		if (!buf->xdp)
   4933			break;
   4934
   4935		if (priv->extend_desc)
   4936			stmmac_rx_extended_status(priv, &priv->dev->stats,
   4937						  &priv->xstats,
   4938						  rx_q->dma_erx + entry);
   4939		if (unlikely(status == discard_frame)) {
   4940			xsk_buff_free(buf->xdp);
   4941			buf->xdp = NULL;
   4942			dirty++;
   4943			error = 1;
   4944			if (!priv->hwts_rx_en)
   4945				priv->dev->stats.rx_errors++;
   4946		}
   4947
   4948		if (unlikely(error && (status & rx_not_ls)))
   4949			goto read_again;
   4950		if (unlikely(error)) {
   4951			count++;
   4952			continue;
   4953		}
   4954
   4955		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
   4956		if (likely(status & rx_not_ls)) {
   4957			xsk_buff_free(buf->xdp);
   4958			buf->xdp = NULL;
   4959			dirty++;
   4960			count++;
   4961			goto read_again;
   4962		}
   4963
   4964		/* XDP ZC frames only support the primary buffer for now */
   4965		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
   4966		len += buf1_len;
   4967
   4968		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
   4969		 * Type frames (LLC/LLC-SNAP)
   4970		 *
   4971		 * llc_snap is never checked in GMAC >= 4, so this ACS
   4972		 * feature is always disabled and packets need to be
   4973		 * stripped manually.
   4974		 */
   4975		if (likely(!(status & rx_not_ls)) &&
   4976		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
   4977		     unlikely(status != llc_snap))) {
   4978			buf1_len -= ETH_FCS_LEN;
   4979			len -= ETH_FCS_LEN;
   4980		}
   4981
   4982		/* RX buffer is good and fits into an XSK pool buffer */
   4983		buf->xdp->data_end = buf->xdp->data + buf1_len;
   4984		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
   4985
   4986		prog = READ_ONCE(priv->xdp_prog);
   4987		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
   4988
   4989		switch (res) {
   4990		case STMMAC_XDP_PASS:
   4991			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
   4992			xsk_buff_free(buf->xdp);
   4993			break;
   4994		case STMMAC_XDP_CONSUMED:
   4995			xsk_buff_free(buf->xdp);
   4996			priv->dev->stats.rx_dropped++;
   4997			break;
   4998		case STMMAC_XDP_TX:
   4999		case STMMAC_XDP_REDIRECT:
   5000			xdp_status |= res;
   5001			break;
   5002		}
   5003
   5004		buf->xdp = NULL;
   5005		dirty++;
   5006		count++;
   5007	}
   5008
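       	/* The last processed descriptor was not the frame's last segment:
       	 * save the per-queue state so the next NAPI poll can resume it.
       	 */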
   5009	if (status & rx_not_ls) {
   5010		rx_q->state_saved = true;
   5011		rx_q->state.error = error;
   5012		rx_q->state.len = len;
   5013	}
   5014
   5015	stmmac_finalize_xdp_rx(priv, xdp_status);
   5016
   5017	priv->xstats.rx_pkt_n += count;
   5018	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
   5019
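       	/* With the XSK need_wakeup flag in use, ask userspace to replenish
       	 * the fill ring only if refilling failed or dirty entries remain.
       	 */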
   5020	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
   5021		if (failure || stmmac_rx_dirty(priv, queue) > 0)
   5022			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
   5023		else
   5024			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
   5025
   5026		return (int)count;
   5027	}
   5028
   5029	return failure ? limit : (int)count;
   5030}
   5031
   5032/**
   5033 * stmmac_rx - manage the receive process
   5034 * @priv: driver private structure
   5035 * @limit: napi budget
   5036 * @queue: RX queue index.
   5037 * Description: this is the function called by the napi poll method.
   5038 * It gets all the frames inside the ring.
   5039 */
   5040static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
   5041{
   5042	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   5043	struct stmmac_channel *ch = &priv->channel[queue];
   5044	unsigned int count = 0, error = 0, len = 0;
   5045	int status = 0, coe = priv->hw->rx_csum;
   5046	unsigned int next_entry = rx_q->cur_rx;
   5047	enum dma_data_direction dma_dir;
   5048	unsigned int desc_size;
   5049	struct sk_buff *skb = NULL;
   5050	struct xdp_buff xdp;
   5051	int xdp_status = 0;
   5052	int buf_sz;
   5053
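       	/* Cache the page-pool DMA direction and round the buffer size up
       	 * to a whole number of pages for xdp_buff initialization.
       	 */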
   5054	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
   5055	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
   5056
   5057	if (netif_msg_rx_status(priv)) {
   5058		void *rx_head;
   5059
   5060		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
   5061		if (priv->extend_desc) {
   5062			rx_head = (void *)rx_q->dma_erx;
   5063			desc_size = sizeof(struct dma_extended_desc);
   5064		} else {
   5065			rx_head = (void *)rx_q->dma_rx;
   5066			desc_size = sizeof(struct dma_desc);
   5067		}
   5068
   5069		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
   5070				    rx_q->dma_rx_phy, desc_size);
   5071	}
   5072	while (count < limit) {
   5073		unsigned int buf1_len = 0, buf2_len = 0;
   5074		enum pkt_hash_types hash_type;
   5075		struct stmmac_rx_buffer *buf;
   5076		struct dma_desc *np, *p;
   5077		int entry;
   5078		u32 hash;
   5079
   5080		if (!count && rx_q->state_saved) {
   5081			skb = rx_q->state.skb;
   5082			error = rx_q->state.error;
   5083			len = rx_q->state.len;
   5084		} else {
   5085			rx_q->state_saved = false;
   5086			skb = NULL;
   5087			error = 0;
   5088			len = 0;
   5089		}
   5090
   5091		if (count >= limit)
   5092			break;
   5093
   5094read_again:
   5095		buf1_len = 0;
   5096		buf2_len = 0;
   5097		entry = next_entry;
   5098		buf = &rx_q->buf_pool[entry];
   5099
   5100		if (priv->extend_desc)
   5101			p = (struct dma_desc *)(rx_q->dma_erx + entry);
   5102		else
   5103			p = rx_q->dma_rx + entry;
   5104
   5105		/* read the status of the incoming frame */
   5106		status = stmmac_rx_status(priv, &priv->dev->stats,
   5107				&priv->xstats, p);
   5108		/* check if managed by the DMA otherwise go ahead */
   5109		if (unlikely(status & dma_own))
   5110			break;
   5111
   5112		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
   5113						priv->dma_rx_size);
   5114		next_entry = rx_q->cur_rx;
   5115
   5116		if (priv->extend_desc)
   5117			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
   5118		else
   5119			np = rx_q->dma_rx + next_entry;
   5120
   5121		prefetch(np);
   5122
   5123		if (priv->extend_desc)
   5124			stmmac_rx_extended_status(priv, &priv->dev->stats,
   5125					&priv->xstats, rx_q->dma_erx + entry);
   5126		if (unlikely(status == discard_frame)) {
   5127			page_pool_recycle_direct(rx_q->page_pool, buf->page);
   5128			buf->page = NULL;
   5129			error = 1;
   5130			if (!priv->hwts_rx_en)
   5131				priv->dev->stats.rx_errors++;
   5132		}
   5133
   5134		if (unlikely(error && (status & rx_not_ls)))
   5135			goto read_again;
   5136		if (unlikely(error)) {
   5137			dev_kfree_skb(skb);
   5138			skb = NULL;
   5139			count++;
   5140			continue;
   5141		}
   5142
   5143		/* Buffer is good. Go on. */
   5144
   5145		prefetch(page_address(buf->page) + buf->page_offset);
   5146		if (buf->sec_page)
   5147			prefetch(page_address(buf->sec_page));
   5148
   5149		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
   5150		len += buf1_len;
   5151		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
   5152		len += buf2_len;
   5153
   5154		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
   5155		 * Type frames (LLC/LLC-SNAP)
   5156		 *
   5157		 * llc_snap is never checked in GMAC >= 4, so this ACS
   5158		 * feature is always disabled and packets need to be
   5159		 * stripped manually.
   5160		 */
   5161		if (likely(!(status & rx_not_ls)) &&
   5162		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
   5163		     unlikely(status != llc_snap))) {
   5164			if (buf2_len) {
   5165				buf2_len -= ETH_FCS_LEN;
   5166				len -= ETH_FCS_LEN;
   5167			} else if (buf1_len) {
   5168				buf1_len -= ETH_FCS_LEN;
   5169				len -= ETH_FCS_LEN;
   5170			}
   5171		}
   5172
   5173		if (!skb) {
   5174			unsigned int pre_len, sync_len;
   5175
   5176			dma_sync_single_for_cpu(priv->device, buf->addr,
   5177						buf1_len, dma_dir);
   5178
   5179			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
   5180			xdp_prepare_buff(&xdp, page_address(buf->page),
   5181					 buf->page_offset, buf1_len, false);
   5182
   5183			pre_len = xdp.data_end - xdp.data_hard_start -
   5184				  buf->page_offset;
   5185			skb = stmmac_xdp_run_prog(priv, &xdp);
   5186			/* Due to xdp_adjust_tail: the DMA sync for_device
   5187			 * must cover the max length the CPU touched
   5188			 */
   5189			sync_len = xdp.data_end - xdp.data_hard_start -
   5190				   buf->page_offset;
   5191			sync_len = max(sync_len, pre_len);
   5192
   5193			/* For non-XDP_PASS verdicts */
   5194			if (IS_ERR(skb)) {
   5195				unsigned int xdp_res = -PTR_ERR(skb);
   5196
   5197				if (xdp_res & STMMAC_XDP_CONSUMED) {
   5198					page_pool_put_page(rx_q->page_pool,
   5199							   virt_to_head_page(xdp.data),
   5200							   sync_len, true);
   5201					buf->page = NULL;
   5202					priv->dev->stats.rx_dropped++;
   5203
   5204					/* Clear skb as it only carried the
   5205					 * status returned by the XDP program.
   5206					 */
   5207					skb = NULL;
   5208
   5209					if (unlikely((status & rx_not_ls)))
   5210						goto read_again;
   5211
   5212					count++;
   5213					continue;
   5214				} else if (xdp_res & (STMMAC_XDP_TX |
   5215						      STMMAC_XDP_REDIRECT)) {
   5216					xdp_status |= xdp_res;
   5217					buf->page = NULL;
   5218					skb = NULL;
   5219					count++;
   5220					continue;
   5221				}
   5222			}
   5223		}
   5224
   5225		if (!skb) {
   5226			/* XDP program may expand or reduce tail */
   5227			buf1_len = xdp.data_end - xdp.data;
   5228
   5229			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
   5230			if (!skb) {
   5231				priv->dev->stats.rx_dropped++;
   5232				count++;
   5233				goto drain_data;
   5234			}
   5235
   5236			/* XDP program may adjust header */
   5237			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
   5238			skb_put(skb, buf1_len);
   5239
   5240			/* Data payload copied into SKB, page ready for recycle */
   5241			page_pool_recycle_direct(rx_q->page_pool, buf->page);
   5242			buf->page = NULL;
   5243		} else if (buf1_len) {
   5244			dma_sync_single_for_cpu(priv->device, buf->addr,
   5245						buf1_len, dma_dir);
   5246			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
   5247					buf->page, buf->page_offset, buf1_len,
   5248					priv->dma_buf_sz);
   5249
   5250			/* Data payload appended into SKB */
   5251			page_pool_release_page(rx_q->page_pool, buf->page);
   5252			buf->page = NULL;
   5253		}
   5254
   5255		if (buf2_len) {
   5256			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
   5257						buf2_len, dma_dir);
   5258			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
   5259					buf->sec_page, 0, buf2_len,
   5260					priv->dma_buf_sz);
   5261
   5262			/* Data payload appended into SKB */
   5263			page_pool_release_page(rx_q->page_pool, buf->sec_page);
   5264			buf->sec_page = NULL;
   5265		}
   5266
   5267drain_data:
   5268		if (likely(status & rx_not_ls))
   5269			goto read_again;
   5270		if (!skb)
   5271			continue;
   5272
   5273		/* Got entire packet into SKB. Finish it. */
   5274
   5275		stmmac_get_rx_hwtstamp(priv, p, np, skb);
   5276		stmmac_rx_vlan(priv->dev, skb);
   5277		skb->protocol = eth_type_trans(skb, priv->dev);
   5278
   5279		if (unlikely(!coe))
   5280			skb_checksum_none_assert(skb);
   5281		else
   5282			skb->ip_summed = CHECKSUM_UNNECESSARY;
   5283
   5284		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
   5285			skb_set_hash(skb, hash, hash_type);
   5286
   5287		skb_record_rx_queue(skb, queue);
   5288		napi_gro_receive(&ch->rx_napi, skb);
   5289		skb = NULL;
   5290
   5291		priv->dev->stats.rx_packets++;
   5292		priv->dev->stats.rx_bytes += len;
   5293		count++;
   5294	}
   5295
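       	/* A frame is still in flight (more segments pending or an skb under
       	 * construction): save the per-queue state for the next NAPI poll.
       	 */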
   5296	if (status & rx_not_ls || skb) {
   5297		rx_q->state_saved = true;
   5298		rx_q->state.skb = skb;
   5299		rx_q->state.error = error;
   5300		rx_q->state.len = len;
   5301	}
   5302
   5303	stmmac_finalize_xdp_rx(priv, xdp_status);
   5304
   5305	stmmac_rx_refill(priv, queue);
   5306
   5307	priv->xstats.rx_pkt_n += count;
   5308	priv->xstats.rxq_stats[queue].rx_pkt_n += count;
   5309
   5310	return count;
   5311}
   5312
   5313static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
   5314{
   5315	struct stmmac_channel *ch =
   5316		container_of(napi, struct stmmac_channel, rx_napi);
   5317	struct stmmac_priv *priv = ch->priv_data;
   5318	u32 chan = ch->index;
   5319	int work_done;
   5320
   5321	priv->xstats.napi_poll++;
   5322
   5323	work_done = stmmac_rx(priv, budget, chan);
   5324	if (work_done < budget && napi_complete_done(napi, work_done)) {
   5325		unsigned long flags;
   5326
   5327		spin_lock_irqsave(&ch->lock, flags);
   5328		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
   5329		spin_unlock_irqrestore(&ch->lock, flags);
   5330	}
   5331
   5332	return work_done;
   5333}
   5334
   5335static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
   5336{
   5337	struct stmmac_channel *ch =
   5338		container_of(napi, struct stmmac_channel, tx_napi);
   5339	struct stmmac_priv *priv = ch->priv_data;
   5340	u32 chan = ch->index;
   5341	int work_done;
   5342
   5343	priv->xstats.napi_poll++;
   5344
   5345	work_done = stmmac_tx_clean(priv, budget, chan);
   5346	work_done = min(work_done, budget);
   5347
   5348	if (work_done < budget && napi_complete_done(napi, work_done)) {
   5349		unsigned long flags;
   5350
   5351		spin_lock_irqsave(&ch->lock, flags);
   5352		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
   5353		spin_unlock_irqrestore(&ch->lock, flags);
   5354	}
   5355
   5356	return work_done;
   5357}
   5358
   5359static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
   5360{
   5361	struct stmmac_channel *ch =
   5362		container_of(napi, struct stmmac_channel, rxtx_napi);
   5363	struct stmmac_priv *priv = ch->priv_data;
   5364	int rx_done, tx_done, rxtx_done;
   5365	u32 chan = ch->index;
   5366
   5367	priv->xstats.napi_poll++;
   5368
   5369	tx_done = stmmac_tx_clean(priv, budget, chan);
   5370	tx_done = min(tx_done, budget);
   5371
   5372	rx_done = stmmac_rx_zc(priv, budget, chan);
   5373
   5374	rxtx_done = max(tx_done, rx_done);
   5375
   5376	/* If either TX or RX work is not complete, return budget
   5377	 * and keep polling
   5378	 */
   5379	if (rxtx_done >= budget)
   5380		return budget;
   5381
   5382	/* all work done, exit the polling mode */
   5383	if (napi_complete_done(napi, rxtx_done)) {
   5384		unsigned long flags;
   5385
   5386		spin_lock_irqsave(&ch->lock, flags);
   5387		/* Both RX and TX work are complete,
   5388		 * so enable both RX & TX IRQs.
   5389		 */
   5390		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
   5391		spin_unlock_irqrestore(&ch->lock, flags);
   5392	}
   5393
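       	/* Never return the full budget after napi_complete_done() has run */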
   5394	return min(rxtx_done, budget - 1);
   5395}
   5396
   5397/**
   5398 *  stmmac_tx_timeout
   5399 *  @dev : Pointer to net device structure
   5400 *  @txqueue: the index of the hanging transmit queue
   5401 *  Description: this function is called when a packet transmission fails to
   5402 *   complete within a reasonable time. The driver will mark the error in the
   5403 *   netdev structure and arrange for the device to be reset to a sane state
   5404 *   in order to transmit a new packet.
   5405 */
   5406static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
   5407{
   5408	struct stmmac_priv *priv = netdev_priv(dev);
   5409
   5410	stmmac_global_err(priv);
   5411}
   5412
   5413/**
   5414 *  stmmac_set_rx_mode - entry point for multicast addressing
   5415 *  @dev : pointer to the device structure
   5416 *  Description:
   5417 *  This function is a driver entry point which gets called by the kernel
   5418 *  whenever multicast addresses must be enabled/disabled.
   5419 *  Return value:
   5420 *  void.
   5421 */
   5422static void stmmac_set_rx_mode(struct net_device *dev)
   5423{
   5424	struct stmmac_priv *priv = netdev_priv(dev);
   5425
   5426	stmmac_set_filter(priv, priv->hw, dev);
   5427}
   5428
   5429/**
   5430 *  stmmac_change_mtu - entry point to change MTU size for the device.
   5431 *  @dev : device pointer.
   5432 *  @new_mtu : the new MTU size for the device.
   5433 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
   5434 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
   5435 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
   5436 *  Return value:
   5437 *  0 on success and an appropriate (-)ve integer as defined in errno.h
   5438 *  file on failure.
   5439 */
   5440static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
   5441{
   5442	struct stmmac_priv *priv = netdev_priv(dev);
   5443	int txfifosz = priv->plat->tx_fifo_size;
   5444	const int mtu = new_mtu;
   5445
   5446	if (txfifosz == 0)
   5447		txfifosz = priv->dma_cap.tx_fifo_size;
   5448
   5449	txfifosz /= priv->plat->tx_queues_to_use;
   5450
   5451	if (netif_running(dev)) {
   5452		netdev_err(priv->dev, "must be stopped to change its MTU\n");
   5453		return -EBUSY;
   5454	}
   5455
   5456	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
   5457		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
   5458		return -EINVAL;
   5459	}
   5460
   5461	new_mtu = STMMAC_ALIGN(new_mtu);
   5462
   5463	/* If this condition is true, the FIFO is too small or the MTU is too large */
   5464	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
   5465		return -EINVAL;
   5466
   5467	dev->mtu = mtu;
   5468
   5469	netdev_update_features(dev);
   5470
   5471	return 0;
   5472}
   5473
   5474static netdev_features_t stmmac_fix_features(struct net_device *dev,
   5475					     netdev_features_t features)
   5476{
   5477	struct stmmac_priv *priv = netdev_priv(dev);
   5478
   5479	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
   5480		features &= ~NETIF_F_RXCSUM;
   5481
   5482	if (!priv->plat->tx_coe)
   5483		features &= ~NETIF_F_CSUM_MASK;
   5484
   5485	/* Some GMAC devices have buggy Jumbo frame support that
   5486	 * needs the Tx COE disabled for oversized frames
   5487	 * (due to limited buffer sizes). In this case we disable
   5488	 * the TX csum insertion in the TDES and do not use SF.
   5489	 */
   5490	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
   5491		features &= ~NETIF_F_CSUM_MASK;
   5492
   5493	/* Disable tso if asked by ethtool */
   5494	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
   5495		if (features & NETIF_F_TSO)
   5496			priv->tso = true;
   5497		else
   5498			priv->tso = false;
   5499	}
   5500
   5501	return features;
   5502}
   5503
   5504static int stmmac_set_features(struct net_device *netdev,
   5505			       netdev_features_t features)
   5506{
   5507	struct stmmac_priv *priv = netdev_priv(netdev);
   5508
   5509	/* Keep the COE Type in case csum offload is supported */
   5510	if (features & NETIF_F_RXCSUM)
   5511		priv->hw->rx_csum = priv->plat->rx_coe;
   5512	else
   5513		priv->hw->rx_csum = 0;
   5514	/* No check needed because rx_coe has been set before and it will be
   5515	 * fixed in case of issue.
   5516	 */
   5517	stmmac_rx_ipc(priv, priv->hw);
   5518
   5519	if (priv->sph_cap) {
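       		/* Split Header can only stay enabled while RX checksum
       		 * offload is active.
       		 */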
   5520		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
   5521		u32 chan;
   5522
   5523		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
   5524			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
   5525	}
   5526
   5527	return 0;
   5528}
   5529
   5530static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
   5531{
   5532	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
   5533	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
   5534	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
   5535	bool *hs_enable = &fpe_cfg->hs_enable;
   5536
   5537	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
   5538		return;
   5539
   5540	/* If LP has sent verify mPacket, LP is FPE capable */
   5541	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
   5542		if (*lp_state < FPE_STATE_CAPABLE)
   5543			*lp_state = FPE_STATE_CAPABLE;
   5544
   5545		/* If the user has requested FPE enable, respond quickly */
   5546		if (*hs_enable)
   5547			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
   5548						MPACKET_RESPONSE);
   5549	}
   5550
   5551	/* If Local has sent verify mPacket, Local is FPE capable */
   5552	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
   5553		if (*lo_state < FPE_STATE_CAPABLE)
   5554			*lo_state = FPE_STATE_CAPABLE;
   5555	}
   5556
   5557	/* If LP has sent response mPacket, LP is entering FPE ON */
   5558	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
   5559		*lp_state = FPE_STATE_ENTERING_ON;
   5560
   5561	/* If Local has sent response mPacket, Local is entering FPE ON */
   5562	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
   5563		*lo_state = FPE_STATE_ENTERING_ON;
   5564
   5565	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
   5566	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
   5567	    priv->fpe_wq) {
   5568		queue_work(priv->fpe_wq, &priv->fpe_task);
   5569	}
   5570}
   5571
   5572static void stmmac_common_interrupt(struct stmmac_priv *priv)
   5573{
   5574	u32 rx_cnt = priv->plat->rx_queues_to_use;
   5575	u32 tx_cnt = priv->plat->tx_queues_to_use;
   5576	u32 queues_count;
   5577	u32 queue;
   5578	bool xmac;
   5579
   5580	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
   5581	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
   5582
   5583	if (priv->irq_wake)
   5584		pm_wakeup_event(priv->device, 0);
   5585
   5586	if (priv->dma_cap.estsel)
   5587		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
   5588				      &priv->xstats, tx_cnt);
   5589
   5590	if (priv->dma_cap.fpesel) {
   5591		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
   5592						   priv->dev);
   5593
   5594		stmmac_fpe_event_status(priv, status);
   5595	}
   5596
   5597	/* To handle the GMAC's own interrupts */
   5598	if ((priv->plat->has_gmac) || xmac) {
   5599		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
   5600
   5601		if (unlikely(status)) {
   5602			/* For LPI we need to save the tx status */
   5603			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
   5604				priv->tx_path_in_lpi_mode = true;
   5605			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
   5606				priv->tx_path_in_lpi_mode = false;
   5607		}
   5608
   5609		for (queue = 0; queue < queues_count; queue++) {
   5610			status = stmmac_host_mtl_irq_status(priv, priv->hw,
   5611							    queue);
   5612		}
   5613
   5614		/* PCS link status */
   5615		if (priv->hw->pcs) {
   5616			if (priv->xstats.pcs_link)
   5617				netif_carrier_on(priv->dev);
   5618			else
   5619				netif_carrier_off(priv->dev);
   5620		}
   5621
   5622		stmmac_timestamp_interrupt(priv, priv);
   5623	}
   5624}
   5625
   5626/**
   5627 *  stmmac_interrupt - main ISR
   5628 *  @irq: interrupt number.
   5629 *  @dev_id: to pass the net device pointer.
   5630 *  Description: this is the main driver interrupt service routine.
   5631 *  It can call:
   5632 *  o DMA service routine (to manage incoming frame reception and transmission
   5633 *    status)
   5634 *  o Core interrupts to manage: remote wake-up, management counter, LPI
   5635 *    interrupts.
   5636 */
   5637static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
   5638{
   5639	struct net_device *dev = (struct net_device *)dev_id;
   5640	struct stmmac_priv *priv = netdev_priv(dev);
   5641
   5642	/* Check if adapter is up */
   5643	if (test_bit(STMMAC_DOWN, &priv->state))
   5644		return IRQ_HANDLED;
   5645
   5646	/* Check if a fatal error happened */
   5647	if (stmmac_safety_feat_interrupt(priv))
   5648		return IRQ_HANDLED;
   5649
   5650	/* To handle Common interrupts */
   5651	stmmac_common_interrupt(priv);
   5652
   5653	/* To handle DMA interrupts */
   5654	stmmac_dma_interrupt(priv);
   5655
   5656	return IRQ_HANDLED;
   5657}
   5658
   5659static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
   5660{
   5661	struct net_device *dev = (struct net_device *)dev_id;
   5662	struct stmmac_priv *priv = netdev_priv(dev);
   5663
   5664	if (unlikely(!dev)) {
   5665		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
   5666		return IRQ_NONE;
   5667	}
   5668
   5669	/* Check if adapter is up */
   5670	if (test_bit(STMMAC_DOWN, &priv->state))
   5671		return IRQ_HANDLED;
   5672
   5673	/* To handle Common interrupts */
   5674	stmmac_common_interrupt(priv);
   5675
   5676	return IRQ_HANDLED;
   5677}
   5678
   5679static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
   5680{
   5681	struct net_device *dev = (struct net_device *)dev_id;
   5682	struct stmmac_priv *priv = netdev_priv(dev);
   5683
   5684	if (unlikely(!dev)) {
   5685		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
   5686		return IRQ_NONE;
   5687	}
   5688
   5689	/* Check if adapter is up */
   5690	if (test_bit(STMMAC_DOWN, &priv->state))
   5691		return IRQ_HANDLED;
   5692
   5693	/* Check if a fatal error happened */
   5694	stmmac_safety_feat_interrupt(priv);
   5695
   5696	return IRQ_HANDLED;
   5697}
   5698
   5699static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
   5700{
   5701	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
   5702	int chan = tx_q->queue_index;
   5703	struct stmmac_priv *priv;
   5704	int status;
   5705
   5706	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
   5707
   5708	if (unlikely(!data)) {
   5709		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
   5710		return IRQ_NONE;
   5711	}
   5712
   5713	/* Check if adapter is up */
   5714	if (test_bit(STMMAC_DOWN, &priv->state))
   5715		return IRQ_HANDLED;
   5716
   5717	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
   5718
   5719	if (unlikely(status & tx_hard_error_bump_tc)) {
   5720		/* Try to bump up the dma threshold on this failure */
   5721		stmmac_bump_dma_threshold(priv, chan);
   5722	} else if (unlikely(status == tx_hard_error)) {
   5723		stmmac_tx_err(priv, chan);
   5724	}
   5725
   5726	return IRQ_HANDLED;
   5727}
   5728
   5729static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
   5730{
   5731	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
   5732	int chan = rx_q->queue_index;
   5733	struct stmmac_priv *priv;
   5734
   5735	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
   5736
   5737	if (unlikely(!data)) {
   5738		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
   5739		return IRQ_NONE;
   5740	}
   5741
   5742	/* Check if adapter is up */
   5743	if (test_bit(STMMAC_DOWN, &priv->state))
   5744		return IRQ_HANDLED;
   5745
   5746	stmmac_napi_check(priv, chan, DMA_DIR_RX);
   5747
   5748	return IRQ_HANDLED;
   5749}
   5750
   5751#ifdef CONFIG_NET_POLL_CONTROLLER
   5752/* Polling receive - used by NETCONSOLE and other diagnostic tools
   5753 * to allow network I/O with interrupts disabled.
   5754 */
   5755static void stmmac_poll_controller(struct net_device *dev)
   5756{
   5757	struct stmmac_priv *priv = netdev_priv(dev);
   5758	int i;
   5759
   5760	/* If adapter is down, do nothing */
   5761	if (test_bit(STMMAC_DOWN, &priv->state))
   5762		return;
   5763
   5764	if (priv->plat->multi_msi_en) {
   5765		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
   5766			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
   5767
   5768		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
   5769			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
   5770	} else {
   5771		disable_irq(dev->irq);
   5772		stmmac_interrupt(dev->irq, dev);
   5773		enable_irq(dev->irq);
   5774	}
   5775}
   5776#endif
   5777
   5778/**
   5779 *  stmmac_ioctl - Entry point for the Ioctl
   5780 *  @dev: Device pointer.
   5781 *  @rq: An IOCTL-specific structure that can contain a pointer to
   5782 *  a proprietary structure used to pass information to the driver.
   5783 *  @cmd: IOCTL command
   5784 *  Description:
   5785 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
   5786 */
   5787static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
   5788{
   5789	struct stmmac_priv *priv = netdev_priv(dev);
   5790	int ret = -EOPNOTSUPP;
   5791
   5792	if (!netif_running(dev))
   5793		return -EINVAL;
   5794
   5795	switch (cmd) {
   5796	case SIOCGMIIPHY:
   5797	case SIOCGMIIREG:
   5798	case SIOCSMIIREG:
   5799		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
   5800		break;
   5801	case SIOCSHWTSTAMP:
   5802		ret = stmmac_hwtstamp_set(dev, rq);
   5803		break;
   5804	case SIOCGHWTSTAMP:
   5805		ret = stmmac_hwtstamp_get(dev, rq);
   5806		break;
   5807	default:
   5808		break;
   5809	}
   5810
   5811	return ret;
   5812}
   5813
   5814static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
   5815				    void *cb_priv)
   5816{
   5817	struct stmmac_priv *priv = cb_priv;
   5818	int ret = -EOPNOTSUPP;
   5819
   5820	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
   5821		return ret;
   5822
   5823	__stmmac_disable_all_queues(priv);
   5824
   5825	switch (type) {
   5826	case TC_SETUP_CLSU32:
   5827		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
   5828		break;
   5829	case TC_SETUP_CLSFLOWER:
   5830		ret = stmmac_tc_setup_cls(priv, priv, type_data);
   5831		break;
   5832	default:
   5833		break;
   5834	}
   5835
   5836	stmmac_enable_all_queues(priv);
   5837	return ret;
   5838}
   5839
   5840static LIST_HEAD(stmmac_block_cb_list);
   5841
   5842static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
   5843			   void *type_data)
   5844{
   5845	struct stmmac_priv *priv = netdev_priv(ndev);
   5846
   5847	switch (type) {
   5848	case TC_SETUP_BLOCK:
   5849		return flow_block_cb_setup_simple(type_data,
   5850						  &stmmac_block_cb_list,
   5851						  stmmac_setup_tc_block_cb,
   5852						  priv, priv, true);
   5853	case TC_SETUP_QDISC_CBS:
   5854		return stmmac_tc_setup_cbs(priv, priv, type_data);
   5855	case TC_SETUP_QDISC_TAPRIO:
   5856		return stmmac_tc_setup_taprio(priv, priv, type_data);
   5857	case TC_SETUP_QDISC_ETF:
   5858		return stmmac_tc_setup_etf(priv, priv, type_data);
   5859	default:
   5860		return -EOPNOTSUPP;
   5861	}
   5862}
   5863
   5864static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
   5865			       struct net_device *sb_dev)
   5866{
   5867	int gso = skb_shinfo(skb)->gso_type;
   5868
   5869	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
   5870		/*
   5871		 * There is no way to determine the number of TSO/USO
   5872		 * capable Queues. Let's always use Queue 0
   5873		 * because if TSO/USO is supported then at least this
   5874		 * one will be capable.
   5875		 */
   5876		return 0;
   5877	}
   5878
   5879	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
   5880}
   5881
   5882static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
   5883{
   5884	struct stmmac_priv *priv = netdev_priv(ndev);
   5885	int ret = 0;
   5886
   5887	ret = pm_runtime_resume_and_get(priv->device);
   5888	if (ret < 0)
   5889		return ret;
   5890
   5891	ret = eth_mac_addr(ndev, addr);
   5892	if (ret)
   5893		goto set_mac_error;
   5894
   5895	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
   5896
   5897set_mac_error:
   5898	pm_runtime_put(priv->device);
   5899
   5900	return ret;
   5901}
   5902
   5903#ifdef CONFIG_DEBUG_FS
   5904static struct dentry *stmmac_fs_dir;
   5905
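       /* Dump one descriptor ring (basic or extended format) to the debugfs
        * seq_file, printing each descriptor with its DMA address.
        */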
   5906static void sysfs_display_ring(void *head, int size, int extend_desc,
   5907			       struct seq_file *seq, dma_addr_t dma_phy_addr)
   5908{
   5909	int i;
   5910	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
   5911	struct dma_desc *p = (struct dma_desc *)head;
   5912	dma_addr_t dma_addr;
   5913
   5914	for (i = 0; i < size; i++) {
   5915		if (extend_desc) {
   5916			dma_addr = dma_phy_addr + i * sizeof(*ep);
   5917			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
   5918				   i, &dma_addr,
   5919				   le32_to_cpu(ep->basic.des0),
   5920				   le32_to_cpu(ep->basic.des1),
   5921				   le32_to_cpu(ep->basic.des2),
   5922				   le32_to_cpu(ep->basic.des3));
   5923			ep++;
   5924		} else {
   5925			dma_addr = dma_phy_addr + i * sizeof(*p);
   5926			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
   5927				   i, &dma_addr,
   5928				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
   5929				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
   5930			p++;
   5931		}
   5932		seq_printf(seq, "\n");
   5933	}
   5934}
   5935
   5936static int stmmac_rings_status_show(struct seq_file *seq, void *v)
   5937{
   5938	struct net_device *dev = seq->private;
   5939	struct stmmac_priv *priv = netdev_priv(dev);
   5940	u32 rx_count = priv->plat->rx_queues_to_use;
   5941	u32 tx_count = priv->plat->tx_queues_to_use;
   5942	u32 queue;
   5943
   5944	if ((dev->flags & IFF_UP) == 0)
   5945		return 0;
   5946
   5947	for (queue = 0; queue < rx_count; queue++) {
   5948		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   5949
   5950		seq_printf(seq, "RX Queue %d:\n", queue);
   5951
   5952		if (priv->extend_desc) {
   5953			seq_printf(seq, "Extended descriptor ring:\n");
   5954			sysfs_display_ring((void *)rx_q->dma_erx,
   5955					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
   5956		} else {
   5957			seq_printf(seq, "Descriptor ring:\n");
   5958			sysfs_display_ring((void *)rx_q->dma_rx,
   5959					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
   5960		}
   5961	}
   5962
   5963	for (queue = 0; queue < tx_count; queue++) {
   5964		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   5965
   5966		seq_printf(seq, "TX Queue %d:\n", queue);
   5967
   5968		if (priv->extend_desc) {
   5969			seq_printf(seq, "Extended descriptor ring:\n");
   5970			sysfs_display_ring((void *)tx_q->dma_etx,
   5971					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
   5972		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
   5973			seq_printf(seq, "Descriptor ring:\n");
   5974			sysfs_display_ring((void *)tx_q->dma_tx,
   5975					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
   5976		}
   5977	}
   5978
   5979	return 0;
   5980}
   5981DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
   5982
   5983static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
   5984{
   5985	struct net_device *dev = seq->private;
   5986	struct stmmac_priv *priv = netdev_priv(dev);
   5987
   5988	if (!priv->hw_cap_support) {
   5989		seq_printf(seq, "DMA HW features not supported\n");
   5990		return 0;
   5991	}
   5992
   5993	seq_printf(seq, "==============================\n");
   5994	seq_printf(seq, "\tDMA HW features\n");
   5995	seq_printf(seq, "==============================\n");
   5996
   5997	seq_printf(seq, "\t10/100 Mbps: %s\n",
   5998		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
   5999	seq_printf(seq, "\t1000 Mbps: %s\n",
   6000		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
   6001	seq_printf(seq, "\tHalf duplex: %s\n",
   6002		   (priv->dma_cap.half_duplex) ? "Y" : "N");
   6003	seq_printf(seq, "\tHash Filter: %s\n",
   6004		   (priv->dma_cap.hash_filter) ? "Y" : "N");
   6005	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
   6006		   (priv->dma_cap.multi_addr) ? "Y" : "N");
   6007	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
   6008		   (priv->dma_cap.pcs) ? "Y" : "N");
   6009	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
   6010		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
   6011	seq_printf(seq, "\tPMT Remote wake up: %s\n",
   6012		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
   6013	seq_printf(seq, "\tPMT Magic Frame: %s\n",
   6014		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
   6015	seq_printf(seq, "\tRMON module: %s\n",
   6016		   (priv->dma_cap.rmon) ? "Y" : "N");
   6017	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
   6018		   (priv->dma_cap.time_stamp) ? "Y" : "N");
   6019	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
   6020		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
   6021	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
   6022		   (priv->dma_cap.eee) ? "Y" : "N");
   6023	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
   6024	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
   6025		   (priv->dma_cap.tx_coe) ? "Y" : "N");
   6026	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
   6027		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
   6028			   (priv->dma_cap.rx_coe) ? "Y" : "N");
   6029	} else {
   6030		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
   6031			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
   6032		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
   6033			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
   6034	}
   6035	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
   6036		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
   6037	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
   6038		   priv->dma_cap.number_rx_channel);
   6039	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
   6040		   priv->dma_cap.number_tx_channel);
   6041	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
   6042		   priv->dma_cap.number_rx_queues);
   6043	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
   6044		   priv->dma_cap.number_tx_queues);
   6045	seq_printf(seq, "\tEnhanced descriptors: %s\n",
   6046		   (priv->dma_cap.enh_desc) ? "Y" : "N");
   6047	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
   6048	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
   6049	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
   6050	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
   6051	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
   6052		   priv->dma_cap.pps_out_num);
   6053	seq_printf(seq, "\tSafety Features: %s\n",
   6054		   priv->dma_cap.asp ? "Y" : "N");
   6055	seq_printf(seq, "\tFlexible RX Parser: %s\n",
   6056		   priv->dma_cap.frpsel ? "Y" : "N");
   6057	seq_printf(seq, "\tEnhanced Addressing: %d\n",
   6058		   priv->dma_cap.addr64);
   6059	seq_printf(seq, "\tReceive Side Scaling: %s\n",
   6060		   priv->dma_cap.rssen ? "Y" : "N");
   6061	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
   6062		   priv->dma_cap.vlhash ? "Y" : "N");
   6063	seq_printf(seq, "\tSplit Header: %s\n",
   6064		   priv->dma_cap.sphen ? "Y" : "N");
   6065	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
   6066		   priv->dma_cap.vlins ? "Y" : "N");
   6067	seq_printf(seq, "\tDouble VLAN: %s\n",
   6068		   priv->dma_cap.dvlan ? "Y" : "N");
   6069	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
   6070		   priv->dma_cap.l3l4fnum);
   6071	seq_printf(seq, "\tARP Offloading: %s\n",
   6072		   priv->dma_cap.arpoffsel ? "Y" : "N");
   6073	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
   6074		   priv->dma_cap.estsel ? "Y" : "N");
   6075	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
   6076		   priv->dma_cap.fpesel ? "Y" : "N");
   6077	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
   6078		   priv->dma_cap.tbssel ? "Y" : "N");
   6079	return 0;
   6080}
   6081DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
   6082
   6083/* Use network device events to rename debugfs file entries.
   6084 */
   6085static int stmmac_device_event(struct notifier_block *unused,
   6086			       unsigned long event, void *ptr)
   6087{
   6088	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
   6089	struct stmmac_priv *priv = netdev_priv(dev);
   6090
   6091	if (dev->netdev_ops != &stmmac_netdev_ops)
   6092		goto done;
   6093
   6094	switch (event) {
   6095	case NETDEV_CHANGENAME:
   6096		if (priv->dbgfs_dir)
   6097			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
   6098							 priv->dbgfs_dir,
   6099							 stmmac_fs_dir,
   6100							 dev->name);
   6101		break;
   6102	}
   6103done:
   6104	return NOTIFY_DONE;
   6105}
   6106
   6107static struct notifier_block stmmac_notifier = {
   6108	.notifier_call = stmmac_device_event,
   6109};
   6110
   6111static void stmmac_init_fs(struct net_device *dev)
   6112{
   6113	struct stmmac_priv *priv = netdev_priv(dev);
   6114
   6115	rtnl_lock();
   6116
   6117	/* Create per netdev entries */
   6118	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
   6119
   6120	/* Entry to report DMA RX/TX rings */
   6121	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
   6122			    &stmmac_rings_status_fops);
   6123
   6124	/* Entry to report the DMA HW features */
   6125	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
   6126			    &stmmac_dma_cap_fops);
   6127
   6128	rtnl_unlock();
   6129}
   6130
   6131static void stmmac_exit_fs(struct net_device *dev)
   6132{
   6133	struct stmmac_priv *priv = netdev_priv(dev);
   6134
   6135	debugfs_remove_recursive(priv->dbgfs_dir);
   6136}
   6137#endif /* CONFIG_DEBUG_FS */
   6138
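       /* Compute the bitwise little-endian CRC-32 (polynomial 0xEDB88320) over
        * the 12 VID bits, as used by the hardware VLAN hash filter.
        */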
   6139static u32 stmmac_vid_crc32_le(__le16 vid_le)
   6140{
   6141	unsigned char *data = (unsigned char *)&vid_le;
   6142	unsigned char data_byte = 0;
   6143	u32 crc = ~0x0;
   6144	u32 temp = 0;
   6145	int i, bits;
   6146
   6147	bits = get_bitmask_order(VLAN_VID_MASK);
   6148	for (i = 0; i < bits; i++) {
   6149		if ((i % 8) == 0)
   6150			data_byte = data[i / 8];
   6151
   6152		temp = ((crc & 1) ^ data_byte) & 1;
   6153		crc >>= 1;
   6154		data_byte >>= 1;
   6155
   6156		if (temp)
   6157			crc ^= 0xedb88320;
   6158	}
   6159
   6160	return crc;
   6161}
   6162
   6163static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
   6164{
   6165	u32 crc, hash = 0;
   6166	__le16 pmatch = 0;
   6167	int count = 0;
   6168	u16 vid = 0;
   6169
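       	/* Build the 16-bin VLAN hash filter: each active VID sets the bit
       	 * selected by the top 4 bits of its bit-reversed CRC-32.
       	 */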
   6170	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
   6171		__le16 vid_le = cpu_to_le16(vid);
   6172		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
   6173		hash |= (1 << crc);
   6174		count++;
   6175	}
   6176
   6177	if (!priv->dma_cap.vlhash) {
   6178		if (count > 2) /* VID = 0 always passes filter */
   6179			return -EOPNOTSUPP;
   6180
   6181		pmatch = cpu_to_le16(vid);
   6182		hash = 0;
   6183	}
   6184
   6185	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
   6186}
   6187
   6188static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
   6189{
   6190	struct stmmac_priv *priv = netdev_priv(ndev);
   6191	bool is_double = false;
   6192	int ret;
   6193
   6194	if (be16_to_cpu(proto) == ETH_P_8021AD)
   6195		is_double = true;
   6196
   6197	set_bit(vid, priv->active_vlans);
   6198	ret = stmmac_vlan_update(priv, is_double);
   6199	if (ret) {
   6200		clear_bit(vid, priv->active_vlans);
   6201		return ret;
   6202	}
   6203
   6204	if (priv->hw->num_vlan) {
   6205		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
   6206		if (ret)
   6207			return ret;
   6208	}
   6209
   6210	return 0;
   6211}
   6212
   6213static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
   6214{
   6215	struct stmmac_priv *priv = netdev_priv(ndev);
   6216	bool is_double = false;
   6217	int ret;
   6218
   6219	ret = pm_runtime_resume_and_get(priv->device);
   6220	if (ret < 0)
   6221		return ret;
   6222
   6223	if (be16_to_cpu(proto) == ETH_P_8021AD)
   6224		is_double = true;
   6225
   6226	clear_bit(vid, priv->active_vlans);
   6227
   6228	if (priv->hw->num_vlan) {
   6229		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
   6230		if (ret)
   6231			goto del_vlan_error;
   6232	}
   6233
   6234	ret = stmmac_vlan_update(priv, is_double);
   6235
   6236del_vlan_error:
   6237	pm_runtime_put(priv->device);
   6238
   6239	return ret;
   6240}
   6241
   6242static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
   6243{
   6244	struct stmmac_priv *priv = netdev_priv(dev);
   6245
   6246	switch (bpf->command) {
   6247	case XDP_SETUP_PROG:
   6248		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
   6249	case XDP_SETUP_XSK_POOL:
   6250		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
   6251					     bpf->xsk.queue_id);
   6252	default:
   6253		return -EOPNOTSUPP;
   6254	}
   6255}
   6256
   6257static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
   6258			   struct xdp_frame **frames, u32 flags)
   6259{
   6260	struct stmmac_priv *priv = netdev_priv(dev);
   6261	int cpu = smp_processor_id();
   6262	struct netdev_queue *nq;
   6263	int i, nxmit = 0;
   6264	int queue;
   6265
   6266	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
   6267		return -ENETDOWN;
   6268
   6269	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
   6270		return -EINVAL;
   6271
   6272	queue = stmmac_xdp_get_tx_queue(priv, cpu);
   6273	nq = netdev_get_tx_queue(priv->dev, queue);
   6274
   6275	__netif_tx_lock(nq, cpu);
   6276	/* Avoids TX time-out as we are sharing with slow path */
   6277	txq_trans_cond_update(nq);
   6278
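       	/* Queue frames until the TX path rejects one; nxmit counts the
       	 * frames actually submitted.
       	 */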
   6279	for (i = 0; i < num_frames; i++) {
   6280		int res;
   6281
   6282		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
   6283		if (res == STMMAC_XDP_CONSUMED)
   6284			break;
   6285
   6286		nxmit++;
   6287	}
   6288
   6289	if (flags & XDP_XMIT_FLUSH) {
   6290		stmmac_flush_tx_descriptors(priv, queue);
   6291		stmmac_tx_timer_arm(priv, queue);
   6292	}
   6293
   6294	__netif_tx_unlock(nq);
   6295
   6296	return nxmit;
   6297}
   6298
   6299void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
   6300{
   6301	struct stmmac_channel *ch = &priv->channel[queue];
   6302	unsigned long flags;
   6303
   6304	spin_lock_irqsave(&ch->lock, flags);
   6305	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
   6306	spin_unlock_irqrestore(&ch->lock, flags);
   6307
   6308	stmmac_stop_rx_dma(priv, queue);
   6309	__free_dma_rx_desc_resources(priv, queue);
   6310}
   6311
   6312void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
   6313{
   6314	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   6315	struct stmmac_channel *ch = &priv->channel[queue];
   6316	unsigned long flags;
   6317	u32 buf_size;
   6318	int ret;
   6319
   6320	ret = __alloc_dma_rx_desc_resources(priv, queue);
   6321	if (ret) {
   6322		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
   6323		return;
   6324	}
   6325
   6326	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
   6327	if (ret) {
   6328		__free_dma_rx_desc_resources(priv, queue);
   6329		netdev_err(priv->dev, "Failed to init RX desc.\n");
   6330		return;
   6331	}
   6332
   6333	stmmac_clear_rx_descriptors(priv, queue);
   6334
   6335	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   6336			    rx_q->dma_rx_phy, rx_q->queue_index);
   6337
   6338	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
   6339			     sizeof(struct dma_desc));
   6340	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
   6341			       rx_q->rx_tail_addr, rx_q->queue_index);
   6342
   6343	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
   6344		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
   6345		stmmac_set_dma_bfsize(priv, priv->ioaddr,
   6346				      buf_size,
   6347				      rx_q->queue_index);
   6348	} else {
   6349		stmmac_set_dma_bfsize(priv, priv->ioaddr,
   6350				      priv->dma_buf_sz,
   6351				      rx_q->queue_index);
   6352	}
   6353
   6354	stmmac_start_rx_dma(priv, queue);
   6355
   6356	spin_lock_irqsave(&ch->lock, flags);
   6357	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
   6358	spin_unlock_irqrestore(&ch->lock, flags);
   6359}
   6360
   6361void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
   6362{
   6363	struct stmmac_channel *ch = &priv->channel[queue];
   6364	unsigned long flags;
   6365
   6366	spin_lock_irqsave(&ch->lock, flags);
   6367	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
   6368	spin_unlock_irqrestore(&ch->lock, flags);
   6369
   6370	stmmac_stop_tx_dma(priv, queue);
   6371	__free_dma_tx_desc_resources(priv, queue);
   6372}
   6373
   6374void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
   6375{
   6376	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   6377	struct stmmac_channel *ch = &priv->channel[queue];
   6378	unsigned long flags;
   6379	int ret;
   6380
   6381	ret = __alloc_dma_tx_desc_resources(priv, queue);
   6382	if (ret) {
   6383		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
   6384		return;
   6385	}
   6386
   6387	ret = __init_dma_tx_desc_rings(priv, queue);
   6388	if (ret) {
   6389		__free_dma_tx_desc_resources(priv, queue);
   6390		netdev_err(priv->dev, "Failed to init TX desc.\n");
   6391		return;
   6392	}
   6393
   6394	stmmac_clear_tx_descriptors(priv, queue);
   6395
   6396	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   6397			    tx_q->dma_tx_phy, tx_q->queue_index);
   6398
   6399	if (tx_q->tbs & STMMAC_TBS_AVAIL)
   6400		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
   6401
   6402	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
   6403	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
   6404			       tx_q->tx_tail_addr, tx_q->queue_index);
   6405
   6406	stmmac_start_tx_dma(priv, queue);
   6407
   6408	spin_lock_irqsave(&ch->lock, flags);
   6409	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
   6410	spin_unlock_irqrestore(&ch->lock, flags);
   6411}
   6412
   6413void stmmac_xdp_release(struct net_device *dev)
   6414{
   6415	struct stmmac_priv *priv = netdev_priv(dev);
   6416	u32 chan;
   6417
   6418	/* Disable NAPI process */
   6419	stmmac_disable_all_queues(priv);
   6420
   6421	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
   6422		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
   6423
   6424	/* Free the IRQ lines */
   6425	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
   6426
   6427	/* Stop TX/RX DMA channels */
   6428	stmmac_stop_all_dma(priv);
   6429
   6430	/* Release and free the Rx/Tx resources */
   6431	free_dma_desc_resources(priv);
   6432
   6433	/* Disable the MAC Rx/Tx */
   6434	stmmac_mac_set(priv, priv->ioaddr, false);
   6435
   6436	/* set trans_start so we don't get spurious
   6437	 * watchdogs during reset
   6438	 */
   6439	netif_trans_update(dev);
   6440	netif_carrier_off(dev);
   6441}
   6442
   6443int stmmac_xdp_open(struct net_device *dev)
   6444{
   6445	struct stmmac_priv *priv = netdev_priv(dev);
   6446	u32 rx_cnt = priv->plat->rx_queues_to_use;
   6447	u32 tx_cnt = priv->plat->tx_queues_to_use;
   6448	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
   6449	struct stmmac_rx_queue *rx_q;
   6450	struct stmmac_tx_queue *tx_q;
   6451	u32 buf_size;
   6452	bool sph_en;
   6453	u32 chan;
   6454	int ret;
   6455
   6456	ret = alloc_dma_desc_resources(priv);
   6457	if (ret < 0) {
   6458		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
   6459			   __func__);
   6460		goto dma_desc_error;
   6461	}
   6462
   6463	ret = init_dma_desc_rings(dev, GFP_KERNEL);
   6464	if (ret < 0) {
   6465		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
   6466			   __func__);
   6467		goto init_error;
   6468	}
   6469
   6470	/* DMA CSR Channel configuration */
   6471	for (chan = 0; chan < dma_csr_ch; chan++) {
   6472		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
   6473		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
   6474	}
   6475
   6476	/* Adjust Split header */
   6477	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
   6478
   6479	/* DMA RX Channel Configuration */
   6480	for (chan = 0; chan < rx_cnt; chan++) {
   6481		rx_q = &priv->rx_queue[chan];
   6482
   6483		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   6484				    rx_q->dma_rx_phy, chan);
   6485
   6486		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
   6487				     (rx_q->buf_alloc_num *
   6488				      sizeof(struct dma_desc));
   6489		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
   6490				       rx_q->rx_tail_addr, chan);
   6491
   6492		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
   6493			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
   6494			stmmac_set_dma_bfsize(priv, priv->ioaddr,
   6495					      buf_size,
   6496					      rx_q->queue_index);
   6497		} else {
   6498			stmmac_set_dma_bfsize(priv, priv->ioaddr,
   6499					      priv->dma_buf_sz,
   6500					      rx_q->queue_index);
   6501		}
   6502
   6503		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
   6504	}
   6505
   6506	/* DMA TX Channel Configuration */
   6507	for (chan = 0; chan < tx_cnt; chan++) {
   6508		tx_q = &priv->tx_queue[chan];
   6509
   6510		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
   6511				    tx_q->dma_tx_phy, chan);
   6512
   6513		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
   6514		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
   6515				       tx_q->tx_tail_addr, chan);
   6516
   6517		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
   6518		tx_q->txtimer.function = stmmac_tx_timer;
   6519	}
   6520
   6521	/* Enable the MAC Rx/Tx */
   6522	stmmac_mac_set(priv, priv->ioaddr, true);
   6523
   6524	/* Start Rx & Tx DMA Channels */
   6525	stmmac_start_all_dma(priv);
   6526
   6527	ret = stmmac_request_irq(dev);
   6528	if (ret)
   6529		goto irq_error;
   6530
   6531	/* Enable NAPI process */
   6532	stmmac_enable_all_queues(priv);
   6533	netif_carrier_on(dev);
   6534	netif_tx_start_all_queues(dev);
   6535	stmmac_enable_all_dma_irq(priv);
   6536
   6537	return 0;
   6538
   6539irq_error:
   6540	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
   6541		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
   6542
   6543	stmmac_hw_teardown(dev);
   6544init_error:
   6545	free_dma_desc_resources(priv);
   6546dma_desc_error:
   6547	return ret;
   6548}
   6549
   6550int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
   6551{
   6552	struct stmmac_priv *priv = netdev_priv(dev);
   6553	struct stmmac_rx_queue *rx_q;
   6554	struct stmmac_tx_queue *tx_q;
   6555	struct stmmac_channel *ch;
   6556
   6557	if (test_bit(STMMAC_DOWN, &priv->state) ||
   6558	    !netif_carrier_ok(priv->dev))
   6559		return -ENETDOWN;
   6560
   6561	if (!stmmac_xdp_is_enabled(priv))
   6562		return -EINVAL;
   6563
   6564	if (queue >= priv->plat->rx_queues_to_use ||
   6565	    queue >= priv->plat->tx_queues_to_use)
   6566		return -EINVAL;
   6567
   6568	rx_q = &priv->rx_queue[queue];
   6569	tx_q = &priv->tx_queue[queue];
   6570	ch = &priv->channel[queue];
   6571
   6572	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
   6573		return -EINVAL;
   6574
   6575	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
   6576		/* EQoS does not have per-DMA channel SW interrupt,
   6577		 * so we schedule RX Napi straight-away.
   6578		 */
   6579		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
   6580			__napi_schedule(&ch->rxtx_napi);
   6581	}
   6582
   6583	return 0;
   6584}
   6585
   6586static const struct net_device_ops stmmac_netdev_ops = {
   6587	.ndo_open = stmmac_open,
   6588	.ndo_start_xmit = stmmac_xmit,
   6589	.ndo_stop = stmmac_release,
   6590	.ndo_change_mtu = stmmac_change_mtu,
   6591	.ndo_fix_features = stmmac_fix_features,
   6592	.ndo_set_features = stmmac_set_features,
   6593	.ndo_set_rx_mode = stmmac_set_rx_mode,
   6594	.ndo_tx_timeout = stmmac_tx_timeout,
   6595	.ndo_eth_ioctl = stmmac_ioctl,
   6596	.ndo_setup_tc = stmmac_setup_tc,
   6597	.ndo_select_queue = stmmac_select_queue,
   6598#ifdef CONFIG_NET_POLL_CONTROLLER
   6599	.ndo_poll_controller = stmmac_poll_controller,
   6600#endif
   6601	.ndo_set_mac_address = stmmac_set_mac_address,
   6602	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
   6603	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
   6604	.ndo_bpf = stmmac_bpf,
   6605	.ndo_xdp_xmit = stmmac_xdp_xmit,
   6606	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
   6607};
   6608
   6609static void stmmac_reset_subtask(struct stmmac_priv *priv)
   6610{
   6611	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
   6612		return;
   6613	if (test_bit(STMMAC_DOWN, &priv->state))
   6614		return;
   6615
   6616	netdev_err(priv->dev, "Reset adapter.\n");
   6617
   6618	rtnl_lock();
   6619	netif_trans_update(priv->dev);
   6620	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
   6621		usleep_range(1000, 2000);
   6622
   6623	set_bit(STMMAC_DOWN, &priv->state);
   6624	dev_close(priv->dev);
   6625	dev_open(priv->dev, NULL);
   6626	clear_bit(STMMAC_DOWN, &priv->state);
   6627	clear_bit(STMMAC_RESETING, &priv->state);
   6628	rtnl_unlock();
   6629}
   6630
   6631static void stmmac_service_task(struct work_struct *work)
   6632{
   6633	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
   6634			service_task);
   6635
   6636	stmmac_reset_subtask(priv);
   6637	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
   6638}
   6639
   6640/**
   6641 *  stmmac_hw_init - Init the MAC device
   6642 *  @priv: driver private structure
    6643 *  Description: configure the MAC device according to the platform
    6644 *  parameters and the HW capability register. It prepares the driver to
    6645 *  use either ring or chain mode and either enhanced or normal
    6646 *  descriptors.
   6647 */
   6648static int stmmac_hw_init(struct stmmac_priv *priv)
   6649{
   6650	int ret;
   6651
    6652	/* dwmac-sun8i only works in chain mode */
   6653	if (priv->plat->has_sun8i)
   6654		chain_mode = 1;
   6655	priv->chain_mode = chain_mode;
   6656
   6657	/* Initialize HW Interface */
   6658	ret = stmmac_hwif_init(priv);
   6659	if (ret)
   6660		return ret;
   6661
    6662	/* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
   6663	priv->hw_cap_support = stmmac_get_hw_features(priv);
   6664	if (priv->hw_cap_support) {
   6665		dev_info(priv->device, "DMA HW capability register supported\n");
   6666
    6667		/* Override some GMAC/DMA configuration fields that are
    6668		 * passed in through the platform data (e.g. enh_desc,
    6669		 * tx_coe) with the values from the HW capability
    6670		 * register (if supported).
    6671		 */
   6672		priv->plat->enh_desc = priv->dma_cap.enh_desc;
   6673		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
   6674				!priv->plat->use_phy_wol;
   6675		priv->hw->pmt = priv->plat->pmt;
   6676		if (priv->dma_cap.hash_tb_sz) {
   6677			priv->hw->multicast_filter_bins =
   6678					(BIT(priv->dma_cap.hash_tb_sz) << 5);
   6679			priv->hw->mcast_bits_log2 =
   6680					ilog2(priv->hw->multicast_filter_bins);
   6681		}
   6682
   6683		/* TXCOE doesn't work in thresh DMA mode */
   6684		if (priv->plat->force_thresh_dma_mode)
   6685			priv->plat->tx_coe = 0;
   6686		else
   6687			priv->plat->tx_coe = priv->dma_cap.tx_coe;
   6688
    6689		/* In case of GMAC4, rx_coe comes from the HW capability register. */
   6690		priv->plat->rx_coe = priv->dma_cap.rx_coe;
   6691
   6692		if (priv->dma_cap.rx_coe_type2)
   6693			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
   6694		else if (priv->dma_cap.rx_coe_type1)
   6695			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
   6696
   6697	} else {
   6698		dev_info(priv->device, "No HW DMA feature register supported\n");
   6699	}
   6700
   6701	if (priv->plat->rx_coe) {
   6702		priv->hw->rx_csum = priv->plat->rx_coe;
   6703		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
   6704		if (priv->synopsys_id < DWMAC_CORE_4_00)
   6705			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
   6706	}
   6707	if (priv->plat->tx_coe)
   6708		dev_info(priv->device, "TX Checksum insertion supported\n");
   6709
   6710	if (priv->plat->pmt) {
   6711		dev_info(priv->device, "Wake-Up On Lan supported\n");
   6712		device_set_wakeup_capable(priv->device, 1);
   6713	}
   6714
   6715	if (priv->dma_cap.tsoen)
   6716		dev_info(priv->device, "TSO supported\n");
   6717
   6718	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
   6719	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
   6720
   6721	/* Run HW quirks, if any */
   6722	if (priv->hwif_quirks) {
   6723		ret = priv->hwif_quirks(priv);
   6724		if (ret)
   6725			return ret;
   6726	}
   6727
    6728	/* The Rx Watchdog is available on cores newer than 3.40.
    6729	 * In some cases, for example on buggy HW, this feature
    6730	 * has to be disabled; this can be done by setting the
    6731	 * riwt_off field in the platform data.
    6732	 */
   6733	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
   6734	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
   6735		priv->use_riwt = 1;
   6736		dev_info(priv->device,
   6737			 "Enable RX Mitigation via HW Watchdog Timer\n");
   6738	}
   6739
   6740	return 0;
   6741}
   6742
   6743static void stmmac_napi_add(struct net_device *dev)
   6744{
   6745	struct stmmac_priv *priv = netdev_priv(dev);
   6746	u32 queue, maxq;
   6747
   6748	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
   6749
   6750	for (queue = 0; queue < maxq; queue++) {
   6751		struct stmmac_channel *ch = &priv->channel[queue];
   6752
   6753		ch->priv_data = priv;
   6754		ch->index = queue;
   6755		spin_lock_init(&ch->lock);
   6756
   6757		if (queue < priv->plat->rx_queues_to_use) {
   6758			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
   6759				       NAPI_POLL_WEIGHT);
   6760		}
   6761		if (queue < priv->plat->tx_queues_to_use) {
   6762			netif_napi_add_tx(dev, &ch->tx_napi,
   6763					  stmmac_napi_poll_tx);
   6764		}
   6765		if (queue < priv->plat->rx_queues_to_use &&
   6766		    queue < priv->plat->tx_queues_to_use) {
   6767			netif_napi_add(dev, &ch->rxtx_napi,
   6768				       stmmac_napi_poll_rxtx,
   6769				       NAPI_POLL_WEIGHT);
   6770		}
   6771	}
   6772}
   6773
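        /* Each channel can expose up to three NAPI contexts: rx_napi and
         * tx_napi for the regular datapath, plus rxtx_napi, which is only
         * registered for queue indices that have both an RX and a TX queue
         * and is the one scheduled by the XDP/AF_XDP zero-copy path (see
         * stmmac_xsk_wakeup() above).
         */
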
   6774static void stmmac_napi_del(struct net_device *dev)
   6775{
   6776	struct stmmac_priv *priv = netdev_priv(dev);
   6777	u32 queue, maxq;
   6778
   6779	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
   6780
   6781	for (queue = 0; queue < maxq; queue++) {
   6782		struct stmmac_channel *ch = &priv->channel[queue];
   6783
   6784		if (queue < priv->plat->rx_queues_to_use)
   6785			netif_napi_del(&ch->rx_napi);
   6786		if (queue < priv->plat->tx_queues_to_use)
   6787			netif_napi_del(&ch->tx_napi);
   6788		if (queue < priv->plat->rx_queues_to_use &&
   6789		    queue < priv->plat->tx_queues_to_use) {
   6790			netif_napi_del(&ch->rxtx_napi);
   6791		}
   6792	}
   6793}
   6794
   6795int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
   6796{
   6797	struct stmmac_priv *priv = netdev_priv(dev);
   6798	int ret = 0;
   6799
   6800	if (netif_running(dev))
   6801		stmmac_release(dev);
   6802
   6803	stmmac_napi_del(dev);
   6804
   6805	priv->plat->rx_queues_to_use = rx_cnt;
   6806	priv->plat->tx_queues_to_use = tx_cnt;
   6807
   6808	stmmac_napi_add(dev);
   6809
   6810	if (netif_running(dev))
   6811		ret = stmmac_open(dev);
   6812
   6813	return ret;
   6814}
   6815
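        /* stmmac_reinit_queues() is expected to be reached from the ethtool
         * ->set_channels path, with the rtnl lock already held, so the
         * close/re-open sequence above cannot race with a concurrent open.
         */
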
   6816int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
   6817{
   6818	struct stmmac_priv *priv = netdev_priv(dev);
   6819	int ret = 0;
   6820
   6821	if (netif_running(dev))
   6822		stmmac_release(dev);
   6823
   6824	priv->dma_rx_size = rx_size;
   6825	priv->dma_tx_size = tx_size;
   6826
   6827	if (netif_running(dev))
   6828		ret = stmmac_open(dev);
   6829
   6830	return ret;
   6831}
   6832
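        /* stmmac_reinit_ringparam() follows the same pattern for the
         * ethtool ->set_ringparam path: the interface is torn down, the new
         * ring sizes are latched into priv->dma_rx_size/priv->dma_tx_size,
         * and the descriptor rings are reallocated by the next
         * stmmac_open().
         */
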
   6833#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
   6834static void stmmac_fpe_lp_task(struct work_struct *work)
   6835{
   6836	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
   6837						fpe_task);
   6838	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
   6839	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
   6840	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
   6841	bool *hs_enable = &fpe_cfg->hs_enable;
   6842	bool *enable = &fpe_cfg->enable;
   6843	int retries = 20;
   6844
   6845	while (retries-- > 0) {
   6846		/* Bail out immediately if FPE handshake is OFF */
   6847		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
   6848			break;
   6849
   6850		if (*lo_state == FPE_STATE_ENTERING_ON &&
   6851		    *lp_state == FPE_STATE_ENTERING_ON) {
   6852			stmmac_fpe_configure(priv, priv->ioaddr,
   6853					     priv->plat->tx_queues_to_use,
   6854					     priv->plat->rx_queues_to_use,
   6855					     *enable);
   6856
   6857			netdev_info(priv->dev, "configured FPE\n");
   6858
   6859			*lo_state = FPE_STATE_ON;
   6860			*lp_state = FPE_STATE_ON;
   6861			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
   6862			break;
   6863		}
   6864
   6865		if ((*lo_state == FPE_STATE_CAPABLE ||
   6866		     *lo_state == FPE_STATE_ENTERING_ON) &&
   6867		     *lp_state != FPE_STATE_ON) {
   6868			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
   6869				    *lo_state, *lp_state);
   6870			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
   6871						MPACKET_VERIFY);
   6872		}
   6873		/* Sleep then retry */
   6874		msleep(500);
   6875	}
   6876
   6877	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
   6878}
   6879
   6880void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
   6881{
   6882	if (priv->plat->fpe_cfg->hs_enable != enable) {
   6883		if (enable) {
   6884			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
   6885						MPACKET_VERIFY);
   6886		} else {
   6887			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
   6888			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
   6889		}
   6890
   6891		priv->plat->fpe_cfg->hs_enable = enable;
   6892	}
   6893}
   6894
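        /* Frame Preemption (FPE) verify sequence, as implemented above: the
         * local station sends an MPACKET_VERIFY, then stmmac_fpe_lp_task()
         * polls (up to 20 times, 500 ms apart) until both the local and the
         * link-partner state reach FPE_STATE_ENTERING_ON, at which point
         * the MAC is programmed via stmmac_fpe_configure() and both sides
         * are marked FPE_STATE_ON.
         */
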
   6895/**
   6896 * stmmac_dvr_probe
   6897 * @device: device pointer
   6898 * @plat_dat: platform data pointer
   6899 * @res: stmmac resource pointer
    6900 * Description: this is the main probe function: it allocates the
    6901 * net_device (alloc_etherdev) and the driver private structure.
    6902 * Return:
    6903 * 0 on success, otherwise a negative errno.
   6904 */
   6905int stmmac_dvr_probe(struct device *device,
   6906		     struct plat_stmmacenet_data *plat_dat,
   6907		     struct stmmac_resources *res)
   6908{
   6909	struct net_device *ndev = NULL;
   6910	struct stmmac_priv *priv;
   6911	u32 rxq;
   6912	int i, ret = 0;
   6913
   6914	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
   6915				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
   6916	if (!ndev)
   6917		return -ENOMEM;
   6918
   6919	SET_NETDEV_DEV(ndev, device);
   6920
   6921	priv = netdev_priv(ndev);
   6922	priv->device = device;
   6923	priv->dev = ndev;
   6924
   6925	stmmac_set_ethtool_ops(ndev);
   6926	priv->pause = pause;
   6927	priv->plat = plat_dat;
   6928	priv->ioaddr = res->addr;
   6929	priv->dev->base_addr = (unsigned long)res->addr;
   6930	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
   6931
   6932	priv->dev->irq = res->irq;
   6933	priv->wol_irq = res->wol_irq;
   6934	priv->lpi_irq = res->lpi_irq;
   6935	priv->sfty_ce_irq = res->sfty_ce_irq;
   6936	priv->sfty_ue_irq = res->sfty_ue_irq;
   6937	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
   6938		priv->rx_irq[i] = res->rx_irq[i];
   6939	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
   6940		priv->tx_irq[i] = res->tx_irq[i];
   6941
   6942	if (!is_zero_ether_addr(res->mac))
   6943		eth_hw_addr_set(priv->dev, res->mac);
   6944
   6945	dev_set_drvdata(device, priv->dev);
   6946
   6947	/* Verify driver arguments */
   6948	stmmac_verify_args();
   6949
   6950	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
   6951	if (!priv->af_xdp_zc_qps)
   6952		return -ENOMEM;
   6953
   6954	/* Allocate workqueue */
   6955	priv->wq = create_singlethread_workqueue("stmmac_wq");
   6956	if (!priv->wq) {
   6957		dev_err(priv->device, "failed to create workqueue\n");
   6958		return -ENOMEM;
   6959	}
   6960
   6961	INIT_WORK(&priv->service_task, stmmac_service_task);
   6962
    6963	/* Initialize the Link Partner FPE work item */
   6964	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
   6965
    6966	/* Override with kernel parameters if supplied. XXX CRS XXX:
    6967	 * this needs to support multiple instances.
    6968	 */
   6969	if ((phyaddr >= 0) && (phyaddr <= 31))
   6970		priv->plat->phy_addr = phyaddr;
   6971
   6972	if (priv->plat->stmmac_rst) {
   6973		ret = reset_control_assert(priv->plat->stmmac_rst);
   6974		reset_control_deassert(priv->plat->stmmac_rst);
    6975		/* Some reset controllers provide only a reset callback
    6976		 * instead of the assert + deassert callback pair.
    6977		 */
   6978		if (ret == -ENOTSUPP)
   6979			reset_control_reset(priv->plat->stmmac_rst);
   6980	}
   6981
   6982	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
   6983	if (ret == -ENOTSUPP)
   6984		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
   6985			ERR_PTR(ret));
   6986
   6987	/* Init MAC and get the capabilities */
   6988	ret = stmmac_hw_init(priv);
   6989	if (ret)
   6990		goto error_hw_init;
   6991
   6992	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
   6993	 */
   6994	if (priv->synopsys_id < DWMAC_CORE_5_20)
   6995		priv->plat->dma_cfg->dche = false;
   6996
   6997	stmmac_check_ether_addr(priv);
   6998
   6999	ndev->netdev_ops = &stmmac_netdev_ops;
   7000
   7001	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   7002			    NETIF_F_RXCSUM;
   7003
   7004	ret = stmmac_tc_init(priv, priv);
   7005	if (!ret) {
   7006		ndev->hw_features |= NETIF_F_HW_TC;
   7007	}
   7008
   7009	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
   7010		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
   7011		if (priv->plat->has_gmac4)
   7012			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
   7013		priv->tso = true;
   7014		dev_info(priv->device, "TSO feature enabled\n");
   7015	}
   7016
   7017	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
   7018		ndev->hw_features |= NETIF_F_GRO;
   7019		priv->sph_cap = true;
   7020		priv->sph = priv->sph_cap;
   7021		dev_info(priv->device, "SPH feature enabled\n");
   7022	}
   7023
    7024	/* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64-bit
    7025	 * widths, but some SoCs use other widths: the i.MX8MP, for example,
    7026	 * supports 34 bits, which is reported as 40 bits in MAC_HW_Feature1[ADDR64].
    7027	 * So overwrite dma_cap.addr64 according to the real HW design.
    7028	 */
   7029	if (priv->plat->addr64)
   7030		priv->dma_cap.addr64 = priv->plat->addr64;
   7031
   7032	if (priv->dma_cap.addr64) {
   7033		ret = dma_set_mask_and_coherent(device,
   7034				DMA_BIT_MASK(priv->dma_cap.addr64));
   7035		if (!ret) {
   7036			dev_info(priv->device, "Using %d bits DMA width\n",
   7037				 priv->dma_cap.addr64);
   7038
   7039			/*
   7040			 * If more than 32 bits can be addressed, make sure to
   7041			 * enable enhanced addressing mode.
   7042			 */
   7043			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
   7044				priv->plat->dma_cfg->eame = true;
   7045		} else {
   7046			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
   7047			if (ret) {
   7048				dev_err(priv->device, "Failed to set DMA Mask\n");
   7049				goto error_hw_init;
   7050			}
   7051
   7052			priv->dma_cap.addr64 = 32;
   7053		}
   7054	}
   7055
   7056	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
   7057	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
   7058#ifdef STMMAC_VLAN_TAG_USED
   7059	/* Both mac100 and gmac support receive VLAN tag detection */
   7060	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
   7061	if (priv->dma_cap.vlhash) {
   7062		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
   7063		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
   7064	}
   7065	if (priv->dma_cap.vlins) {
   7066		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
   7067		if (priv->dma_cap.dvlan)
   7068			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
   7069	}
   7070#endif
   7071	priv->msg_enable = netif_msg_init(debug, default_msg_level);
   7072
   7073	/* Initialize RSS */
   7074	rxq = priv->plat->rx_queues_to_use;
   7075	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
   7076	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
   7077		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
   7078
   7079	if (priv->dma_cap.rssen && priv->plat->rss_en)
   7080		ndev->features |= NETIF_F_RXHASH;
   7081
   7082	/* MTU range: 46 - hw-specific max */
   7083	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
   7084	if (priv->plat->has_xgmac)
   7085		ndev->max_mtu = XGMAC_JUMBO_LEN;
   7086	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
   7087		ndev->max_mtu = JUMBO_LEN;
   7088	else
   7089		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
    7090	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
    7091	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
    7092	 */
   7093	if ((priv->plat->maxmtu < ndev->max_mtu) &&
   7094	    (priv->plat->maxmtu >= ndev->min_mtu))
   7095		ndev->max_mtu = priv->plat->maxmtu;
   7096	else if (priv->plat->maxmtu < ndev->min_mtu)
   7097		dev_warn(priv->device,
   7098			 "%s: warning: maxmtu having invalid value (%d)\n",
   7099			 __func__, priv->plat->maxmtu);
   7100
   7101	if (flow_ctrl)
   7102		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
   7103
   7104	/* Setup channels NAPI */
   7105	stmmac_napi_add(ndev);
   7106
   7107	mutex_init(&priv->lock);
   7108
    7109	/* If a specific clk_csr value is passed from the platform,
    7110	 * the CSR Clock Range selection is fixed and cannot be
    7111	 * changed at run-time. Otherwise the driver tries to set
    7112	 * the MDC clock dynamically according to the actual CSR
    7113	 * clock input.
    7114	 */
   7115	if (priv->plat->clk_csr >= 0)
   7116		priv->clk_csr = priv->plat->clk_csr;
   7117	else
   7118		stmmac_clk_csr_set(priv);
   7119
   7120	stmmac_check_pcs_mode(priv);
   7121
   7122	pm_runtime_get_noresume(device);
   7123	pm_runtime_set_active(device);
   7124	if (!pm_runtime_enabled(device))
   7125		pm_runtime_enable(device);
   7126
   7127	if (priv->hw->pcs != STMMAC_PCS_TBI &&
   7128	    priv->hw->pcs != STMMAC_PCS_RTBI) {
   7129		/* MDIO bus Registration */
   7130		ret = stmmac_mdio_register(ndev);
   7131		if (ret < 0) {
   7132			dev_err_probe(priv->device, ret,
   7133				      "%s: MDIO bus (id: %d) registration failed\n",
   7134				      __func__, priv->plat->bus_id);
   7135			goto error_mdio_register;
   7136		}
   7137	}
   7138
   7139	if (priv->plat->speed_mode_2500)
   7140		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
   7141
   7142	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
   7143		ret = stmmac_xpcs_setup(priv->mii);
   7144		if (ret)
   7145			goto error_xpcs_setup;
   7146	}
   7147
   7148	ret = stmmac_phy_setup(priv);
   7149	if (ret) {
   7150		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
   7151		goto error_phy_setup;
   7152	}
   7153
   7154	ret = register_netdev(ndev);
   7155	if (ret) {
   7156		dev_err(priv->device, "%s: ERROR %i registering the device\n",
   7157			__func__, ret);
   7158		goto error_netdev_register;
   7159	}
   7160
   7161	if (priv->plat->serdes_powerup) {
   7162		ret = priv->plat->serdes_powerup(ndev,
   7163						 priv->plat->bsp_priv);
   7164
   7165		if (ret < 0)
   7166			goto error_serdes_powerup;
   7167	}
   7168
   7169#ifdef CONFIG_DEBUG_FS
   7170	stmmac_init_fs(ndev);
   7171#endif
   7172
   7173	if (priv->plat->dump_debug_regs)
   7174		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
   7175
   7176	/* Let pm_runtime_put() disable the clocks.
   7177	 * If CONFIG_PM is not enabled, the clocks will stay powered.
   7178	 */
   7179	pm_runtime_put(device);
   7180
   7181	return ret;
   7182
   7183error_serdes_powerup:
   7184	unregister_netdev(ndev);
   7185error_netdev_register:
   7186	phylink_destroy(priv->phylink);
   7187error_xpcs_setup:
   7188error_phy_setup:
   7189	if (priv->hw->pcs != STMMAC_PCS_TBI &&
   7190	    priv->hw->pcs != STMMAC_PCS_RTBI)
   7191		stmmac_mdio_unregister(ndev);
   7192error_mdio_register:
   7193	stmmac_napi_del(ndev);
   7194error_hw_init:
   7195	destroy_workqueue(priv->wq);
   7196	bitmap_free(priv->af_xdp_zc_qps);
   7197
   7198	return ret;
   7199}
   7200EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
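
        /* For illustration only: a platform "glue" driver is expected to
         * call stmmac_dvr_probe() roughly as sketched below.  The helper
         * names (stmmac_get_platform_resources(), stmmac_probe_config_dt())
         * are those used by the generic dwmac platform glue and may differ
         * in other integrations; dwmac_foo_probe() is hypothetical.
         *
         *	static int dwmac_foo_probe(struct platform_device *pdev)
         *	{
         *		struct plat_stmmacenet_data *plat_dat;
         *		struct stmmac_resources stmmac_res;
         *		int ret;
         *
         *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
         *		if (ret)
         *			return ret;
         *
         *		plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
         *		if (IS_ERR(plat_dat))
         *			return PTR_ERR(plat_dat);
         *
         *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
         *	}
         */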
   7201
   7202/**
   7203 * stmmac_dvr_remove
   7204 * @dev: device pointer
    7205 * Description: this function resets the TX/RX processes, disables the MAC
    7206 * RX/TX, changes the link status, and releases the DMA descriptor rings.
   7207 */
   7208int stmmac_dvr_remove(struct device *dev)
   7209{
   7210	struct net_device *ndev = dev_get_drvdata(dev);
   7211	struct stmmac_priv *priv = netdev_priv(ndev);
   7212
   7213	netdev_info(priv->dev, "%s: removing driver", __func__);
   7214
   7215	pm_runtime_get_sync(dev);
   7216	pm_runtime_disable(dev);
   7217	pm_runtime_put_noidle(dev);
   7218
   7219	stmmac_stop_all_dma(priv);
   7220	stmmac_mac_set(priv, priv->ioaddr, false);
   7221	netif_carrier_off(ndev);
   7222	unregister_netdev(ndev);
   7223
    7224	/* Serdes power down needs to happen after the VLAN filter
    7225	 * deletion that is triggered by unregister_netdev().
    7226	 */
   7227	if (priv->plat->serdes_powerdown)
   7228		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
   7229
   7230#ifdef CONFIG_DEBUG_FS
   7231	stmmac_exit_fs(ndev);
   7232#endif
   7233	phylink_destroy(priv->phylink);
   7234	if (priv->plat->stmmac_rst)
   7235		reset_control_assert(priv->plat->stmmac_rst);
   7236	reset_control_assert(priv->plat->stmmac_ahb_rst);
   7237	if (priv->hw->pcs != STMMAC_PCS_TBI &&
   7238	    priv->hw->pcs != STMMAC_PCS_RTBI)
   7239		stmmac_mdio_unregister(ndev);
   7240	destroy_workqueue(priv->wq);
   7241	mutex_destroy(&priv->lock);
   7242	bitmap_free(priv->af_xdp_zc_qps);
   7243
   7244	return 0;
   7245}
   7246EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
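
        /* The counterpart of the probe sketch above: glue drivers typically
         * call stmmac_dvr_remove(&pdev->dev) from their ->remove() callback
         * before releasing any platform-specific resources (clocks, regs).
         */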
   7247
   7248/**
   7249 * stmmac_suspend - suspend callback
   7250 * @dev: device pointer
    7251 * Description: this function suspends the device. It is called by the
    7252 * platform driver to stop the network queues, program the PMT register
    7253 * (for WoL), and clean up and release the driver resources.
   7254 */
   7255int stmmac_suspend(struct device *dev)
   7256{
   7257	struct net_device *ndev = dev_get_drvdata(dev);
   7258	struct stmmac_priv *priv = netdev_priv(ndev);
   7259	u32 chan;
   7260
   7261	if (!ndev || !netif_running(ndev))
   7262		return 0;
   7263
   7264	mutex_lock(&priv->lock);
   7265
   7266	netif_device_detach(ndev);
   7267
   7268	stmmac_disable_all_queues(priv);
   7269
   7270	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
   7271		hrtimer_cancel(&priv->tx_queue[chan].txtimer);
   7272
   7273	if (priv->eee_enabled) {
   7274		priv->tx_path_in_lpi_mode = false;
   7275		del_timer_sync(&priv->eee_ctrl_timer);
   7276	}
   7277
   7278	/* Stop TX/RX DMA */
   7279	stmmac_stop_all_dma(priv);
   7280
   7281	if (priv->plat->serdes_powerdown)
   7282		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
   7283
   7284	/* Enable Power down mode by programming the PMT regs */
   7285	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
   7286		stmmac_pmt(priv, priv->hw, priv->wolopts);
   7287		priv->irq_wake = 1;
   7288	} else {
   7289		stmmac_mac_set(priv, priv->ioaddr, false);
   7290		pinctrl_pm_select_sleep_state(priv->device);
   7291	}
   7292
   7293	mutex_unlock(&priv->lock);
   7294
   7295	rtnl_lock();
   7296	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
   7297		phylink_suspend(priv->phylink, true);
   7298	} else {
   7299		if (device_may_wakeup(priv->device))
   7300			phylink_speed_down(priv->phylink, false);
   7301		phylink_suspend(priv->phylink, false);
   7302	}
   7303	rtnl_unlock();
   7304
   7305	if (priv->dma_cap.fpesel) {
   7306		/* Disable FPE */
   7307		stmmac_fpe_configure(priv, priv->ioaddr,
   7308				     priv->plat->tx_queues_to_use,
   7309				     priv->plat->rx_queues_to_use, false);
   7310
   7311		stmmac_fpe_handshake(priv, false);
   7312		stmmac_fpe_stop_wq(priv);
   7313	}
   7314
   7315	priv->speed = SPEED_UNKNOWN;
   7316	return 0;
   7317}
   7318EXPORT_SYMBOL_GPL(stmmac_suspend);
   7319
   7320/**
   7321 * stmmac_reset_queues_param - reset queue parameters
    7322 * @priv: driver private structure
   7323 */
   7324static void stmmac_reset_queues_param(struct stmmac_priv *priv)
   7325{
   7326	u32 rx_cnt = priv->plat->rx_queues_to_use;
   7327	u32 tx_cnt = priv->plat->tx_queues_to_use;
   7328	u32 queue;
   7329
   7330	for (queue = 0; queue < rx_cnt; queue++) {
   7331		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
   7332
   7333		rx_q->cur_rx = 0;
   7334		rx_q->dirty_rx = 0;
   7335	}
   7336
   7337	for (queue = 0; queue < tx_cnt; queue++) {
   7338		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
   7339
   7340		tx_q->cur_tx = 0;
   7341		tx_q->dirty_tx = 0;
   7342		tx_q->mss = 0;
   7343
   7344		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
   7345	}
   7346}
   7347
   7348/**
   7349 * stmmac_resume - resume callback
   7350 * @dev: device pointer
    7351 * Description: invoked at resume time to set up the DMA and the core in
    7352 * a usable state.
   7353 */
   7354int stmmac_resume(struct device *dev)
   7355{
   7356	struct net_device *ndev = dev_get_drvdata(dev);
   7357	struct stmmac_priv *priv = netdev_priv(ndev);
   7358	int ret;
   7359
   7360	if (!netif_running(ndev))
   7361		return 0;
   7362
    7363	/* The Power Down bit in the PMT register is cleared
    7364	 * automatically as soon as a magic packet or a Wake-up frame
    7365	 * is received. It is still better to clear this bit manually,
    7366	 * because it can cause problems when resuming from another
    7367	 * device (e.g. a serial console).
    7368	 */
   7369	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
   7370		mutex_lock(&priv->lock);
   7371		stmmac_pmt(priv, priv->hw, 0);
   7372		mutex_unlock(&priv->lock);
   7373		priv->irq_wake = 0;
   7374	} else {
   7375		pinctrl_pm_select_default_state(priv->device);
   7376		/* reset the phy so that it's ready */
   7377		if (priv->mii)
   7378			stmmac_mdio_reset(priv->mii);
   7379	}
   7380
   7381	if (priv->plat->serdes_powerup) {
   7382		ret = priv->plat->serdes_powerup(ndev,
   7383						 priv->plat->bsp_priv);
   7384
   7385		if (ret < 0)
   7386			return ret;
   7387	}
   7388
   7389	rtnl_lock();
   7390	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
   7391		phylink_resume(priv->phylink);
   7392	} else {
   7393		phylink_resume(priv->phylink);
   7394		if (device_may_wakeup(priv->device))
   7395			phylink_speed_up(priv->phylink);
   7396	}
   7397	rtnl_unlock();
   7398
   7399	rtnl_lock();
   7400	mutex_lock(&priv->lock);
   7401
   7402	stmmac_reset_queues_param(priv);
   7403
   7404	stmmac_free_tx_skbufs(priv);
   7405	stmmac_clear_descriptors(priv);
   7406
   7407	stmmac_hw_setup(ndev, false);
   7408	stmmac_init_coalesce(priv);
   7409	stmmac_set_rx_mode(ndev);
   7410
   7411	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
   7412
   7413	stmmac_enable_all_queues(priv);
   7414	stmmac_enable_all_dma_irq(priv);
   7415
   7416	mutex_unlock(&priv->lock);
   7417	rtnl_unlock();
   7418
   7419	netif_device_attach(ndev);
   7420
   7421	return 0;
   7422}
   7423EXPORT_SYMBOL_GPL(stmmac_resume);
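
        /* stmmac_suspend()/stmmac_resume() are exported so the platform and
         * PCI glue can wire them into their dev_pm_ops.  A minimal sketch
         * (assuming a glue driver with no extra clock handling; the real
         * glue wraps these calls with its own suspend/resume helpers):
         *
         *	static SIMPLE_DEV_PM_OPS(dwmac_foo_pm_ops,
         *				 stmmac_suspend, stmmac_resume);
         */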
   7424
   7425#ifndef MODULE
   7426static int __init stmmac_cmdline_opt(char *str)
   7427{
   7428	char *opt;
   7429
   7430	if (!str || !*str)
   7431		return 1;
   7432	while ((opt = strsep(&str, ",")) != NULL) {
   7433		if (!strncmp(opt, "debug:", 6)) {
   7434			if (kstrtoint(opt + 6, 0, &debug))
   7435				goto err;
   7436		} else if (!strncmp(opt, "phyaddr:", 8)) {
   7437			if (kstrtoint(opt + 8, 0, &phyaddr))
   7438				goto err;
   7439		} else if (!strncmp(opt, "buf_sz:", 7)) {
   7440			if (kstrtoint(opt + 7, 0, &buf_sz))
   7441				goto err;
   7442		} else if (!strncmp(opt, "tc:", 3)) {
   7443			if (kstrtoint(opt + 3, 0, &tc))
   7444				goto err;
   7445		} else if (!strncmp(opt, "watchdog:", 9)) {
   7446			if (kstrtoint(opt + 9, 0, &watchdog))
   7447				goto err;
   7448		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
   7449			if (kstrtoint(opt + 10, 0, &flow_ctrl))
   7450				goto err;
   7451		} else if (!strncmp(opt, "pause:", 6)) {
   7452			if (kstrtoint(opt + 6, 0, &pause))
   7453				goto err;
   7454		} else if (!strncmp(opt, "eee_timer:", 10)) {
   7455			if (kstrtoint(opt + 10, 0, &eee_timer))
   7456				goto err;
   7457		} else if (!strncmp(opt, "chain_mode:", 11)) {
   7458			if (kstrtoint(opt + 11, 0, &chain_mode))
   7459				goto err;
   7460		}
   7461	}
   7462	return 1;
   7463
   7464err:
   7465	pr_err("%s: ERROR broken module parameter conversion", __func__);
   7466	return 1;
   7467}
   7468
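        /* A minimal example of the boot-time syntax parsed above (values
         * are illustrative only):
         *
         *	stmmaceth=debug:16,phyaddr:1,watchdog:8000,chain_mode:1
         */
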
   7469__setup("stmmaceth=", stmmac_cmdline_opt);
   7470#endif /* MODULE */
   7471
   7472static int __init stmmac_init(void)
   7473{
   7474#ifdef CONFIG_DEBUG_FS
   7475	/* Create debugfs main directory if it doesn't exist yet */
   7476	if (!stmmac_fs_dir)
   7477		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
   7478	register_netdevice_notifier(&stmmac_notifier);
   7479#endif
   7480
   7481	return 0;
   7482}
   7483
   7484static void __exit stmmac_exit(void)
   7485{
   7486#ifdef CONFIG_DEBUG_FS
   7487	unregister_netdevice_notifier(&stmmac_notifier);
   7488	debugfs_remove_recursive(stmmac_fs_dir);
   7489#endif
   7490}
   7491
   7492module_init(stmmac_init)
   7493module_exit(stmmac_exit)
   7494
   7495MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
   7496MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
   7497MODULE_LICENSE("GPL");