cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpsw_ethtool.c (19623B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool intf
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};
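
/*
 * cpsw_get_ethtool_stats() below reads each counter with
 * readl(cpsw->hw_stats + stat_offset), so this struct is laid out to mirror
 * the CPSW statistics register block, one u32 per counter, with __pad_0
 * covering the reserved offsets in that block.
 */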

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				sizeof_field(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

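/*
 * Each table entry below pairs a display string with one of the CPSW_STAT /
 * CPDMA_*_STAT macros. For example,
 *	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) }
 * expands to
 *	{ "Good Rx Frames", CPSW_STATS,
 *	  sizeof_field(struct cpsw_hw_stats, rxgoodframes),
 *	  offsetof(struct cpsw_hw_stats, rxgoodframes) }
 * filling .stat_string, .type, .sizeof_stat and .stat_offset in order.
 */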
static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
		      struct kernel_ethtool_coalesce *kernel_coal,
		      struct netlink_ext_ack *extack)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
		      struct kernel_ethtool_coalesce *kernel_coal,
		      struct netlink_ext_ack *extack)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl =  readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
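
/*
 * Worked example for cpsw_set_coalesce() (illustrative figures, assuming
 * bus_freq_mhz = 250 and a request that falls inside the
 * [CPSW_CMINTMIN_INTVL, CPSW_CMINTMAX_INTVL] window so no pulse dilation is
 * needed): for "ethtool -C ethX rx-usecs 250",
 *	prescale       = 250 * 4    = 1000 bus clocks per 4us pacing pulse
 *	addnl_dvdr     = 1
 *	num_interrupts = 1000 / 250 = 4
 * so rx_imax/tx_imax are programmed to 4, i.e. at most four pacing
 * interrupts per millisecond (roughly one every 250us).
 */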

int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}
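
/*
 * Example count (derived from the tables above): with 34 common CPSW
 * counters, 13 per-channel CPDMA counters, and one Rx plus one Tx channel,
 * ETH_SS_STATS reports 34 + (1 + 1) * 13 = 60 statistics.
 */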
    237
    238static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
    239{
    240	int ch_stats_len;
    241	int line;
    242	int i;
    243
    244	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
    245	for (i = 0; i < ch_stats_len; i++) {
    246		line = i % CPSW_STATS_CH_LEN;
    247		snprintf(*p, ETH_GSTRING_LEN,
    248			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
    249			 (long)(i / CPSW_STATS_CH_LEN),
    250			 cpsw_gstrings_ch_stats[line].stat_string);
    251		*p += ETH_GSTRING_LEN;
    252	}
    253}
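
/*
 * For instance, with two Rx channels this emits
 *	"Rx DMA chan 0: head_enqueue" ... "Rx DMA chan 0: teardown_dequeue"
 *	"Rx DMA chan 1: head_enqueue" ... "Rx DMA chan 1: teardown_dequeue"
 * one ETH_GSTRING_LEN slot per counter, in the same order that
 * cpsw_get_ethtool_stats() later fills in the values.
 */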

void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

void cpsw_get_ethtool_stats(struct net_device *ndev,
			    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect CPSW hardware stats, then Davinci CPDMA stats per Rx/Tx channel */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}
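
/*
 * The data[] layout written here must match the string order produced by
 * cpsw_get_strings() above; userspace pairs them up, e.g. "ethtool -S ethX".
 */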

void cpsw_get_pauseparam(struct net_device *ndev,
			 struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw_ale_get_num_entries(cpsw->ale) *
	       ALE_ENTRY_WORDS * sizeof(u32);
}

void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);

	return ret;
}

void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

int cpsw_get_link_ksettings(struct net_device *ndev,
			    struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

int cpsw_set_link_ksettings(struct net_device *ndev,
			    const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}

int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (!(ndev && netif_running(ndev)))
			continue;

		netif_tx_stop_all_queues(ndev);

		/* Barrier, so that stop_queue is visible to other CPUs */
		smp_mb__after_atomic();
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int i, ret;

	/* After this, receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev && netif_running(ndev))
			netif_tx_start_all_queues(ndev);
	}

	return 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
				    cpdma_handler_fn rx_handler)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}
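
/*
 * In cpsw_update_channels_res() above, vch = rx ? *ch : 7 - *ch: Rx queues
 * are placed on CPDMA hardware channels counting up from 0, while Tx queues
 * are placed counting down from hardware channel 7.
 */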

static void cpsw_fail(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i;

	for (i = 0; i < cpsw->data.slaves; i++) {
		ndev = cpsw->slaves[i].ndev;
		if (ndev)
			dev_close(ndev);
	}
}

int cpsw_set_channels_common(struct net_device *ndev,
			     struct ethtool_channels *chs,
			     cpdma_handler_fn rx_handler)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *sl_ndev;
	int i, new_pools, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);

	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
	if (ret)
		goto err;

	for (i = 0; i < cpsw->data.slaves; i++) {
		sl_ndev = cpsw->slaves[i].ndev;
		if (!(sl_ndev && netif_running(sl_ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	cpsw_split_res(cpsw);

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
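
/*
 * Typical trigger for the path above (illustrative): "ethtool -L ethX rx 2 tx 2"
 * suspends the data path, creates/destroys CPDMA channels to match the
 * request, tells the stack the new queue counts, and resumes traffic.
 */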

void cpsw_get_ringparam(struct net_device *ndev,
			struct ethtool_ringparam *ering,
			struct kernel_ethtool_ringparam *kernel_ering,
			struct netlink_ext_ack *extack)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* tx ring resizing is not supported; see cpsw_set_ringparam() */
	ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

int cpsw_set_ringparam(struct net_device *ndev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int descs_num, ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	descs_num = cpdma_get_num_rx_descs(cpsw->dma);
	if (ering->rx_pending == descs_num)
		return 0;

	cpsw_suspend_data_pass(ndev);

	ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
	if (ret) {
		if (cpsw_resume_data_pass(ndev))
			goto err;

		return ret;
	}

	if (cpsw->usage_count) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	cpdma_set_num_rx_descs(cpsw->dma, descs_num);
	dev_err(cpsw->dev, "cannot set ring params, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
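
/*
 * Typical trigger (illustrative): "ethtool -G ethX rx 1024" resizes the Rx
 * descriptor allocation from the shared pool; any tx value passed with -G is
 * ignored, as noted above.
 */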

#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif
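
/*
 * These capabilities are what "ethtool -T ethX" reports: with CONFIG_TI_CPTS
 * enabled, hardware timestamping of PTPv2 event packets via the CPTS PHC;
 * otherwise software timestamping only (phc_index = -1).
 */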