cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dwxgmac2_core.c (43705B)


      1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
      2/*
      3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
      4 * stmmac XGMAC support.
      5 */
      6
      7#include <linux/bitrev.h>
      8#include <linux/crc32.h>
      9#include <linux/iopoll.h>
     10#include "stmmac.h"
     11#include "stmmac_ptp.h"
     12#include "dwxlgmac2.h"
     13#include "dwxgmac2.h"
     14
     15static void dwxgmac2_core_init(struct mac_device_info *hw,
     16			       struct net_device *dev)
     17{
     18	void __iomem *ioaddr = hw->pcsr;
     19	u32 tx, rx;
     20
     21	tx = readl(ioaddr + XGMAC_TX_CONFIG);
     22	rx = readl(ioaddr + XGMAC_RX_CONFIG);
     23
     24	tx |= XGMAC_CORE_INIT_TX;
     25	rx |= XGMAC_CORE_INIT_RX;
     26
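	/* A non-zero hw->ps requests a fixed speed: force TX enable and
	 * program the matching speed-select bits.
	 */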
     27	if (hw->ps) {
     28		tx |= XGMAC_CONFIG_TE;
     29		tx &= ~hw->link.speed_mask;
     30
     31		switch (hw->ps) {
     32		case SPEED_10000:
     33			tx |= hw->link.xgmii.speed10000;
     34			break;
     35		case SPEED_2500:
     36			tx |= hw->link.speed2500;
     37			break;
     38		case SPEED_1000:
     39		default:
     40			tx |= hw->link.speed1000;
     41			break;
     42		}
     43	}
     44
     45	writel(tx, ioaddr + XGMAC_TX_CONFIG);
     46	writel(rx, ioaddr + XGMAC_RX_CONFIG);
     47	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
     48}
     49
     50static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
     51{
     52	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
     53	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
     54
     55	if (enable) {
     56		tx |= XGMAC_CONFIG_TE;
     57		rx |= XGMAC_CONFIG_RE;
     58	} else {
     59		tx &= ~XGMAC_CONFIG_TE;
     60		rx &= ~XGMAC_CONFIG_RE;
     61	}
     62
     63	writel(tx, ioaddr + XGMAC_TX_CONFIG);
     64	writel(rx, ioaddr + XGMAC_RX_CONFIG);
     65}
     66
     67static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
     68{
     69	void __iomem *ioaddr = hw->pcsr;
     70	u32 value;
     71
     72	value = readl(ioaddr + XGMAC_RX_CONFIG);
     73	if (hw->rx_csum)
     74		value |= XGMAC_CONFIG_IPC;
     75	else
     76		value &= ~XGMAC_CONFIG_IPC;
     77	writel(value, ioaddr + XGMAC_RX_CONFIG);
     78
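	/* Report whether the controller actually latched the new setting */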
     79	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
     80}
     81
     82static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
     83				     u32 queue)
     84{
     85	void __iomem *ioaddr = hw->pcsr;
     86	u32 value;
     87
     88	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
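	/* RXQEN is a 2-bit field per queue: 0x1 enables the queue for AV
	 * traffic, 0x2 for DCB/generic traffic.
	 */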
     89	if (mode == MTL_QUEUE_AVB)
     90		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
     91	else if (mode == MTL_QUEUE_DCB)
     92		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
     93	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
     94}
     95
     96static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
     97				   u32 queue)
     98{
     99	void __iomem *ioaddr = hw->pcsr;
    100	u32 value, reg;
    101
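	/* Queues 0-3 are mapped in RXQ_CTRL2, queues 4-7 in RXQ_CTRL3 */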
    102	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
    103	if (queue >= 4)
    104		queue -= 4;
    105
    106	value = readl(ioaddr + reg);
    107	value &= ~XGMAC_PSRQ(queue);
    108	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
    109
    110	writel(value, ioaddr + reg);
    111}
    112
    113static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
    114				   u32 queue)
    115{
    116	void __iomem *ioaddr = hw->pcsr;
    117	u32 value, reg;
    118
    119	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
    120	if (queue >= 4)
    121		queue -= 4;
    122
    123	value = readl(ioaddr + reg);
    124	value &= ~XGMAC_PSTC(queue);
    125	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
    126
    127	writel(value, ioaddr + reg);
    128}
    129
    130static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
    131					    u32 rx_alg)
    132{
    133	void __iomem *ioaddr = hw->pcsr;
    134	u32 value;
    135
    136	value = readl(ioaddr + XGMAC_MTL_OPMODE);
    137	value &= ~XGMAC_RAA;
    138
    139	switch (rx_alg) {
    140	case MTL_RX_ALGORITHM_SP:
    141		break;
    142	case MTL_RX_ALGORITHM_WSP:
    143		value |= XGMAC_RAA;
    144		break;
    145	default:
    146		break;
    147	}
    148
    149	writel(value, ioaddr + XGMAC_MTL_OPMODE);
    150}
    151
    152static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
    153					    u32 tx_alg)
    154{
    155	void __iomem *ioaddr = hw->pcsr;
    156	bool ets = true;
    157	u32 value;
    158	int i;
    159
    160	value = readl(ioaddr + XGMAC_MTL_OPMODE);
    161	value &= ~XGMAC_ETSALG;
    162
    163	switch (tx_alg) {
    164	case MTL_TX_ALGORITHM_WRR:
    165		value |= XGMAC_WRR;
    166		break;
    167	case MTL_TX_ALGORITHM_WFQ:
    168		value |= XGMAC_WFQ;
    169		break;
    170	case MTL_TX_ALGORITHM_DWRR:
    171		value |= XGMAC_DWRR;
    172		break;
    173	default:
    174		ets = false;
    175		break;
    176	}
    177
    178	writel(value, ioaddr + XGMAC_MTL_OPMODE);
    179
    180	/* Set ETS if desired */
    181	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
    182		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
    183		value &= ~XGMAC_TSA;
    184		if (ets)
    185			value |= XGMAC_ETS;
    186		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
    187	}
    188}
    189
    190static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
    191					     u32 weight, u32 queue)
    192{
    193	void __iomem *ioaddr = hw->pcsr;
    194
    195	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
    196}
    197
    198static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
    199				    u32 chan)
    200{
    201	void __iomem *ioaddr = hw->pcsr;
    202	u32 value, reg;
    203
    204	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
    205	if (queue >= 4)
    206		queue -= 4;
    207
    208	value = readl(ioaddr + reg);
    209	value &= ~XGMAC_QxMDMACH(queue);
    210	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
    211
    212	writel(value, ioaddr + reg);
    213}
    214
    215static void dwxgmac2_config_cbs(struct mac_device_info *hw,
    216				u32 send_slope, u32 idle_slope,
    217				u32 high_credit, u32 low_credit, u32 queue)
    218{
    219	void __iomem *ioaddr = hw->pcsr;
    220	u32 value;
    221
    222	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
    223	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
    224	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
    225	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
    226
    227	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
    228	value &= ~XGMAC_TSA;
    229	value |= XGMAC_CC | XGMAC_CBS;
    230	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
    231}
    232
    233static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
    234{
    235	void __iomem *ioaddr = hw->pcsr;
    236	int i;
    237
    238	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
    239		reg_space[i] = readl(ioaddr + i * 4);
    240}
    241
    242static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
    243				    struct stmmac_extra_stats *x)
    244{
    245	void __iomem *ioaddr = hw->pcsr;
    246	u32 stat, en;
    247	int ret = 0;
    248
    249	en = readl(ioaddr + XGMAC_INT_EN);
    250	stat = readl(ioaddr + XGMAC_INT_STATUS);
    251
    252	stat &= en;
    253
    254	if (stat & XGMAC_PMTIS) {
    255		x->irq_receive_pmt_irq_n++;
    256		readl(ioaddr + XGMAC_PMT);
    257	}
    258
    259	if (stat & XGMAC_LPIIS) {
    260		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
    261
    262		if (lpi & XGMAC_TLPIEN) {
    263			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
    264			x->irq_tx_path_in_lpi_mode_n++;
    265		}
    266		if (lpi & XGMAC_TLPIEX) {
    267			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
    268			x->irq_tx_path_exit_lpi_mode_n++;
    269		}
    270		if (lpi & XGMAC_RLPIEN)
    271			x->irq_rx_path_in_lpi_mode_n++;
    272		if (lpi & XGMAC_RLPIEX)
    273			x->irq_rx_path_exit_lpi_mode_n++;
    274	}
    275
    276	return ret;
    277}
    278
    279static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
    280{
    281	void __iomem *ioaddr = hw->pcsr;
    282	int ret = 0;
    283	u32 status;
    284
    285	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
    286	if (status & BIT(chan)) {
    287		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
    288
    289		if (chan_status & XGMAC_RXOVFIS)
    290			ret |= CORE_IRQ_MTL_RX_OVERFLOW;
    291
    292		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
    293	}
    294
    295	return ret;
    296}
    297
    298static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
    299			       unsigned int fc, unsigned int pause_time,
    300			       u32 tx_cnt)
    301{
    302	void __iomem *ioaddr = hw->pcsr;
    303	u32 i;
    304
    305	if (fc & FLOW_RX)
    306		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
    307	if (fc & FLOW_TX) {
    308		for (i = 0; i < tx_cnt; i++) {
    309			u32 value = XGMAC_TFE;
    310
    311			if (duplex)
    312				value |= pause_time << XGMAC_PT_SHIFT;
    313
    314			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
    315		}
    316	}
    317}
    318
    319static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
    320{
    321	void __iomem *ioaddr = hw->pcsr;
    322	u32 val = 0x0;
    323
    324	if (mode & WAKE_MAGIC)
    325		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
    326	if (mode & WAKE_UCAST)
    327		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
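	/* Keep the receiver running while powered down so that wake-up
	 * packets can still be detected.
	 */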
    328	if (val) {
    329		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
    330		cfg |= XGMAC_CONFIG_RE;
    331		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
    332	}
    333
    334	writel(val, ioaddr + XGMAC_PMT);
    335}
    336
    337static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
    338				   const unsigned char *addr,
    339				   unsigned int reg_n)
    340{
    341	void __iomem *ioaddr = hw->pcsr;
    342	u32 value;
    343
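	/* The address is split across two registers: bytes 4-5 go into
	 * ADDRx_HIGH together with the Address Enable bit, bytes 0-3 into
	 * ADDRx_LOW.
	 */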
    344	value = (addr[5] << 8) | addr[4];
    345	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
    346
    347	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
    348	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
    349}
    350
    351static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
    352				   unsigned char *addr, unsigned int reg_n)
    353{
    354	void __iomem *ioaddr = hw->pcsr;
    355	u32 hi_addr, lo_addr;
    356
    357	/* Read the MAC address from the hardware */
    358	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
    359	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
    360
    361	/* Extract the MAC address from the high and low words */
    362	addr[0] = lo_addr & 0xff;
    363	addr[1] = (lo_addr >> 8) & 0xff;
    364	addr[2] = (lo_addr >> 16) & 0xff;
    365	addr[3] = (lo_addr >> 24) & 0xff;
    366	addr[4] = hi_addr & 0xff;
    367	addr[5] = (hi_addr >> 8) & 0xff;
    368}
    369
    370static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
    371				  bool en_tx_lpi_clockgating)
    372{
    373	void __iomem *ioaddr = hw->pcsr;
    374	u32 value;
    375
    376	value = readl(ioaddr + XGMAC_LPI_CTRL);
    377
    378	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
    379	if (en_tx_lpi_clockgating)
    380		value |= XGMAC_TXCGE;
    381
    382	writel(value, ioaddr + XGMAC_LPI_CTRL);
    383}
    384
    385static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
    386{
    387	void __iomem *ioaddr = hw->pcsr;
    388	u32 value;
    389
    390	value = readl(ioaddr + XGMAC_LPI_CTRL);
    391	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
    392	writel(value, ioaddr + XGMAC_LPI_CTRL);
    393}
    394
    395static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
    396{
    397	void __iomem *ioaddr = hw->pcsr;
    398	u32 value;
    399
    400	value = readl(ioaddr + XGMAC_LPI_CTRL);
    401	if (link)
    402		value |= XGMAC_PLS;
    403	else
    404		value &= ~XGMAC_PLS;
    405	writel(value, ioaddr + XGMAC_LPI_CTRL);
    406}
    407
    408static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
    409{
    410	void __iomem *ioaddr = hw->pcsr;
    411	u32 value;
    412
    413	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
    414	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
    415}
    416
    417static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
    418				int mcbitslog2)
    419{
    420	int numhashregs, regs;
    421
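	/* Each hash register holds 32 bits, so a 64/128/256-bit filter
	 * needs 2/4/8 registers.
	 */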
    422	switch (mcbitslog2) {
    423	case 6:
    424		numhashregs = 2;
    425		break;
    426	case 7:
    427		numhashregs = 4;
    428		break;
    429	case 8:
    430		numhashregs = 8;
    431		break;
    432	default:
    433		return;
    434	}
    435
    436	for (regs = 0; regs < numhashregs; regs++)
    437		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
    438}
    439
    440static void dwxgmac2_set_filter(struct mac_device_info *hw,
    441				struct net_device *dev)
    442{
    443	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
    444	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
    445	int mcbitslog2 = hw->mcast_bits_log2;
    446	u32 mc_filter[8];
    447	int i;
    448
    449	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
    450	value |= XGMAC_FILTER_HPF;
    451
    452	memset(mc_filter, 0, sizeof(mc_filter));
    453
    454	if (dev->flags & IFF_PROMISC) {
    455		value |= XGMAC_FILTER_PR;
    456		value |= XGMAC_FILTER_PCF;
    457	} else if ((dev->flags & IFF_ALLMULTI) ||
    458		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
    459		value |= XGMAC_FILTER_PM;
    460
    461		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
    462			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
    463	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
    464		struct netdev_hw_addr *ha;
    465
    466		value |= XGMAC_FILTER_HMC;
    467
    468		netdev_for_each_mc_addr(ha, dev) {
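			/* The hash bin is selected by the upper mcbitslog2
			 * bits of the bit-reversed CRC-32 of the address.
			 */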
    469			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
    470					(32 - mcbitslog2));
    471			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
    472		}
    473	}
    474
    475	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
    476
    477	/* Handle multiple unicast addresses */
    478	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
    479		value |= XGMAC_FILTER_PR;
    480	} else {
    481		struct netdev_hw_addr *ha;
    482		int reg = 1;
    483
    484		netdev_for_each_uc_addr(ha, dev) {
    485			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
    486			reg++;
    487		}
    488
    489		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
    490			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
    491			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
    492		}
    493	}
    494
    495	writel(value, ioaddr + XGMAC_PACKET_FILTER);
    496}
    497
    498static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
    499{
    500	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
    501
    502	if (enable)
    503		value |= XGMAC_CONFIG_LM;
    504	else
    505		value &= ~XGMAC_CONFIG_LM;
    506
    507	writel(value, ioaddr + XGMAC_RX_CONFIG);
    508}
    509
    510static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
    511				  u32 val)
    512{
    513	u32 ctrl = 0;
    514
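	/* Indirect write: stage the value in RSS_DATA, select the key or
	 * lookup-table entry through RSS_ADDR and poll until the controller
	 * clears the busy flag.
	 */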
    515	writel(val, ioaddr + XGMAC_RSS_DATA);
    516	ctrl |= idx << XGMAC_RSSIA_SHIFT;
    517	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
    518	ctrl |= XGMAC_OB;
    519	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
    520
    521	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
    522				  !(ctrl & XGMAC_OB), 100, 10000);
    523}
    524
    525static int dwxgmac2_rss_configure(struct mac_device_info *hw,
    526				  struct stmmac_rss *cfg, u32 num_rxq)
    527{
    528	void __iomem *ioaddr = hw->pcsr;
    529	u32 value, *key;
    530	int i, ret;
    531
    532	value = readl(ioaddr + XGMAC_RSS_CTRL);
    533	if (!cfg || !cfg->enable) {
    534		value &= ~XGMAC_RSSE;
    535		writel(value, ioaddr + XGMAC_RSS_CTRL);
    536		return 0;
    537	}
    538
    539	key = (u32 *)cfg->key;
    540	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
    541		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
    542		if (ret)
    543			return ret;
    544	}
    545
    546	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
    547		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
    548		if (ret)
    549			return ret;
    550	}
    551
    552	for (i = 0; i < num_rxq; i++)
    553		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
    554
    555	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
    556	writel(value, ioaddr + XGMAC_RSS_CTRL);
    557	return 0;
    558}
    559
    560static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
    561				      __le16 perfect_match, bool is_double)
    562{
    563	void __iomem *ioaddr = hw->pcsr;
    564
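	/* Choose between hash-based VLAN filtering, a single perfect-match
	 * entry, or no VLAN filtering at all.
	 */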
    565	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
    566
    567	if (hash) {
    568		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
    569
    570		value |= XGMAC_FILTER_VTFE;
    571
    572		writel(value, ioaddr + XGMAC_PACKET_FILTER);
    573
    574		value = readl(ioaddr + XGMAC_VLAN_TAG);
    575
    576		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
    577		if (is_double) {
    578			value |= XGMAC_VLAN_EDVLP;
    579			value |= XGMAC_VLAN_ESVL;
    580			value |= XGMAC_VLAN_DOVLTC;
    581		} else {
    582			value &= ~XGMAC_VLAN_EDVLP;
    583			value &= ~XGMAC_VLAN_ESVL;
    584			value &= ~XGMAC_VLAN_DOVLTC;
    585		}
    586
    587		value &= ~XGMAC_VLAN_VID;
    588		writel(value, ioaddr + XGMAC_VLAN_TAG);
    589	} else if (perfect_match) {
    590		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
    591
    592		value |= XGMAC_FILTER_VTFE;
    593
    594		writel(value, ioaddr + XGMAC_PACKET_FILTER);
    595
    596		value = readl(ioaddr + XGMAC_VLAN_TAG);
    597
    598		value &= ~XGMAC_VLAN_VTHM;
    599		value |= XGMAC_VLAN_ETV;
    600		if (is_double) {
    601			value |= XGMAC_VLAN_EDVLP;
    602			value |= XGMAC_VLAN_ESVL;
    603			value |= XGMAC_VLAN_DOVLTC;
    604		} else {
    605			value &= ~XGMAC_VLAN_EDVLP;
    606			value &= ~XGMAC_VLAN_ESVL;
    607			value &= ~XGMAC_VLAN_DOVLTC;
    608		}
    609
    610		value &= ~XGMAC_VLAN_VID;
    611		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
    612	} else {
    613		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
    614
    615		value &= ~XGMAC_FILTER_VTFE;
    616
    617		writel(value, ioaddr + XGMAC_PACKET_FILTER);
    618
    619		value = readl(ioaddr + XGMAC_VLAN_TAG);
    620
    621		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
    622		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
    623		value &= ~XGMAC_VLAN_DOVLTC;
    624		value &= ~XGMAC_VLAN_VID;
    625
    626		writel(value, ioaddr + XGMAC_VLAN_TAG);
    627	}
    628}
    629
    630struct dwxgmac3_error_desc {
    631	bool valid;
    632	const char *desc;
    633	const char *detailed_desc;
    634};
    635
    636#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
    637
    638static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
    639			       const char *module_name,
    640			       const struct dwxgmac3_error_desc *desc,
    641			       unsigned long field_offset,
    642			       struct stmmac_safety_stats *stats)
    643{
    644	unsigned long loc, mask;
    645	u8 *bptr = (u8 *)stats;
    646	unsigned long *ptr;
    647
    648	ptr = (unsigned long *)(bptr + field_offset);
    649
    650	mask = value;
    651	for_each_set_bit(loc, &mask, 32) {
    652		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
    653				"correctable" : "uncorrectable", module_name,
    654				desc[loc].desc, desc[loc].detailed_desc);
    655
    656		/* Update counters */
    657		ptr[loc]++;
    658	}
    659}
    660
    661static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
    662	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
    663	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
    664	{ true, "TPES", "TSO Data Path Parity Check Error" },
    665	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
    666	{ true, "MTPES", "MTL Data Path Parity Check Error" },
    667	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
    668	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
    669	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
    670	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
    671	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
    672	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
    673	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
    674	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
    675	{ true, "TTES", "TX FSM Timeout Error" },
    676	{ true, "RTES", "RX FSM Timeout Error" },
    677	{ true, "CTES", "CSR FSM Timeout Error" },
    678	{ true, "ATES", "APP FSM Timeout Error" },
    679	{ true, "PTES", "PTP FSM Timeout Error" },
    680	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
    681	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
    682	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
    683	{ true, "MSTTES", "Master Read/Write Timeout Error" },
    684	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
    685	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
    686	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
    687	{ true, "FSMPES", "FSM State Parity Error" },
    688	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
    689	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
    690	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
    691	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
    692	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
    693	{ true, "CPI", "Control Register Parity Check Error" },
    694};
    695
    696static void dwxgmac3_handle_mac_err(struct net_device *ndev,
    697				    void __iomem *ioaddr, bool correctable,
    698				    struct stmmac_safety_stats *stats)
    699{
    700	u32 value;
    701
    702	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
    703	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
    704
    705	dwxgmac3_log_error(ndev, value, correctable, "MAC",
    706			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
    707}
    708
    709static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
    710	{ true, "TXCES", "MTL TX Memory Error" },
    711	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
    712	{ true, "TXUES", "MTL TX Memory Error" },
    713	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
    714	{ true, "RXCES", "MTL RX Memory Error" },
    715	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
    716	{ true, "RXUES", "MTL RX Memory Error" },
    717	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
    718	{ true, "ECES", "MTL EST Memory Error" },
    719	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
    720	{ true, "EUES", "MTL EST Memory Error" },
    721	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
    722	{ true, "RPCES", "MTL RX Parser Memory Error" },
    723	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
    724	{ true, "RPUES", "MTL RX Parser Memory Error" },
    725	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
    726	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
    727	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
    728	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
    729	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
    730	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
    731	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
    732	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
    733	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
    734	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
    735	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
    736	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
    737	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
    738	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
    739	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
    740	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
    741	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
    742};
    743
    744static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
    745				    void __iomem *ioaddr, bool correctable,
    746				    struct stmmac_safety_stats *stats)
    747{
    748	u32 value;
    749
    750	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
    751	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
    752
    753	dwxgmac3_log_error(ndev, value, correctable, "MTL",
    754			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
    755}
    756
    757static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
    758	{ true, "TCES", "DMA TSO Memory Error" },
    759	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
    760	{ true, "TUES", "DMA TSO Memory Error" },
    761	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
    762	{ true, "DCES", "DMA DCACHE Memory Error" },
    763	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
    764	{ true, "DUES", "DMA DCACHE Memory Error" },
    765	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
    766	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
    767	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
    768	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
    769	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
    770	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
    771	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
    772	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
    773	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
    774	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
    775	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
    776	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
    777	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
    778	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
    779	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
    780	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
    781	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
    782	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
    783	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
    784	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
    785	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
    786	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
    787	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
    788	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
    789	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
    790};
    791
    792static void dwxgmac3_handle_dma_err(struct net_device *ndev,
    793				    void __iomem *ioaddr, bool correctable,
    794				    struct stmmac_safety_stats *stats)
    795{
    796	u32 value;
    797
    798	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
    799	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
    800
    801	dwxgmac3_log_error(ndev, value, correctable, "DMA",
    802			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
    803}
    804
    805static int
    806dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
    807			    struct stmmac_safety_feature_cfg *safety_cfg)
    808{
    809	u32 value;
    810
    811	if (!asp)
    812		return -EINVAL;
    813
    814	/* 1. Enable Safety Features */
    815	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
    816
    817	/* 2. Enable MTL Safety Interrupts */
    818	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
    819	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
    820	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
    821	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
    822	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
    823	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
    824
    825	/* 3. Enable DMA Safety Interrupts */
    826	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
    827	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
    828	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
    829	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
    830
    831	/* Only ECC Protection for External Memory feature is selected */
    832	if (asp <= 0x1)
    833		return 0;
    834
    835	/* 4. Enable Parity and Timeout for FSM */
    836	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
    837	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
    838	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
    839	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
    840
    841	return 0;
    842}
    843
    844static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
    845					   void __iomem *ioaddr,
    846					   unsigned int asp,
    847					   struct stmmac_safety_stats *stats)
    848{
    849	bool err, corr;
    850	u32 mtl, dma;
    851	int ret = 0;
    852
    853	if (!asp)
    854		return -EINVAL;
    855
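	/* Classify pending safety interrupts per block (MAC, MTL, DMA);
	 * only uncorrectable errors are reflected in the return value.
	 */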
    856	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
    857	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
    858
    859	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
    860	corr = false;
    861	if (err) {
    862		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
    863		ret |= !corr;
    864	}
    865
    866	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
    867	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
    868	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
    869	if (err) {
    870		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
    871		ret |= !corr;
    872	}
    873
    874	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
    875	corr = dma & XGMAC_DECIS;
    876	if (err) {
    877		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
    878		ret |= !corr;
    879	}
    880
    881	return ret;
    882}
    883
    884static const struct dwxgmac3_error {
    885	const struct dwxgmac3_error_desc *desc;
    886} dwxgmac3_all_errors[] = {
    887	{ dwxgmac3_mac_errors },
    888	{ dwxgmac3_mtl_errors },
    889	{ dwxgmac3_dma_errors },
    890};
    891
    892static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
    893				     int index, unsigned long *count,
    894				     const char **desc)
    895{
    896	int module = index / 32, offset = index % 32;
    897	unsigned long *ptr = (unsigned long *)stats;
    898
    899	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
    900		return -EINVAL;
    901	if (!dwxgmac3_all_errors[module].desc[offset].valid)
    902		return -EINVAL;
    903	if (count)
    904		*count = *(ptr + index);
    905	if (desc)
    906		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
    907	return 0;
    908}
    909
    910static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
    911{
    912	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
    913
    914	val &= ~XGMAC_FRPE;
    915	writel(val, ioaddr + XGMAC_MTL_OPMODE);
    916
    917	return 0;
    918}
    919
    920static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
    921{
    922	u32 val;
    923
    924	val = readl(ioaddr + XGMAC_MTL_OPMODE);
    925	val |= XGMAC_FRPE;
    926	writel(val, ioaddr + XGMAC_MTL_OPMODE);
    927}
    928
    929static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
    930					    struct stmmac_tc_entry *entry,
    931					    int pos)
    932{
    933	int ret, i;
    934
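	/* The entry spans several 32-bit words; each word is programmed
	 * through the indirect access registers below.
	 */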
    935	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
    936		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
    937		u32 val;
    938
    939		/* Wait for ready */
    940		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
    941					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
    942		if (ret)
    943			return ret;
    944
    945		/* Write data */
    946		val = *((u32 *)&entry->val + i);
    947		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
    948
    949		/* Write pos */
    950		val = real_pos & XGMAC_ADDR;
    951		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
    952
    953		/* Write OP */
    954		val |= XGMAC_WRRDN;
    955		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
    956
    957		/* Start Write */
    958		val |= XGMAC_STARTBUSY;
    959		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
    960
    961		/* Wait for done */
    962		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
    963					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
    964		if (ret)
    965			return ret;
    966	}
    967
    968	return 0;
    969}
    970
    971static struct stmmac_tc_entry *
    972dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
    973			    unsigned int count, u32 curr_prio)
    974{
    975	struct stmmac_tc_entry *entry;
    976	u32 min_prio = ~0x0;
    977	int i, min_prio_idx;
    978	bool found = false;
    979
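	/* Among the entries still to be programmed, pick the one with the
	 * lowest priority that is not below curr_prio.
	 */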
    980	for (i = count - 1; i >= 0; i--) {
    981		entry = &entries[i];
    982
    983		/* Do not update unused entries */
    984		if (!entry->in_use)
    985			continue;
    986		/* Do not update already updated entries (i.e. fragments) */
    987		if (entry->in_hw)
    988			continue;
    989		/* Let last entry be updated last */
    990		if (entry->is_last)
    991			continue;
    992		/* Do not return fragments */
    993		if (entry->is_frag)
    994			continue;
    995		/* Check if we already checked this prio */
    996		if (entry->prio < curr_prio)
    997			continue;
    998		/* Check if this is the minimum prio */
    999		if (entry->prio < min_prio) {
   1000			min_prio = entry->prio;
   1001			min_prio_idx = i;
   1002			found = true;
   1003		}
   1004	}
   1005
   1006	if (found)
   1007		return &entries[min_prio_idx];
   1008	return NULL;
   1009}
   1010
   1011static int dwxgmac3_rxp_config(void __iomem *ioaddr,
   1012			       struct stmmac_tc_entry *entries,
   1013			       unsigned int count)
   1014{
   1015	struct stmmac_tc_entry *entry, *frag;
   1016	int i, ret, nve = 0;
   1017	u32 curr_prio = 0;
   1018	u32 old_val, val;
   1019
   1020	/* Force disable RX */
   1021	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
   1022	val = old_val & ~XGMAC_CONFIG_RE;
   1023	writel(val, ioaddr + XGMAC_RX_CONFIG);
   1024
   1025	/* Disable RX Parser */
   1026	ret = dwxgmac3_rxp_disable(ioaddr);
   1027	if (ret)
   1028		goto re_enable;
   1029
   1030	/* Set all entries as NOT in HW */
   1031	for (i = 0; i < count; i++) {
   1032		entry = &entries[i];
   1033		entry->in_hw = false;
   1034	}
   1035
   1036	/* Update entries by reverse order */
   1037	while (1) {
   1038		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
   1039		if (!entry)
   1040			break;
   1041
   1042		curr_prio = entry->prio;
   1043		frag = entry->frag_ptr;
   1044
   1045		/* Set special fragment requirements */
   1046		if (frag) {
   1047			entry->val.af = 0;
   1048			entry->val.rf = 0;
   1049			entry->val.nc = 1;
   1050			entry->val.ok_index = nve + 2;
   1051		}
   1052
   1053		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
   1054		if (ret)
   1055			goto re_enable;
   1056
   1057		entry->table_pos = nve++;
   1058		entry->in_hw = true;
   1059
   1060		if (frag && !frag->in_hw) {
   1061			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
   1062			if (ret)
   1063				goto re_enable;
   1064			frag->table_pos = nve++;
   1065			frag->in_hw = true;
   1066		}
   1067	}
   1068
   1069	if (!nve)
   1070		goto re_enable;
   1071
   1072	/* Update all pass entry */
   1073	for (i = 0; i < count; i++) {
   1074		entry = &entries[i];
   1075		if (!entry->is_last)
   1076			continue;
   1077
   1078		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
   1079		if (ret)
   1080			goto re_enable;
   1081
   1082		entry->table_pos = nve++;
   1083	}
   1084
   1085	/* Assume n. of parsable entries == n. of valid entries */
   1086	val = (nve << 16) & XGMAC_NPE;
   1087	val |= nve & XGMAC_NVE;
   1088	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
   1089
   1090	/* Enable RX Parser */
   1091	dwxgmac3_rxp_enable(ioaddr);
   1092
   1093re_enable:
   1094	/* Re-enable RX */
   1095	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
   1096	return ret;
   1097}
   1098
   1099static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
   1100{
   1101	void __iomem *ioaddr = hw->pcsr;
   1102	u32 value;
   1103
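	/* Wait for a timestamp capture, then combine the seconds and
	 * nanoseconds registers into a single nanosecond value.
	 */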
   1104	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
   1105				      value, value & XGMAC_TXTSC, 100, 10000))
   1106		return -EBUSY;
   1107
   1108	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
   1109	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
   1110	return 0;
   1111}
   1112
   1113static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
   1114				    struct stmmac_pps_cfg *cfg, bool enable,
   1115				    u32 sub_second_inc, u32 systime_flags)
   1116{
   1117	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
   1118	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
   1119	u64 period;
   1120
   1121	if (!cfg->available)
   1122		return -EINVAL;
   1123	if (tnsec & XGMAC_TRGTBUSY0)
   1124		return -EBUSY;
   1125	if (!sub_second_inc || !systime_flags)
   1126		return -EINVAL;
   1127
   1128	val &= ~XGMAC_PPSx_MASK(index);
   1129
   1130	if (!enable) {
   1131		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
   1132		writel(val, ioaddr + XGMAC_PPS_CONTROL);
   1133		return 0;
   1134	}
   1135
   1136	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
   1137	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
   1138	val |= XGMAC_PPSEN0;
   1139
   1140	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
   1141
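	/* In binary rollover mode the sub-second register counts in units
	 * of ~0.465 ns, so convert the nanosecond part accordingly.
	 */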
   1142	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
   1143		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
   1144	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
   1145
   1146	period = cfg->period.tv_sec * 1000000000;
   1147	period += cfg->period.tv_nsec;
   1148
   1149	do_div(period, sub_second_inc);
   1150
   1151	if (period <= 1)
   1152		return -EINVAL;
   1153
   1154	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
   1155
   1156	period >>= 1;
   1157	if (period <= 1)
   1158		return -EINVAL;
   1159
   1160	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
   1161
   1162	/* Finally, activate it */
   1163	writel(val, ioaddr + XGMAC_PPS_CONTROL);
   1164	return 0;
   1165}
   1166
   1167static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
   1168{
   1169	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
   1170
   1171	value &= ~XGMAC_CONFIG_SARC;
   1172	value |= val << XGMAC_CONFIG_SARC_SHIFT;
   1173
   1174	writel(value, ioaddr + XGMAC_TX_CONFIG);
   1175}
   1176
   1177static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
   1178{
   1179	void __iomem *ioaddr = hw->pcsr;
   1180	u32 value;
   1181
   1182	value = readl(ioaddr + XGMAC_VLAN_INCL);
   1183	value |= XGMAC_VLAN_VLTI;
   1184	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
   1185	value &= ~XGMAC_VLAN_VLC;
   1186	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
   1187	writel(value, ioaddr + XGMAC_VLAN_INCL);
   1188}
   1189
   1190static int dwxgmac2_filter_wait(struct mac_device_info *hw)
   1191{
   1192	void __iomem *ioaddr = hw->pcsr;
   1193	u32 value;
   1194
   1195	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
   1196			       !(value & XGMAC_XB), 100, 10000))
   1197		return -EBUSY;
   1198	return 0;
   1199}
   1200
   1201static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
   1202				u8 reg, u32 *data)
   1203{
   1204	void __iomem *ioaddr = hw->pcsr;
   1205	u32 value;
   1206	int ret;
   1207
   1208	ret = dwxgmac2_filter_wait(hw);
   1209	if (ret)
   1210		return ret;
   1211
   1212	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
   1213	value |= XGMAC_TT | XGMAC_XB;
   1214	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
   1215
   1216	ret = dwxgmac2_filter_wait(hw);
   1217	if (ret)
   1218		return ret;
   1219
   1220	*data = readl(ioaddr + XGMAC_L3L4_DATA);
   1221	return 0;
   1222}
   1223
   1224static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
   1225				 u8 reg, u32 data)
   1226{
   1227	void __iomem *ioaddr = hw->pcsr;
   1228	u32 value;
   1229	int ret;
   1230
   1231	ret = dwxgmac2_filter_wait(hw);
   1232	if (ret)
   1233		return ret;
   1234
   1235	writel(data, ioaddr + XGMAC_L3L4_DATA);
   1236
   1237	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
   1238	value |= XGMAC_XB;
   1239	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
   1240
   1241	return dwxgmac2_filter_wait(hw);
   1242}
   1243
   1244static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
   1245				     bool en, bool ipv6, bool sa, bool inv,
   1246				     u32 match)
   1247{
   1248	void __iomem *ioaddr = hw->pcsr;
   1249	u32 value;
   1250	int ret;
   1251
   1252	value = readl(ioaddr + XGMAC_PACKET_FILTER);
   1253	value |= XGMAC_FILTER_IPFE;
   1254	writel(value, ioaddr + XGMAC_PACKET_FILTER);
   1255
   1256	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
   1257	if (ret)
   1258		return ret;
   1259
   1260	/* For IPv6 not both SA/DA filters can be active */
   1261	if (ipv6) {
   1262		value |= XGMAC_L3PEN0;
   1263		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
   1264		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
   1265		if (sa) {
   1266			value |= XGMAC_L3SAM0;
   1267			if (inv)
   1268				value |= XGMAC_L3SAIM0;
   1269		} else {
   1270			value |= XGMAC_L3DAM0;
   1271			if (inv)
   1272				value |= XGMAC_L3DAIM0;
   1273		}
   1274	} else {
   1275		value &= ~XGMAC_L3PEN0;
   1276		if (sa) {
   1277			value |= XGMAC_L3SAM0;
   1278			if (inv)
   1279				value |= XGMAC_L3SAIM0;
   1280		} else {
   1281			value |= XGMAC_L3DAM0;
   1282			if (inv)
   1283				value |= XGMAC_L3DAIM0;
   1284		}
   1285	}
   1286
   1287	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
   1288	if (ret)
   1289		return ret;
   1290
   1291	if (sa) {
   1292		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
   1293		if (ret)
   1294			return ret;
   1295	} else {
   1296		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
   1297		if (ret)
   1298			return ret;
   1299	}
   1300
   1301	if (!en)
   1302		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
   1303
   1304	return 0;
   1305}
   1306
   1307static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
   1308				     bool en, bool udp, bool sa, bool inv,
   1309				     u32 match)
   1310{
   1311	void __iomem *ioaddr = hw->pcsr;
   1312	u32 value;
   1313	int ret;
   1314
   1315	value = readl(ioaddr + XGMAC_PACKET_FILTER);
   1316	value |= XGMAC_FILTER_IPFE;
   1317	writel(value, ioaddr + XGMAC_PACKET_FILTER);
   1318
   1319	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
   1320	if (ret)
   1321		return ret;
   1322
   1323	if (udp) {
   1324		value |= XGMAC_L4PEN0;
   1325	} else {
   1326		value &= ~XGMAC_L4PEN0;
   1327	}
   1328
   1329	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
   1330	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
   1331	if (sa) {
   1332		value |= XGMAC_L4SPM0;
   1333		if (inv)
   1334			value |= XGMAC_L4SPIM0;
   1335	} else {
   1336		value |= XGMAC_L4DPM0;
   1337		if (inv)
   1338			value |= XGMAC_L4DPIM0;
   1339	}
   1340
   1341	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
   1342	if (ret)
   1343		return ret;
   1344
   1345	if (sa) {
   1346		value = match & XGMAC_L4SP0;
   1347
   1348		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
   1349		if (ret)
   1350			return ret;
   1351	} else {
   1352		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
   1353
   1354		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
   1355		if (ret)
   1356			return ret;
   1357	}
   1358
   1359	if (!en)
   1360		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
   1361
   1362	return 0;
   1363}
   1364
   1365static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
   1366				     u32 addr)
   1367{
   1368	void __iomem *ioaddr = hw->pcsr;
   1369	u32 value;
   1370
   1371	writel(addr, ioaddr + XGMAC_ARP_ADDR);
   1372
   1373	value = readl(ioaddr + XGMAC_RX_CONFIG);
   1374	if (en)
   1375		value |= XGMAC_CONFIG_ARPEN;
   1376	else
   1377		value &= ~XGMAC_CONFIG_ARPEN;
   1378	writel(value, ioaddr + XGMAC_RX_CONFIG);
   1379}
   1380
   1381static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
   1382{
   1383	u32 ctrl;
   1384
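	/* Indirect EST write: GCRR addresses the EST control registers,
	 * otherwise the gate control list; SRWO kicks the write and is
	 * polled until it clears.
	 */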
   1385	writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
   1386
   1387	ctrl = (reg << XGMAC_ADDR_SHIFT);
   1388	ctrl |= gcl ? 0 : XGMAC_GCRR;
   1389
   1390	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
   1391
   1392	ctrl |= XGMAC_SRWO;
   1393	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
   1394
   1395	return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
   1396					 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
   1397}
   1398
   1399static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
   1400				  unsigned int ptp_rate)
   1401{
   1402	int i, ret = 0x0;
   1403	u32 ctrl;
   1404
   1405	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
   1406	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
   1407	ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
   1408	ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
   1409	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
   1410	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
   1411	if (ret)
   1412		return ret;
   1413
   1414	for (i = 0; i < cfg->gcl_size; i++) {
   1415		ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
   1416		if (ret)
   1417			return ret;
   1418	}
   1419
   1420	ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
   1421	ctrl &= ~XGMAC_PTOV;
   1422	ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
   1423	if (cfg->enable)
   1424		ctrl |= XGMAC_EEST | XGMAC_SSWL;
   1425	else
   1426		ctrl &= ~XGMAC_EEST;
   1427
   1428	writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
   1429	return 0;
   1430}
   1431
   1432static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
   1433				   u32 num_rxq, bool enable)
   1434{
   1435	u32 value;
   1436
   1437	if (!enable) {
   1438		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
   1439
   1440		value &= ~XGMAC_EFPE;
   1441
   1442		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
   1443		return;
   1444	}
   1445
   1446	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
   1447	value &= ~XGMAC_RQ;
   1448	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
   1449	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
   1450
   1451	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
   1452	value |= XGMAC_EFPE;
   1453	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
   1454}
   1455
   1456const struct stmmac_ops dwxgmac210_ops = {
   1457	.core_init = dwxgmac2_core_init,
   1458	.set_mac = dwxgmac2_set_mac,
   1459	.rx_ipc = dwxgmac2_rx_ipc,
   1460	.rx_queue_enable = dwxgmac2_rx_queue_enable,
   1461	.rx_queue_prio = dwxgmac2_rx_queue_prio,
   1462	.tx_queue_prio = dwxgmac2_tx_queue_prio,
   1463	.rx_queue_routing = NULL,
   1464	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
   1465	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
   1466	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
   1467	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
   1468	.config_cbs = dwxgmac2_config_cbs,
   1469	.dump_regs = dwxgmac2_dump_regs,
   1470	.host_irq_status = dwxgmac2_host_irq_status,
   1471	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
   1472	.flow_ctrl = dwxgmac2_flow_ctrl,
   1473	.pmt = dwxgmac2_pmt,
   1474	.set_umac_addr = dwxgmac2_set_umac_addr,
   1475	.get_umac_addr = dwxgmac2_get_umac_addr,
   1476	.set_eee_mode = dwxgmac2_set_eee_mode,
   1477	.reset_eee_mode = dwxgmac2_reset_eee_mode,
   1478	.set_eee_timer = dwxgmac2_set_eee_timer,
   1479	.set_eee_pls = dwxgmac2_set_eee_pls,
   1480	.pcs_ctrl_ane = NULL,
   1481	.pcs_rane = NULL,
   1482	.pcs_get_adv_lp = NULL,
   1483	.debug = NULL,
   1484	.set_filter = dwxgmac2_set_filter,
   1485	.safety_feat_config = dwxgmac3_safety_feat_config,
   1486	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
   1487	.safety_feat_dump = dwxgmac3_safety_feat_dump,
   1488	.set_mac_loopback = dwxgmac2_set_mac_loopback,
   1489	.rss_configure = dwxgmac2_rss_configure,
   1490	.update_vlan_hash = dwxgmac2_update_vlan_hash,
   1491	.rxp_config = dwxgmac3_rxp_config,
   1492	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
   1493	.flex_pps_config = dwxgmac2_flex_pps_config,
   1494	.sarc_configure = dwxgmac2_sarc_configure,
   1495	.enable_vlan = dwxgmac2_enable_vlan,
   1496	.config_l3_filter = dwxgmac2_config_l3_filter,
   1497	.config_l4_filter = dwxgmac2_config_l4_filter,
   1498	.set_arp_offload = dwxgmac2_set_arp_offload,
   1499	.est_configure = dwxgmac3_est_configure,
   1500	.fpe_configure = dwxgmac3_fpe_configure,
   1501};
   1502
   1503static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
   1504				      u32 queue)
   1505{
   1506	void __iomem *ioaddr = hw->pcsr;
   1507	u32 value;
   1508
   1509	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
   1510	if (mode == MTL_QUEUE_AVB)
   1511		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
   1512	else if (mode == MTL_QUEUE_DCB)
   1513		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
   1514	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
   1515}
   1516
   1517const struct stmmac_ops dwxlgmac2_ops = {
   1518	.core_init = dwxgmac2_core_init,
   1519	.set_mac = dwxgmac2_set_mac,
   1520	.rx_ipc = dwxgmac2_rx_ipc,
   1521	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
   1522	.rx_queue_prio = dwxgmac2_rx_queue_prio,
   1523	.tx_queue_prio = dwxgmac2_tx_queue_prio,
   1524	.rx_queue_routing = NULL,
   1525	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
   1526	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
   1527	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
   1528	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
   1529	.config_cbs = dwxgmac2_config_cbs,
   1530	.dump_regs = dwxgmac2_dump_regs,
   1531	.host_irq_status = dwxgmac2_host_irq_status,
   1532	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
   1533	.flow_ctrl = dwxgmac2_flow_ctrl,
   1534	.pmt = dwxgmac2_pmt,
   1535	.set_umac_addr = dwxgmac2_set_umac_addr,
   1536	.get_umac_addr = dwxgmac2_get_umac_addr,
   1537	.set_eee_mode = dwxgmac2_set_eee_mode,
   1538	.reset_eee_mode = dwxgmac2_reset_eee_mode,
   1539	.set_eee_timer = dwxgmac2_set_eee_timer,
   1540	.set_eee_pls = dwxgmac2_set_eee_pls,
   1541	.pcs_ctrl_ane = NULL,
   1542	.pcs_rane = NULL,
   1543	.pcs_get_adv_lp = NULL,
   1544	.debug = NULL,
   1545	.set_filter = dwxgmac2_set_filter,
   1546	.safety_feat_config = dwxgmac3_safety_feat_config,
   1547	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
   1548	.safety_feat_dump = dwxgmac3_safety_feat_dump,
   1549	.set_mac_loopback = dwxgmac2_set_mac_loopback,
   1550	.rss_configure = dwxgmac2_rss_configure,
   1551	.update_vlan_hash = dwxgmac2_update_vlan_hash,
   1552	.rxp_config = dwxgmac3_rxp_config,
   1553	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
   1554	.flex_pps_config = dwxgmac2_flex_pps_config,
   1555	.sarc_configure = dwxgmac2_sarc_configure,
   1556	.enable_vlan = dwxgmac2_enable_vlan,
   1557	.config_l3_filter = dwxgmac2_config_l3_filter,
   1558	.config_l4_filter = dwxgmac2_config_l4_filter,
   1559	.set_arp_offload = dwxgmac2_set_arp_offload,
   1560	.est_configure = dwxgmac3_est_configure,
   1561	.fpe_configure = dwxgmac3_fpe_configure,
   1562};
   1563
   1564int dwxgmac2_setup(struct stmmac_priv *priv)
   1565{
   1566	struct mac_device_info *mac = priv->hw;
   1567
   1568	dev_info(priv->device, "\tXGMAC2\n");
   1569
   1570	priv->dev->priv_flags |= IFF_UNICAST_FLT;
   1571	mac->pcsr = priv->ioaddr;
   1572	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
   1573	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
   1574	mac->mcast_bits_log2 = 0;
   1575
   1576	if (mac->multicast_filter_bins)
   1577		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
   1578
   1579	mac->link.duplex = 0;
   1580	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
   1581	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
   1582	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
   1583	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
   1584	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
   1585	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
   1586	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
   1587	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
   1588
   1589	mac->mii.addr = XGMAC_MDIO_ADDR;
   1590	mac->mii.data = XGMAC_MDIO_DATA;
   1591	mac->mii.addr_shift = 16;
   1592	mac->mii.addr_mask = GENMASK(20, 16);
   1593	mac->mii.reg_shift = 0;
   1594	mac->mii.reg_mask = GENMASK(15, 0);
   1595	mac->mii.clk_csr_shift = 19;
   1596	mac->mii.clk_csr_mask = GENMASK(21, 19);
   1597
   1598	return 0;
   1599}
   1600
   1601int dwxlgmac2_setup(struct stmmac_priv *priv)
   1602{
   1603	struct mac_device_info *mac = priv->hw;
   1604
   1605	dev_info(priv->device, "\tXLGMAC\n");
   1606
   1607	priv->dev->priv_flags |= IFF_UNICAST_FLT;
   1608	mac->pcsr = priv->ioaddr;
   1609	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
   1610	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
   1611	mac->mcast_bits_log2 = 0;
   1612
   1613	if (mac->multicast_filter_bins)
   1614		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
   1615
   1616	mac->link.duplex = 0;
   1617	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
   1618	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
   1619	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
   1620	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
   1621	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
   1622	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
   1623	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
   1624	mac->link.speed_mask = XLGMAC_CONFIG_SS;
   1625
   1626	mac->mii.addr = XGMAC_MDIO_ADDR;
   1627	mac->mii.data = XGMAC_MDIO_DATA;
   1628	mac->mii.addr_shift = 16;
   1629	mac->mii.addr_mask = GENMASK(20, 16);
   1630	mac->mii.reg_shift = 0;
   1631	mac->mii.reg_mask = GENMASK(15, 0);
   1632	mac->mii.clk_csr_shift = 19;
   1633	mac->mii.clk_csr_mask = GENMASK(21, 19);
   1634
   1635	return 0;
   1636}