cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

thunder_bgx.c (45075B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2015 Cavium, Inc.
      4 */
      5
      6#include <linux/acpi.h>
      7#include <linux/module.h>
      8#include <linux/interrupt.h>
      9#include <linux/pci.h>
     10#include <linux/netdevice.h>
     11#include <linux/etherdevice.h>
     12#include <linux/phy.h>
     13#include <linux/of.h>
     14#include <linux/of_mdio.h>
     15#include <linux/of_net.h>
     16
     17#include "nic_reg.h"
     18#include "nic.h"
     19#include "thunder_bgx.h"
     20
     21#define DRV_NAME	"thunder_bgx"
     22#define DRV_VERSION	"1.0"
     23
     24/* RX_DMAC_CTL configuration */
     25enum MCAST_MODE {
     26		MCAST_MODE_REJECT = 0x0,
     27		MCAST_MODE_ACCEPT = 0x1,
     28		MCAST_MODE_CAM_FILTER = 0x2,
     29		RSVD = 0x3
     30};
     31
     32#define BCAST_ACCEPT      BIT(0)
     33#define CAM_ACCEPT        BIT(3)
     34#define MCAST_MODE_MASK   0x3
      35#define BGX_MCAST_MODE(x) ((x) << 1)
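        /* As used in bgx_set_xcast_mode() below, these land in BGX_CMRX_RX_DMAC_CTL:
         * bit 0 accepts broadcast, bits <2:1> carry the MCAST_MODE value above and
         * bit 3 enables acceptance of CAM-filtered DMACs.
         */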
     36
     37struct dmac_map {
     38	u64                     vf_map;
     39	u64                     dmac;
     40};
     41
     42struct lmac {
     43	struct bgx		*bgx;
     44	/* actual number of DMACs configured */
     45	u8			dmacs_cfg;
      46	/* overall number of DMACs that can be configured per LMAC */
     47	u8                      dmacs_count;
     48	struct dmac_map         *dmacs; /* DMAC:VFs tracking filter array */
     49	u8			mac[ETH_ALEN];
     50	u8                      lmac_type;
     51	u8                      lane_to_sds;
     52	bool                    use_training;
     53	bool                    autoneg;
     54	bool			link_up;
     55	int			lmacid; /* ID within BGX */
     56	int			lmacid_bd; /* ID on board */
     57	struct net_device       netdev;
     58	struct phy_device       *phydev;
     59	unsigned int            last_duplex;
     60	unsigned int            last_link;
     61	unsigned int            last_speed;
     62	bool			is_sgmii;
     63	struct delayed_work	dwork;
     64	struct workqueue_struct *check_link;
     65};
     66
     67struct bgx {
     68	u8			bgx_id;
     69	struct	lmac		lmac[MAX_LMAC_PER_BGX];
     70	u8			lmac_count;
     71	u8			max_lmac;
     72	u8                      acpi_lmac_idx;
     73	void __iomem		*reg_base;
     74	struct pci_dev		*pdev;
     75	bool                    is_dlm;
     76	bool                    is_rgx;
     77};
     78
     79static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
     80static int lmac_count; /* Total no of LMACs in system */
     81
     82static int bgx_xaui_check_link(struct lmac *lmac);
     83
     84/* Supported devices */
     85static const struct pci_device_id bgx_id_table[] = {
     86	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
     87	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
     88	{ 0, }  /* end of table */
     89};
     90
     91MODULE_AUTHOR("Cavium Inc");
     92MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
     93MODULE_LICENSE("GPL v2");
     94MODULE_VERSION(DRV_VERSION);
     95MODULE_DEVICE_TABLE(pci, bgx_id_table);
     96
     97/* The Cavium ThunderX network controller can *only* be found in SoCs
     98 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
     99 * registers on this platform are implicitly strongly ordered with respect
    100 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
    101 * with no memory barriers in this driver.  The readq()/writeq() functions add
    102 * explicit ordering operation which in this case are redundant, and only
    103 * add overhead.
    104 */
    105
    106/* Register read/write APIs */
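        /* Each LMAC's CSRs sit at a 1MB stride from reg_base, hence the
         * (lmac << 20) term in the address calculation.
         */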
    107static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
    108{
    109	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
    110
    111	return readq_relaxed(addr);
    112}
    113
    114static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
    115{
    116	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
    117
    118	writeq_relaxed(val, addr);
    119}
    120
    121static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
    122{
    123	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
    124
    125	writeq_relaxed(val | readq_relaxed(addr), addr);
    126}
    127
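        /* Poll a register until the masked bits are all clear (zero == true) or any
         * are set (zero == false); returns 0 on success, 1 if the condition is not
         * met within roughly 100-200ms (100 iterations of a 1-2ms sleep).
         */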
    128static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
    129{
    130	int timeout = 100;
    131	u64 reg_val;
    132
    133	while (timeout) {
    134		reg_val = bgx_reg_read(bgx, lmac, reg);
    135		if (zero && !(reg_val & mask))
    136			return 0;
    137		if (!zero && (reg_val & mask))
    138			return 0;
    139		usleep_range(1000, 2000);
    140		timeout--;
    141	}
    142	return 1;
    143}
    144
    145static int max_bgx_per_node;
    146static void set_max_bgx_per_node(struct pci_dev *pdev)
    147{
    148	u16 sdevid;
    149
    150	if (max_bgx_per_node)
    151		return;
    152
    153	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
    154	switch (sdevid) {
    155	case PCI_SUBSYS_DEVID_81XX_BGX:
    156	case PCI_SUBSYS_DEVID_81XX_RGX:
    157		max_bgx_per_node = MAX_BGX_PER_CN81XX;
    158		break;
    159	case PCI_SUBSYS_DEVID_83XX_BGX:
    160		max_bgx_per_node = MAX_BGX_PER_CN83XX;
    161		break;
    162	case PCI_SUBSYS_DEVID_88XX_BGX:
    163	default:
    164		max_bgx_per_node = MAX_BGX_PER_CN88XX;
    165		break;
    166	}
    167}
    168
    169static struct bgx *get_bgx(int node, int bgx_idx)
    170{
    171	int idx = (node * max_bgx_per_node) + bgx_idx;
    172
    173	return bgx_vnic[idx];
    174}
    175
     176/* Return a bitmap of BGXs present in HW */
    177unsigned bgx_get_map(int node)
    178{
    179	int i;
    180	unsigned map = 0;
    181
    182	for (i = 0; i < max_bgx_per_node; i++) {
    183		if (bgx_vnic[(node * max_bgx_per_node) + i])
    184			map |= (1 << i);
    185	}
    186
    187	return map;
    188}
    189EXPORT_SYMBOL(bgx_get_map);
    190
    191/* Return number of LMAC configured for this BGX */
    192int bgx_get_lmac_count(int node, int bgx_idx)
    193{
    194	struct bgx *bgx;
    195
    196	bgx = get_bgx(node, bgx_idx);
    197	if (bgx)
    198		return bgx->lmac_count;
    199
    200	return 0;
    201}
    202EXPORT_SYMBOL(bgx_get_lmac_count);
    203
    204/* Returns the current link status of LMAC */
    205void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
    206{
    207	struct bgx_link_status *link = (struct bgx_link_status *)status;
    208	struct bgx *bgx;
    209	struct lmac *lmac;
    210
    211	bgx = get_bgx(node, bgx_idx);
    212	if (!bgx)
    213		return;
    214
    215	lmac = &bgx->lmac[lmacid];
    216	link->mac_type = lmac->lmac_type;
    217	link->link_up = lmac->link_up;
    218	link->duplex = lmac->last_duplex;
    219	link->speed = lmac->last_speed;
    220}
    221EXPORT_SYMBOL(bgx_get_lmac_link_state);
    222
    223const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
    224{
    225	struct bgx *bgx = get_bgx(node, bgx_idx);
    226
    227	if (bgx)
    228		return bgx->lmac[lmacid].mac;
    229
    230	return NULL;
    231}
    232EXPORT_SYMBOL(bgx_get_lmac_mac);
    233
    234void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
    235{
    236	struct bgx *bgx = get_bgx(node, bgx_idx);
    237
    238	if (!bgx)
    239		return;
    240
    241	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
    242}
    243EXPORT_SYMBOL(bgx_set_lmac_mac);
    244
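        /* The DMAC CAM is partitioned statically across LMACs: each LMAC owns
         * dmacs_count consecutive entries starting at index lmacid * dmacs_count.
         */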
    245static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
    246{
    247	struct lmac *lmac = NULL;
    248	u8  idx = 0;
    249
    250	lmac = &bgx->lmac[lmacid];
    251	/* reset CAM filters */
    252	for (idx = 0; idx < lmac->dmacs_count; idx++)
    253		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
    254			      ((lmacid * lmac->dmacs_count) + idx) *
    255			      sizeof(u64), 0);
    256}
    257
    258static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
    259{
    260	int i = 0;
    261
    262	if (!lmac)
    263		return;
    264
     265	/* We've got a reset-filters request from one of the attached VFs, while
     266	 * the others might want to keep their configuration. So iterate over all
     267	 * configured filters and decrease the number of references; if an address
     268	 * reaches zero refs, remove it from the list.
     269	 */
    270	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
    271		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
    272		if (!lmac->dmacs[i].vf_map) {
    273			lmac->dmacs_cfg--;
    274			lmac->dmacs[i].dmac = 0;
    275			lmac->dmacs[i].vf_map = 0;
    276		}
    277	}
    278}
    279
    280static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
    281{
    282	u8 i = 0;
    283
    284	if (!lmac)
    285		return -1;
    286
     287	/* Several VFs may be 'attached' to a particular LMAC at the same time,
     288	 * and each VF is represented as a network interface to the kernel. So
     289	 * from the user's perspective it should be possible to manipulate each
     290	 * VF's receive modes. However, from the PF driver's perspective we need
     291	 * to keep track of the filter configurations of different VFs to prevent
     292	 * duplicate filter values.
     293	 */
    294	for (i = 0; i < lmac->dmacs_cfg; i++) {
    295		if (lmac->dmacs[i].dmac == dmac) {
    296			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
    297			return -1;
    298		}
    299	}
    300
    301	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
    302		return -1;
    303
    304	/* keep it for further tracking */
    305	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
    306	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
    307	lmac->dmacs_cfg++;
    308	return 0;
    309}
    310
    311static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
    312				       u64 cam_dmac, u8 idx)
    313{
    314	struct lmac *lmac = NULL;
    315	u64 cfg = 0;
    316
    317	/* skip zero addresses as meaningless */
    318	if (!cam_dmac || !bgx)
    319		return -1;
    320
    321	lmac = &bgx->lmac[lmacid];
    322
    323	/* configure DCAM filtering for designated LMAC */
    324	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
    325		RX_DMACX_CAM_EN | cam_dmac;
    326	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
    327		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
    328	return 0;
    329}
    330
    331void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
    332			     u64 cam_dmac, u8 vf_id)
    333{
    334	struct bgx *bgx = get_bgx(node, bgx_idx);
    335	struct lmac *lmac = NULL;
    336
    337	if (!bgx)
    338		return;
    339
    340	lmac = &bgx->lmac[lmacid];
    341
    342	if (!cam_dmac)
    343		cam_dmac = ether_addr_to_u64(lmac->mac);
    344
     345	/* Since we might have several VFs attached to a particular LMAC,
     346	 * and the kernel could call mcast config for each of them with the
     347	 * same MAC, check if the requested MAC is already in the filtering list
     348	 * and update/prepare the list of MACs to be applied to the HW filters later.
     349	 */
    350	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
    351}
    352EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
    353
    354void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
    355{
    356	struct bgx *bgx = get_bgx(node, bgx_idx);
    357	struct lmac *lmac = NULL;
    358	u64 cfg = 0;
    359	u8 i = 0;
    360
    361	if (!bgx)
    362		return;
    363
    364	lmac = &bgx->lmac[lmacid];
    365
    366	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
    367	if (mode & BGX_XCAST_BCAST_ACCEPT)
    368		cfg |= BCAST_ACCEPT;
    369	else
    370		cfg &= ~BCAST_ACCEPT;
    371
    372	/* disable all MCASTs and DMAC filtering */
    373	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));
    374
     375	/* check requested bits and set filtering mode appropriately */
    376	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
    377		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
    378	} else if (mode & BGX_XCAST_MCAST_FILTER) {
    379		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
    380		for (i = 0; i < lmac->dmacs_cfg; i++)
    381			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
    382						    lmac->dmacs[i].dmac, i);
    383	}
    384	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
    385}
    386EXPORT_SYMBOL(bgx_set_xcast_mode);
    387
    388void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
    389{
    390	struct bgx *bgx = get_bgx(node, bgx_idx);
    391
    392	if (!bgx)
    393		return;
    394
    395	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
    396	bgx_flush_dmac_cam_filter(bgx, lmacid);
    397	bgx_set_xcast_mode(node, bgx_idx, lmacid,
    398			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
    399}
    400EXPORT_SYMBOL(bgx_reset_xcast_mode);
    401
    402void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
    403{
    404	struct bgx *bgx = get_bgx(node, bgx_idx);
    405	struct lmac *lmac;
    406	u64 cfg;
    407
    408	if (!bgx)
    409		return;
    410	lmac = &bgx->lmac[lmacid];
    411
    412	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
    413	if (enable) {
    414		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
    415
    416		/* enable TX FIFO Underflow interrupt */
    417		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
    418			       GMI_TXX_INT_UNDFLW);
    419	} else {
    420		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
    421
    422		/* Disable TX FIFO Underflow interrupt */
    423		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
    424			       GMI_TXX_INT_UNDFLW);
    425	}
    426	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
    427
    428	if (bgx->is_rgx)
    429		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
    430}
    431EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
    432
    433/* Enables or disables timestamp insertion by BGX for Rx packets */
    434void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
    435{
    436	struct bgx *bgx = get_bgx(node, bgx_idx);
    437	struct lmac *lmac;
    438	u64 csr_offset, cfg;
    439
    440	if (!bgx)
    441		return;
    442
    443	lmac = &bgx->lmac[lmacid];
    444
    445	if (lmac->lmac_type == BGX_MODE_SGMII ||
    446	    lmac->lmac_type == BGX_MODE_QSGMII ||
    447	    lmac->lmac_type == BGX_MODE_RGMII)
    448		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
    449	else
    450		csr_offset = BGX_SMUX_RX_FRM_CTL;
    451
    452	cfg = bgx_reg_read(bgx, lmacid, csr_offset);
    453
    454	if (enable)
    455		cfg |= BGX_PKT_RX_PTP_EN;
    456	else
    457		cfg &= ~BGX_PKT_RX_PTP_EN;
    458	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
    459}
    460EXPORT_SYMBOL(bgx_config_timestamping);
    461
    462void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
    463{
    464	struct pfc *pfc = (struct pfc *)pause;
    465	struct bgx *bgx = get_bgx(node, bgx_idx);
    466	struct lmac *lmac;
    467	u64 cfg;
    468
    469	if (!bgx)
    470		return;
    471	lmac = &bgx->lmac[lmacid];
    472	if (lmac->is_sgmii)
    473		return;
    474
    475	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
    476	pfc->fc_rx = cfg & RX_EN;
    477	pfc->fc_tx = cfg & TX_EN;
    478	pfc->autoneg = 0;
    479}
    480EXPORT_SYMBOL(bgx_lmac_get_pfc);
    481
    482void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
    483{
    484	struct pfc *pfc = (struct pfc *)pause;
    485	struct bgx *bgx = get_bgx(node, bgx_idx);
    486	struct lmac *lmac;
    487	u64 cfg;
    488
    489	if (!bgx)
    490		return;
    491	lmac = &bgx->lmac[lmacid];
    492	if (lmac->is_sgmii)
    493		return;
    494
    495	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
    496	cfg &= ~(RX_EN | TX_EN);
    497	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
    498	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
    499	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
    500}
    501EXPORT_SYMBOL(bgx_lmac_set_pfc);
    502
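        /* Reprogram the GMI/PCS blocks for the current SGMII link state: packet
         * RX/TX is paused and the GMI port drained before speed, duplex and the
         * sampling point are updated, then the previous RX/TX enables are restored.
         */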
    503static void bgx_sgmii_change_link_state(struct lmac *lmac)
    504{
    505	struct bgx *bgx = lmac->bgx;
    506	u64 cmr_cfg;
    507	u64 port_cfg = 0;
    508	u64 misc_ctl = 0;
    509	bool tx_en, rx_en;
    510
    511	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
    512	tx_en = cmr_cfg & CMR_PKT_TX_EN;
    513	rx_en = cmr_cfg & CMR_PKT_RX_EN;
    514	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
    515	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
    516
    517	/* Wait for BGX RX to be idle */
    518	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
    519			 GMI_PORT_CFG_RX_IDLE, false)) {
    520		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
    521			bgx->bgx_id, lmac->lmacid);
    522		return;
    523	}
    524
    525	/* Wait for BGX TX to be idle */
    526	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
    527			 GMI_PORT_CFG_TX_IDLE, false)) {
    528		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
    529			bgx->bgx_id, lmac->lmacid);
    530		return;
    531	}
    532
    533	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
    534	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
    535
    536	if (lmac->link_up) {
    537		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
    538		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
    539		port_cfg |=  (lmac->last_duplex << 2);
    540	} else {
    541		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
    542	}
    543
    544	switch (lmac->last_speed) {
    545	case 10:
    546		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
    547		port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
    548		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
    549		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
    550		misc_ctl |= 50; /* samp_pt */
    551		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
    552		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
    553		break;
    554	case 100:
    555		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
    556		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
    557		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
    558		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
    559		misc_ctl |= 5; /* samp_pt */
    560		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
    561		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
    562		break;
    563	case 1000:
    564		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
    565		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
    566		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
    567		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
    568		misc_ctl |= 1; /* samp_pt */
    569		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
    570		if (lmac->last_duplex)
    571			bgx_reg_write(bgx, lmac->lmacid,
    572				      BGX_GMP_GMI_TXX_BURST, 0);
    573		else
    574			bgx_reg_write(bgx, lmac->lmacid,
    575				      BGX_GMP_GMI_TXX_BURST, 8192);
    576		break;
    577	default:
    578		break;
    579	}
    580	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
    581	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
    582
    583	/* Restore CMR config settings */
    584	cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
    585	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
    586
    587	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
    588		xcv_setup_link(lmac->link_up, lmac->last_speed);
    589}
    590
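        /* Link-change handler passed to phy_connect_direct() in bgx_lmac_enable();
         * it mirrors the PHY state into the LMAC and reprograms the link on change.
         */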
    591static void bgx_lmac_handler(struct net_device *netdev)
    592{
    593	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
    594	struct phy_device *phydev;
    595	int link_changed = 0;
    596
    597	phydev = lmac->phydev;
    598
    599	if (!phydev->link && lmac->last_link)
    600		link_changed = -1;
    601
    602	if (phydev->link &&
    603	    (lmac->last_duplex != phydev->duplex ||
    604	     lmac->last_link != phydev->link ||
    605	     lmac->last_speed != phydev->speed)) {
    606			link_changed = 1;
    607	}
    608
    609	lmac->last_link = phydev->link;
    610	lmac->last_speed = phydev->speed;
    611	lmac->last_duplex = phydev->duplex;
    612
    613	if (!link_changed)
    614		return;
    615
    616	if (link_changed > 0)
    617		lmac->link_up = true;
    618	else
    619		lmac->link_up = false;
    620
    621	if (lmac->is_sgmii)
    622		bgx_sgmii_change_link_state(lmac);
    623	else
    624		bgx_xaui_check_link(lmac);
    625}
    626
    627u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
    628{
    629	struct bgx *bgx;
    630
    631	bgx = get_bgx(node, bgx_idx);
    632	if (!bgx)
    633		return 0;
    634
    635	if (idx > 8)
    636		lmac = 0;
    637	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
    638}
    639EXPORT_SYMBOL(bgx_get_rx_stats);
    640
    641u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
    642{
    643	struct bgx *bgx;
    644
    645	bgx = get_bgx(node, bgx_idx);
    646	if (!bgx)
    647		return 0;
    648
    649	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
    650}
    651EXPORT_SYMBOL(bgx_get_tx_stats);
    652
    653/* Configure BGX LMAC in internal loopback mode */
    654void bgx_lmac_internal_loopback(int node, int bgx_idx,
    655				int lmac_idx, bool enable)
    656{
    657	struct bgx *bgx;
    658	struct lmac *lmac;
    659	u64    cfg;
    660
    661	bgx = get_bgx(node, bgx_idx);
    662	if (!bgx)
    663		return;
    664
    665	lmac = &bgx->lmac[lmac_idx];
    666	if (lmac->is_sgmii) {
    667		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
    668		if (enable)
    669			cfg |= PCS_MRX_CTL_LOOPBACK1;
    670		else
    671			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
    672		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
    673	} else {
    674		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
    675		if (enable)
    676			cfg |= SPU_CTL_LOOPBACK;
    677		else
    678			cfg &= ~SPU_CTL_LOOPBACK;
    679		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
    680	}
    681}
    682EXPORT_SYMBOL(bgx_lmac_internal_loopback);
    683
    684static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
    685{
    686	int lmacid = lmac->lmacid;
    687	u64 cfg;
    688
    689	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
    690	/* max packet size */
    691	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
    692
    693	/* Disable frame alignment if using preamble */
    694	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
    695	if (cfg & 1)
    696		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
    697
    698	/* Enable lmac */
    699	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
    700
    701	/* PCS reset */
    702	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
    703	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
    704			 PCS_MRX_CTL_RESET, true)) {
    705		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
    706		return -1;
    707	}
    708
    709	/* power down, reset autoneg, autoneg enable */
    710	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
    711	cfg &= ~PCS_MRX_CTL_PWR_DN;
    712	cfg |= PCS_MRX_CTL_RST_AN;
    713	if (lmac->phydev) {
    714		cfg |= PCS_MRX_CTL_AN_EN;
    715	} else {
     716		/* In scenarios where the PHY driver is not present, or it is a
     717		 * non-standard PHY, FW sets AN_EN to tell the Linux driver
     718		 * whether to do auto-neg and link polling.
     719		 */
    720		if (cfg & PCS_MRX_CTL_AN_EN)
    721			lmac->autoneg = true;
    722	}
    723	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
    724
    725	if (lmac->lmac_type == BGX_MODE_QSGMII) {
    726		/* Disable disparity check for QSGMII */
    727		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
    728		cfg &= ~PCS_MISC_CTL_DISP_EN;
    729		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
    730		return 0;
    731	}
    732
    733	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
    734		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
    735				 PCS_MRX_STATUS_AN_CPT, false)) {
    736			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
    737			return -1;
    738		}
    739	}
    740
    741	return 0;
    742}
    743
    744static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
    745{
    746	u64 cfg;
    747	int lmacid = lmac->lmacid;
    748
    749	/* Reset SPU */
    750	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
    751	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
    752		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
    753		return -1;
    754	}
    755
    756	/* Disable LMAC */
    757	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
    758	cfg &= ~CMR_EN;
    759	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
    760
    761	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
    762	/* Set interleaved running disparity for RXAUI */
    763	if (lmac->lmac_type == BGX_MODE_RXAUI)
    764		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
    765			       SPU_MISC_CTL_INTLV_RDISP);
    766
    767	/* Clear receive packet disable */
    768	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
    769	cfg &= ~SPU_MISC_CTL_RX_DIS;
    770	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
    771
    772	/* clear all interrupts */
    773	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
    774	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
    775	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
    776	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
    777	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
    778	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
    779
    780	if (lmac->use_training) {
    781		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
    782		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
    783		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
    784		/* training enable */
    785		bgx_reg_modify(bgx, lmacid,
    786			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
    787	}
    788
    789	/* Append FCS to each packet */
    790	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
    791
    792	/* Disable forward error correction */
    793	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
    794	cfg &= ~SPU_FEC_CTL_FEC_EN;
    795	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
    796
    797	/* Disable autoneg */
    798	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
    799	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
    800	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
    801
    802	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
    803	if (lmac->lmac_type == BGX_MODE_10G_KR)
    804		cfg |= (1 << 23);
    805	else if (lmac->lmac_type == BGX_MODE_40G_KR)
    806		cfg |= (1 << 24);
    807	else
    808		cfg &= ~((1 << 23) | (1 << 24));
    809	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
    810	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
    811
    812	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
    813	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
    814	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
    815
    816	/* Enable lmac */
    817	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
    818
    819	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
    820	cfg &= ~SPU_CTL_LOW_POWER;
    821	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
    822
    823	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
    824	cfg &= ~SMU_TX_CTL_UNI_EN;
    825	cfg |= SMU_TX_CTL_DIC_EN;
    826	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
    827
    828	/* Enable receive and transmission of pause frames */
    829	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
    830		      BCK_EN | DRP_EN | TX_EN | RX_EN));
    831	/* Configure pause time and interval */
    832	bgx_reg_write(bgx, lmacid,
    833		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
    834	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
    835	cfg &= ~0xFFFFull;
    836	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
    837		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
    838	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);
    839
    840	/* take lmac_count into account */
    841	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
    842	/* max packet size */
    843	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
    844
    845	return 0;
    846}
    847
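        /* Link check for the non-SGMII modes (XAUI/RXAUI/XFI/XLAUI/10G-KR/40G-KR):
         * optionally (re)starts link training, waits for SPU reset/block lock or RX
         * alignment, clears latched receive faults and checks that SMU RX/TX are
         * idle; on a local/remote RX fault the LMAC is re-initialized.
         */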
    848static int bgx_xaui_check_link(struct lmac *lmac)
    849{
    850	struct bgx *bgx = lmac->bgx;
    851	int lmacid = lmac->lmacid;
    852	int lmac_type = lmac->lmac_type;
    853	u64 cfg;
    854
    855	if (lmac->use_training) {
    856		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
    857		if (!(cfg & (1ull << 13))) {
    858			cfg = (1ull << 13) | (1ull << 14);
    859			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
    860			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
    861			cfg |= (1ull << 0);
    862			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
    863			return -1;
    864		}
    865	}
    866
    867	/* wait for PCS to come out of reset */
    868	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
    869		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
    870		return -1;
    871	}
    872
    873	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
    874	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
    875		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
    876				 SPU_BR_STATUS_BLK_LOCK, false)) {
    877			dev_err(&bgx->pdev->dev,
    878				"SPU_BR_STATUS_BLK_LOCK not completed\n");
    879			return -1;
    880		}
    881	} else {
    882		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
    883				 SPU_BX_STATUS_RX_ALIGN, false)) {
    884			dev_err(&bgx->pdev->dev,
    885				"SPU_BX_STATUS_RX_ALIGN not completed\n");
    886			return -1;
    887		}
    888	}
    889
    890	/* Clear rcvflt bit (latching high) and read it back */
    891	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
    892		bgx_reg_modify(bgx, lmacid,
    893			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
    894	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
    895		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
    896		if (lmac->use_training) {
    897			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
    898			if (!(cfg & (1ull << 13))) {
    899				cfg = (1ull << 13) | (1ull << 14);
    900				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
    901				cfg = bgx_reg_read(bgx, lmacid,
    902						   BGX_SPUX_BR_PMD_CRTL);
    903				cfg |= (1ull << 0);
    904				bgx_reg_write(bgx, lmacid,
    905					      BGX_SPUX_BR_PMD_CRTL, cfg);
    906				return -1;
    907			}
    908		}
    909		return -1;
    910	}
    911
    912	/* Wait for BGX RX to be idle */
    913	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
    914		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
    915		return -1;
    916	}
    917
    918	/* Wait for BGX TX to be idle */
    919	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
    920		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
    921		return -1;
    922	}
    923
    924	/* Check for MAC RX faults */
    925	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
    926	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
    927	cfg &= SMU_RX_CTL_STATUS;
    928	if (!cfg)
    929		return 0;
    930
    931	/* Rx local/remote fault seen.
    932	 * Do lmac reinit to see if condition recovers
    933	 */
    934	bgx_lmac_xaui_init(bgx, lmac);
    935
    936	return -1;
    937}
    938
    939static void bgx_poll_for_sgmii_link(struct lmac *lmac)
    940{
    941	u64 pcs_link, an_result;
    942	u8 speed;
    943
    944	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
    945				BGX_GMP_PCS_MRX_STATUS);
    946
     947	/* Link state bit is sticky, read it again */
    948	if (!(pcs_link & PCS_MRX_STATUS_LINK))
    949		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
    950					BGX_GMP_PCS_MRX_STATUS);
    951
    952	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
    953			 PCS_MRX_STATUS_AN_CPT, false)) {
    954		lmac->link_up = false;
    955		lmac->last_speed = SPEED_UNKNOWN;
    956		lmac->last_duplex = DUPLEX_UNKNOWN;
    957		goto next_poll;
    958	}
    959
     960	lmac->link_up = !!(pcs_link & PCS_MRX_STATUS_LINK);
    961	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
    962				 BGX_GMP_PCS_ANX_AN_RESULTS);
    963
    964	speed = (an_result >> 3) & 0x3;
    965	lmac->last_duplex = (an_result >> 1) & 0x1;
    966	switch (speed) {
    967	case 0:
    968		lmac->last_speed = SPEED_10;
    969		break;
    970	case 1:
    971		lmac->last_speed = SPEED_100;
    972		break;
    973	case 2:
    974		lmac->last_speed = SPEED_1000;
    975		break;
    976	default:
    977		lmac->link_up = false;
    978		lmac->last_speed = SPEED_UNKNOWN;
    979		lmac->last_duplex = DUPLEX_UNKNOWN;
    980		break;
    981	}
    982
    983next_poll:
    984
    985	if (lmac->last_link != lmac->link_up) {
    986		if (lmac->link_up)
    987			bgx_sgmii_change_link_state(lmac);
    988		lmac->last_link = lmac->link_up;
    989	}
    990
    991	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
    992}
    993
    994static void bgx_poll_for_link(struct work_struct *work)
    995{
    996	struct lmac *lmac;
    997	u64 spu_link, smu_link;
    998
    999	lmac = container_of(work, struct lmac, dwork.work);
   1000	if (lmac->is_sgmii) {
   1001		bgx_poll_for_sgmii_link(lmac);
   1002		return;
   1003	}
   1004
   1005	/* Receive link is latching low. Force it high and verify it */
   1006	bgx_reg_modify(lmac->bgx, lmac->lmacid,
   1007		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
   1008	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
   1009		     SPU_STATUS1_RCV_LNK, false);
   1010
   1011	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
   1012	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
   1013
   1014	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
   1015	    !(smu_link & SMU_RX_CTL_STATUS)) {
   1016		lmac->link_up = true;
   1017		if (lmac->lmac_type == BGX_MODE_XLAUI)
   1018			lmac->last_speed = SPEED_40000;
   1019		else
   1020			lmac->last_speed = SPEED_10000;
   1021		lmac->last_duplex = DUPLEX_FULL;
   1022	} else {
   1023		lmac->link_up = false;
   1024		lmac->last_speed = SPEED_UNKNOWN;
   1025		lmac->last_duplex = DUPLEX_UNKNOWN;
   1026	}
   1027
   1028	if (lmac->last_link != lmac->link_up) {
   1029		if (lmac->link_up) {
   1030			if (bgx_xaui_check_link(lmac)) {
   1031				/* Errors, clear link_up state */
   1032				lmac->link_up = false;
   1033				lmac->last_speed = SPEED_UNKNOWN;
   1034				lmac->last_duplex = DUPLEX_UNKNOWN;
   1035			}
   1036		}
   1037		lmac->last_link = lmac->link_up;
   1038	}
   1039
   1040	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
   1041}
   1042
   1043static int phy_interface_mode(u8 lmac_type)
   1044{
   1045	if (lmac_type == BGX_MODE_QSGMII)
   1046		return PHY_INTERFACE_MODE_QSGMII;
   1047	if (lmac_type == BGX_MODE_RGMII)
   1048		return PHY_INTERFACE_MODE_RGMII_RXID;
   1049
   1050	return PHY_INTERFACE_MODE_SGMII;
   1051}
   1052
   1053static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
   1054{
   1055	struct lmac *lmac;
   1056	u64 cfg;
   1057
   1058	lmac = &bgx->lmac[lmacid];
   1059	lmac->bgx = bgx;
   1060
   1061	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
   1062	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
   1063	    (lmac->lmac_type == BGX_MODE_RGMII)) {
   1064		lmac->is_sgmii = true;
   1065		if (bgx_lmac_sgmii_init(bgx, lmac))
   1066			return -1;
   1067	} else {
   1068		lmac->is_sgmii = false;
   1069		if (bgx_lmac_xaui_init(bgx, lmac))
   1070			return -1;
   1071	}
   1072
   1073	if (lmac->is_sgmii) {
   1074		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
   1075		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
   1076		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
   1077		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
   1078	} else {
   1079		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
   1080		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
   1081		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
   1082		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
   1083	}
   1084
    1085	/* actual number of filters available to this LMAC */
   1086	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
   1087	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
   1088			      GFP_KERNEL);
   1089	if (!lmac->dmacs)
   1090		return -ENOMEM;
   1091
   1092	/* Enable lmac */
   1093	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
   1094
    1095	/* Restore default cfg, in case low level firmware changed it */
   1096	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
   1097
   1098	if ((lmac->lmac_type != BGX_MODE_XFI) &&
   1099	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
   1100	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
   1101	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
   1102		if (!lmac->phydev) {
   1103			if (lmac->autoneg) {
   1104				bgx_reg_write(bgx, lmacid,
   1105					      BGX_GMP_PCS_LINKX_TIMER,
   1106					      PCS_LINKX_TIMER_COUNT);
   1107				goto poll;
   1108			} else {
    1109				/* Default to the link speed and duplex below */
   1110				lmac->link_up = true;
   1111				lmac->last_speed = SPEED_1000;
   1112				lmac->last_duplex = DUPLEX_FULL;
   1113				bgx_sgmii_change_link_state(lmac);
   1114				return 0;
   1115			}
   1116		}
   1117		lmac->phydev->dev_flags = 0;
   1118
   1119		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
   1120				       bgx_lmac_handler,
   1121				       phy_interface_mode(lmac->lmac_type)))
   1122			return -ENODEV;
   1123
   1124		phy_start(lmac->phydev);
   1125		return 0;
   1126	}
   1127
   1128poll:
   1129	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
   1130					   WQ_MEM_RECLAIM, 1);
   1131	if (!lmac->check_link)
   1132		return -ENOMEM;
   1133	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
   1134	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
   1135
   1136	return 0;
   1137}
   1138
   1139static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
   1140{
   1141	struct lmac *lmac;
   1142	u64 cfg;
   1143
   1144	lmac = &bgx->lmac[lmacid];
   1145	if (lmac->check_link) {
   1146		/* Destroy work queue */
   1147		cancel_delayed_work_sync(&lmac->dwork);
   1148		destroy_workqueue(lmac->check_link);
   1149	}
   1150
   1151	/* Disable packet reception */
   1152	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
   1153	cfg &= ~CMR_PKT_RX_EN;
   1154	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
   1155
   1156	/* Give chance for Rx/Tx FIFO to get drained */
   1157	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
   1158	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
   1159
   1160	/* Disable packet transmission */
   1161	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
   1162	cfg &= ~CMR_PKT_TX_EN;
   1163	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
   1164
   1165	/* Disable serdes lanes */
    1166	if (!lmac->is_sgmii)
    1167		bgx_reg_modify(bgx, lmacid,
    1168			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
    1169	else
    1170		bgx_reg_modify(bgx, lmacid,
    1171			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
   1172
   1173	/* Disable LMAC */
   1174	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
   1175	cfg &= ~CMR_EN;
   1176	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
   1177
   1178	bgx_flush_dmac_cam_filter(bgx, lmacid);
   1179	kfree(lmac->dmacs);
   1180
   1181	if ((lmac->lmac_type != BGX_MODE_XFI) &&
   1182	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
   1183	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
   1184	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
   1185		phy_disconnect(lmac->phydev);
   1186
   1187	lmac->phydev = NULL;
   1188}
   1189
   1190static void bgx_init_hw(struct bgx *bgx)
   1191{
   1192	int i;
   1193	struct lmac *lmac;
   1194
   1195	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
   1196	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
   1197		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
   1198
   1199	/* Set lmac type and lane2serdes mapping */
   1200	for (i = 0; i < bgx->lmac_count; i++) {
   1201		lmac = &bgx->lmac[i];
   1202		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
   1203			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
   1204		bgx->lmac[i].lmacid_bd = lmac_count;
   1205		lmac_count++;
   1206	}
   1207
   1208	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
   1209	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
   1210
   1211	/* Set the backpressure AND mask */
   1212	for (i = 0; i < bgx->lmac_count; i++)
   1213		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
   1214			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
   1215			       (i * MAX_BGX_CHANS_PER_LMAC));
   1216
   1217	/* Disable all MAC filtering */
   1218	for (i = 0; i < RX_DMAC_COUNT; i++)
   1219		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
   1220
   1221	/* Disable MAC steering (NCSI traffic) */
   1222	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
   1223		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
   1224}
   1225
   1226static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
   1227{
   1228	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
   1229}
   1230
   1231static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
   1232{
   1233	struct device *dev = &bgx->pdev->dev;
   1234	struct lmac *lmac;
   1235	char str[27];
   1236
   1237	if (!bgx->is_dlm && lmacid)
   1238		return;
   1239
   1240	lmac = &bgx->lmac[lmacid];
   1241	if (!bgx->is_dlm)
   1242		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
   1243	else
   1244		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
   1245
   1246	switch (lmac->lmac_type) {
   1247	case BGX_MODE_SGMII:
   1248		dev_info(dev, "%s: SGMII\n", (char *)str);
   1249		break;
   1250	case BGX_MODE_XAUI:
   1251		dev_info(dev, "%s: XAUI\n", (char *)str);
   1252		break;
   1253	case BGX_MODE_RXAUI:
   1254		dev_info(dev, "%s: RXAUI\n", (char *)str);
   1255		break;
   1256	case BGX_MODE_XFI:
   1257		if (!lmac->use_training)
   1258			dev_info(dev, "%s: XFI\n", (char *)str);
   1259		else
   1260			dev_info(dev, "%s: 10G_KR\n", (char *)str);
   1261		break;
   1262	case BGX_MODE_XLAUI:
   1263		if (!lmac->use_training)
   1264			dev_info(dev, "%s: XLAUI\n", (char *)str);
   1265		else
   1266			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
   1267		break;
   1268	case BGX_MODE_QSGMII:
   1269		dev_info(dev, "%s: QSGMII\n", (char *)str);
   1270		break;
   1271	case BGX_MODE_RGMII:
   1272		dev_info(dev, "%s: RGMII\n", (char *)str);
   1273		break;
   1274	case BGX_MODE_INVALID:
   1275		/* Nothing to do */
   1276		break;
   1277	}
   1278}
   1279
   1280static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
   1281{
   1282	switch (lmac->lmac_type) {
   1283	case BGX_MODE_SGMII:
   1284	case BGX_MODE_XFI:
   1285		lmac->lane_to_sds = lmac->lmacid;
   1286		break;
   1287	case BGX_MODE_XAUI:
   1288	case BGX_MODE_XLAUI:
   1289	case BGX_MODE_RGMII:
   1290		lmac->lane_to_sds = 0xE4;
   1291		break;
   1292	case BGX_MODE_RXAUI:
   1293		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
   1294		break;
   1295	case BGX_MODE_QSGMII:
    1296		/* There is no way to determine whether DLM0/2 or DLM1/3 is
    1297		 * configured as QSGMII, since the bootloader will configure
    1298		 * all LMACs, so take whatever low level firmware has
    1299		 * configured.
    1300		 */
   1301		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
   1302		break;
   1303	default:
   1304		lmac->lane_to_sds = 0;
   1305		break;
   1306	}
   1307}
   1308
   1309static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
   1310{
   1311	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
   1312	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
   1313		lmac->use_training = false;
   1314		return;
   1315	}
   1316
   1317	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
   1318							SPU_PMD_CRTL_TRAIN_EN;
   1319}
   1320
   1321static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
   1322{
   1323	struct lmac *lmac;
   1324	u64 cmr_cfg;
   1325	u8 lmac_type;
   1326	u8 lane_to_sds;
   1327
   1328	lmac = &bgx->lmac[idx];
   1329
   1330	if (!bgx->is_dlm || bgx->is_rgx) {
   1331		/* Read LMAC0 type to figure out QLM mode
   1332		 * This is configured by low level firmware
   1333		 */
   1334		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
   1335		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
   1336		if (bgx->is_rgx)
   1337			lmac->lmac_type = BGX_MODE_RGMII;
   1338		lmac_set_training(bgx, lmac, 0);
   1339		lmac_set_lane2sds(bgx, lmac);
   1340		return;
   1341	}
   1342
    1343	/* For DLMs or SLMs on 80/81/83xx, many lane configurations are
    1344	 * possible and they vary across boards. The kernel has no way to
    1345	 * identify the board type/info, but firmware does, so just take the
    1346	 * lmac type and serdes lane config as is.
    1347	 */
   1348	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
   1349	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
   1350	lane_to_sds = (u8)(cmr_cfg & 0xFF);
   1351	/* Check if config is reset value */
   1352	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
   1353		lmac->lmac_type = BGX_MODE_INVALID;
   1354	else
   1355		lmac->lmac_type = lmac_type;
   1356	lmac->lane_to_sds = lane_to_sds;
   1357	lmac_set_training(bgx, lmac, lmac->lmacid);
   1358}
   1359
   1360static void bgx_get_qlm_mode(struct bgx *bgx)
   1361{
   1362	struct lmac *lmac;
   1363	u8  idx;
   1364
   1365	/* Init all LMAC's type to invalid */
   1366	for (idx = 0; idx < bgx->max_lmac; idx++) {
   1367		lmac = &bgx->lmac[idx];
   1368		lmac->lmacid = idx;
   1369		lmac->lmac_type = BGX_MODE_INVALID;
   1370		lmac->use_training = false;
   1371	}
   1372
   1373	/* It is assumed that low level firmware sets this value */
   1374	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
   1375	if (bgx->lmac_count > bgx->max_lmac)
   1376		bgx->lmac_count = bgx->max_lmac;
   1377
   1378	for (idx = 0; idx < bgx->lmac_count; idx++) {
   1379		bgx_set_lmac_config(bgx, idx);
   1380		bgx_print_qlm_mode(bgx, idx);
   1381	}
   1382}
   1383
   1384#ifdef CONFIG_ACPI
   1385
   1386static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
   1387				u8 *dst)
   1388{
   1389	u8 mac[ETH_ALEN];
   1390	int ret;
   1391
   1392	ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
   1393	if (ret) {
   1394		dev_err(dev, "MAC address invalid: %pM\n", mac);
   1395		return -EINVAL;
   1396	}
   1397
   1398	dev_info(dev, "MAC address set to: %pM\n", mac);
   1399
   1400	ether_addr_copy(dst, mac);
   1401	return 0;
   1402}
   1403
   1404/* Currently only sets the MAC address. */
   1405static acpi_status bgx_acpi_register_phy(acpi_handle handle,
   1406					 u32 lvl, void *context, void **rv)
   1407{
   1408	struct bgx *bgx = context;
   1409	struct device *dev = &bgx->pdev->dev;
   1410	struct acpi_device *adev;
   1411
   1412	adev = acpi_fetch_acpi_dev(handle);
   1413	if (!adev)
   1414		goto out;
   1415
   1416	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
   1417
   1418	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
   1419
   1420	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
   1421	bgx->acpi_lmac_idx++; /* move to next LMAC */
   1422out:
   1423	return AE_OK;
   1424}
   1425
   1426static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
   1427				     void *context, void **ret_val)
   1428{
   1429	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
   1430	struct bgx *bgx = context;
   1431	char bgx_sel[5];
   1432
   1433	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
   1434	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
   1435		pr_warn("Invalid link device\n");
   1436		return AE_OK;
   1437	}
   1438
   1439	if (strncmp(string.pointer, bgx_sel, 4))
   1440		return AE_OK;
   1441
   1442	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
   1443			    bgx_acpi_register_phy, NULL, bgx, NULL);
   1444
   1445	kfree(string.pointer);
   1446	return AE_CTRL_TERMINATE;
   1447}
   1448
   1449static int bgx_init_acpi_phy(struct bgx *bgx)
   1450{
   1451	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
   1452	return 0;
   1453}
   1454
   1455#else
   1456
   1457static int bgx_init_acpi_phy(struct bgx *bgx)
   1458{
   1459	return -ENODEV;
   1460}
   1461
   1462#endif /* CONFIG_ACPI */
   1463
   1464#if IS_ENABLED(CONFIG_OF_MDIO)
   1465
   1466static int bgx_init_of_phy(struct bgx *bgx)
   1467{
   1468	struct fwnode_handle *fwn;
   1469	struct device_node *node = NULL;
   1470	u8 lmac = 0;
   1471
   1472	device_for_each_child_node(&bgx->pdev->dev, fwn) {
   1473		struct phy_device *pd;
   1474		struct device_node *phy_np;
   1475
   1476		/* Should always be an OF node.  But if it is not, we
   1477		 * cannot handle it, so exit the loop.
   1478		 */
   1479		node = to_of_node(fwn);
   1480		if (!node)
   1481			break;
   1482
   1483		of_get_mac_address(node, bgx->lmac[lmac].mac);
   1484
   1485		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
   1486		bgx->lmac[lmac].lmacid = lmac;
   1487
   1488		phy_np = of_parse_phandle(node, "phy-handle", 0);
    1489		/* Ignore it if there is no PHY, or if defective firmware
    1490		 * presents this Cortina PHY, for which there is no driver
    1491		 * support.
    1492		 */
   1493		if (phy_np &&
   1494		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
   1495			/* Wait until the phy drivers are available */
   1496			pd = of_phy_find_device(phy_np);
   1497			if (!pd)
   1498				goto defer;
   1499			bgx->lmac[lmac].phydev = pd;
   1500		}
   1501
   1502		lmac++;
   1503		if (lmac == bgx->max_lmac) {
   1504			of_node_put(node);
   1505			break;
   1506		}
   1507	}
   1508	return 0;
   1509
   1510defer:
   1511	/* We are bailing out, try not to leak device reference counts
   1512	 * for phy devices we may have already found.
   1513	 */
   1514	while (lmac) {
   1515		if (bgx->lmac[lmac].phydev) {
   1516			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
   1517			bgx->lmac[lmac].phydev = NULL;
   1518		}
   1519		lmac--;
   1520	}
   1521	of_node_put(node);
   1522	return -EPROBE_DEFER;
   1523}
   1524
   1525#else
   1526
   1527static int bgx_init_of_phy(struct bgx *bgx)
   1528{
   1529	return -ENODEV;
   1530}
   1531
   1532#endif /* CONFIG_OF_MDIO */
   1533
   1534static int bgx_init_phy(struct bgx *bgx)
   1535{
   1536	if (!acpi_disabled)
   1537		return bgx_init_acpi_phy(bgx);
   1538
   1539	return bgx_init_of_phy(bgx);
   1540}
   1541
   1542static irqreturn_t bgx_intr_handler(int irq, void *data)
   1543{
   1544	struct bgx *bgx = (struct bgx *)data;
   1545	u64 status, val;
   1546	int lmac;
   1547
   1548	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
   1549		status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
   1550		if (status & GMI_TXX_INT_UNDFLW) {
   1551			pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
   1552				bgx->bgx_id, lmac);
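        			/* recover by toggling CMR_EN: disable, then re-enable the LMAC */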
   1553			val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
   1554			val &= ~CMR_EN;
   1555			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
   1556			val |= CMR_EN;
   1557			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
   1558		}
   1559		/* clear interrupts */
   1560		bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
   1561	}
   1562
   1563	return IRQ_HANDLED;
   1564}
   1565
   1566static void bgx_register_intr(struct pci_dev *pdev)
   1567{
   1568	struct bgx *bgx = pci_get_drvdata(pdev);
   1569	int ret;
   1570
   1571	ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
   1572				    BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
   1573	if (ret < 0) {
   1574		pci_err(pdev, "Req for #%d msix vectors failed\n",
   1575			BGX_LMAC_VEC_OFFSET);
   1576		return;
   1577	}
   1578	ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
   1579			      bgx, "BGX%d", bgx->bgx_id);
   1580	if (ret)
   1581		pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
   1582}
   1583
   1584static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
   1585{
   1586	int err;
   1587	struct device *dev = &pdev->dev;
   1588	struct bgx *bgx = NULL;
   1589	u8 lmac;
   1590	u16 sdevid;
   1591
   1592	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
   1593	if (!bgx)
   1594		return -ENOMEM;
   1595	bgx->pdev = pdev;
   1596
   1597	pci_set_drvdata(pdev, bgx);
   1598
   1599	err = pcim_enable_device(pdev);
   1600	if (err) {
   1601		pci_set_drvdata(pdev, NULL);
   1602		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
   1603	}
   1604
   1605	err = pci_request_regions(pdev, DRV_NAME);
   1606	if (err) {
   1607		dev_err(dev, "PCI request regions failed 0x%x\n", err);
   1608		goto err_disable_device;
   1609	}
   1610
   1611	/* MAP configuration registers */
   1612	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
   1613	if (!bgx->reg_base) {
   1614		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
   1615		err = -ENOMEM;
   1616		goto err_release_regions;
   1617	}
   1618
   1619	set_max_bgx_per_node(pdev);
   1620
   1621	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
   1622	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
   1623		bgx->bgx_id = (pci_resource_start(pdev,
   1624			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
   1625		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
   1626		bgx->max_lmac = MAX_LMAC_PER_BGX;
   1627		bgx_vnic[bgx->bgx_id] = bgx;
   1628	} else {
   1629		bgx->is_rgx = true;
   1630		bgx->max_lmac = 1;
   1631		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
   1632		bgx_vnic[bgx->bgx_id] = bgx;
   1633		xcv_init_hw();
   1634	}
   1635
    1636	/* On 81xx all are DLMs; on 83xx there are 3 BGX QLMs, and one
    1637	 * BGX, i.e. BGX2, can be split across 2 DLMs.
    1638	 */
   1639	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
   1640	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
   1641	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
   1642		bgx->is_dlm = true;
   1643
   1644	bgx_get_qlm_mode(bgx);
   1645
   1646	err = bgx_init_phy(bgx);
   1647	if (err)
   1648		goto err_enable;
   1649
   1650	bgx_init_hw(bgx);
   1651
   1652	bgx_register_intr(pdev);
   1653
   1654	/* Enable all LMACs */
   1655	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
   1656		err = bgx_lmac_enable(bgx, lmac);
   1657		if (err) {
   1658			dev_err(dev, "BGX%d failed to enable lmac%d\n",
   1659				bgx->bgx_id, lmac);
   1660			while (lmac)
   1661				bgx_lmac_disable(bgx, --lmac);
   1662			goto err_enable;
   1663		}
   1664	}
   1665
   1666	return 0;
   1667
   1668err_enable:
   1669	bgx_vnic[bgx->bgx_id] = NULL;
   1670	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
   1671err_release_regions:
   1672	pci_release_regions(pdev);
   1673err_disable_device:
   1674	pci_disable_device(pdev);
   1675	pci_set_drvdata(pdev, NULL);
   1676	return err;
   1677}
   1678
   1679static void bgx_remove(struct pci_dev *pdev)
   1680{
   1681	struct bgx *bgx = pci_get_drvdata(pdev);
   1682	u8 lmac;
   1683
   1684	/* Disable all LMACs */
   1685	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
   1686		bgx_lmac_disable(bgx, lmac);
   1687
   1688	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
   1689
   1690	bgx_vnic[bgx->bgx_id] = NULL;
   1691	pci_release_regions(pdev);
   1692	pci_disable_device(pdev);
   1693	pci_set_drvdata(pdev, NULL);
   1694}
   1695
   1696static struct pci_driver bgx_driver = {
   1697	.name = DRV_NAME,
   1698	.id_table = bgx_id_table,
   1699	.probe = bgx_probe,
   1700	.remove = bgx_remove,
   1701};
   1702
   1703static int __init bgx_init_module(void)
   1704{
   1705	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
   1706
   1707	return pci_register_driver(&bgx_driver);
   1708}
   1709
   1710static void __exit bgx_cleanup_module(void)
   1711{
   1712	pci_unregister_driver(&bgx_driver);
   1713}
   1714
   1715module_init(bgx_init_module);
   1716module_exit(bgx_cleanup_module);