cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

phy-zynqmp.c (26796B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
      4 *
      5 * Copyright (C) 2018-2020 Xilinx Inc.
      6 *
      7 * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
      8 * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
      9 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
     10 *
      11 * This driver is currently tested with USB, SATA and Display Port.
      12 * The other controllers, PCIe and SGMII, should also work but are
      13 * still considered experimental.
     14 */
     15
     16#include <linux/clk.h>
     17#include <linux/delay.h>
     18#include <linux/io.h>
     19#include <linux/kernel.h>
     20#include <linux/module.h>
     21#include <linux/of.h>
     22#include <linux/phy/phy.h>
     23#include <linux/platform_device.h>
     24#include <linux/slab.h>
     25
     26#include <dt-bindings/phy/phy.h>
     27
     28/*
     29 * Lane Registers
     30 */
     31
     32/* TX De-emphasis parameters */
     33#define L0_TX_ANA_TM_18			0x0048
     34#define L0_TX_ANA_TM_118		0x01d8
     35#define L0_TX_ANA_TM_118_FORCE_17_0	BIT(0)
     36
     37/* DN Resistor calibration code parameters */
     38#define L0_TXPMA_ST_3			0x0b0c
     39#define L0_DN_CALIB_CODE		0x3f
     40
     41/* PMA control parameters */
     42#define L0_TXPMD_TM_45			0x0cb4
     43#define L0_TXPMD_TM_48			0x0cc0
     44#define L0_TXPMD_TM_45_OVER_DP_MAIN	BIT(0)
     45#define L0_TXPMD_TM_45_ENABLE_DP_MAIN	BIT(1)
     46#define L0_TXPMD_TM_45_OVER_DP_POST1	BIT(2)
     47#define L0_TXPMD_TM_45_ENABLE_DP_POST1	BIT(3)
     48#define L0_TXPMD_TM_45_OVER_DP_POST2	BIT(4)
     49#define L0_TXPMD_TM_45_ENABLE_DP_POST2	BIT(5)
     50
     51/* PCS control parameters */
     52#define L0_TM_DIG_6			0x106c
     53#define L0_TM_DIS_DESCRAMBLE_DECODER	0x0f
     54#define L0_TX_DIG_61			0x00f4
     55#define L0_TM_DISABLE_SCRAMBLE_ENCODER	0x0f
     56
     57/* PLL Test Mode register parameters */
     58#define L0_TM_PLL_DIG_37		0x2094
     59#define L0_TM_COARSE_CODE_LIMIT		0x10
     60
     61/* PLL SSC step size offsets */
     62#define L0_PLL_SS_STEPS_0_LSB		0x2368
     63#define L0_PLL_SS_STEPS_1_MSB		0x236c
     64#define L0_PLL_SS_STEP_SIZE_0_LSB	0x2370
     65#define L0_PLL_SS_STEP_SIZE_1		0x2374
     66#define L0_PLL_SS_STEP_SIZE_2		0x2378
     67#define L0_PLL_SS_STEP_SIZE_3_MSB	0x237c
     68#define L0_PLL_STATUS_READ_1		0x23e4
     69
     70/* SSC step size parameters */
     71#define STEP_SIZE_0_MASK		0xff
     72#define STEP_SIZE_1_MASK		0xff
     73#define STEP_SIZE_2_MASK		0xff
     74#define STEP_SIZE_3_MASK		0x3
     75#define STEP_SIZE_SHIFT			8
     76#define FORCE_STEP_SIZE			0x10
     77#define FORCE_STEPS			0x20
     78#define STEPS_0_MASK			0xff
     79#define STEPS_1_MASK			0x07
     80
     81/* Reference clock selection parameters */
     82#define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
     83#define L0_REF_CLK_SEL_MASK		0x8f
     84
     85/* Calibration digital logic parameters */
     86#define L3_TM_CALIB_DIG19		0xec4c
     87#define L3_CALIB_DONE_STATUS		0xef14
     88#define L3_TM_CALIB_DIG18		0xec48
     89#define L3_TM_CALIB_DIG19_NSW		0x07
     90#define L3_TM_CALIB_DIG18_NSW		0xe0
     91#define L3_TM_OVERRIDE_NSW_CODE         0x20
     92#define L3_CALIB_DONE			0x02
     93#define L3_NSW_SHIFT			5
     94#define L3_NSW_PIPE_SHIFT		4
     95#define L3_NSW_CALIB_SHIFT		3
     96
     97#define PHY_REG_OFFSET			0x4000
     98
     99/*
    100 * Global Registers
    101 */
    102
    103/* Refclk selection parameters */
    104#define PLL_REF_SEL(n)			(0x10000 + (n) * 4)
    105#define PLL_FREQ_MASK			0x1f
    106#define PLL_STATUS_LOCKED		0x10
    107
    108/* Inter Connect Matrix parameters */
    109#define ICM_CFG0			0x10010
    110#define ICM_CFG1			0x10014
    111#define ICM_CFG0_L0_MASK		0x07
    112#define ICM_CFG0_L1_MASK		0x70
    113#define ICM_CFG1_L2_MASK		0x07
    114#define ICM_CFG2_L3_MASK		0x70
    115#define ICM_CFG_SHIFT			4
    116
    117/* Inter Connect Matrix allowed protocols */
    118#define ICM_PROTOCOL_PD			0x0
    119#define ICM_PROTOCOL_PCIE		0x1
    120#define ICM_PROTOCOL_SATA		0x2
    121#define ICM_PROTOCOL_USB		0x3
    122#define ICM_PROTOCOL_DP			0x4
    123#define ICM_PROTOCOL_SGMII		0x5
    124
     125/* Test Mode common reset control parameters */
    126#define TM_CMN_RST			0x10018
    127#define TM_CMN_RST_EN			0x1
    128#define TM_CMN_RST_SET			0x2
    129#define TM_CMN_RST_MASK			0x3
    130
    131/* Bus width parameters */
    132#define TX_PROT_BUS_WIDTH		0x10040
    133#define RX_PROT_BUS_WIDTH		0x10044
    134#define PROT_BUS_WIDTH_10		0x0
    135#define PROT_BUS_WIDTH_20		0x1
    136#define PROT_BUS_WIDTH_40		0x2
    137#define PROT_BUS_WIDTH_SHIFT(n)		((n) * 2)
    138#define PROT_BUS_WIDTH_MASK(n)		GENMASK((n) * 2 + 1, (n) * 2)
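        /*
         * Each lane owns a two-bit bus-width field in TX/RX_PROT_BUS_WIDTH;
         * for example, lane 2 uses PROT_BUS_WIDTH_SHIFT(2) == 4 and
         * PROT_BUS_WIDTH_MASK(2) == GENMASK(5, 4) == 0x30.
         */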
    139
    140/* Number of GT lanes */
    141#define NUM_LANES			4
    142
    143/* SIOU SATA control register */
    144#define SATA_CONTROL_OFFSET		0x0100
    145
     146/* Number of controllers that can be mapped to each lane */
    147#define CONTROLLERS_PER_LANE		5
    148
    149/* Protocol Type parameters */
    150#define XPSGTR_TYPE_USB0		0  /* USB controller 0 */
    151#define XPSGTR_TYPE_USB1		1  /* USB controller 1 */
    152#define XPSGTR_TYPE_SATA_0		2  /* SATA controller lane 0 */
    153#define XPSGTR_TYPE_SATA_1		3  /* SATA controller lane 1 */
    154#define XPSGTR_TYPE_PCIE_0		4  /* PCIe controller lane 0 */
    155#define XPSGTR_TYPE_PCIE_1		5  /* PCIe controller lane 1 */
    156#define XPSGTR_TYPE_PCIE_2		6  /* PCIe controller lane 2 */
    157#define XPSGTR_TYPE_PCIE_3		7  /* PCIe controller lane 3 */
    158#define XPSGTR_TYPE_DP_0		8  /* Display Port controller lane 0 */
    159#define XPSGTR_TYPE_DP_1		9  /* Display Port controller lane 1 */
    160#define XPSGTR_TYPE_SGMII0		10 /* Ethernet SGMII controller 0 */
    161#define XPSGTR_TYPE_SGMII1		11 /* Ethernet SGMII controller 1 */
    162#define XPSGTR_TYPE_SGMII2		12 /* Ethernet SGMII controller 2 */
    163#define XPSGTR_TYPE_SGMII3		13 /* Ethernet SGMII controller 3 */
    164
    165/* Timeout values */
    166#define TIMEOUT_US			1000
    167
    168struct xpsgtr_dev;
    169
    170/**
    171 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
    172 * @refclk_rate: PLL reference clock frequency
    173 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
    174 * @steps: number of steps of SSC (Spread Spectrum Clock)
    175 * @step_size: step size of each step
    176 */
    177struct xpsgtr_ssc {
    178	u32 refclk_rate;
    179	u8  pll_ref_clk;
    180	u32 steps;
    181	u32 step_size;
    182};
    183
    184/**
    185 * struct xpsgtr_phy - representation of a lane
    186 * @phy: pointer to the kernel PHY device
    187 * @type: controller which uses this lane
    188 * @lane: lane number
    189 * @protocol: protocol in which the lane operates
    190 * @skip_phy_init: skip phy_init() if true
    191 * @dev: pointer to the xpsgtr_dev instance
    192 * @refclk: reference clock index
    193 */
    194struct xpsgtr_phy {
    195	struct phy *phy;
    196	u8 type;
    197	u8 lane;
    198	u8 protocol;
    199	bool skip_phy_init;
    200	struct xpsgtr_dev *dev;
    201	unsigned int refclk;
    202};
    203
    204/**
     205 * struct xpsgtr_dev - representation of a ZynqMP GT device
    206 * @dev: pointer to device
    207 * @serdes: serdes base address
    208 * @siou: siou base address
    209 * @gtr_mutex: mutex for locking
    210 * @phys: PHY lanes
    211 * @refclk_sscs: spread spectrum settings for the reference clocks
    212 * @clk: reference clocks
    213 * @tx_term_fix: fix for GT issue
    214 * @saved_icm_cfg0: stored value of ICM CFG0 register
    215 * @saved_icm_cfg1: stored value of ICM CFG1 register
    216 */
    217struct xpsgtr_dev {
    218	struct device *dev;
    219	void __iomem *serdes;
    220	void __iomem *siou;
    221	struct mutex gtr_mutex; /* mutex for locking */
    222	struct xpsgtr_phy phys[NUM_LANES];
    223	const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
    224	struct clk *clk[NUM_LANES];
    225	bool tx_term_fix;
    226	unsigned int saved_icm_cfg0;
    227	unsigned int saved_icm_cfg1;
    228};
    229
    230/*
    231 * Configuration Data
    232 */
    233
    234/* lookup table to hold all settings needed for a ref clock frequency */
    235static const struct xpsgtr_ssc ssc_lookup[] = {
    236	{  19200000, 0x05,  608, 264020 },
    237	{  20000000, 0x06,  634, 243454 },
    238	{  24000000, 0x07,  760, 168973 },
    239	{  26000000, 0x08,  824, 143860 },
    240	{  27000000, 0x09,  856,  86551 },
    241	{  38400000, 0x0a, 1218,  65896 },
    242	{  40000000, 0x0b,  634, 243454 },
    243	{  52000000, 0x0c,  824, 143860 },
    244	{ 100000000, 0x0d, 1058,  87533 },
    245	{ 108000000, 0x0e,  856,  86551 },
    246	{ 125000000, 0x0f,  992, 119497 },
    247	{ 135000000, 0x10, 1070,  55393 },
    248	{ 150000000, 0x11,  792, 187091 }
    249};
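        /*
         * The rate returned by clk_get_rate() for a reference clock must match
         * one of these entries exactly; xpsgtr_get_ref_clocks() below rejects
         * any other rate with -EINVAL.
         */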
    250
    251/*
    252 * I/O Accessors
    253 */
    254
    255static inline u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
    256{
    257	return readl(gtr_dev->serdes + reg);
    258}
    259
    260static inline void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
    261{
    262	writel(value, gtr_dev->serdes + reg);
    263}
    264
    265static inline void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
    266				  u32 clr, u32 set)
    267{
    268	u32 value = xpsgtr_read(gtr_dev, reg);
    269
    270	value &= ~clr;
    271	value |= set;
    272	xpsgtr_write(gtr_dev, reg, value);
    273}
    274
    275static inline u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
    276{
    277	void __iomem *addr = gtr_phy->dev->serdes
    278			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
    279
    280	return readl(addr);
    281}
    282
    283static inline void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
    284				    u32 reg, u32 value)
    285{
    286	void __iomem *addr = gtr_phy->dev->serdes
    287			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
    288
    289	writel(value, addr);
    290}
    291
    292static inline void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
    293				      u32 reg, u32 clr, u32 set)
    294{
    295	void __iomem *addr = gtr_phy->dev->serdes
    296			   + gtr_phy->lane * PHY_REG_OFFSET + reg;
    297
    298	writel((readl(addr) & ~clr) | set, addr);
    299}
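        /*
         * Lane registers are banked every PHY_REG_OFFSET (0x4000) bytes, so the
         * xpsgtr_*_phy() helpers access serdes + lane * 0x4000 + reg; reading
         * L0_PLL_STATUS_READ_1 (0x23e4) on lane 2, for instance, touches offset
         * 0xa3e4.
         */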
    300
    301/*
    302 * Hardware Configuration
    303 */
    304
    305/* Wait for the PLL to lock (with a timeout). */
    306static int xpsgtr_wait_pll_lock(struct phy *phy)
    307{
    308	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
    309	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    310	unsigned int timeout = TIMEOUT_US;
    311	int ret;
    312
    313	dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");
    314
    315	while (1) {
    316		u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);
    317
    318		if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
    319			ret = 0;
    320			break;
    321		}
    322
    323		if (--timeout == 0) {
    324			ret = -ETIMEDOUT;
    325			break;
    326		}
    327
    328		udelay(1);
    329	}
    330
    331	if (ret == -ETIMEDOUT)
    332		dev_err(gtr_dev->dev,
    333			"lane %u (type %u, protocol %u): PLL lock timeout\n",
    334			gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);
    335
    336	return ret;
    337}
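        /*
         * The open-coded loop above is equivalent to polling with
         * readl_poll_timeout() from <linux/iopoll.h> (not included by this
         * file); a minimal sketch, assuming the same 1 us interval and
         * TIMEOUT_US budget:
         *
         *	void __iomem *addr = gtr_phy->dev->serdes +
         *			     gtr_phy->lane * PHY_REG_OFFSET +
         *			     L0_PLL_STATUS_READ_1;
         *	u32 reg;
         *
         *	ret = readl_poll_timeout(addr, reg, reg & PLL_STATUS_LOCKED,
         *				 1, TIMEOUT_US);
         */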
    338
     339/* Configure PLL and spread-spectrum clock. */
    340static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
    341{
    342	const struct xpsgtr_ssc *ssc;
    343	u32 step_size;
    344
    345	ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
    346	step_size = ssc->step_size;
    347
    348	xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
    349		       PLL_FREQ_MASK, ssc->pll_ref_clk);
    350
    351	/* Enable lane clock sharing, if required */
    352	if (gtr_phy->refclk != gtr_phy->lane) {
     353		/* Lane-n reference clock selection register */
    354		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
    355			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
    356	}
    357
    358	/* SSC step size [7:0] */
    359	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
    360			   STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);
    361
    362	/* SSC step size [15:8] */
    363	step_size >>= STEP_SIZE_SHIFT;
    364	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
    365			   STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);
    366
    367	/* SSC step size [23:16] */
    368	step_size >>= STEP_SIZE_SHIFT;
    369	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
    370			   STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);
    371
    372	/* SSC steps [7:0] */
    373	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
    374			   STEPS_0_MASK, ssc->steps & STEPS_0_MASK);
    375
    376	/* SSC steps [10:8] */
    377	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
    378			   STEPS_1_MASK,
    379			   (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);
    380
     381	/* SSC step size [25:24] */
    382	step_size >>= STEP_SIZE_SHIFT;
    383	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
    384			   STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
    385			   FORCE_STEP_SIZE | FORCE_STEPS);
    386}
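        /*
         * Worked example for the 19.2 MHz entry of ssc_lookup: step_size
         * 264020 is 0x040754, so the writes above program 0x54 into bits
         * [7:0], 0x07 into [15:8], 0x04 into [23:16] and 0x0 into [25:24]
         * (together with FORCE_STEP_SIZE and FORCE_STEPS); steps 608 is
         * 0x260, split into 0x60 ([7:0]) and 0x2 ([10:8]).
         */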
    387
    388/* Configure the lane protocol. */
    389static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
    390{
    391	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    392	u8 protocol = gtr_phy->protocol;
    393
    394	switch (gtr_phy->lane) {
    395	case 0:
    396		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
    397		break;
    398	case 1:
    399		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
    400			       protocol << ICM_CFG_SHIFT);
    401		break;
    402	case 2:
    403		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
    404		break;
    405	case 3:
    406		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
    407			       protocol << ICM_CFG_SHIFT);
    408		break;
    409	default:
    410		/* We already checked 0 <= lane <= 3 */
    411		break;
    412	}
    413}
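        /*
         * Lanes 0 and 1 sit in the low and high nibbles of ICM_CFG0, lanes 2
         * and 3 in the same nibbles of ICM_CFG1 (which is why the ICM_CFG0_L0/L1
         * masks are reused above); the ICM_PROTOCOL_* value written there
         * selects which controller drives the lane.
         */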
    414
    415/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
    416static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
    417{
    418	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
    419	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
    420}
    421
    422/* DP-specific initialization. */
    423static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
    424{
    425	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
    426			 L0_TXPMD_TM_45_OVER_DP_MAIN |
    427			 L0_TXPMD_TM_45_ENABLE_DP_MAIN |
    428			 L0_TXPMD_TM_45_OVER_DP_POST1 |
    429			 L0_TXPMD_TM_45_OVER_DP_POST2 |
    430			 L0_TXPMD_TM_45_ENABLE_DP_POST2);
    431	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
    432			 L0_TX_ANA_TM_118_FORCE_17_0);
    433}
    434
    435/* SATA-specific initialization. */
    436static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
    437{
    438	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    439
    440	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
    441
    442	writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
    443}
    444
    445/* SGMII-specific initialization. */
    446static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
    447{
    448	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    449	u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
    450	u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
    451
    452	/* Set SGMII protocol TX and RX bus width to 10 bits. */
    453	xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
    454	xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
    455
    456	xpsgtr_bypass_scrambler_8b10b(gtr_phy);
    457}
    458
    459/* Configure TX de-emphasis and margining for DP. */
    460static void xpsgtr_phy_configure_dp(struct xpsgtr_phy *gtr_phy, unsigned int pre,
    461				    unsigned int voltage)
    462{
    463	static const u8 voltage_swing[4][4] = {
    464		{ 0x2a, 0x27, 0x24, 0x20 },
    465		{ 0x27, 0x23, 0x20, 0xff },
    466		{ 0x24, 0x20, 0xff, 0xff },
    467		{ 0xff, 0xff, 0xff, 0xff }
    468	};
    469	static const u8 pre_emphasis[4][4] = {
    470		{ 0x02, 0x02, 0x02, 0x02 },
    471		{ 0x01, 0x01, 0x01, 0xff },
    472		{ 0x00, 0x00, 0xff, 0xff },
    473		{ 0xff, 0xff, 0xff, 0xff }
    474	};
    475
    476	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_48, voltage_swing[pre][voltage]);
    477	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_18, pre_emphasis[pre][voltage]);
    478}
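        /*
         * Both tables above are indexed as [pre-emphasis level][voltage-swing
         * level]; the 0xff entries appear to be placeholders for level
         * combinations the PHY does not support, which a well-behaved
         * DisplayPort source should never request.
         */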
    479
    480/*
    481 * PHY Operations
    482 */
    483
    484static bool xpsgtr_phy_init_required(struct xpsgtr_phy *gtr_phy)
    485{
    486	/*
     487	 * As USB may save a snapshot of its state during hibernation, calling
     488	 * phy_init() would put the USB controller into reset and lose that
     489	 * saved snapshot. So avoid phy_init() for USB unless
     490	 * gtr_phy->skip_phy_init is false (this happens when the FPD is shut
     491	 * down during suspend or when the GT lane is changed from the current one).
    492	 */
    493	if (gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init)
    494		return false;
    495	else
    496		return true;
    497}
    498
    499/*
    500 * There is a functional issue in the GT. The TX termination resistance can be
     501 * out of spec due to an issue in the calibration logic. This is the workaround
    502 * to fix it, required for XCZU9EG silicon.
    503 */
    504static int xpsgtr_phy_tx_term_fix(struct xpsgtr_phy *gtr_phy)
    505{
    506	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    507	u32 timeout = TIMEOUT_US;
    508	u32 nsw;
    509
     510	/* Enable Test Mode control for CMN Reset */
    511	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
    512
    513	/* Set Test Mode reset */
    514	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
    515
    516	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18, 0x00);
    517	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, L3_TM_OVERRIDE_NSW_CODE);
    518
    519	/*
     520	 * As part of the workaround sequence for the PMOS calibration fix,
     521	 * we need to configure any lane's ICM_CFG to a valid protocol. This
    522	 * will deassert the CMN_Resetn signal.
    523	 */
    524	xpsgtr_lane_set_protocol(gtr_phy);
    525
    526	/* Clear Test Mode reset */
    527	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
    528
    529	dev_dbg(gtr_dev->dev, "calibrating...\n");
    530
    531	do {
    532		u32 reg = xpsgtr_read(gtr_dev, L3_CALIB_DONE_STATUS);
    533
    534		if ((reg & L3_CALIB_DONE) == L3_CALIB_DONE)
    535			break;
    536
    537		if (!--timeout) {
     538			dev_err(gtr_dev->dev, "calibration timed out\n");
    539			return -ETIMEDOUT;
    540		}
    541
    542		udelay(1);
    543	} while (timeout > 0);
    544
    545	dev_dbg(gtr_dev->dev, "calibration done\n");
    546
    547	/* Reading NMOS Register Code */
    548	nsw = xpsgtr_read(gtr_dev, L0_TXPMA_ST_3) & L0_DN_CALIB_CODE;
    549
    550	/* Set Test Mode reset */
    551	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_EN);
    552
    553	/* Writing NMOS register values back [5:3] */
    554	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG19, nsw >> L3_NSW_CALIB_SHIFT);
    555
    556	/* Writing NMOS register value [2:0] */
    557	xpsgtr_write(gtr_dev, L3_TM_CALIB_DIG18,
    558		     ((nsw & L3_TM_CALIB_DIG19_NSW) << L3_NSW_SHIFT) |
    559		     (1 << L3_NSW_PIPE_SHIFT));
    560
    561	/* Clear Test Mode reset */
    562	xpsgtr_clr_set(gtr_dev, TM_CMN_RST, TM_CMN_RST_MASK, TM_CMN_RST_SET);
    563
    564	return 0;
    565}
    566
    567static int xpsgtr_phy_init(struct phy *phy)
    568{
    569	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
    570	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
    571	int ret = 0;
    572
    573	mutex_lock(&gtr_dev->gtr_mutex);
    574
    575	/* Skip initialization if not required. */
    576	if (!xpsgtr_phy_init_required(gtr_phy))
    577		goto out;
    578
    579	if (gtr_dev->tx_term_fix) {
    580		ret = xpsgtr_phy_tx_term_fix(gtr_phy);
    581		if (ret < 0)
    582			goto out;
    583
    584		gtr_dev->tx_term_fix = false;
    585	}
    586
    587	/* Enable coarse code saturation limiting logic. */
    588	xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);
    589
    590	/*
    591	 * Configure the PLL, the lane protocol, and perform protocol-specific
    592	 * initialization.
    593	 */
    594	xpsgtr_configure_pll(gtr_phy);
    595	xpsgtr_lane_set_protocol(gtr_phy);
    596
    597	switch (gtr_phy->protocol) {
    598	case ICM_PROTOCOL_DP:
    599		xpsgtr_phy_init_dp(gtr_phy);
    600		break;
    601
    602	case ICM_PROTOCOL_SATA:
    603		xpsgtr_phy_init_sata(gtr_phy);
    604		break;
    605
    606	case ICM_PROTOCOL_SGMII:
    607		xpsgtr_phy_init_sgmii(gtr_phy);
    608		break;
    609	}
    610
    611out:
    612	mutex_unlock(&gtr_dev->gtr_mutex);
    613	return ret;
    614}
    615
    616static int xpsgtr_phy_exit(struct phy *phy)
    617{
    618	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
    619
    620	gtr_phy->skip_phy_init = false;
    621
    622	return 0;
    623}
    624
    625static int xpsgtr_phy_power_on(struct phy *phy)
    626{
    627	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
    628	int ret = 0;
    629
    630	/* Skip initialization if not required. */
    631	if (!xpsgtr_phy_init_required(gtr_phy))
    632		return ret;
    633	/*
    634	 * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
    635	 * cumulating waits for both lanes. The user is expected to initialize
    636	 * lane 0 last.
    637	 */
    638	if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
    639	    gtr_phy->type == XPSGTR_TYPE_DP_0)
    640		ret = xpsgtr_wait_pll_lock(phy);
    641
    642	return ret;
    643}
    644
    645static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts)
    646{
    647	struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
    648
    649	if (gtr_phy->protocol != ICM_PROTOCOL_DP)
    650		return 0;
    651
    652	xpsgtr_phy_configure_dp(gtr_phy, opts->dp.pre[0], opts->dp.voltage[0]);
    653
    654	return 0;
    655}
    656
    657static const struct phy_ops xpsgtr_phyops = {
    658	.init		= xpsgtr_phy_init,
    659	.exit		= xpsgtr_phy_exit,
    660	.power_on	= xpsgtr_phy_power_on,
    661	.configure	= xpsgtr_phy_configure,
    662	.owner		= THIS_MODULE,
    663};
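        /*
         * Consumer drivers (USB, SATA, DP, ...) reach these ops through the
         * generic PHY API. A minimal, purely illustrative sketch of the
         * consumer side (the "sata-phy" name is an assumption, not something
         * this driver defines):
         *
         *	struct phy *phy = devm_phy_get(dev, "sata-phy");
         *
         *	if (IS_ERR(phy))
         *		return PTR_ERR(phy);
         *	ret = phy_init(phy);			-> xpsgtr_phy_init()
         *	if (!ret)
         *		ret = phy_power_on(phy);	-> xpsgtr_phy_power_on()
         */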
    664
    665/*
    666 * OF Xlate Support
    667 */
    668
    669/* Set the lane type and protocol based on the PHY type and instance number. */
    670static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
    671				unsigned int phy_instance)
    672{
    673	unsigned int num_phy_types;
    674	const int *phy_types;
    675
    676	switch (phy_type) {
    677	case PHY_TYPE_SATA: {
    678		static const int types[] = {
    679			XPSGTR_TYPE_SATA_0,
    680			XPSGTR_TYPE_SATA_1,
    681		};
    682
    683		phy_types = types;
    684		num_phy_types = ARRAY_SIZE(types);
    685		gtr_phy->protocol = ICM_PROTOCOL_SATA;
    686		break;
    687	}
    688	case PHY_TYPE_USB3: {
    689		static const int types[] = {
    690			XPSGTR_TYPE_USB0,
    691			XPSGTR_TYPE_USB1,
    692		};
    693
    694		phy_types = types;
    695		num_phy_types = ARRAY_SIZE(types);
    696		gtr_phy->protocol = ICM_PROTOCOL_USB;
    697		break;
    698	}
    699	case PHY_TYPE_DP: {
    700		static const int types[] = {
    701			XPSGTR_TYPE_DP_0,
    702			XPSGTR_TYPE_DP_1,
    703		};
    704
    705		phy_types = types;
    706		num_phy_types = ARRAY_SIZE(types);
    707		gtr_phy->protocol = ICM_PROTOCOL_DP;
    708		break;
    709	}
    710	case PHY_TYPE_PCIE: {
    711		static const int types[] = {
    712			XPSGTR_TYPE_PCIE_0,
    713			XPSGTR_TYPE_PCIE_1,
    714			XPSGTR_TYPE_PCIE_2,
    715			XPSGTR_TYPE_PCIE_3,
    716		};
    717
    718		phy_types = types;
    719		num_phy_types = ARRAY_SIZE(types);
    720		gtr_phy->protocol = ICM_PROTOCOL_PCIE;
    721		break;
    722	}
    723	case PHY_TYPE_SGMII: {
    724		static const int types[] = {
    725			XPSGTR_TYPE_SGMII0,
    726			XPSGTR_TYPE_SGMII1,
    727			XPSGTR_TYPE_SGMII2,
    728			XPSGTR_TYPE_SGMII3,
    729		};
    730
    731		phy_types = types;
    732		num_phy_types = ARRAY_SIZE(types);
    733		gtr_phy->protocol = ICM_PROTOCOL_SGMII;
    734		break;
    735	}
    736	default:
    737		return -EINVAL;
    738	}
    739
    740	if (phy_instance >= num_phy_types)
    741		return -EINVAL;
    742
    743	gtr_phy->type = phy_types[phy_instance];
    744	return 0;
    745}
    746
    747/*
    748 * Valid combinations of controllers and lanes (Interconnect Matrix).
    749 */
    750static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
    751	{ XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
    752		XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
    753	{ XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
    754		XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
    755	{ XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
    756		XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
    757	{ XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
    758		XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
    759};
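        /*
         * xpsgtr_xlate() accepts a requested lane type only if it appears in
         * that lane's row of this matrix; any other combination is rejected
         * with -EINVAL.
         */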
    760
    761/* Translate OF phandle and args to PHY instance. */
    762static struct phy *xpsgtr_xlate(struct device *dev,
    763				struct of_phandle_args *args)
    764{
    765	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
    766	struct xpsgtr_phy *gtr_phy;
    767	unsigned int phy_instance;
    768	unsigned int phy_lane;
    769	unsigned int phy_type;
    770	unsigned int refclk;
    771	unsigned int i;
    772	int ret;
    773
    774	if (args->args_count != 4) {
    775		dev_err(dev, "Invalid number of cells in 'phy' property\n");
    776		return ERR_PTR(-EINVAL);
    777	}
    778
    779	/*
    780	 * Get the PHY parameters from the OF arguments and derive the lane
    781	 * type.
    782	 */
    783	phy_lane = args->args[0];
    784	if (phy_lane >= ARRAY_SIZE(gtr_dev->phys)) {
    785		dev_err(dev, "Invalid lane number %u\n", phy_lane);
    786		return ERR_PTR(-ENODEV);
    787	}
    788
    789	gtr_phy = &gtr_dev->phys[phy_lane];
    790	phy_type = args->args[1];
    791	phy_instance = args->args[2];
    792
    793	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
    794	if (ret < 0) {
    795		dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n");
    796		return ERR_PTR(ret);
    797	}
    798
    799	refclk = args->args[3];
    800	if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
    801	    !gtr_dev->refclk_sscs[refclk]) {
    802		dev_err(dev, "Invalid reference clock number %u\n", refclk);
    803		return ERR_PTR(-EINVAL);
    804	}
    805
    806	gtr_phy->refclk = refclk;
    807
    808	/*
     809	 * Ensure that the Interconnect Matrix is obeyed, i.e. a given lane type
    810	 * is allowed to operate on the lane.
    811	 */
    812	for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
    813		if (icm_matrix[phy_lane][i] == gtr_phy->type)
    814			return gtr_phy->phy;
    815	}
    816
    817	return ERR_PTR(-EINVAL);
    818}
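        /*
         * The four PHY cells decoded above are <lane type instance refclk>. A
         * hypothetical SATA consumer on lane 1, instance 1, reference clock 1
         * would therefore reference this provider roughly as
         *
         *	phys = <&psgtr 1 PHY_TYPE_SATA 1 1>;
         *
         * where &psgtr is an assumed label for the PSGTR node, not something
         * defined in this file.
         */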
    819
    820/*
    821 * Power Management
    822 */
    823
    824static int __maybe_unused xpsgtr_suspend(struct device *dev)
    825{
    826	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
    827	unsigned int i;
    828
     829	/* Save a snapshot of the ICM_CFG registers. */
    830	gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
    831	gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
    832
    833	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
    834		clk_disable_unprepare(gtr_dev->clk[i]);
    835
    836	return 0;
    837}
    838
    839static int __maybe_unused xpsgtr_resume(struct device *dev)
    840{
    841	struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
    842	unsigned int icm_cfg0, icm_cfg1;
    843	unsigned int i;
    844	bool skip_phy_init;
    845	int err;
    846
    847	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
    848		err = clk_prepare_enable(gtr_dev->clk[i]);
    849		if (err)
    850			goto err_clk_put;
    851	}
    852
    853	icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
    854	icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
    855
    856	/* Return if no GT lanes got configured before suspend. */
    857	if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
    858		return 0;
    859
    860	/* Check if the ICM configurations changed after suspend. */
    861	if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
    862	    icm_cfg1 == gtr_dev->saved_icm_cfg1)
    863		skip_phy_init = true;
    864	else
    865		skip_phy_init = false;
    866
    867	/* Update the skip_phy_init for all gtr_phy instances. */
    868	for (i = 0; i < ARRAY_SIZE(gtr_dev->phys); i++)
    869		gtr_dev->phys[i].skip_phy_init = skip_phy_init;
    870
    871	return 0;
    872
    873err_clk_put:
    874	while (i--)
    875		clk_disable_unprepare(gtr_dev->clk[i]);
    876
    877	return err;
    878}
    879
    880static const struct dev_pm_ops xpsgtr_pm_ops = {
    881	SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
    882};
    883
    884/*
    885 * Probe & Platform Driver
    886 */
    887
    888static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
    889{
    890	unsigned int refclk;
    891	int ret;
    892
    893	for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
    894		unsigned long rate;
    895		unsigned int i;
    896		struct clk *clk;
    897		char name[8];
    898
    899		snprintf(name, sizeof(name), "ref%u", refclk);
    900		clk = devm_clk_get_optional(gtr_dev->dev, name);
    901		if (IS_ERR(clk)) {
    902			ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
    903					    "Failed to get reference clock %u\n",
    904					    refclk);
    905			goto err_clk_put;
    906		}
    907
    908		if (!clk)
    909			continue;
    910
    911		ret = clk_prepare_enable(clk);
    912		if (ret)
    913			goto err_clk_put;
    914
    915		gtr_dev->clk[refclk] = clk;
    916
    917		/*
    918		 * Get the spread spectrum (SSC) settings for the reference
    919		 * clock rate.
    920		 */
    921		rate = clk_get_rate(clk);
    922
    923		for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
    924			if (rate == ssc_lookup[i].refclk_rate) {
    925				gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
    926				break;
    927			}
    928		}
    929
    930		if (i == ARRAY_SIZE(ssc_lookup)) {
    931			dev_err(gtr_dev->dev,
    932				"Invalid rate %lu for reference clock %u\n",
    933				rate, refclk);
    934			ret = -EINVAL;
    935			goto err_clk_put;
    936		}
    937	}
    938
    939	return 0;
    940
    941err_clk_put:
    942	while (refclk--)
    943		clk_disable_unprepare(gtr_dev->clk[refclk]);
    944
    945	return ret;
    946}
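        /*
         * devm_clk_get_optional() returns NULL for reference clocks that are
         * not described in the device tree, and the clk API treats a NULL
         * clock as a no-op, so absent clocks are simply skipped and the
         * unwind loop above remains safe.
         */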
    947
    948static int xpsgtr_probe(struct platform_device *pdev)
    949{
    950	struct device_node *np = pdev->dev.of_node;
    951	struct xpsgtr_dev *gtr_dev;
    952	struct phy_provider *provider;
    953	unsigned int port;
    954	unsigned int i;
    955	int ret;
    956
    957	gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
    958	if (!gtr_dev)
    959		return -ENOMEM;
    960
    961	gtr_dev->dev = &pdev->dev;
    962	platform_set_drvdata(pdev, gtr_dev);
    963
    964	mutex_init(&gtr_dev->gtr_mutex);
    965
    966	if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
    967		gtr_dev->tx_term_fix =
    968			of_property_read_bool(np, "xlnx,tx-termination-fix");
    969
    970	/* Acquire resources. */
    971	gtr_dev->serdes = devm_platform_ioremap_resource_byname(pdev, "serdes");
    972	if (IS_ERR(gtr_dev->serdes))
    973		return PTR_ERR(gtr_dev->serdes);
    974
    975	gtr_dev->siou = devm_platform_ioremap_resource_byname(pdev, "siou");
    976	if (IS_ERR(gtr_dev->siou))
    977		return PTR_ERR(gtr_dev->siou);
    978
    979	ret = xpsgtr_get_ref_clocks(gtr_dev);
    980	if (ret)
    981		return ret;
    982
    983	/* Create PHYs. */
    984	for (port = 0; port < ARRAY_SIZE(gtr_dev->phys); ++port) {
    985		struct xpsgtr_phy *gtr_phy = &gtr_dev->phys[port];
    986		struct phy *phy;
    987
    988		gtr_phy->lane = port;
    989		gtr_phy->dev = gtr_dev;
    990
    991		phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
    992		if (IS_ERR(phy)) {
    993			dev_err(&pdev->dev, "failed to create PHY\n");
    994			ret = PTR_ERR(phy);
    995			goto err_clk_put;
    996		}
    997
    998		gtr_phy->phy = phy;
    999		phy_set_drvdata(phy, gtr_phy);
   1000	}
   1001
   1002	/* Register the PHY provider. */
   1003	provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
   1004	if (IS_ERR(provider)) {
   1005		dev_err(&pdev->dev, "registering provider failed\n");
   1006		ret = PTR_ERR(provider);
   1007		goto err_clk_put;
   1008	}
   1009	return 0;
   1010
   1011err_clk_put:
   1012	for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
   1013		clk_disable_unprepare(gtr_dev->clk[i]);
   1014
   1015	return ret;
   1016}
   1017
   1018static const struct of_device_id xpsgtr_of_match[] = {
   1019	{ .compatible = "xlnx,zynqmp-psgtr", },
   1020	{ .compatible = "xlnx,zynqmp-psgtr-v1.1", },
   1021	{},
   1022};
   1023MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
   1024
   1025static struct platform_driver xpsgtr_driver = {
   1026	.probe = xpsgtr_probe,
   1027	.driver = {
   1028		.name = "xilinx-psgtr",
   1029		.of_match_table	= xpsgtr_of_match,
   1030		.pm =  &xpsgtr_pm_ops,
   1031	},
   1032};
   1033
   1034module_platform_driver(xpsgtr_driver);
   1035
   1036MODULE_AUTHOR("Xilinx Inc.");
   1037MODULE_LICENSE("GPL v2");
   1038MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");