cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pci-tegra.c (73727B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * PCIe host controller driver for Tegra SoCs
      4 *
      5 * Copyright (c) 2010, CompuLab, Ltd.
      6 * Author: Mike Rapoport <mike@compulab.co.il>
      7 *
      8 * Based on NVIDIA PCIe driver
      9 * Copyright (c) 2008-2009, NVIDIA Corporation.
     10 *
     11 * Bits taken from arch/arm/mach-dove/pcie.c
     12 *
     13 * Author: Thierry Reding <treding@nvidia.com>
     14 */
     15
     16#include <linux/clk.h>
     17#include <linux/debugfs.h>
     18#include <linux/delay.h>
     19#include <linux/export.h>
     20#include <linux/gpio/consumer.h>
     21#include <linux/interrupt.h>
     22#include <linux/iopoll.h>
     23#include <linux/irq.h>
     24#include <linux/irqchip/chained_irq.h>
     25#include <linux/irqdomain.h>
     26#include <linux/kernel.h>
     27#include <linux/init.h>
     28#include <linux/module.h>
     29#include <linux/msi.h>
     30#include <linux/of_address.h>
     31#include <linux/of_pci.h>
     32#include <linux/of_platform.h>
     33#include <linux/pci.h>
     34#include <linux/phy/phy.h>
     35#include <linux/pinctrl/consumer.h>
     36#include <linux/platform_device.h>
     37#include <linux/reset.h>
     38#include <linux/sizes.h>
     39#include <linux/slab.h>
     40#include <linux/vmalloc.h>
     41#include <linux/regulator/consumer.h>
     42
     43#include <soc/tegra/cpuidle.h>
     44#include <soc/tegra/pmc.h>
     45
     46#include "../pci.h"
     47
     48#define INT_PCI_MSI_NR (8 * 32)
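/* i.e. 8 AFI_MSI_VEC registers of 32 bits each: 256 MSI vectors total */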
     49
     50/* register definitions */
     51
     52#define AFI_AXI_BAR0_SZ	0x00
     53#define AFI_AXI_BAR1_SZ	0x04
     54#define AFI_AXI_BAR2_SZ	0x08
     55#define AFI_AXI_BAR3_SZ	0x0c
     56#define AFI_AXI_BAR4_SZ	0x10
     57#define AFI_AXI_BAR5_SZ	0x14
     58
     59#define AFI_AXI_BAR0_START	0x18
     60#define AFI_AXI_BAR1_START	0x1c
     61#define AFI_AXI_BAR2_START	0x20
     62#define AFI_AXI_BAR3_START	0x24
     63#define AFI_AXI_BAR4_START	0x28
     64#define AFI_AXI_BAR5_START	0x2c
     65
     66#define AFI_FPCI_BAR0	0x30
     67#define AFI_FPCI_BAR1	0x34
     68#define AFI_FPCI_BAR2	0x38
     69#define AFI_FPCI_BAR3	0x3c
     70#define AFI_FPCI_BAR4	0x40
     71#define AFI_FPCI_BAR5	0x44
     72
     73#define AFI_CACHE_BAR0_SZ	0x48
     74#define AFI_CACHE_BAR0_ST	0x4c
     75#define AFI_CACHE_BAR1_SZ	0x50
     76#define AFI_CACHE_BAR1_ST	0x54
     77
     78#define AFI_MSI_BAR_SZ		0x60
     79#define AFI_MSI_FPCI_BAR_ST	0x64
     80#define AFI_MSI_AXI_BAR_ST	0x68
     81
     82#define AFI_MSI_VEC(x)		(0x6c + ((x) * 4))
     83#define AFI_MSI_EN_VEC(x)	(0x8c + ((x) * 4))
     84
     85#define AFI_CONFIGURATION		0xac
     86#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
     87#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)
     88
     89#define AFI_FPCI_ERROR_MASKS	0xb0
     90
     91#define AFI_INTR_MASK		0xb4
     92#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
     93#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
     94
     95#define AFI_INTR_CODE			0xb8
     96#define  AFI_INTR_CODE_MASK		0xf
     97#define  AFI_INTR_INI_SLAVE_ERROR	1
     98#define  AFI_INTR_INI_DECODE_ERROR	2
     99#define  AFI_INTR_TARGET_ABORT		3
    100#define  AFI_INTR_MASTER_ABORT		4
    101#define  AFI_INTR_INVALID_WRITE		5
    102#define  AFI_INTR_LEGACY		6
    103#define  AFI_INTR_FPCI_DECODE_ERROR	7
    104#define  AFI_INTR_AXI_DECODE_ERROR	8
    105#define  AFI_INTR_FPCI_TIMEOUT		9
    106#define  AFI_INTR_PE_PRSNT_SENSE	10
    107#define  AFI_INTR_PE_CLKREQ_SENSE	11
    108#define  AFI_INTR_CLKCLAMP_SENSE	12
    109#define  AFI_INTR_RDY4PD_SENSE		13
    110#define  AFI_INTR_P2P_ERROR		14
    111
    112#define AFI_INTR_SIGNATURE	0xbc
    113#define AFI_UPPER_FPCI_ADDRESS	0xc0
    114#define AFI_SM_INTR_ENABLE	0xc4
    115#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
    116#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
    117#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
    118#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
    119#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
    120#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
    121#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
    122#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
    123
    124#define AFI_AFI_INTR_ENABLE		0xc8
    125#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
    126#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
    127#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
    128#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
    129#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
    130#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
    131#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
    132#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
    133#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
    134
    135#define AFI_PCIE_PME		0xf0
    136
    137#define AFI_PCIE_CONFIG					0x0f8
    138#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
    139#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
    140#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
    141#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
    142#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
    143#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
    144#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
    145#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
    146#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
    147#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
    148#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
    149#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
    150#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
    151#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
    152#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)
    153
    154#define AFI_FUSE			0x104
    155#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
    156
    157#define AFI_PEX0_CTRL			0x110
    158#define AFI_PEX1_CTRL			0x118
    159#define  AFI_PEX_CTRL_RST		(1 << 0)
    160#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
    161#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
    162#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
    163
    164#define AFI_PLLE_CONTROL		0x160
    165#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
    166#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
    167
    168#define AFI_PEXBIAS_CTRL_0		0x168
    169
    170#define RP_ECTL_2_R1	0x00000e84
    171#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff
    172
    173#define RP_ECTL_4_R1	0x00000e8c
    174#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
    175#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16
    176
    177#define RP_ECTL_5_R1	0x00000e90
    178#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff
    179
    180#define RP_ECTL_6_R1	0x00000e94
    181#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff
    182
    183#define RP_ECTL_2_R2	0x00000ea4
    184#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff
    185
    186#define RP_ECTL_4_R2	0x00000eac
    187#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
    188#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16
    189
    190#define RP_ECTL_5_R2	0x00000eb0
    191#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff
    192
    193#define RP_ECTL_6_R2	0x00000eb4
    194#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff
    195
    196#define RP_VEND_XP	0x00000f00
    197#define  RP_VEND_XP_DL_UP			(1 << 30)
    198#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
    199#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
    200#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)
    201
    202#define RP_VEND_CTL0	0x00000f44
    203#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
    204#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)
    205
    206#define RP_VEND_CTL1	0x00000f48
    207#define  RP_VEND_CTL1_ERPT	(1 << 13)
    208
    209#define RP_VEND_XP_BIST	0x00000f4c
    210#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)
    211
    212#define RP_VEND_CTL2 0x00000fa8
    213#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
    214
    215#define RP_PRIV_MISC	0x00000fe0
    216#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
    217#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
    218#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
    219#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
    220#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
    221#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
    222#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
    223#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)
    224
    225#define RP_LINK_CONTROL_STATUS			0x00000090
    226#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
    227#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
    228
    229#define RP_LINK_CONTROL_STATUS_2		0x000000b0
    230
    231#define PADS_CTL_SEL		0x0000009c
    232
    233#define PADS_CTL		0x000000a0
    234#define  PADS_CTL_IDDQ_1L	(1 << 0)
    235#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
    236#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
    237
    238#define PADS_PLL_CTL_TEGRA20			0x000000b8
    239#define PADS_PLL_CTL_TEGRA30			0x000000b4
    240#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
    241#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
    242#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
    243#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
    244#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
    245#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
    246#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
    247#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
    248#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
    249#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
    250
    251#define PADS_REFCLK_CFG0			0x000000c8
    252#define PADS_REFCLK_CFG1			0x000000cc
    253#define PADS_REFCLK_BIAS			0x000000d0
    254
    255/*
    256 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
    257 * entries, one entry per PCIe port. These field definitions and desired
    258 * values aren't in the TRM, but do come from NVIDIA.
    259 */
    260#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
    261#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
    262#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
    263#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
    264
    265#define PME_ACK_TIMEOUT 10000
    266#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
    267
    268struct tegra_msi {
    269	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
    270	struct irq_domain *domain;
    271	struct mutex map_lock;
    272	spinlock_t mask_lock;
    273	void *virt;
    274	dma_addr_t phys;
    275	int irq;
    276};
    277
    278/* used to differentiate between Tegra SoC generations */
    279struct tegra_pcie_port_soc {
    280	struct {
    281		u8 turnoff_bit;
    282		u8 ack_bit;
    283	} pme;
    284};
    285
    286struct tegra_pcie_soc {
    287	unsigned int num_ports;
    288	const struct tegra_pcie_port_soc *ports;
    289	unsigned int msi_base_shift;
    290	unsigned long afi_pex2_ctrl;
    291	u32 pads_pll_ctl;
    292	u32 tx_ref_sel;
    293	u32 pads_refclk_cfg0;
    294	u32 pads_refclk_cfg1;
    295	u32 update_fc_threshold;
    296	bool has_pex_clkreq_en;
    297	bool has_pex_bias_ctrl;
    298	bool has_intr_prsnt_sense;
    299	bool has_cml_clk;
    300	bool has_gen2;
    301	bool force_pca_enable;
    302	bool program_uphy;
    303	bool update_clamp_threshold;
    304	bool program_deskew_time;
    305	bool update_fc_timer;
    306	bool has_cache_bars;
    307	struct {
    308		struct {
    309			u32 rp_ectl_2_r1;
    310			u32 rp_ectl_4_r1;
    311			u32 rp_ectl_5_r1;
    312			u32 rp_ectl_6_r1;
    313			u32 rp_ectl_2_r2;
    314			u32 rp_ectl_4_r2;
    315			u32 rp_ectl_5_r2;
    316			u32 rp_ectl_6_r2;
    317		} regs;
    318		bool enable;
    319	} ectl;
    320};
    321
    322struct tegra_pcie {
    323	struct device *dev;
    324
    325	void __iomem *pads;
    326	void __iomem *afi;
    327	void __iomem *cfg;
    328	int irq;
    329
    330	struct resource cs;
    331
    332	struct clk *pex_clk;
    333	struct clk *afi_clk;
    334	struct clk *pll_e;
    335	struct clk *cml_clk;
    336
    337	struct reset_control *pex_rst;
    338	struct reset_control *afi_rst;
    339	struct reset_control *pcie_xrst;
    340
    341	bool legacy_phy;
    342	struct phy *phy;
    343
    344	struct tegra_msi msi;
    345
    346	struct list_head ports;
    347	u32 xbar_config;
    348
    349	struct regulator_bulk_data *supplies;
    350	unsigned int num_supplies;
    351
    352	const struct tegra_pcie_soc *soc;
    353	struct dentry *debugfs;
    354};
    355
    356static inline struct tegra_pcie *msi_to_pcie(struct tegra_msi *msi)
    357{
    358	return container_of(msi, struct tegra_pcie, msi);
    359}
    360
    361struct tegra_pcie_port {
    362	struct tegra_pcie *pcie;
    363	struct device_node *np;
    364	struct list_head list;
    365	struct resource regs;
    366	void __iomem *base;
    367	unsigned int index;
    368	unsigned int lanes;
    369
    370	struct phy **phys;
    371
    372	struct gpio_desc *reset_gpio;
    373};
    374
    375static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
    376			      unsigned long offset)
    377{
    378	writel(value, pcie->afi + offset);
    379}
    380
    381static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
    382{
    383	return readl(pcie->afi + offset);
    384}
    385
    386static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
    387			       unsigned long offset)
    388{
    389	writel(value, pcie->pads + offset);
    390}
    391
    392static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
    393{
    394	return readl(pcie->pads + offset);
    395}
    396
    397/*
    398 * The configuration space mapping on Tegra is somewhat similar to the ECAM
    399 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
    400 * register accesses are mapped:
    401 *
    402 *    [27:24] extended register number
    403 *    [23:16] bus number
    404 *    [15:11] device number
    405 *    [10: 8] function number
    406 *    [ 7: 0] register number
    407 *
    408 * Mapping the whole extended configuration space would require 256 MiB of
    409 * virtual address space, only a small part of which will actually be used.
    410 *
    411 * To work around this, a 4 KiB region is used to generate the required
    412 * configuration transaction with relevant B:D:F and register offset values.
    413 * This is achieved by dynamically programming the base address and size
    414 * of the AFI_AXI_BAR used for endpoint config space mapping, so that the
    415 * address whose access generates the desired config transaction falls
    416 * within this 4 KiB region.
    417 */
    418static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
    419					   unsigned int where)
    420{
    421	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
    422	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
    423}
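
/*
 * Worked example of the encoding above (illustrative, hypothetical B:D:F):
 * a config access to bus 0x01, device 0, function 0, register 0x104 yields
 *
 *   ((0x104 & 0xf00) << 16) | (0x01 << 16) | (0 << 11) | (0 << 8) | 0x04
 *     = 0x01000000 | 0x00010000 | 0x00000004 = 0x01010004
 *
 * i.e. extended register nibble 0x1 in bits [27:24], bus 0x01 in [23:16]
 * and the low register byte 0x04 in [7:0], matching the layout documented
 * above.
 */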
    424
    425static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
    426					unsigned int devfn,
    427					int where)
    428{
    429	struct tegra_pcie *pcie = bus->sysdata;
    430	void __iomem *addr = NULL;
    431
    432	if (bus->number == 0) {
    433		unsigned int slot = PCI_SLOT(devfn);
    434		struct tegra_pcie_port *port;
    435
    436		list_for_each_entry(port, &pcie->ports, list) {
    437			if (port->index + 1 == slot) {
    438				addr = port->base + (where & ~3);
    439				break;
    440			}
    441		}
    442	} else {
    443		unsigned int offset;
    444		u32 base;
    445
    446		offset = tegra_pcie_conf_offset(bus->number, devfn, where);
    447
    448		/* move 4 KiB window to offset within the FPCI region */
    449		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
    450		afi_writel(pcie, base, AFI_FPCI_BAR0);
    451
    452		/* move to correct offset within the 4 KiB page */
    453		addr = pcie->cfg + (offset & (SZ_4K - 1));
    454	}
    455
    456	return addr;
    457}
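
/*
 * Continuing the example (illustrative): for offset 0x01010004,
 * offset & ~(SZ_4K - 1) = 0x01010000, so AFI_FPCI_BAR0 is programmed with
 * 0xfe100000 + (0x01010000 >> 8) = 0xfe110100. The FPCI BAR registers hold
 * the FPCI byte address shifted right by 8 (compare 0xfe100000 here and
 * 0xfdfc0000 below against the FPCI map comment further down), so the
 * 4 KiB window now covers FPCI address 0xfe11010000 and the access itself
 * lands at pcie->cfg + 0x004.
 */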
    458
    459static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
    460				  int where, int size, u32 *value)
    461{
    462	if (bus->number == 0)
    463		return pci_generic_config_read32(bus, devfn, where, size,
    464						 value);
    465
    466	return pci_generic_config_read(bus, devfn, where, size, value);
    467}
    468
    469static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
    470				   int where, int size, u32 value)
    471{
    472	if (bus->number == 0)
    473		return pci_generic_config_write32(bus, devfn, where, size,
    474						  value);
    475
    476	return pci_generic_config_write(bus, devfn, where, size, value);
    477}
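
/*
 * Note: accesses to the root ports on bus 0 go through the *32 variants,
 * which emulate byte and halfword config accesses on top of full 32-bit
 * reads and writes (presumably because the root port register space only
 * supports word accesses). The generic write32 helper carries the usual
 * caveat that its read-modify-write can clobber adjacent write-1-to-clear
 * status bits.
 */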
    478
    479static struct pci_ops tegra_pcie_ops = {
    480	.map_bus = tegra_pcie_map_bus,
    481	.read = tegra_pcie_config_read,
    482	.write = tegra_pcie_config_write,
    483};
    484
    485static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
    486{
    487	const struct tegra_pcie_soc *soc = port->pcie->soc;
    488	unsigned long ret = 0;
    489
    490	switch (port->index) {
    491	case 0:
    492		ret = AFI_PEX0_CTRL;
    493		break;
    494
    495	case 1:
    496		ret = AFI_PEX1_CTRL;
    497		break;
    498
    499	case 2:
    500		ret = soc->afi_pex2_ctrl;
    501		break;
    502	}
    503
    504	return ret;
    505}
    506
    507static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
    508{
    509	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
    510	unsigned long value;
    511
    512	/* pulse reset signal */
    513	if (port->reset_gpio) {
    514		gpiod_set_value(port->reset_gpio, 1);
    515	} else {
    516		value = afi_readl(port->pcie, ctrl);
    517		value &= ~AFI_PEX_CTRL_RST;
    518		afi_writel(port->pcie, value, ctrl);
    519	}
    520
    521	usleep_range(1000, 2000);
    522
    523	if (port->reset_gpio) {
    524		gpiod_set_value(port->reset_gpio, 0);
    525	} else {
    526		value = afi_readl(port->pcie, ctrl);
    527		value |= AFI_PEX_CTRL_RST;
    528		afi_writel(port->pcie, value, ctrl);
    529	}
    530}
    531
    532static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
    533{
    534	const struct tegra_pcie_soc *soc = port->pcie->soc;
    535	u32 value;
    536
    537	/* Enable AER capability */
    538	value = readl(port->base + RP_VEND_CTL1);
    539	value |= RP_VEND_CTL1_ERPT;
    540	writel(value, port->base + RP_VEND_CTL1);
    541
    542	/* Optimal settings to enhance bandwidth */
    543	value = readl(port->base + RP_VEND_XP);
    544	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
    545	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
    546	writel(value, port->base + RP_VEND_XP);
    547
    548	/*
    549	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
    550	 * to avoid truncation of PM messages which results in receiver errors
    551	 */
    552	value = readl(port->base + RP_VEND_XP_BIST);
    553	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
    554	writel(value, port->base + RP_VEND_XP_BIST);
    555
    556	value = readl(port->base + RP_PRIV_MISC);
    557	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
    558	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
    559
    560	if (soc->update_clamp_threshold) {
    561		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
    562				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
    563		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
    564			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
    565	}
    566
    567	writel(value, port->base + RP_PRIV_MISC);
    568}
    569
    570static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
    571{
    572	const struct tegra_pcie_soc *soc = port->pcie->soc;
    573	u32 value;
    574
    575	value = readl(port->base + RP_ECTL_2_R1);
    576	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
    577	value |= soc->ectl.regs.rp_ectl_2_r1;
    578	writel(value, port->base + RP_ECTL_2_R1);
    579
    580	value = readl(port->base + RP_ECTL_4_R1);
    581	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
    582	value |= soc->ectl.regs.rp_ectl_4_r1 <<
    583				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
    584	writel(value, port->base + RP_ECTL_4_R1);
    585
    586	value = readl(port->base + RP_ECTL_5_R1);
    587	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
    588	value |= soc->ectl.regs.rp_ectl_5_r1;
    589	writel(value, port->base + RP_ECTL_5_R1);
    590
    591	value = readl(port->base + RP_ECTL_6_R1);
    592	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
    593	value |= soc->ectl.regs.rp_ectl_6_r1;
    594	writel(value, port->base + RP_ECTL_6_R1);
    595
    596	value = readl(port->base + RP_ECTL_2_R2);
    597	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
    598	value |= soc->ectl.regs.rp_ectl_2_r2;
    599	writel(value, port->base + RP_ECTL_2_R2);
    600
    601	value = readl(port->base + RP_ECTL_4_R2);
    602	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
    603	value |= soc->ectl.regs.rp_ectl_4_r2 <<
    604				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
    605	writel(value, port->base + RP_ECTL_4_R2);
    606
    607	value = readl(port->base + RP_ECTL_5_R2);
    608	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
    609	value |= soc->ectl.regs.rp_ectl_5_r2;
    610	writel(value, port->base + RP_ECTL_5_R2);
    611
    612	value = readl(port->base + RP_ECTL_6_R2);
    613	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
    614	value |= soc->ectl.regs.rp_ectl_6_r2;
    615	writel(value, port->base + RP_ECTL_6_R2);
    616}
    617
    618static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
    619{
    620	const struct tegra_pcie_soc *soc = port->pcie->soc;
    621	u32 value;
    622
    623	/*
    624	 * Sometimes link speed change from Gen2 to Gen1 fails due to
    625	 * instability in deskew logic on lane-0. Increase the deskew
    626	 * retry time to resolve this issue.
    627	 */
    628	if (soc->program_deskew_time) {
    629		value = readl(port->base + RP_VEND_CTL0);
    630		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
    631		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
    632		writel(value, port->base + RP_VEND_CTL0);
    633	}
    634
    635	if (soc->update_fc_timer) {
    636		value = readl(port->base + RP_VEND_XP);
    637		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
    638		value |= soc->update_fc_threshold;
    639		writel(value, port->base + RP_VEND_XP);
    640	}
    641
    642	/*
    643	 * PCIe link doesn't come up with a few legacy PCIe endpoints if the
    644	 * root port advertises both Gen-1 and Gen-2 speeds on Tegra.
    645	 * Hence, the strategy followed here is to initially advertise
    646	 * only Gen-1 and, after the link is up, retrain it to Gen-2 speed.
    647	 */
    648	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
    649	value &= ~PCI_EXP_LNKSTA_CLS;
    650	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
    651	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
    652}
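
/*
 * Illustrative sketch of the retrain step referenced above (the actual
 * helper appears later in this file; this is an abridged editorial
 * rendering, not verbatim driver code). Once the link is up at Gen-1,
 * the target speed is bumped to Gen-2 and a retrain is requested:
 *
 *	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
 *	value &= ~PCI_EXP_LNKSTA_CLS;
 *	value |= PCI_EXP_LNKSTA_CLS_5_0GB;
 *	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
 *
 *	value = readl(port->base + RP_LINK_CONTROL_STATUS);
 *	value |= PCI_EXP_LNKCTL_RL;
 *	writel(value, port->base + RP_LINK_CONTROL_STATUS);
 *
 * followed by polling the link-training status, bounded by
 * LINK_RETRAIN_TIMEOUT.
 */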
    653
    654static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
    655{
    656	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
    657	const struct tegra_pcie_soc *soc = port->pcie->soc;
    658	unsigned long value;
    659
    660	/* enable reference clock */
    661	value = afi_readl(port->pcie, ctrl);
    662	value |= AFI_PEX_CTRL_REFCLK_EN;
    663
    664	if (soc->has_pex_clkreq_en)
    665		value |= AFI_PEX_CTRL_CLKREQ_EN;
    666
    667	value |= AFI_PEX_CTRL_OVERRIDE_EN;
    668
    669	afi_writel(port->pcie, value, ctrl);
    670
    671	tegra_pcie_port_reset(port);
    672
    673	if (soc->force_pca_enable) {
    674		value = readl(port->base + RP_VEND_CTL2);
    675		value |= RP_VEND_CTL2_PCA_ENABLE;
    676		writel(value, port->base + RP_VEND_CTL2);
    677	}
    678
    679	tegra_pcie_enable_rp_features(port);
    680
    681	if (soc->ectl.enable)
    682		tegra_pcie_program_ectl_settings(port);
    683
    684	tegra_pcie_apply_sw_fixup(port);
    685}
    686
    687static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
    688{
    689	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
    690	const struct tegra_pcie_soc *soc = port->pcie->soc;
    691	unsigned long value;
    692
    693	/* assert port reset */
    694	value = afi_readl(port->pcie, ctrl);
    695	value &= ~AFI_PEX_CTRL_RST;
    696	afi_writel(port->pcie, value, ctrl);
    697
    698	/* disable reference clock */
    699	value = afi_readl(port->pcie, ctrl);
    700
    701	if (soc->has_pex_clkreq_en)
    702		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
    703
    704	value &= ~AFI_PEX_CTRL_REFCLK_EN;
    705	afi_writel(port->pcie, value, ctrl);
    706
    707	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
    708	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
    709	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
    710	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
    711	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
    712}
    713
    714static void tegra_pcie_port_free(struct tegra_pcie_port *port)
    715{
    716	struct tegra_pcie *pcie = port->pcie;
    717	struct device *dev = pcie->dev;
    718
    719	devm_iounmap(dev, port->base);
    720	devm_release_mem_region(dev, port->regs.start,
    721				resource_size(&port->regs));
    722	list_del(&port->list);
    723	devm_kfree(dev, port);
    724}
    725
    726/* Tegra PCIE root complex wrongly reports device class */
    727static void tegra_pcie_fixup_class(struct pci_dev *dev)
    728{
    729	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
    730}
    731DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
    732DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
    733DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
    734DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
    735
    736/* Tegra20 and Tegra30 PCIE require relaxed ordering */
    737static void tegra_pcie_relax_enable(struct pci_dev *dev)
    738{
    739	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
    740}
    741DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
    742DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
    743DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
    744DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
    745
    746static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
    747{
    748	struct tegra_pcie *pcie = pdev->bus->sysdata;
    749	int irq;
    750
    751	tegra_cpuidle_pcie_irqs_in_use();
    752
    753	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
    754	if (!irq)
    755		irq = pcie->irq;
    756
    757	return irq;
    758}
    759
    760static irqreturn_t tegra_pcie_isr(int irq, void *arg)
    761{
    762	static const char * const err_msg[] = {
    763		"Unknown",
    764		"AXI slave error",
    765		"AXI decode error",
    766		"Target abort",
    767		"Master abort",
    768		"Invalid write",
    769		"Legacy interrupt",
    770		"Response decoding error",
    771		"AXI response decoding error",
    772		"Transaction timeout",
    773		"Slot present pin change",
    774		"Slot clock request change",
    775		"TMS clock ramp change",
    776		"TMS ready for power down",
    777		"Peer2Peer error",
    778	};
    779	struct tegra_pcie *pcie = arg;
    780	struct device *dev = pcie->dev;
    781	u32 code, signature;
    782
    783	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
    784	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
    785	afi_writel(pcie, 0, AFI_INTR_CODE);
    786
    787	if (code == AFI_INTR_LEGACY)
    788		return IRQ_NONE;
    789
    790	if (code >= ARRAY_SIZE(err_msg))
    791		code = 0;
    792
    793	/*
    794	 * do not pollute kernel log with master abort reports since they
    795	 * happen a lot during enumeration
    796	 */
    797	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
    798		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
    799	else
    800		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
    801
    802	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
    803	    code == AFI_INTR_FPCI_DECODE_ERROR) {
    804		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
    805		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
    806
    807		if (code == AFI_INTR_MASTER_ABORT)
    808			dev_dbg(dev, "  FPCI address: %10llx\n", address);
    809		else
    810			dev_err(dev, "  FPCI address: %10llx\n", address);
    811	}
    812
    813	return IRQ_HANDLED;
    814}
    815
    816/*
    817 * FPCI map is as follows:
    818 * - 0xfdfc000000: I/O space
    819 * - 0xfdfe000000: type 0 configuration space
    820 * - 0xfdff000000: type 1 configuration space
    821 * - 0xfe00000000: type 0 extended configuration space
    822 * - 0xfe10000000: type 1 extended configuration space
    823 */
    824static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
    825{
    826	u32 size;
    827	struct resource_entry *entry;
    828	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
    829
    830	/* Bar 0: type 1 extended configuration space */
    831	size = resource_size(&pcie->cs);
    832	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
    833	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
    834
    835	resource_list_for_each_entry(entry, &bridge->windows) {
    836		u32 fpci_bar, axi_address;
    837		struct resource *res = entry->res;
    838
    839		size = resource_size(res);
    840
    841		switch (resource_type(res)) {
    842		case IORESOURCE_IO:
    843			/* Bar 1: downstream IO bar */
    844			fpci_bar = 0xfdfc0000;
    845			axi_address = pci_pio_to_address(res->start);
    846			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
    847			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
    848			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
    849			break;
    850		case IORESOURCE_MEM:
    851			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
    852			axi_address = res->start;
    853
    854			if (res->flags & IORESOURCE_PREFETCH) {
    855				/* Bar 2: prefetchable memory BAR */
    856				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
    857				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
    858				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
    859
    860			} else {
    861				/* Bar 3: non prefetchable memory BAR */
    862				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
    863				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
    864				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
    865			}
    866			break;
    867		}
    868	}
    869
    870	/* NULL out the remaining BARs as they are not used */
    871	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
    872	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
    873	afi_writel(pcie, 0, AFI_FPCI_BAR4);
    874
    875	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
    876	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
    877	afi_writel(pcie, 0, AFI_FPCI_BAR5);
    878
    879	if (pcie->soc->has_cache_bars) {
    880		/* map all upstream transactions as uncached */
    881		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
    882		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
    883		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
    884		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
    885	}
    886
    887	/* MSI translations are set up only when needed */
    888	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
    889	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
    890	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
    891	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
    892}
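
/*
 * Worked example of the MEM encoding above (illustrative, hypothetical
 * window at 0x20000000): fpci_bar = ((0x20000000 >> 12) & 0x0fffffff) << 4
 * | 0x1 = 0x00200001. The upper bits are the AXI address shifted right by
 * 8 and bit 0 appears to act as a type/enable flag, so memory windows map
 * 1:1 between the AXI and FPCI address spaces. The AFI_AXI_BAR*_SZ
 * registers are programmed in units of 4 KiB pages (size >> 12).
 */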
    893
    894static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
    895{
    896	const struct tegra_pcie_soc *soc = pcie->soc;
    897	u32 value;
    898
    899	timeout = jiffies + msecs_to_jiffies(timeout);
    900
    901	while (time_before(jiffies, timeout)) {
    902		value = pads_readl(pcie, soc->pads_pll_ctl);
    903		if (value & PADS_PLL_CTL_LOCKDET)
    904			return 0;
    905	}
    906
    907	return -ETIMEDOUT;
    908}
    909
    910static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
    911{
    912	struct device *dev = pcie->dev;
    913	const struct tegra_pcie_soc *soc = pcie->soc;
    914	u32 value;
    915	int err;
    916
    917	/* initialize internal PHY, enable up to 16 PCIE lanes */
    918	pads_writel(pcie, 0x0, PADS_CTL_SEL);
    919
    920	/* override IDDQ to 1 on all 4 lanes */
    921	value = pads_readl(pcie, PADS_CTL);
    922	value |= PADS_CTL_IDDQ_1L;
    923	pads_writel(pcie, value, PADS_CTL);
    924
    925	/*
    926	 * Set up PHY PLL inputs: select PLLE output as refclock,
    927	 * set TX ref sel to div10 (not div5).
    928	 */
    929	value = pads_readl(pcie, soc->pads_pll_ctl);
    930	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
    931	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
    932	pads_writel(pcie, value, soc->pads_pll_ctl);
    933
    934	/* reset PLL */
    935	value = pads_readl(pcie, soc->pads_pll_ctl);
    936	value &= ~PADS_PLL_CTL_RST_B4SM;
    937	pads_writel(pcie, value, soc->pads_pll_ctl);
    938
    939	usleep_range(20, 100);
    940
    941	/* take PLL out of reset */
    942	value = pads_readl(pcie, soc->pads_pll_ctl);
    943	value |= PADS_PLL_CTL_RST_B4SM;
    944	pads_writel(pcie, value, soc->pads_pll_ctl);
    945
    946	/* wait for the PLL to lock */
    947	err = tegra_pcie_pll_wait(pcie, 500);
    948	if (err < 0) {
    949		dev_err(dev, "PLL failed to lock: %d\n", err);
    950		return err;
    951	}
    952
    953	/* turn off IDDQ override */
    954	value = pads_readl(pcie, PADS_CTL);
    955	value &= ~PADS_CTL_IDDQ_1L;
    956	pads_writel(pcie, value, PADS_CTL);
    957
    958	/* enable TX/RX data */
    959	value = pads_readl(pcie, PADS_CTL);
    960	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
    961	pads_writel(pcie, value, PADS_CTL);
    962
    963	return 0;
    964}
    965
    966static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
    967{
    968	const struct tegra_pcie_soc *soc = pcie->soc;
    969	u32 value;
    970
    971	/* disable TX/RX data */
    972	value = pads_readl(pcie, PADS_CTL);
    973	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
    974	pads_writel(pcie, value, PADS_CTL);
    975
    976	/* override IDDQ */
    977	value = pads_readl(pcie, PADS_CTL);
    978	value |= PADS_CTL_IDDQ_1L;
    979	pads_writel(pcie, value, PADS_CTL);
    980
    981	/* reset PLL */
    982	value = pads_readl(pcie, soc->pads_pll_ctl);
    983	value &= ~PADS_PLL_CTL_RST_B4SM;
    984	pads_writel(pcie, value, soc->pads_pll_ctl);
    985
    986	usleep_range(20, 100);
    987
    988	return 0;
    989}
    990
    991static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
    992{
    993	struct device *dev = port->pcie->dev;
    994	unsigned int i;
    995	int err;
    996
    997	for (i = 0; i < port->lanes; i++) {
    998		err = phy_power_on(port->phys[i]);
    999		if (err < 0) {
   1000			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
   1001			return err;
   1002		}
   1003	}
   1004
   1005	return 0;
   1006}
   1007
   1008static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
   1009{
   1010	struct device *dev = port->pcie->dev;
   1011	unsigned int i;
   1012	int err;
   1013
   1014	for (i = 0; i < port->lanes; i++) {
   1015		err = phy_power_off(port->phys[i]);
   1016		if (err < 0) {
   1017			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
   1018				err);
   1019			return err;
   1020		}
   1021	}
   1022
   1023	return 0;
   1024}
   1025
   1026static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
   1027{
   1028	struct device *dev = pcie->dev;
   1029	struct tegra_pcie_port *port;
   1030	int err;
   1031
   1032	if (pcie->legacy_phy) {
   1033		if (pcie->phy)
   1034			err = phy_power_on(pcie->phy);
   1035		else
   1036			err = tegra_pcie_phy_enable(pcie);
   1037
   1038		if (err < 0)
   1039			dev_err(dev, "failed to power on PHY: %d\n", err);
   1040
   1041		return err;
   1042	}
   1043
   1044	list_for_each_entry(port, &pcie->ports, list) {
   1045		err = tegra_pcie_port_phy_power_on(port);
   1046		if (err < 0) {
   1047			dev_err(dev,
   1048				"failed to power on PCIe port %u PHY: %d\n",
   1049				port->index, err);
   1050			return err;
   1051		}
   1052	}
   1053
   1054	return 0;
   1055}
   1056
   1057static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
   1058{
   1059	struct device *dev = pcie->dev;
   1060	struct tegra_pcie_port *port;
   1061	int err;
   1062
   1063	if (pcie->legacy_phy) {
   1064		if (pcie->phy)
   1065			err = phy_power_off(pcie->phy);
   1066		else
   1067			err = tegra_pcie_phy_disable(pcie);
   1068
   1069		if (err < 0)
   1070			dev_err(dev, "failed to power off PHY: %d\n", err);
   1071
   1072		return err;
   1073	}
   1074
   1075	list_for_each_entry(port, &pcie->ports, list) {
   1076		err = tegra_pcie_port_phy_power_off(port);
   1077		if (err < 0) {
   1078			dev_err(dev,
   1079				"failed to power off PCIe port %u PHY: %d\n",
   1080				port->index, err);
   1081			return err;
   1082		}
   1083	}
   1084
   1085	return 0;
   1086}
   1087
   1088static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
   1089{
   1090	const struct tegra_pcie_soc *soc = pcie->soc;
   1091	struct tegra_pcie_port *port;
   1092	unsigned long value;
   1093
   1094	/* enable PLL power down */
   1095	if (pcie->phy) {
   1096		value = afi_readl(pcie, AFI_PLLE_CONTROL);
   1097		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
   1098		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
   1099		afi_writel(pcie, value, AFI_PLLE_CONTROL);
   1100	}
   1101
   1102	/* power down PCIe slot clock bias pad */
   1103	if (soc->has_pex_bias_ctrl)
   1104		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
   1105
   1106	/* configure mode and disable all ports */
   1107	value = afi_readl(pcie, AFI_PCIE_CONFIG);
   1108	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
   1109	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
   1110	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
   1111
   1112	list_for_each_entry(port, &pcie->ports, list) {
   1113		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
   1114		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
   1115	}
   1116
   1117	afi_writel(pcie, value, AFI_PCIE_CONFIG);
   1118
   1119	if (soc->has_gen2) {
   1120		value = afi_readl(pcie, AFI_FUSE);
   1121		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
   1122		afi_writel(pcie, value, AFI_FUSE);
   1123	} else {
   1124		value = afi_readl(pcie, AFI_FUSE);
   1125		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
   1126		afi_writel(pcie, value, AFI_FUSE);
   1127	}
   1128
   1129	/* Disable AFI dynamic clock gating and enable PCIe */
   1130	value = afi_readl(pcie, AFI_CONFIGURATION);
   1131	value |= AFI_CONFIGURATION_EN_FPCI;
   1132	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
   1133	afi_writel(pcie, value, AFI_CONFIGURATION);
   1134
   1135	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
   1136		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
   1137		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
   1138
   1139	if (soc->has_intr_prsnt_sense)
   1140		value |= AFI_INTR_EN_PRSNT_SENSE;
   1141
   1142	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
   1143	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
   1144
   1145	/* don't enable MSI for now, only when needed */
   1146	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
   1147
   1148	/* disable all exceptions */
   1149	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
   1150}
   1151
   1152static void tegra_pcie_power_off(struct tegra_pcie *pcie)
   1153{
   1154	struct device *dev = pcie->dev;
   1155	const struct tegra_pcie_soc *soc = pcie->soc;
   1156	int err;
   1157
   1158	reset_control_assert(pcie->afi_rst);
   1159
   1160	clk_disable_unprepare(pcie->pll_e);
   1161	if (soc->has_cml_clk)
   1162		clk_disable_unprepare(pcie->cml_clk);
   1163	clk_disable_unprepare(pcie->afi_clk);
   1164
   1165	if (!dev->pm_domain)
   1166		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
   1167
   1168	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
   1169	if (err < 0)
   1170		dev_warn(dev, "failed to disable regulators: %d\n", err);
   1171}
   1172
   1173static int tegra_pcie_power_on(struct tegra_pcie *pcie)
   1174{
   1175	struct device *dev = pcie->dev;
   1176	const struct tegra_pcie_soc *soc = pcie->soc;
   1177	int err;
   1178
   1179	reset_control_assert(pcie->pcie_xrst);
   1180	reset_control_assert(pcie->afi_rst);
   1181	reset_control_assert(pcie->pex_rst);
   1182
   1183	if (!dev->pm_domain)
   1184		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
   1185
   1186	/* enable regulators */
   1187	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
   1188	if (err < 0)
   1189		dev_err(dev, "failed to enable regulators: %d\n", err);
   1190
   1191	if (!dev->pm_domain) {
   1192		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
   1193		if (err) {
   1194			dev_err(dev, "failed to power ungate: %d\n", err);
   1195			goto regulator_disable;
   1196		}
   1197		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
   1198		if (err) {
   1199			dev_err(dev, "failed to remove clamp: %d\n", err);
   1200			goto powergate;
   1201		}
   1202	}
   1203
   1204	err = clk_prepare_enable(pcie->afi_clk);
   1205	if (err < 0) {
   1206		dev_err(dev, "failed to enable AFI clock: %d\n", err);
   1207		goto powergate;
   1208	}
   1209
   1210	if (soc->has_cml_clk) {
   1211		err = clk_prepare_enable(pcie->cml_clk);
   1212		if (err < 0) {
   1213			dev_err(dev, "failed to enable CML clock: %d\n", err);
   1214			goto disable_afi_clk;
   1215		}
   1216	}
   1217
   1218	err = clk_prepare_enable(pcie->pll_e);
   1219	if (err < 0) {
   1220		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
   1221		goto disable_cml_clk;
   1222	}
   1223
   1224	reset_control_deassert(pcie->afi_rst);
   1225
   1226	return 0;
   1227
   1228disable_cml_clk:
   1229	if (soc->has_cml_clk)
   1230		clk_disable_unprepare(pcie->cml_clk);
   1231disable_afi_clk:
   1232	clk_disable_unprepare(pcie->afi_clk);
   1233powergate:
   1234	if (!dev->pm_domain)
   1235		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
   1236regulator_disable:
   1237	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
   1238
   1239	return err;
   1240}
   1241
   1242static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
   1243{
   1244	const struct tegra_pcie_soc *soc = pcie->soc;
   1245
   1246	/* Configure the reference clock driver */
   1247	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
   1248
   1249	if (soc->num_ports > 2)
   1250		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
   1251}
   1252
   1253static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
   1254{
   1255	struct device *dev = pcie->dev;
   1256	const struct tegra_pcie_soc *soc = pcie->soc;
   1257
   1258	pcie->pex_clk = devm_clk_get(dev, "pex");
   1259	if (IS_ERR(pcie->pex_clk))
   1260		return PTR_ERR(pcie->pex_clk);
   1261
   1262	pcie->afi_clk = devm_clk_get(dev, "afi");
   1263	if (IS_ERR(pcie->afi_clk))
   1264		return PTR_ERR(pcie->afi_clk);
   1265
   1266	pcie->pll_e = devm_clk_get(dev, "pll_e");
   1267	if (IS_ERR(pcie->pll_e))
   1268		return PTR_ERR(pcie->pll_e);
   1269
   1270	if (soc->has_cml_clk) {
   1271		pcie->cml_clk = devm_clk_get(dev, "cml");
   1272		if (IS_ERR(pcie->cml_clk))
   1273			return PTR_ERR(pcie->cml_clk);
   1274	}
   1275
   1276	return 0;
   1277}
   1278
   1279static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
   1280{
   1281	struct device *dev = pcie->dev;
   1282
   1283	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
   1284	if (IS_ERR(pcie->pex_rst))
   1285		return PTR_ERR(pcie->pex_rst);
   1286
   1287	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
   1288	if (IS_ERR(pcie->afi_rst))
   1289		return PTR_ERR(pcie->afi_rst);
   1290
   1291	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
   1292	if (IS_ERR(pcie->pcie_xrst))
   1293		return PTR_ERR(pcie->pcie_xrst);
   1294
   1295	return 0;
   1296}
   1297
   1298static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
   1299{
   1300	struct device *dev = pcie->dev;
   1301	int err;
   1302
   1303	pcie->phy = devm_phy_optional_get(dev, "pcie");
   1304	if (IS_ERR(pcie->phy)) {
   1305		err = PTR_ERR(pcie->phy);
   1306		dev_err(dev, "failed to get PHY: %d\n", err);
   1307		return err;
   1308	}
   1309
   1310	err = phy_init(pcie->phy);
   1311	if (err < 0) {
   1312		dev_err(dev, "failed to initialize PHY: %d\n", err);
   1313		return err;
   1314	}
   1315
   1316	pcie->legacy_phy = true;
   1317
   1318	return 0;
   1319}
   1320
   1321static struct phy *devm_of_phy_optional_get_index(struct device *dev,
   1322						  struct device_node *np,
   1323						  const char *consumer,
   1324						  unsigned int index)
   1325{
   1326	struct phy *phy;
   1327	char *name;
   1328
   1329	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
   1330	if (!name)
   1331		return ERR_PTR(-ENOMEM);
   1332
   1333	phy = devm_of_phy_get(dev, np, name);
   1334	kfree(name);
   1335
   1336	if (PTR_ERR(phy) == -ENODEV)
   1337		phy = NULL;
   1338
   1339	return phy;
   1340}
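
/*
 * Usage note: with consumer "pcie", as passed by tegra_pcie_port_get_phys()
 * below, this looks up per-lane PHYs named "pcie-0", "pcie-1", ... in the
 * port's device tree node. A missing entry (-ENODEV) is deliberately
 * treated as "no PHY" rather than as an error.
 */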
   1341
   1342static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
   1343{
   1344	struct device *dev = port->pcie->dev;
   1345	struct phy *phy;
   1346	unsigned int i;
   1347	int err;
   1348
   1349	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
   1350	if (!port->phys)
   1351		return -ENOMEM;
   1352
   1353	for (i = 0; i < port->lanes; i++) {
   1354		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
   1355		if (IS_ERR(phy)) {
   1356			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
   1357				PTR_ERR(phy));
   1358			return PTR_ERR(phy);
   1359		}
   1360
   1361		err = phy_init(phy);
   1362		if (err < 0) {
   1363			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
   1364				err);
   1365			return err;
   1366		}
   1367
   1368		port->phys[i] = phy;
   1369	}
   1370
   1371	return 0;
   1372}
   1373
   1374static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
   1375{
   1376	const struct tegra_pcie_soc *soc = pcie->soc;
   1377	struct device_node *np = pcie->dev->of_node;
   1378	struct tegra_pcie_port *port;
   1379	int err;
   1380
   1381	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
   1382		return tegra_pcie_phys_get_legacy(pcie);
   1383
   1384	list_for_each_entry(port, &pcie->ports, list) {
   1385		err = tegra_pcie_port_get_phys(port);
   1386		if (err < 0)
   1387			return err;
   1388	}
   1389
   1390	return 0;
   1391}
   1392
   1393static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
   1394{
   1395	struct tegra_pcie_port *port;
   1396	struct device *dev = pcie->dev;
   1397	int err, i;
   1398
   1399	if (pcie->legacy_phy) {
   1400		err = phy_exit(pcie->phy);
   1401		if (err < 0)
   1402			dev_err(dev, "failed to teardown PHY: %d\n", err);
   1403		return;
   1404	}
   1405
   1406	list_for_each_entry(port, &pcie->ports, list) {
   1407		for (i = 0; i < port->lanes; i++) {
   1408			err = phy_exit(port->phys[i]);
   1409			if (err < 0)
   1410				dev_err(dev, "failed to teardown PHY#%u: %d\n",
   1411					i, err);
   1412		}
   1413	}
   1414}
   1415
   1416static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
   1417{
   1418	struct device *dev = pcie->dev;
   1419	struct platform_device *pdev = to_platform_device(dev);
   1420	struct resource *res;
   1421	const struct tegra_pcie_soc *soc = pcie->soc;
   1422	int err;
   1423
   1424	err = tegra_pcie_clocks_get(pcie);
   1425	if (err) {
   1426		dev_err(dev, "failed to get clocks: %d\n", err);
   1427		return err;
   1428	}
   1429
   1430	err = tegra_pcie_resets_get(pcie);
   1431	if (err) {
   1432		dev_err(dev, "failed to get resets: %d\n", err);
   1433		return err;
   1434	}
   1435
   1436	if (soc->program_uphy) {
   1437		err = tegra_pcie_phys_get(pcie);
   1438		if (err < 0) {
   1439			dev_err(dev, "failed to get PHYs: %d\n", err);
   1440			return err;
   1441		}
   1442	}
   1443
   1444	pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
   1445	if (IS_ERR(pcie->pads)) {
   1446		err = PTR_ERR(pcie->pads);
   1447		goto phys_put;
   1448	}
   1449
   1450	pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
   1451	if (IS_ERR(pcie->afi)) {
   1452		err = PTR_ERR(pcie->afi);
   1453		goto phys_put;
   1454	}
   1455
   1456	/* request configuration space, but remap later, on demand */
   1457	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
   1458	if (!res) {
   1459		err = -EADDRNOTAVAIL;
   1460		goto phys_put;
   1461	}
   1462
   1463	pcie->cs = *res;
   1464
   1465	/* constrain configuration space to 4 KiB */
   1466	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
   1467
   1468	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
   1469	if (IS_ERR(pcie->cfg)) {
   1470		err = PTR_ERR(pcie->cfg);
   1471		goto phys_put;
   1472	}
   1473
   1474	/* request interrupt */
   1475	err = platform_get_irq_byname(pdev, "intr");
   1476	if (err < 0)
   1477		goto phys_put;
   1478
   1479	pcie->irq = err;
   1480
   1481	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
   1482	if (err) {
   1483		dev_err(dev, "failed to register IRQ: %d\n", err);
   1484		goto phys_put;
   1485	}
   1486
   1487	return 0;
   1488
   1489phys_put:
   1490	if (soc->program_uphy)
   1491		tegra_pcie_phys_put(pcie);
   1492
   1493	return err;
   1494}
   1495
   1496static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
   1497{
   1498	const struct tegra_pcie_soc *soc = pcie->soc;
   1499
   1500	if (pcie->irq > 0)
   1501		free_irq(pcie->irq, pcie);
   1502
   1503	if (soc->program_uphy)
   1504		tegra_pcie_phys_put(pcie);
   1505
   1506	return 0;
   1507}
   1508
   1509static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
   1510{
   1511	struct tegra_pcie *pcie = port->pcie;
   1512	const struct tegra_pcie_soc *soc = pcie->soc;
   1513	int err;
   1514	u32 val;
   1515	u8 ack_bit;
   1516
   1517	val = afi_readl(pcie, AFI_PCIE_PME);
   1518	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
   1519	afi_writel(pcie, val, AFI_PCIE_PME);
   1520
   1521	ack_bit = soc->ports[port->index].pme.ack_bit;
   1522	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
   1523				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
   1524	if (err)
   1525		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
   1526			port->index);
   1527
   1528	usleep_range(10000, 11000);
   1529
   1530	val = afi_readl(pcie, AFI_PCIE_PME);
   1531	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
   1532	afi_writel(pcie, val, AFI_PCIE_PME);
   1533}
   1534
   1535static void tegra_pcie_msi_irq(struct irq_desc *desc)
   1536{
   1537	struct tegra_pcie *pcie = irq_desc_get_handler_data(desc);
   1538	struct irq_chip *chip = irq_desc_get_chip(desc);
   1539	struct tegra_msi *msi = &pcie->msi;
   1540	struct device *dev = pcie->dev;
   1541	unsigned int i;
   1542
   1543	chained_irq_enter(chip, desc);
   1544
   1545	for (i = 0; i < 8; i++) {
   1546		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC(i));
   1547
   1548		while (reg) {
   1549			unsigned int offset = find_first_bit(&reg, 32);
   1550			unsigned int index = i * 32 + offset;
   1551			int ret;
   1552
   1553			ret = generic_handle_domain_irq(msi->domain->parent, index);
   1554			if (ret) {
   1555				/*
   1556				 * That's weird, who triggered this?
   1557				 * Just clear it.
   1558				 */
   1559				dev_info(dev, "unexpected MSI\n");
   1560				afi_writel(pcie, BIT(index % 32), AFI_MSI_VEC(index));
   1561			}
   1562
   1563			/* see if there's any more pending in this vector */
   1564			reg = afi_readl(pcie, AFI_MSI_VEC(i));
   1565		}
   1566	}
   1567
   1568	chained_irq_exit(chip, desc);
   1569}
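
/*
 * Example of the decode above (illustrative): if bit 5 is set in
 * AFI_MSI_VEC(2), the pending hwirq is 2 * 32 + 5 = 69 of the
 * INT_PCI_MSI_NR = 256 vectors, and that number is handed to the parent
 * (bottom-level) IRQ domain. The later ack writes BIT(69 % 32) back to
 * AFI_MSI_VEC(69 / 32) in tegra_msi_irq_ack().
 */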
   1570
   1571static void tegra_msi_top_irq_ack(struct irq_data *d)
   1572{
   1573	irq_chip_ack_parent(d);
   1574}
   1575
   1576static void tegra_msi_top_irq_mask(struct irq_data *d)
   1577{
   1578	pci_msi_mask_irq(d);
   1579	irq_chip_mask_parent(d);
   1580}
   1581
   1582static void tegra_msi_top_irq_unmask(struct irq_data *d)
   1583{
   1584	pci_msi_unmask_irq(d);
   1585	irq_chip_unmask_parent(d);
   1586}
   1587
   1588static struct irq_chip tegra_msi_top_chip = {
   1589	.name		= "Tegra PCIe MSI",
   1590	.irq_ack	= tegra_msi_top_irq_ack,
   1591	.irq_mask	= tegra_msi_top_irq_mask,
   1592	.irq_unmask	= tegra_msi_top_irq_unmask,
   1593};
   1594
   1595static void tegra_msi_irq_ack(struct irq_data *d)
   1596{
   1597	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
   1598	struct tegra_pcie *pcie = msi_to_pcie(msi);
   1599	unsigned int index = d->hwirq / 32;
   1600
   1601	/* clear the interrupt */
   1602	afi_writel(pcie, BIT(d->hwirq % 32), AFI_MSI_VEC(index));
   1603}
   1604
   1605static void tegra_msi_irq_mask(struct irq_data *d)
   1606{
   1607	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
   1608	struct tegra_pcie *pcie = msi_to_pcie(msi);
   1609	unsigned int index = d->hwirq / 32;
   1610	unsigned long flags;
   1611	u32 value;
   1612
   1613	spin_lock_irqsave(&msi->mask_lock, flags);
   1614	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
   1615	value &= ~BIT(d->hwirq % 32);
   1616	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
   1617	spin_unlock_irqrestore(&msi->mask_lock, flags);
   1618}
   1619
   1620static void tegra_msi_irq_unmask(struct irq_data *d)
   1621{
   1622	struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
   1623	struct tegra_pcie *pcie = msi_to_pcie(msi);
   1624	unsigned int index = d->hwirq / 32;
   1625	unsigned long flags;
   1626	u32 value;
   1627
   1628	spin_lock_irqsave(&msi->mask_lock, flags);
   1629	value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
   1630	value |= BIT(d->hwirq % 32);
   1631	afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
   1632	spin_unlock_irqrestore(&msi->mask_lock, flags);
   1633}
   1634
   1635static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
   1636{
   1637	return -EINVAL;
   1638}
   1639
   1640static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
   1641{
   1642	struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
   1643
   1644	msg->address_lo = lower_32_bits(msi->phys);
   1645	msg->address_hi = upper_32_bits(msi->phys);
   1646	msg->data = data->hwirq;
   1647}
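
/*
 * The message programmed into an endpoint thus targets the DMA address of
 * the page allocated in tegra_pcie_msi_setup(), with the hwirq number as
 * the payload: when the device writes `data` to that address, the write is
 * intercepted via the AFI MSI BAR (programmed in tegra_pcie_enable_msi())
 * and latched into AFI_MSI_VEC(hwirq / 32), where tegra_pcie_msi_irq()
 * picks it up.
 */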
   1648
   1649static struct irq_chip tegra_msi_bottom_chip = {
   1650	.name			= "Tegra MSI",
   1651	.irq_ack		= tegra_msi_irq_ack,
   1652	.irq_mask		= tegra_msi_irq_mask,
   1653	.irq_unmask		= tegra_msi_irq_unmask,
   1654	.irq_set_affinity	= tegra_msi_set_affinity,
   1655	.irq_compose_msi_msg	= tegra_compose_msi_msg,
   1656};
   1657
   1658static int tegra_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
   1659				  unsigned int nr_irqs, void *args)
   1660{
   1661	struct tegra_msi *msi = domain->host_data;
   1662	unsigned int i;
   1663	int hwirq;
   1664
   1665	mutex_lock(&msi->map_lock);
   1666
   1667	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
   1668
   1669	mutex_unlock(&msi->map_lock);
   1670
   1671	if (hwirq < 0)
   1672		return -ENOSPC;
   1673
   1674	for (i = 0; i < nr_irqs; i++)
   1675		irq_domain_set_info(domain, virq + i, hwirq + i,
   1676				    &tegra_msi_bottom_chip, domain->host_data,
   1677				    handle_edge_irq, NULL, NULL);
   1678
   1679	tegra_cpuidle_pcie_irqs_in_use();
   1680
   1681	return 0;
   1682}
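
/*
 * bitmap_find_free_region() hands out naturally aligned power-of-two
 * blocks: for a multi-MSI request of nr_irqs = 4, order_base_2(4) = 2 and
 * the returned hwirq is a multiple of 4. PCI multiple-message MSI requires
 * this alignment, since the device encodes the vector by modifying the low
 * bits of the message data.
 */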
   1683
   1684static void tegra_msi_domain_free(struct irq_domain *domain, unsigned int virq,
   1685				  unsigned int nr_irqs)
   1686{
   1687	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
   1688	struct tegra_msi *msi = domain->host_data;
   1689
   1690	mutex_lock(&msi->map_lock);
   1691
   1692	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
   1693
   1694	mutex_unlock(&msi->map_lock);
   1695}
   1696
   1697static const struct irq_domain_ops tegra_msi_domain_ops = {
   1698	.alloc = tegra_msi_domain_alloc,
   1699	.free = tegra_msi_domain_free,
   1700};
   1701
   1702static struct msi_domain_info tegra_msi_info = {
   1703	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
   1704		   MSI_FLAG_PCI_MSIX),
   1705	.chip	= &tegra_msi_top_chip,
   1706};
   1707
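/*
 * Two-level IRQ domain hierarchy: a linear parent domain maps hwirqs onto
 * the bottom chip above, and a PCI MSI domain (which also advertises MSI-X
 * support) is stacked on top for the PCI core to allocate from.
 */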
   1708static int tegra_allocate_domains(struct tegra_msi *msi)
   1709{
   1710	struct tegra_pcie *pcie = msi_to_pcie(msi);
   1711	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
   1712	struct irq_domain *parent;
   1713
   1714	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
   1715					  &tegra_msi_domain_ops, msi);
   1716	if (!parent) {
   1717		dev_err(pcie->dev, "failed to create IRQ domain\n");
   1718		return -ENOMEM;
   1719	}
   1720	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
   1721
   1722	msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
   1723	if (!msi->domain) {
   1724		dev_err(pcie->dev, "failed to create MSI domain\n");
   1725		irq_domain_remove(parent);
   1726		return -ENOMEM;
   1727	}
   1728
   1729	return 0;
   1730}
   1731
   1732static void tegra_free_domains(struct tegra_msi *msi)
   1733{
   1734	struct irq_domain *parent = msi->domain->parent;
   1735
   1736	irq_domain_remove(msi->domain);
   1737	irq_domain_remove(parent);
   1738}
   1739
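/*
 * Set up MSI support: create the IRQ domains, install the chained handler
 * for the "msi" interrupt and allocate one DMA page to serve as the MSI
 * target address. DMA_ATTR_NO_KERNEL_MAPPING is used because only the bus
 * address of the page matters: endpoint writes to it are claimed by the
 * AFI (see tegra_pcie_enable_msi() below) and raised as interrupts rather
 * than reaching memory, so the CPU never touches the page.
 */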
   1740static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
   1741{
   1742	struct platform_device *pdev = to_platform_device(pcie->dev);
   1743	struct tegra_msi *msi = &pcie->msi;
   1744	struct device *dev = pcie->dev;
   1745	int err;
   1746
   1747	mutex_init(&msi->map_lock);
   1748	spin_lock_init(&msi->mask_lock);
   1749
   1750	if (IS_ENABLED(CONFIG_PCI_MSI)) {
   1751		err = tegra_allocate_domains(msi);
   1752		if (err)
   1753			return err;
   1754	}
   1755
   1756	err = platform_get_irq_byname(pdev, "msi");
   1757	if (err < 0)
   1758		goto free_irq_domain;
   1759
   1760	msi->irq = err;
   1761
   1762	irq_set_chained_handler_and_data(msi->irq, tegra_pcie_msi_irq, pcie);
   1763
    1764	/* Though the PCIe controller can address a >32-bit address space, to
    1765	 * facilitate endpoints that support only 32-bit MSI target addresses,
    1766	 * the mask is set to 32 bits to make sure that the MSI target address
    1767	 * is always a 32-bit address.
    1768	 */
   1769	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
   1770	if (err < 0) {
   1771		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
   1772		goto free_irq;
   1773	}
   1774
   1775	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
   1776				    DMA_ATTR_NO_KERNEL_MAPPING);
   1777	if (!msi->virt) {
   1778		dev_err(dev, "failed to allocate DMA memory for MSI\n");
   1779		err = -ENOMEM;
   1780		goto free_irq;
   1781	}
   1782
   1783	return 0;
   1784
   1785free_irq:
   1786	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
   1787free_irq_domain:
   1788	if (IS_ENABLED(CONFIG_PCI_MSI))
   1789		tegra_free_domains(msi);
   1790
   1791	return err;
   1792}
   1793
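/*
 * Program the MSI target page into the AFI. This runs on every power-up
 * path, including resume, which is why the per-vector enable bits are
 * restored from the allocation bitmap rather than simply cleared.
 */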
   1794static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
   1795{
   1796	const struct tegra_pcie_soc *soc = pcie->soc;
   1797	struct tegra_msi *msi = &pcie->msi;
   1798	u32 reg, msi_state[INT_PCI_MSI_NR / 32];
   1799	int i;
   1800
   1801	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
   1802	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
    1803	/* this register holds the MSI BAR size in 4 KiB increments */
   1804	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
   1805
   1806	/* Restore the MSI allocation state */
   1807	bitmap_to_arr32(msi_state, msi->used, INT_PCI_MSI_NR);
   1808	for (i = 0; i < ARRAY_SIZE(msi_state); i++)
   1809		afi_writel(pcie, msi_state[i], AFI_MSI_EN_VEC(i));
   1810
   1811	/* and unmask the MSI interrupt */
   1812	reg = afi_readl(pcie, AFI_INTR_MASK);
   1813	reg |= AFI_INTR_MASK_MSI_MASK;
   1814	afi_writel(pcie, reg, AFI_INTR_MASK);
   1815}
   1816
   1817static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
   1818{
   1819	struct tegra_msi *msi = &pcie->msi;
   1820	unsigned int i, irq;
   1821
   1822	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
   1823		       DMA_ATTR_NO_KERNEL_MAPPING);
   1824
   1825	for (i = 0; i < INT_PCI_MSI_NR; i++) {
   1826		irq = irq_find_mapping(msi->domain, i);
   1827		if (irq > 0)
   1828			irq_domain_free_irqs(irq, 1);
   1829	}
   1830
   1831	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
   1832
   1833	if (IS_ENABLED(CONFIG_PCI_MSI))
   1834		tegra_free_domains(msi);
   1835}
   1836
   1837static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
   1838{
   1839	u32 value;
   1840
   1841	/* mask the MSI interrupt */
   1842	value = afi_readl(pcie, AFI_INTR_MASK);
   1843	value &= ~AFI_INTR_MASK_MSI_MASK;
   1844	afi_writel(pcie, value, AFI_INTR_MASK);
   1845
   1846	return 0;
   1847}
   1848
   1849static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
   1850{
   1851	u32 value;
   1852
   1853	value = afi_readl(pcie, AFI_INTR_MASK);
   1854	value &= ~AFI_INTR_MASK_INT_MASK;
   1855	afi_writel(pcie, value, AFI_INTR_MASK);
   1856}
   1857
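/*
 * The "lanes" argument packs the lane count of every root port into a
 * single u32, one byte per port (port N occupies bits [8N+7:8N]; see how
 * the value is assembled in tegra_pcie_parse_dt()). For example, 0x010102
 * means port 0 has two lanes and ports 1 and 2 have one lane each, which
 * selects the "2x1, 1x1, 1x1" crossbar configuration on Tegra186.
 */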
   1858static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
   1859				      u32 *xbar)
   1860{
   1861	struct device *dev = pcie->dev;
   1862	struct device_node *np = dev->of_node;
   1863
   1864	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
   1865		switch (lanes) {
   1866		case 0x010004:
   1867			dev_info(dev, "4x1, 1x1 configuration\n");
   1868			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
   1869			return 0;
   1870
   1871		case 0x010102:
    1872			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
   1873			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
   1874			return 0;
   1875
   1876		case 0x010101:
   1877			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
   1878			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
   1879			return 0;
   1880
   1881		default:
    1882			dev_info(dev, "invalid lane configuration in DT, "
    1883				 "falling back to default 2x1, 1x1, 1x1 "
    1884				 "configuration\n");
   1885			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
   1886			return 0;
   1887		}
   1888	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
   1889		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
   1890		switch (lanes) {
    1891		case 0x00000104:
   1892			dev_info(dev, "4x1, 1x1 configuration\n");
   1893			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
   1894			return 0;
   1895
    1896		case 0x00000102:
   1897			dev_info(dev, "2x1, 1x1 configuration\n");
   1898			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
   1899			return 0;
   1900		}
   1901	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
   1902		switch (lanes) {
   1903		case 0x00000204:
   1904			dev_info(dev, "4x1, 2x1 configuration\n");
   1905			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
   1906			return 0;
   1907
   1908		case 0x00020202:
   1909			dev_info(dev, "2x3 configuration\n");
   1910			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
   1911			return 0;
   1912
   1913		case 0x00010104:
   1914			dev_info(dev, "4x1, 1x2 configuration\n");
   1915			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
   1916			return 0;
   1917		}
   1918	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
   1919		switch (lanes) {
   1920		case 0x00000004:
   1921			dev_info(dev, "single-mode configuration\n");
   1922			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
   1923			return 0;
   1924
   1925		case 0x00000202:
   1926			dev_info(dev, "dual-mode configuration\n");
   1927			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
   1928			return 0;
   1929		}
   1930	}
   1931
   1932	return -EINVAL;
   1933}
   1934
   1935/*
   1936 * Check whether a given set of supplies is available in a device tree node.
   1937 * This is used to check whether the new or the legacy device tree bindings
   1938 * should be used.
   1939 */
   1940static bool of_regulator_bulk_available(struct device_node *np,
   1941					struct regulator_bulk_data *supplies,
   1942					unsigned int num_supplies)
   1943{
   1944	char property[32];
   1945	unsigned int i;
   1946
   1947	for (i = 0; i < num_supplies; i++) {
    1948		snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
   1949
   1950		if (of_find_property(np, property, NULL) == NULL)
   1951			return false;
   1952	}
   1953
   1954	return true;
   1955}
   1956
   1957/*
   1958 * Old versions of the device tree binding for this device used a set of power
   1959 * supplies that didn't match the hardware inputs. This happened to work for a
    1960 * number of cases but is not future-proof. However, to preserve backwards
   1961 * compatibility with old device trees, this function will try to use the old
   1962 * set of supplies.
   1963 */
   1964static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
   1965{
   1966	struct device *dev = pcie->dev;
   1967	struct device_node *np = dev->of_node;
   1968
   1969	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
   1970		pcie->num_supplies = 3;
   1971	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
   1972		pcie->num_supplies = 2;
   1973
   1974	if (pcie->num_supplies == 0) {
   1975		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
   1976		return -ENODEV;
   1977	}
   1978
   1979	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
   1980				      sizeof(*pcie->supplies),
   1981				      GFP_KERNEL);
   1982	if (!pcie->supplies)
   1983		return -ENOMEM;
   1984
   1985	pcie->supplies[0].supply = "pex-clk";
   1986	pcie->supplies[1].supply = "vdd";
   1987
   1988	if (pcie->num_supplies > 2)
   1989		pcie->supplies[2].supply = "avdd";
   1990
   1991	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
   1992}
   1993
   1994/*
   1995 * Obtains the list of regulators required for a particular generation of the
   1996 * IP block.
   1997 *
   1998 * This would've been nice to do simply by providing static tables for use
   1999 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
    2000 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB),
    2001 * and either pair is optional depending on which ports are being used.
   2002 */
   2003static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
   2004{
   2005	struct device *dev = pcie->dev;
   2006	struct device_node *np = dev->of_node;
   2007	unsigned int i = 0;
   2008
   2009	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
   2010		pcie->num_supplies = 4;
   2011
   2012		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
   2013					      sizeof(*pcie->supplies),
   2014					      GFP_KERNEL);
   2015		if (!pcie->supplies)
   2016			return -ENOMEM;
   2017
   2018		pcie->supplies[i++].supply = "dvdd-pex";
   2019		pcie->supplies[i++].supply = "hvdd-pex-pll";
   2020		pcie->supplies[i++].supply = "hvdd-pex";
   2021		pcie->supplies[i++].supply = "vddio-pexctl-aud";
   2022	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
   2023		pcie->num_supplies = 3;
   2024
   2025		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
   2026					      sizeof(*pcie->supplies),
   2027					      GFP_KERNEL);
   2028		if (!pcie->supplies)
   2029			return -ENOMEM;
   2030
   2031		pcie->supplies[i++].supply = "hvddio-pex";
   2032		pcie->supplies[i++].supply = "dvddio-pex";
   2033		pcie->supplies[i++].supply = "vddio-pex-ctl";
   2034	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
   2035		pcie->num_supplies = 4;
   2036
   2037		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
   2038					      sizeof(*pcie->supplies),
   2039					      GFP_KERNEL);
   2040		if (!pcie->supplies)
   2041			return -ENOMEM;
   2042
   2043		pcie->supplies[i++].supply = "avddio-pex";
   2044		pcie->supplies[i++].supply = "dvddio-pex";
   2045		pcie->supplies[i++].supply = "hvdd-pex";
   2046		pcie->supplies[i++].supply = "vddio-pex-ctl";
   2047	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
   2048		bool need_pexa = false, need_pexb = false;
   2049
   2050		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
   2051		if (lane_mask & 0x0f)
   2052			need_pexa = true;
   2053
   2054		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
   2055		if (lane_mask & 0x30)
   2056			need_pexb = true;
   2057
   2058		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
   2059					 (need_pexb ? 2 : 0);
   2060
   2061		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
   2062					      sizeof(*pcie->supplies),
   2063					      GFP_KERNEL);
   2064		if (!pcie->supplies)
   2065			return -ENOMEM;
   2066
   2067		pcie->supplies[i++].supply = "avdd-pex-pll";
   2068		pcie->supplies[i++].supply = "hvdd-pex";
   2069		pcie->supplies[i++].supply = "vddio-pex-ctl";
   2070		pcie->supplies[i++].supply = "avdd-plle";
   2071
   2072		if (need_pexa) {
   2073			pcie->supplies[i++].supply = "avdd-pexa";
   2074			pcie->supplies[i++].supply = "vdd-pexa";
   2075		}
   2076
   2077		if (need_pexb) {
   2078			pcie->supplies[i++].supply = "avdd-pexb";
   2079			pcie->supplies[i++].supply = "vdd-pexb";
   2080		}
   2081	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
   2082		pcie->num_supplies = 5;
   2083
   2084		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
   2085					      sizeof(*pcie->supplies),
   2086					      GFP_KERNEL);
   2087		if (!pcie->supplies)
   2088			return -ENOMEM;
   2089
   2090		pcie->supplies[0].supply = "avdd-pex";
   2091		pcie->supplies[1].supply = "vdd-pex";
   2092		pcie->supplies[2].supply = "avdd-pex-pll";
   2093		pcie->supplies[3].supply = "avdd-plle";
   2094		pcie->supplies[4].supply = "vddio-pex-clk";
   2095	}
   2096
   2097	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
   2098					pcie->num_supplies))
   2099		return devm_regulator_bulk_get(dev, pcie->num_supplies,
   2100					       pcie->supplies);
   2101
   2102	/*
   2103	 * If not all regulators are available for this new scheme, assume
   2104	 * that the device tree complies with an older version of the device
   2105	 * tree binding.
   2106	 */
   2107	dev_info(dev, "using legacy DT binding for power supplies\n");
   2108
   2109	devm_kfree(dev, pcie->supplies);
   2110	pcie->num_supplies = 0;
   2111
   2112	return tegra_pcie_get_legacy_regulators(pcie);
   2113}
   2114
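/*
 * Walk the root-port child nodes: derive each port index from the devfn in
 * the node's "reg" property, accumulate the per-port lane counts into the
 * packed "lanes" word for the crossbar setup and into "mask" (one bit per
 * physical lane) for regulator selection, and map the register window of
 * every enabled port.
 */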
   2115static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
   2116{
   2117	struct device *dev = pcie->dev;
   2118	struct device_node *np = dev->of_node, *port;
   2119	const struct tegra_pcie_soc *soc = pcie->soc;
   2120	u32 lanes = 0, mask = 0;
   2121	unsigned int lane = 0;
   2122	int err;
   2123
   2124	/* parse root ports */
   2125	for_each_child_of_node(np, port) {
   2126		struct tegra_pcie_port *rp;
   2127		unsigned int index;
   2128		u32 value;
   2129		char *label;
   2130
   2131		err = of_pci_get_devfn(port);
   2132		if (err < 0) {
   2133			dev_err(dev, "failed to parse address: %d\n", err);
   2134			goto err_node_put;
   2135		}
   2136
   2137		index = PCI_SLOT(err);
   2138
   2139		if (index < 1 || index > soc->num_ports) {
   2140			dev_err(dev, "invalid port number: %d\n", index);
   2141			err = -EINVAL;
   2142			goto err_node_put;
   2143		}
   2144
   2145		index--;
   2146
   2147		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
   2148		if (err < 0) {
   2149			dev_err(dev, "failed to parse # of lanes: %d\n",
   2150				err);
   2151			goto err_node_put;
   2152		}
   2153
   2154		if (value > 16) {
   2155			dev_err(dev, "invalid # of lanes: %u\n", value);
   2156			err = -EINVAL;
   2157			goto err_node_put;
   2158		}
   2159
   2160		lanes |= value << (index << 3);
   2161
   2162		if (!of_device_is_available(port)) {
   2163			lane += value;
   2164			continue;
   2165		}
   2166
   2167		mask |= ((1 << value) - 1) << lane;
   2168		lane += value;
   2169
   2170		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
   2171		if (!rp) {
   2172			err = -ENOMEM;
   2173			goto err_node_put;
   2174		}
   2175
   2176		err = of_address_to_resource(port, 0, &rp->regs);
   2177		if (err < 0) {
   2178			dev_err(dev, "failed to parse address: %d\n", err);
   2179			goto err_node_put;
   2180		}
   2181
   2182		INIT_LIST_HEAD(&rp->list);
   2183		rp->index = index;
   2184		rp->lanes = value;
   2185		rp->pcie = pcie;
   2186		rp->np = port;
   2187
   2188		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
   2189		if (IS_ERR(rp->base)) {
   2190			err = PTR_ERR(rp->base);
   2191			goto err_node_put;
   2192		}
   2193
   2194		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
   2195		if (!label) {
   2196			err = -ENOMEM;
   2197			goto err_node_put;
   2198		}
   2199
   2200		/*
    2201		 * If the reset-gpios property is not populated, -ENOENT is
    2202		 * returned; in that case, fall back to using the AFI per-port
    2203		 * register to toggle the PERST# SFIO line.
   2204		 */
   2205		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
   2206							     "reset-gpios", 0,
   2207							     GPIOD_OUT_LOW,
   2208							     label);
   2209		if (IS_ERR(rp->reset_gpio)) {
   2210			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
   2211				rp->reset_gpio = NULL;
   2212			} else {
   2213				dev_err(dev, "failed to get reset GPIO: %ld\n",
   2214					PTR_ERR(rp->reset_gpio));
   2215				err = PTR_ERR(rp->reset_gpio);
   2216				goto err_node_put;
   2217			}
   2218		}
   2219
   2220		list_add_tail(&rp->list, &pcie->ports);
   2221	}
   2222
   2223	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
   2224	if (err < 0) {
   2225		dev_err(dev, "invalid lane configuration\n");
   2226		return err;
   2227	}
   2228
   2229	err = tegra_pcie_get_regulators(pcie, mask);
   2230	if (err < 0)
   2231		return err;
   2232
   2233	return 0;
   2234
   2235err_node_put:
   2236	of_node_put(port);
   2237	return err;
   2238}
   2239
   2240/*
   2241 * FIXME: If there are no PCIe cards attached, then calling this function
    2242 * can noticeably increase the boot time, as it contains long timeout
   2243 * loops.
   2244 */
   2245#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
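/*
 * Link bring-up is attempted up to three times: poll for DL_UP (data link
 * layer up) and then for DL_LINK_ACTIVE, each for up to 200 iterations of
 * 1-2 ms, resetting the port between attempts.
 */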
   2246static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
   2247{
   2248	struct device *dev = port->pcie->dev;
   2249	unsigned int retries = 3;
   2250	unsigned long value;
   2251
   2252	/* override presence detection */
   2253	value = readl(port->base + RP_PRIV_MISC);
   2254	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
   2255	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
   2256	writel(value, port->base + RP_PRIV_MISC);
   2257
   2258	do {
   2259		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
   2260
   2261		do {
   2262			value = readl(port->base + RP_VEND_XP);
   2263
   2264			if (value & RP_VEND_XP_DL_UP)
   2265				break;
   2266
   2267			usleep_range(1000, 2000);
   2268		} while (--timeout);
   2269
   2270		if (!timeout) {
   2271			dev_dbg(dev, "link %u down, retrying\n", port->index);
   2272			goto retry;
   2273		}
   2274
   2275		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
   2276
   2277		do {
   2278			value = readl(port->base + RP_LINK_CONTROL_STATUS);
   2279
   2280			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
   2281				return true;
   2282
   2283			usleep_range(1000, 2000);
   2284		} while (--timeout);
   2285
   2286retry:
   2287		tegra_pcie_port_reset(port);
   2288	} while (--retries);
   2289
   2290	return false;
   2291}
   2292
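/*
 * Upgrade the links to Gen2 (5 GT/s): wait for any in-progress link
 * training to finish, program the target link speed, set the retrain-link
 * bit and poll PCI_EXP_LNKSTA_LT again until retraining completes or
 * LINK_RETRAIN_TIMEOUT expires.
 */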
   2293static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
   2294{
   2295	struct device *dev = pcie->dev;
   2296	struct tegra_pcie_port *port;
   2297	ktime_t deadline;
   2298	u32 value;
   2299
   2300	list_for_each_entry(port, &pcie->ports, list) {
   2301		/*
   2302		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
   2303		 * is not supported by Tegra. tegra_pcie_change_link_speed()
   2304		 * is called only for Tegra chips which support Gen2.
    2305		 * So there is no harm if the supported link speed is not verified.
   2306		 */
   2307		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
   2308		value &= ~PCI_EXP_LNKSTA_CLS;
   2309		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
   2310		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
   2311
   2312		/*
    2313		 * Poll until the link comes back from recovery to avoid a race
   2314		 * condition.
   2315		 */
   2316		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
   2317
   2318		while (ktime_before(ktime_get(), deadline)) {
   2319			value = readl(port->base + RP_LINK_CONTROL_STATUS);
   2320			if ((value & PCI_EXP_LNKSTA_LT) == 0)
   2321				break;
   2322
   2323			usleep_range(2000, 3000);
   2324		}
   2325
   2326		if (value & PCI_EXP_LNKSTA_LT)
   2327			dev_warn(dev, "PCIe port %u link is in recovery\n",
   2328				 port->index);
   2329
   2330		/* Retrain the link */
   2331		value = readl(port->base + RP_LINK_CONTROL_STATUS);
   2332		value |= PCI_EXP_LNKCTL_RL;
   2333		writel(value, port->base + RP_LINK_CONTROL_STATUS);
   2334
   2335		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
   2336
   2337		while (ktime_before(ktime_get(), deadline)) {
   2338			value = readl(port->base + RP_LINK_CONTROL_STATUS);
   2339			if ((value & PCI_EXP_LNKSTA_LT) == 0)
   2340				break;
   2341
   2342			usleep_range(2000, 3000);
   2343		}
   2344
   2345		if (value & PCI_EXP_LNKSTA_LT)
   2346			dev_err(dev, "failed to retrain link of port %u\n",
   2347				port->index);
   2348	}
   2349}
   2350
   2351static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
   2352{
   2353	struct device *dev = pcie->dev;
   2354	struct tegra_pcie_port *port, *tmp;
   2355
   2356	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
   2357		dev_info(dev, "probing port %u, using %u lanes\n",
   2358			 port->index, port->lanes);
   2359
   2360		tegra_pcie_port_enable(port);
   2361	}
   2362
   2363	/* Start LTSSM from Tegra side */
   2364	reset_control_deassert(pcie->pcie_xrst);
   2365
   2366	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
   2367		if (tegra_pcie_port_check_link(port))
   2368			continue;
   2369
   2370		dev_info(dev, "link %u down, ignoring\n", port->index);
   2371
   2372		tegra_pcie_port_disable(port);
   2373		tegra_pcie_port_free(port);
   2374	}
   2375
   2376	if (pcie->soc->has_gen2)
   2377		tegra_pcie_change_link_speed(pcie);
   2378}
   2379
   2380static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
   2381{
   2382	struct tegra_pcie_port *port, *tmp;
   2383
   2384	reset_control_assert(pcie->pcie_xrst);
   2385
   2386	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
   2387		tegra_pcie_port_disable(port);
   2388}
   2389
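/*
 * Per-SoC data: the port tables give the bit positions of each root port's
 * PME turnoff request and acknowledge signals in the AFI, while the SoC
 * descriptors select register-layout quirks and feature flags (Gen2
 * support, CML clock, UPHY programming and so on) for each generation.
 */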
   2390static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
   2391	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
   2392	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
   2393};
   2394
   2395static const struct tegra_pcie_soc tegra20_pcie = {
   2396	.num_ports = 2,
   2397	.ports = tegra20_pcie_ports,
   2398	.msi_base_shift = 0,
   2399	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
   2400	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
   2401	.pads_refclk_cfg0 = 0xfa5cfa5c,
   2402	.has_pex_clkreq_en = false,
   2403	.has_pex_bias_ctrl = false,
   2404	.has_intr_prsnt_sense = false,
   2405	.has_cml_clk = false,
   2406	.has_gen2 = false,
   2407	.force_pca_enable = false,
   2408	.program_uphy = true,
   2409	.update_clamp_threshold = false,
   2410	.program_deskew_time = false,
   2411	.update_fc_timer = false,
   2412	.has_cache_bars = true,
   2413	.ectl.enable = false,
   2414};
   2415
   2416static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
   2417	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
   2418	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
   2419	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
   2420};
   2421
   2422static const struct tegra_pcie_soc tegra30_pcie = {
   2423	.num_ports = 3,
   2424	.ports = tegra30_pcie_ports,
   2425	.msi_base_shift = 8,
   2426	.afi_pex2_ctrl = 0x128,
   2427	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
   2428	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
   2429	.pads_refclk_cfg0 = 0xfa5cfa5c,
   2430	.pads_refclk_cfg1 = 0xfa5cfa5c,
   2431	.has_pex_clkreq_en = true,
   2432	.has_pex_bias_ctrl = true,
   2433	.has_intr_prsnt_sense = true,
   2434	.has_cml_clk = true,
   2435	.has_gen2 = false,
   2436	.force_pca_enable = false,
   2437	.program_uphy = true,
   2438	.update_clamp_threshold = false,
   2439	.program_deskew_time = false,
   2440	.update_fc_timer = false,
   2441	.has_cache_bars = false,
   2442	.ectl.enable = false,
   2443};
   2444
   2445static const struct tegra_pcie_soc tegra124_pcie = {
   2446	.num_ports = 2,
   2447	.ports = tegra20_pcie_ports,
   2448	.msi_base_shift = 8,
   2449	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
   2450	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
   2451	.pads_refclk_cfg0 = 0x44ac44ac,
   2452	.has_pex_clkreq_en = true,
   2453	.has_pex_bias_ctrl = true,
   2454	.has_intr_prsnt_sense = true,
   2455	.has_cml_clk = true,
   2456	.has_gen2 = true,
   2457	.force_pca_enable = false,
   2458	.program_uphy = true,
   2459	.update_clamp_threshold = true,
   2460	.program_deskew_time = false,
   2461	.update_fc_timer = false,
   2462	.has_cache_bars = false,
   2463	.ectl.enable = false,
   2464};
   2465
   2466static const struct tegra_pcie_soc tegra210_pcie = {
   2467	.num_ports = 2,
   2468	.ports = tegra20_pcie_ports,
   2469	.msi_base_shift = 8,
   2470	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
   2471	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
   2472	.pads_refclk_cfg0 = 0x90b890b8,
    2473	/* FC threshold is bits [25:18] */
   2474	.update_fc_threshold = 0x01800000,
   2475	.has_pex_clkreq_en = true,
   2476	.has_pex_bias_ctrl = true,
   2477	.has_intr_prsnt_sense = true,
   2478	.has_cml_clk = true,
   2479	.has_gen2 = true,
   2480	.force_pca_enable = true,
   2481	.program_uphy = true,
   2482	.update_clamp_threshold = true,
   2483	.program_deskew_time = true,
   2484	.update_fc_timer = true,
   2485	.has_cache_bars = false,
   2486	.ectl = {
   2487		.regs = {
   2488			.rp_ectl_2_r1 = 0x0000000f,
   2489			.rp_ectl_4_r1 = 0x00000067,
   2490			.rp_ectl_5_r1 = 0x55010000,
   2491			.rp_ectl_6_r1 = 0x00000001,
   2492			.rp_ectl_2_r2 = 0x0000008f,
   2493			.rp_ectl_4_r2 = 0x000000c7,
   2494			.rp_ectl_5_r2 = 0x55010000,
   2495			.rp_ectl_6_r2 = 0x00000001,
   2496		},
   2497		.enable = true,
   2498	},
   2499};
   2500
   2501static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
   2502	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
   2503	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
   2504	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
   2505};
   2506
   2507static const struct tegra_pcie_soc tegra186_pcie = {
   2508	.num_ports = 3,
   2509	.ports = tegra186_pcie_ports,
   2510	.msi_base_shift = 8,
   2511	.afi_pex2_ctrl = 0x19c,
   2512	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
   2513	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
   2514	.pads_refclk_cfg0 = 0x80b880b8,
   2515	.pads_refclk_cfg1 = 0x000480b8,
   2516	.has_pex_clkreq_en = true,
   2517	.has_pex_bias_ctrl = true,
   2518	.has_intr_prsnt_sense = true,
   2519	.has_cml_clk = false,
   2520	.has_gen2 = true,
   2521	.force_pca_enable = false,
   2522	.program_uphy = false,
   2523	.update_clamp_threshold = false,
   2524	.program_deskew_time = false,
   2525	.update_fc_timer = false,
   2526	.has_cache_bars = false,
   2527	.ectl.enable = false,
   2528};
   2529
   2530static const struct of_device_id tegra_pcie_of_match[] = {
   2531	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
   2532	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
   2533	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
   2534	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
   2535	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
   2536	{ },
   2537};
   2538MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
   2539
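/*
 * debugfs support: <debugfs>/pcie/ports is a seq_file listing one line per
 * remaining root port with its index and whether the data link is "up"
 * (DL_UP) and/or "active" (DL_LINK_ACTIVE).
 */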
   2540static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
   2541{
   2542	struct tegra_pcie *pcie = s->private;
   2543
   2544	if (list_empty(&pcie->ports))
   2545		return NULL;
   2546
   2547	seq_puts(s, "Index  Status\n");
   2548
   2549	return seq_list_start(&pcie->ports, *pos);
   2550}
   2551
   2552static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
   2553{
   2554	struct tegra_pcie *pcie = s->private;
   2555
   2556	return seq_list_next(v, &pcie->ports, pos);
   2557}
   2558
   2559static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
   2560{
   2561}
   2562
   2563static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
   2564{
   2565	bool up = false, active = false;
   2566	struct tegra_pcie_port *port;
   2567	unsigned int value;
   2568
   2569	port = list_entry(v, struct tegra_pcie_port, list);
   2570
   2571	value = readl(port->base + RP_VEND_XP);
   2572
   2573	if (value & RP_VEND_XP_DL_UP)
   2574		up = true;
   2575
   2576	value = readl(port->base + RP_LINK_CONTROL_STATUS);
   2577
   2578	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
   2579		active = true;
   2580
   2581	seq_printf(s, "%2u     ", port->index);
   2582
   2583	if (up)
   2584		seq_puts(s, "up");
   2585
   2586	if (active) {
   2587		if (up)
   2588			seq_puts(s, ", ");
   2589
   2590		seq_puts(s, "active");
   2591	}
   2592
   2593	seq_puts(s, "\n");
   2594	return 0;
   2595}
   2596
   2597static const struct seq_operations tegra_pcie_ports_sops = {
   2598	.start = tegra_pcie_ports_seq_start,
   2599	.next = tegra_pcie_ports_seq_next,
   2600	.stop = tegra_pcie_ports_seq_stop,
   2601	.show = tegra_pcie_ports_seq_show,
   2602};
   2603
   2604DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
   2605
   2606static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
   2607{
   2608	debugfs_remove_recursive(pcie->debugfs);
   2609	pcie->debugfs = NULL;
   2610}
   2611
   2612static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
   2613{
   2614	pcie->debugfs = debugfs_create_dir("pcie", NULL);
   2615
    2616	debugfs_create_file("ports", 0444, pcie->debugfs, pcie,
   2617			    &tegra_pcie_ports_fops);
   2618}
   2619
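/*
 * Probe order matters: DT parsing and resource acquisition happen before
 * MSI setup, the controller is powered via runtime PM before the host
 * bridge is registered, and the error labels unwind in reverse order.
 */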
   2620static int tegra_pcie_probe(struct platform_device *pdev)
   2621{
   2622	struct device *dev = &pdev->dev;
   2623	struct pci_host_bridge *host;
   2624	struct tegra_pcie *pcie;
   2625	int err;
   2626
   2627	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
   2628	if (!host)
   2629		return -ENOMEM;
   2630
   2631	pcie = pci_host_bridge_priv(host);
   2632	host->sysdata = pcie;
   2633	platform_set_drvdata(pdev, pcie);
   2634
   2635	pcie->soc = of_device_get_match_data(dev);
   2636	INIT_LIST_HEAD(&pcie->ports);
   2637	pcie->dev = dev;
   2638
   2639	err = tegra_pcie_parse_dt(pcie);
   2640	if (err < 0)
   2641		return err;
   2642
   2643	err = tegra_pcie_get_resources(pcie);
   2644	if (err < 0) {
   2645		dev_err(dev, "failed to request resources: %d\n", err);
   2646		return err;
   2647	}
   2648
   2649	err = tegra_pcie_msi_setup(pcie);
   2650	if (err < 0) {
   2651		dev_err(dev, "failed to enable MSI support: %d\n", err);
   2652		goto put_resources;
   2653	}
   2654
   2655	pm_runtime_enable(pcie->dev);
   2656	err = pm_runtime_get_sync(pcie->dev);
   2657	if (err < 0) {
    2658		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
   2659		goto pm_runtime_put;
   2660	}
   2661
   2662	host->ops = &tegra_pcie_ops;
   2663	host->map_irq = tegra_pcie_map_irq;
   2664
   2665	err = pci_host_probe(host);
   2666	if (err < 0) {
   2667		dev_err(dev, "failed to register host: %d\n", err);
   2668		goto pm_runtime_put;
   2669	}
   2670
   2671	if (IS_ENABLED(CONFIG_DEBUG_FS))
   2672		tegra_pcie_debugfs_init(pcie);
   2673
   2674	return 0;
   2675
   2676pm_runtime_put:
   2677	pm_runtime_put_sync(pcie->dev);
   2678	pm_runtime_disable(pcie->dev);
   2679	tegra_pcie_msi_teardown(pcie);
   2680put_resources:
   2681	tegra_pcie_put_resources(pcie);
   2682	return err;
   2683}
   2684
   2685static int tegra_pcie_remove(struct platform_device *pdev)
   2686{
   2687	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
   2688	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
   2689	struct tegra_pcie_port *port, *tmp;
   2690
   2691	if (IS_ENABLED(CONFIG_DEBUG_FS))
   2692		tegra_pcie_debugfs_exit(pcie);
   2693
   2694	pci_stop_root_bus(host->bus);
   2695	pci_remove_root_bus(host->bus);
   2696	pm_runtime_put_sync(pcie->dev);
   2697	pm_runtime_disable(pcie->dev);
   2698
   2699	if (IS_ENABLED(CONFIG_PCI_MSI))
   2700		tegra_pcie_msi_teardown(pcie);
   2701
   2702	tegra_pcie_put_resources(pcie);
   2703
   2704	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
   2705		tegra_pcie_port_free(port);
   2706
   2707	return 0;
   2708}
   2709
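/*
 * These callbacks back both runtime PM and the noirq phase of system
 * sleep. Suspend sends PME turnoff to every port, quiesces the ports, AFI
 * interrupts and PHYs, then gates the PEX clock; resume replays the
 * probe-time bring-up, reprogramming the address translations and MSI
 * state that the hardware lost while powered down.
 */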
   2710static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
   2711{
   2712	struct tegra_pcie *pcie = dev_get_drvdata(dev);
   2713	struct tegra_pcie_port *port;
   2714	int err;
   2715
   2716	list_for_each_entry(port, &pcie->ports, list)
   2717		tegra_pcie_pme_turnoff(port);
   2718
   2719	tegra_pcie_disable_ports(pcie);
   2720
   2721	/*
   2722	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
   2723	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
   2724	 */
   2725	tegra_pcie_disable_interrupts(pcie);
   2726
   2727	if (pcie->soc->program_uphy) {
   2728		err = tegra_pcie_phy_power_off(pcie);
   2729		if (err < 0)
   2730			dev_err(dev, "failed to power off PHY(s): %d\n", err);
   2731	}
   2732
   2733	reset_control_assert(pcie->pex_rst);
   2734	clk_disable_unprepare(pcie->pex_clk);
   2735
   2736	if (IS_ENABLED(CONFIG_PCI_MSI))
   2737		tegra_pcie_disable_msi(pcie);
   2738
   2739	pinctrl_pm_select_idle_state(dev);
   2740	tegra_pcie_power_off(pcie);
   2741
   2742	return 0;
   2743}
   2744
   2745static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
   2746{
   2747	struct tegra_pcie *pcie = dev_get_drvdata(dev);
   2748	int err;
   2749
   2750	err = tegra_pcie_power_on(pcie);
   2751	if (err) {
    2752		dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
   2753		return err;
   2754	}
   2755
   2756	err = pinctrl_pm_select_default_state(dev);
   2757	if (err < 0) {
   2758		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
   2759		goto poweroff;
   2760	}
   2761
   2762	tegra_pcie_enable_controller(pcie);
   2763	tegra_pcie_setup_translations(pcie);
   2764
   2765	if (IS_ENABLED(CONFIG_PCI_MSI))
   2766		tegra_pcie_enable_msi(pcie);
   2767
   2768	err = clk_prepare_enable(pcie->pex_clk);
   2769	if (err) {
   2770		dev_err(dev, "failed to enable PEX clock: %d\n", err);
   2771		goto pex_dpd_enable;
   2772	}
   2773
   2774	reset_control_deassert(pcie->pex_rst);
   2775
   2776	if (pcie->soc->program_uphy) {
   2777		err = tegra_pcie_phy_power_on(pcie);
   2778		if (err < 0) {
   2779			dev_err(dev, "failed to power on PHY(s): %d\n", err);
   2780			goto disable_pex_clk;
   2781		}
   2782	}
   2783
   2784	tegra_pcie_apply_pad_settings(pcie);
   2785	tegra_pcie_enable_ports(pcie);
   2786
   2787	return 0;
   2788
   2789disable_pex_clk:
   2790	reset_control_assert(pcie->pex_rst);
   2791	clk_disable_unprepare(pcie->pex_clk);
   2792pex_dpd_enable:
   2793	pinctrl_pm_select_idle_state(dev);
   2794poweroff:
   2795	tegra_pcie_power_off(pcie);
   2796
   2797	return err;
   2798}
   2799
   2800static const struct dev_pm_ops tegra_pcie_pm_ops = {
   2801	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
   2802	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
   2803				      tegra_pcie_pm_resume)
   2804};
   2805
   2806static struct platform_driver tegra_pcie_driver = {
   2807	.driver = {
   2808		.name = "tegra-pcie",
   2809		.of_match_table = tegra_pcie_of_match,
   2810		.suppress_bind_attrs = true,
   2811		.pm = &tegra_pcie_pm_ops,
   2812	},
   2813	.probe = tegra_pcie_probe,
   2814	.remove = tegra_pcie_remove,
   2815};
   2816module_platform_driver(tegra_pcie_driver);
   2817MODULE_LICENSE("GPL");