cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pcie-iproc.c (42245B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
      4 * Copyright (C) 2015 Broadcom Corporation
      5 */
      6
      7#include <linux/kernel.h>
      8#include <linux/pci.h>
      9#include <linux/pci-ecam.h>
     10#include <linux/msi.h>
     11#include <linux/clk.h>
     12#include <linux/module.h>
     13#include <linux/mbus.h>
     14#include <linux/slab.h>
     15#include <linux/delay.h>
     16#include <linux/interrupt.h>
     17#include <linux/irqchip/arm-gic-v3.h>
     18#include <linux/platform_device.h>
     19#include <linux/of_address.h>
     20#include <linux/of_pci.h>
     21#include <linux/of_irq.h>
     22#include <linux/of_platform.h>
     23#include <linux/phy/phy.h>
     24
     25#include "pcie-iproc.h"
     26
     27#define EP_PERST_SOURCE_SELECT_SHIFT	2
     28#define EP_PERST_SOURCE_SELECT		BIT(EP_PERST_SOURCE_SELECT_SHIFT)
     29#define EP_MODE_SURVIVE_PERST_SHIFT	1
     30#define EP_MODE_SURVIVE_PERST		BIT(EP_MODE_SURVIVE_PERST_SHIFT)
     31#define RC_PCIE_RST_OUTPUT_SHIFT	0
     32#define RC_PCIE_RST_OUTPUT		BIT(RC_PCIE_RST_OUTPUT_SHIFT)
     33#define PAXC_RESET_MASK			0x7f
     34
     35#define GIC_V3_CFG_SHIFT		0
     36#define GIC_V3_CFG			BIT(GIC_V3_CFG_SHIFT)
     37
     38#define MSI_ENABLE_CFG_SHIFT		0
     39#define MSI_ENABLE_CFG			BIT(MSI_ENABLE_CFG_SHIFT)
     40
     41#define CFG_IND_ADDR_MASK		0x00001ffc
     42
     43#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
     44#define CFG_ADDR_CFG_TYPE_1		1
     45
     46#define SYS_RC_INTX_MASK		0xf
     47
     48#define PCIE_PHYLINKUP_SHIFT		3
     49#define PCIE_PHYLINKUP			BIT(PCIE_PHYLINKUP_SHIFT)
     50#define PCIE_DL_ACTIVE_SHIFT		2
     51#define PCIE_DL_ACTIVE			BIT(PCIE_DL_ACTIVE_SHIFT)
     52
     53#define APB_ERR_EN_SHIFT		0
     54#define APB_ERR_EN			BIT(APB_ERR_EN_SHIFT)
     55
     56#define CFG_RD_SUCCESS			0
     57#define CFG_RD_UR			1
     58#define CFG_RD_CRS			2
     59#define CFG_RD_CA			3
     60#define CFG_RETRY_STATUS		0xffff0001
     61#define CFG_RETRY_STATUS_TIMEOUT_US	500000 /* 500 milliseconds */
     62
     63/* derive the enum index of the outbound/inbound mapping registers */
     64#define MAP_REG(base_reg, index)	((base_reg) + (index) * 2)
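/*
 * For example, because the OARR/OMAP and IARR/IMAP enum entries below are
 * declared in adjacent pairs, MAP_REG(IPROC_PCIE_OARR0, 1) resolves to
 * IPROC_PCIE_OARR1 and MAP_REG(IPROC_PCIE_IMAP0, 2) to IPROC_PCIE_IMAP2.
 */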
     65
     66/*
     67 * Maximum number of outbound mapping window sizes that can be supported by any
     68 * OARR/OMAP mapping pair
     69 */
     70#define MAX_NUM_OB_WINDOW_SIZES		4
     71
     72#define OARR_VALID_SHIFT		0
     73#define OARR_VALID			BIT(OARR_VALID_SHIFT)
     74#define OARR_SIZE_CFG_SHIFT		1
     75
     76/*
     77 * Maximum number of inbound mapping region sizes that can be supported by an
     78 * IARR
     79 */
     80#define MAX_NUM_IB_REGION_SIZES		9
     81
     82#define IMAP_VALID_SHIFT		0
     83#define IMAP_VALID			BIT(IMAP_VALID_SHIFT)
     84
     85#define IPROC_PCI_PM_CAP		0x48
     86#define IPROC_PCI_PM_CAP_MASK		0xffff
     87#define IPROC_PCI_EXP_CAP		0xac
     88
     89#define IPROC_PCIE_REG_INVALID		0xffff
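/*
 * Registers that a given controller variant does not implement keep this
 * sentinel offset: iproc_pcie_read_reg() then returns 0 and
 * iproc_pcie_write_reg() silently drops the write.
 */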
     90
     91/**
     92 * struct iproc_pcie_ob_map - iProc PCIe outbound mapping controller-specific
     93 * parameters
     94 * @window_sizes: list of supported outbound mapping window sizes in MB
     95 * @nr_sizes: number of supported outbound mapping window sizes
     96 */
     97struct iproc_pcie_ob_map {
     98	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
     99	unsigned int nr_sizes;
    100};
    101
    102static const struct iproc_pcie_ob_map paxb_ob_map[] = {
    103	{
    104		/* OARR0/OMAP0 */
    105		.window_sizes = { 128, 256 },
    106		.nr_sizes = 2,
    107	},
    108	{
    109		/* OARR1/OMAP1 */
    110		.window_sizes = { 128, 256 },
    111		.nr_sizes = 2,
    112	},
    113};
    114
    115static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
    116	{
    117		/* OARR0/OMAP0 */
    118		.window_sizes = { 128, 256 },
    119		.nr_sizes = 2,
    120	},
    121	{
    122		/* OARR1/OMAP1 */
    123		.window_sizes = { 128, 256 },
    124		.nr_sizes = 2,
    125	},
    126	{
    127		/* OARR2/OMAP2 */
    128		.window_sizes = { 128, 256, 512, 1024 },
    129		.nr_sizes = 4,
    130	},
    131	{
    132		/* OARR3/OMAP3 */
    133		.window_sizes = { 128, 256, 512, 1024 },
    134		.nr_sizes = 4,
    135	},
    136};
    137
    138/**
    139 * enum iproc_pcie_ib_map_type - iProc PCIe inbound mapping type
    140 * @IPROC_PCIE_IB_MAP_MEM: DDR memory
    141 * @IPROC_PCIE_IB_MAP_IO: device I/O memory
    142 * @IPROC_PCIE_IB_MAP_INVALID: invalid or unused
    143 */
    144enum iproc_pcie_ib_map_type {
    145	IPROC_PCIE_IB_MAP_MEM = 0,
    146	IPROC_PCIE_IB_MAP_IO,
    147	IPROC_PCIE_IB_MAP_INVALID
    148};
    149
    150/**
    151 * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
    152 * parameters
    153 * @type: inbound mapping region type
    154 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
    155 * SZ_1G
    156 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
    157 * GB, depending on the size unit
    158 * @nr_sizes: number of supported inbound mapping region sizes
    159 * @nr_windows: number of supported inbound mapping windows for the region
    160 * @imap_addr_offset: register offset between the upper and lower 32-bit
    161 * IMAP address registers
    162 * @imap_window_offset: register offset between each IMAP window
    163 */
    164struct iproc_pcie_ib_map {
    165	enum iproc_pcie_ib_map_type type;
    166	unsigned int size_unit;
    167	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
    168	unsigned int nr_sizes;
    169	unsigned int nr_windows;
    170	u16 imap_addr_offset;
    171	u16 imap_window_offset;
    172};
    173
    174static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
    175	{
    176		/* IARR0/IMAP0 */
    177		.type = IPROC_PCIE_IB_MAP_IO,
    178		.size_unit = SZ_1K,
    179		.region_sizes = { 32 },
    180		.nr_sizes = 1,
    181		.nr_windows = 8,
    182		.imap_addr_offset = 0x40,
    183		.imap_window_offset = 0x4,
    184	},
    185	{
    186		/* IARR1/IMAP1 */
    187		.type = IPROC_PCIE_IB_MAP_MEM,
    188		.size_unit = SZ_1M,
    189		.region_sizes = { 8 },
    190		.nr_sizes = 1,
    191		.nr_windows = 8,
    192		.imap_addr_offset = 0x4,
    193		.imap_window_offset = 0x8,
    194
    195	},
    196	{
    197		/* IARR2/IMAP2 */
    198		.type = IPROC_PCIE_IB_MAP_MEM,
    199		.size_unit = SZ_1M,
    200		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
    201				  16384 },
    202		.nr_sizes = 9,
    203		.nr_windows = 1,
    204		.imap_addr_offset = 0x4,
    205		.imap_window_offset = 0x8,
    206	},
    207	{
    208		/* IARR3/IMAP3 */
    209		.type = IPROC_PCIE_IB_MAP_MEM,
    210		.size_unit = SZ_1G,
    211		.region_sizes = { 1, 2, 4, 8, 16, 32 },
    212		.nr_sizes = 6,
    213		.nr_windows = 8,
    214		.imap_addr_offset = 0x4,
    215		.imap_window_offset = 0x8,
    216	},
    217	{
    218		/* IARR4/IMAP4 */
    219		.type = IPROC_PCIE_IB_MAP_MEM,
    220		.size_unit = SZ_1G,
    221		.region_sizes = { 32, 64, 128, 256, 512 },
    222		.nr_sizes = 5,
    223		.nr_windows = 8,
    224		.imap_addr_offset = 0x4,
    225		.imap_window_offset = 0x8,
    226	},
    227};
    228
    229/*
    230 * iProc PCIe host registers
    231 */
    232enum iproc_pcie_reg {
    233	/* clock/reset signal control */
    234	IPROC_PCIE_CLK_CTRL = 0,
    235
    236	/*
    237	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
    238	 * GICv3 ITS)
    239	 */
    240	IPROC_PCIE_MSI_GIC_MODE,
    241
    242	/*
    243	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
    244	 * window where the MSI posted writes are written, for the writes to be
    245	 * interpreted as MSI writes.
    246	 */
    247	IPROC_PCIE_MSI_BASE_ADDR,
    248	IPROC_PCIE_MSI_WINDOW_SIZE,
    249
    250	/*
    251	 * To hold the address of the register where the MSI writes are
    252	 * programmed.  When ARM GICv3 ITS is used, this should be programmed
    253	 * with the address of the GITS_TRANSLATER register.
    254	 */
    255	IPROC_PCIE_MSI_ADDR_LO,
    256	IPROC_PCIE_MSI_ADDR_HI,
    257
    258	/* enable MSI */
    259	IPROC_PCIE_MSI_EN_CFG,
    260
    261	/* allow access to root complex configuration space */
    262	IPROC_PCIE_CFG_IND_ADDR,
    263	IPROC_PCIE_CFG_IND_DATA,
    264
    265	/* allow access to device configuration space */
    266	IPROC_PCIE_CFG_ADDR,
    267	IPROC_PCIE_CFG_DATA,
    268
    269	/* enable INTx */
    270	IPROC_PCIE_INTX_EN,
    271
    272	/* outbound address mapping */
    273	IPROC_PCIE_OARR0,
    274	IPROC_PCIE_OMAP0,
    275	IPROC_PCIE_OARR1,
    276	IPROC_PCIE_OMAP1,
    277	IPROC_PCIE_OARR2,
    278	IPROC_PCIE_OMAP2,
    279	IPROC_PCIE_OARR3,
    280	IPROC_PCIE_OMAP3,
    281
    282	/* inbound address mapping */
    283	IPROC_PCIE_IARR0,
    284	IPROC_PCIE_IMAP0,
    285	IPROC_PCIE_IARR1,
    286	IPROC_PCIE_IMAP1,
    287	IPROC_PCIE_IARR2,
    288	IPROC_PCIE_IMAP2,
    289	IPROC_PCIE_IARR3,
    290	IPROC_PCIE_IMAP3,
    291	IPROC_PCIE_IARR4,
    292	IPROC_PCIE_IMAP4,
    293
    294	/* config read status */
    295	IPROC_PCIE_CFG_RD_STATUS,
    296
    297	/* link status */
    298	IPROC_PCIE_LINK_STATUS,
    299
    300	/* enable APB error for unsupported requests */
    301	IPROC_PCIE_APB_ERR_EN,
    302
    303	/* total number of core registers */
    304	IPROC_PCIE_MAX_NUM_REG,
    305};
    306
    307/* iProc PCIe PAXB BCMA registers */
    308static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
    309	[IPROC_PCIE_CLK_CTRL]		= 0x000,
    310	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
    311	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
    312	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
    313	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
    314	[IPROC_PCIE_INTX_EN]		= 0x330,
    315	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
    316};
    317
    318/* iProc PCIe PAXB registers */
    319static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
    320	[IPROC_PCIE_CLK_CTRL]		= 0x000,
    321	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
    322	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
    323	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
    324	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
    325	[IPROC_PCIE_INTX_EN]		= 0x330,
    326	[IPROC_PCIE_OARR0]		= 0xd20,
    327	[IPROC_PCIE_OMAP0]		= 0xd40,
    328	[IPROC_PCIE_OARR1]		= 0xd28,
    329	[IPROC_PCIE_OMAP1]		= 0xd48,
    330	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
    331	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
    332};
    333
    334/* iProc PCIe PAXB v2 registers */
    335static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
    336	[IPROC_PCIE_CLK_CTRL]		= 0x000,
    337	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
    338	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
    339	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
    340	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
    341	[IPROC_PCIE_INTX_EN]		= 0x330,
    342	[IPROC_PCIE_OARR0]		= 0xd20,
    343	[IPROC_PCIE_OMAP0]		= 0xd40,
    344	[IPROC_PCIE_OARR1]		= 0xd28,
    345	[IPROC_PCIE_OMAP1]		= 0xd48,
    346	[IPROC_PCIE_OARR2]		= 0xd60,
    347	[IPROC_PCIE_OMAP2]		= 0xd68,
    348	[IPROC_PCIE_OARR3]		= 0xdf0,
    349	[IPROC_PCIE_OMAP3]		= 0xdf8,
    350	[IPROC_PCIE_IARR0]		= 0xd00,
    351	[IPROC_PCIE_IMAP0]		= 0xc00,
    352	[IPROC_PCIE_IARR1]		= 0xd08,
    353	[IPROC_PCIE_IMAP1]		= 0xd70,
    354	[IPROC_PCIE_IARR2]		= 0xd10,
    355	[IPROC_PCIE_IMAP2]		= 0xcc0,
    356	[IPROC_PCIE_IARR3]		= 0xe00,
    357	[IPROC_PCIE_IMAP3]		= 0xe08,
    358	[IPROC_PCIE_IARR4]		= 0xe68,
    359	[IPROC_PCIE_IMAP4]		= 0xe70,
    360	[IPROC_PCIE_CFG_RD_STATUS]	= 0xee0,
    361	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
    362	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
    363};
    364
    365/* iProc PCIe PAXC v1 registers */
    366static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
    367	[IPROC_PCIE_CLK_CTRL]		= 0x000,
    368	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
    369	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
    370	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
    371	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
    372};
    373
    374/* iProc PCIe PAXC v2 registers */
    375static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
    376	[IPROC_PCIE_MSI_GIC_MODE]	= 0x050,
    377	[IPROC_PCIE_MSI_BASE_ADDR]	= 0x074,
    378	[IPROC_PCIE_MSI_WINDOW_SIZE]	= 0x078,
    379	[IPROC_PCIE_MSI_ADDR_LO]	= 0x07c,
    380	[IPROC_PCIE_MSI_ADDR_HI]	= 0x080,
    381	[IPROC_PCIE_MSI_EN_CFG]		= 0x09c,
    382	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
    383	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
    384	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
    385	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
    386};
    387
    388/*
     389 * List of device IDs of controllers that have a corrupted capability list
     390 * and require a SW fixup
    391 */
    392static const u16 iproc_pcie_corrupt_cap_did[] = {
    393	0x16cd,
    394	0x16f0,
    395	0xd802,
    396	0xd804
    397};
    398
    399static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
    400{
    401	struct iproc_pcie *pcie = bus->sysdata;
    402	return pcie;
    403}
    404
    405static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
    406{
    407	return !!(reg_offset == IPROC_PCIE_REG_INVALID);
    408}
    409
    410static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
    411					enum iproc_pcie_reg reg)
    412{
    413	return pcie->reg_offsets[reg];
    414}
    415
    416static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
    417				      enum iproc_pcie_reg reg)
    418{
    419	u16 offset = iproc_pcie_reg_offset(pcie, reg);
    420
    421	if (iproc_pcie_reg_is_invalid(offset))
    422		return 0;
    423
    424	return readl(pcie->base + offset);
    425}
    426
    427static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
    428					enum iproc_pcie_reg reg, u32 val)
    429{
    430	u16 offset = iproc_pcie_reg_offset(pcie, reg);
    431
    432	if (iproc_pcie_reg_is_invalid(offset))
    433		return;
    434
    435	writel(val, pcie->base + offset);
    436}
    437
    438/*
     439 * APB error forwarding can be disabled while accessing configuration
    440 * registers of the endpoint device, to prevent unsupported requests
    441 * (typically seen during enumeration with multi-function devices) from
    442 * triggering a system exception.
    443 */
    444static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
    445					      bool disable)
    446{
    447	struct iproc_pcie *pcie = iproc_data(bus);
    448	u32 val;
    449
    450	if (bus->number && pcie->has_apb_err_disable) {
    451		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
    452		if (disable)
    453			val &= ~APB_ERR_EN;
    454		else
    455			val |= APB_ERR_EN;
    456		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
    457	}
    458}
    459
    460static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
    461					       unsigned int busno,
    462					       unsigned int devfn,
    463					       int where)
    464{
    465	u16 offset;
    466	u32 val;
    467
    468	/* EP device access */
    469	val = ALIGN_DOWN(PCIE_ECAM_OFFSET(busno, devfn, where), 4) |
    470		CFG_ADDR_CFG_TYPE_1;
    471
    472	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
    473	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
    474
    475	if (iproc_pcie_reg_is_invalid(offset))
    476		return NULL;
    477
    478	return (pcie->base + offset);
    479}
    480
    481static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
    482					 void __iomem *cfg_data_p)
    483{
    484	int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
    485	unsigned int data;
    486	u32 status;
    487
    488	/*
    489	 * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
    490	 * affects config reads of the Vendor ID.  For config writes or any
    491	 * other config reads, the Root may automatically reissue the
    492	 * configuration request again as a new request.
    493	 *
    494	 * For config reads, this hardware returns CFG_RETRY_STATUS data
    495	 * when it receives a CRS completion, regardless of the address of
    496	 * the read or the CRS Software Visibility Enable bit.  As a
    497	 * partial workaround for this, we retry in software any read that
    498	 * returns CFG_RETRY_STATUS.
    499	 *
    500	 * Note that a non-Vendor ID config register may have a value of
    501	 * CFG_RETRY_STATUS.  If we read that, we can't distinguish it from
    502	 * a CRS completion, so we will incorrectly retry the read and
    503	 * eventually return the wrong data (0xffffffff).
    504	 */
    505	data = readl(cfg_data_p);
    506	while (data == CFG_RETRY_STATUS && timeout--) {
    507		/*
    508		 * CRS state is set in CFG_RD status register
    509		 * This will handle the case where CFG_RETRY_STATUS is
    510		 * valid config data.
    511		 */
    512		status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
    513		if (status != CFG_RD_CRS)
    514			return data;
    515
    516		udelay(1);
    517		data = readl(cfg_data_p);
    518	}
    519
    520	if (data == CFG_RETRY_STATUS)
    521		data = 0xffffffff;
    522
    523	return data;
    524}
    525
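/*
 * For controllers with a corrupted capability list (see
 * iproc_pcie_corrupt_cap_did[]), rewrite the PM and PCIe capability headers
 * on the fly during config reads so that enumeration sees a sane chain: a PM
 * capability at 0x48 chaining to a v2 PCIe root-port capability at 0xac that
 * terminates the list.  Independent of that fixup, CRS Software Visibility
 * support is always masked out of the root capabilities.
 */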
    526static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
    527{
    528	u32 i, dev_id;
    529
    530	switch (where & ~0x3) {
    531	case PCI_VENDOR_ID:
    532		dev_id = *val >> 16;
    533
    534		/*
    535		 * Activate fixup for those controllers that have corrupted
    536		 * capability list registers
    537		 */
    538		for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
    539			if (dev_id == iproc_pcie_corrupt_cap_did[i])
    540				pcie->fix_paxc_cap = true;
    541		break;
    542
    543	case IPROC_PCI_PM_CAP:
    544		if (pcie->fix_paxc_cap) {
    545			/* advertise PM, force next capability to PCIe */
    546			*val &= ~IPROC_PCI_PM_CAP_MASK;
    547			*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
    548		}
    549		break;
    550
    551	case IPROC_PCI_EXP_CAP:
    552		if (pcie->fix_paxc_cap) {
    553			/* advertise root port, version 2, terminate here */
    554			*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
    555				PCI_CAP_ID_EXP;
    556		}
    557		break;
    558
    559	case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
    560		/* Don't advertise CRS SV support */
    561		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
    562		break;
    563
    564	default:
    565		break;
    566	}
    567}
    568
    569static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
    570				  int where, int size, u32 *val)
    571{
    572	struct iproc_pcie *pcie = iproc_data(bus);
    573	unsigned int busno = bus->number;
    574	void __iomem *cfg_data_p;
    575	unsigned int data;
    576	int ret;
    577
    578	/* root complex access */
    579	if (busno == 0) {
    580		ret = pci_generic_config_read32(bus, devfn, where, size, val);
    581		if (ret == PCIBIOS_SUCCESSFUL)
    582			iproc_pcie_fix_cap(pcie, where, val);
    583
    584		return ret;
    585	}
    586
    587	cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
    588
    589	if (!cfg_data_p)
    590		return PCIBIOS_DEVICE_NOT_FOUND;
    591
    592	data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
    593
    594	*val = data;
    595	if (size <= 2)
    596		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
    597
    598	/*
    599	 * For PAXC and PAXCv2, the total number of PFs that one can enumerate
    600	 * depends on the firmware configuration. Unfortunately, due to an ASIC
    601	 * bug, unconfigured PFs cannot be properly hidden from the root
     602	 * complex. As a result, write access to these PFs will cause a bus
     603	 * lockup on the embedded processor.
     604	 *
     605	 * Since all unconfigured PFs are left with an incorrect, stale device
     606	 * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
     607	 * accesses early here and reject them all.
    608	 */
    609#define DEVICE_ID_MASK     0xffff0000
    610#define DEVICE_ID_SHIFT    16
    611	if (pcie->rej_unconfig_pf &&
    612	    (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
    613		if ((*val & DEVICE_ID_MASK) ==
    614		    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
    615			return PCIBIOS_FUNC_NOT_SUPPORTED;
    616
    617	return PCIBIOS_SUCCESSFUL;
    618}
    619
    620/*
     621 * Note that access to the configuration registers is protected at a higher
     622 * layer by 'pci_lock' in drivers/pci/access.c.
    623 */
    624static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
    625					    int busno, unsigned int devfn,
    626					    int where)
    627{
    628	u16 offset;
    629
    630	/* root complex access */
    631	if (busno == 0) {
    632		if (PCIE_ECAM_DEVFN(devfn) > 0)
    633			return NULL;
    634
    635		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
    636				     where & CFG_IND_ADDR_MASK);
    637		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
    638		if (iproc_pcie_reg_is_invalid(offset))
    639			return NULL;
    640		else
    641			return (pcie->base + offset);
    642	}
    643
    644	return iproc_pcie_map_ep_cfg_reg(pcie, busno, devfn, where);
    645}
    646
    647static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
    648						unsigned int devfn,
    649						int where)
    650{
    651	return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
    652				      where);
    653}
    654
    655static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
    656				       unsigned int devfn, int where,
    657				       int size, u32 *val)
    658{
    659	void __iomem *addr;
    660
    661	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
    662	if (!addr)
    663		return PCIBIOS_DEVICE_NOT_FOUND;
    664
    665	*val = readl(addr);
    666
    667	if (size <= 2)
    668		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
    669
    670	return PCIBIOS_SUCCESSFUL;
    671}
    672
    673static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
    674					unsigned int devfn, int where,
    675					int size, u32 val)
    676{
    677	void __iomem *addr;
    678	u32 mask, tmp;
    679
    680	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
    681	if (!addr)
    682		return PCIBIOS_DEVICE_NOT_FOUND;
    683
    684	if (size == 4) {
    685		writel(val, addr);
    686		return PCIBIOS_SUCCESSFUL;
    687	}
    688
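	/*
	 * Sub-word write, e.g. a 2-byte write at where = 0x4a (byte lane 2):
	 * mask = ~(0xffff << 16) = 0x0000ffff, so the low half-word read back
	 * from the register is preserved and only the upper half-word is
	 * replaced with 'val'.
	 */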
    689	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
    690	tmp = readl(addr) & mask;
    691	tmp |= val << ((where & 0x3) * 8);
    692	writel(tmp, addr);
    693
    694	return PCIBIOS_SUCCESSFUL;
    695}
    696
    697static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
    698				    int where, int size, u32 *val)
    699{
    700	int ret;
    701	struct iproc_pcie *pcie = iproc_data(bus);
    702
    703	iproc_pcie_apb_err_disable(bus, true);
    704	if (pcie->iproc_cfg_read)
    705		ret = iproc_pcie_config_read(bus, devfn, where, size, val);
    706	else
    707		ret = pci_generic_config_read32(bus, devfn, where, size, val);
    708	iproc_pcie_apb_err_disable(bus, false);
    709
    710	return ret;
    711}
    712
    713static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
    714				     int where, int size, u32 val)
    715{
    716	int ret;
    717
    718	iproc_pcie_apb_err_disable(bus, true);
    719	ret = pci_generic_config_write32(bus, devfn, where, size, val);
    720	iproc_pcie_apb_err_disable(bus, false);
    721
    722	return ret;
    723}
    724
    725static struct pci_ops iproc_pcie_ops = {
    726	.map_bus = iproc_pcie_bus_map_cfg_bus,
    727	.read = iproc_pcie_config_read32,
    728	.write = iproc_pcie_config_write32,
    729};
    730
    731static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
    732{
    733	u32 val;
    734
    735	/*
    736	 * PAXC and the internal emulated endpoint device downstream should not
    737	 * be reset.  If firmware has been loaded on the endpoint device at an
    738	 * earlier boot stage, reset here causes issues.
    739	 */
    740	if (pcie->ep_is_internal)
    741		return;
    742
    743	if (assert) {
    744		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
    745		val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
    746			~RC_PCIE_RST_OUTPUT;
    747		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
    748		udelay(250);
    749	} else {
    750		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
    751		val |= RC_PCIE_RST_OUTPUT;
    752		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
    753		msleep(100);
    754	}
    755}
    756
    757int iproc_pcie_shutdown(struct iproc_pcie *pcie)
    758{
    759	iproc_pcie_perst_ctrl(pcie, true);
    760	msleep(500);
    761
    762	return 0;
    763}
    764EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
    765
    766static int iproc_pcie_check_link(struct iproc_pcie *pcie)
    767{
    768	struct device *dev = pcie->dev;
    769	u32 hdr_type, link_ctrl, link_status, class, val;
    770	bool link_is_active = false;
    771
    772	/*
    773	 * PAXC connects to emulated endpoint devices directly and does not
    774	 * have a Serdes.  Therefore skip the link detection logic here.
    775	 */
    776	if (pcie->ep_is_internal)
    777		return 0;
    778
    779	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
    780	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
    781		dev_err(dev, "PHY or data link is INACTIVE!\n");
    782		return -ENODEV;
    783	}
    784
    785	/* make sure we are not in EP mode */
    786	iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
    787	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
    788		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
    789		return -EFAULT;
    790	}
    791
    792	/* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */
    793#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
    794#define PCI_BRIDGE_CTRL_REG_CLASS_MASK	0xffffff
    795	iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
    796				    4, &class);
    797	class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK;
    798	class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
    799	iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
    800				     4, class);
    801
    802	/* check link status to see if link is active */
    803	iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
    804				    2, &link_status);
    805	if (link_status & PCI_EXP_LNKSTA_NLW)
    806		link_is_active = true;
    807
    808	if (!link_is_active) {
    809		/* try GEN 1 link speed */
    810#define PCI_TARGET_LINK_SPEED_MASK	0xf
    811#define PCI_TARGET_LINK_SPEED_GEN2	0x2
    812#define PCI_TARGET_LINK_SPEED_GEN1	0x1
    813		iproc_pci_raw_config_read32(pcie, 0,
    814					    IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
    815					    4, &link_ctrl);
    816		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
    817		    PCI_TARGET_LINK_SPEED_GEN2) {
    818			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
    819			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
    820			iproc_pci_raw_config_write32(pcie, 0,
    821					IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
    822					4, link_ctrl);
    823			msleep(100);
    824
    825			iproc_pci_raw_config_read32(pcie, 0,
    826					IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
    827					2, &link_status);
    828			if (link_status & PCI_EXP_LNKSTA_NLW)
    829				link_is_active = true;
    830		}
    831	}
    832
    833	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
    834
    835	return link_is_active ? 0 : -ENODEV;
    836}
    837
    838static void iproc_pcie_enable(struct iproc_pcie *pcie)
    839{
    840	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
    841}
    842
    843static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
    844					  int window_idx)
    845{
    846	u32 val;
    847
    848	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
    849
    850	return !!(val & OARR_VALID);
    851}
    852
    853static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
    854				      int size_idx, u64 axi_addr, u64 pci_addr)
    855{
    856	struct device *dev = pcie->dev;
    857	u16 oarr_offset, omap_offset;
    858
    859	/*
    860	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
    861	 * on window index.
    862	 */
    863	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
    864							  window_idx));
    865	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
    866							  window_idx));
    867	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
    868	    iproc_pcie_reg_is_invalid(omap_offset))
    869		return -EINVAL;
    870
    871	/*
    872	 * Program the OARR registers.  The upper 32-bit OARR register is
    873	 * always right after the lower 32-bit OARR register.
    874	 */
    875	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
    876	       OARR_VALID, pcie->base + oarr_offset);
    877	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
    878
    879	/* now program the OMAP registers */
    880	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
    881	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
    882
    883	dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
    884		window_idx, oarr_offset, &axi_addr, &pci_addr);
    885	dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
    886		readl(pcie->base + oarr_offset),
    887		readl(pcie->base + oarr_offset + 4));
    888	dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
    889		readl(pcie->base + omap_offset),
    890		readl(pcie->base + omap_offset + 4));
    891
    892	return 0;
    893}
    894
    895/*
    896 * Some iProc SoCs require the SW to configure the outbound address mapping
    897 *
    898 * Outbound address translation:
    899 *
    900 * iproc_pcie_address = axi_address - axi_offset
    901 * OARR = iproc_pcie_address
    902 * OMAP = pci_addr
    903 *
    904 * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
    905 */
    906static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
    907			       u64 pci_addr, resource_size_t size)
    908{
    909	struct iproc_pcie_ob *ob = &pcie->ob;
    910	struct device *dev = pcie->dev;
    911	int ret = -EINVAL, window_idx, size_idx;
    912
    913	if (axi_addr < ob->axi_offset) {
    914		dev_err(dev, "axi address %pap less than offset %pap\n",
    915			&axi_addr, &ob->axi_offset);
    916		return -EINVAL;
    917	}
    918
    919	/*
    920	 * Translate the AXI address to the internal address used by the iProc
    921	 * PCIe core before programming the OARR
    922	 */
    923	axi_addr -= ob->axi_offset;
    924
    925	/* iterate through all OARR/OMAP mapping windows */
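	/*
	 * Windows are scanned from the highest index down; on PAXB v2 the
	 * larger 512 MB and 1 GB window sizes are only available on the
	 * OARR2/OMAP2 and OARR3/OMAP3 pairs.
	 */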
    926	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
    927		const struct iproc_pcie_ob_map *ob_map =
    928			&pcie->ob_map[window_idx];
    929
    930		/*
    931		 * If current outbound window is already in use, move on to the
    932		 * next one.
    933		 */
    934		if (iproc_pcie_ob_is_valid(pcie, window_idx))
    935			continue;
    936
    937		/*
    938		 * Iterate through all supported window sizes within the
    939		 * OARR/OMAP pair to find a match.  Go through the window sizes
    940		 * in a descending order.
    941		 */
    942		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
    943		     size_idx--) {
    944			resource_size_t window_size =
    945				ob_map->window_sizes[size_idx] * SZ_1M;
    946
    947			/*
     948			 * Keep iterating until we reach the last window with
     949			 * the minimal window size at index zero. In that
     950			 * corner case, compromise by mapping the remaining
     951			 * range with the smallest supported window size.
    952			 */
    953			if (size < window_size) {
    954				if (size_idx > 0 || window_idx > 0)
    955					continue;
    956
    957				/*
    958				 * For the corner case of reaching the minimal
    959				 * window size that can be supported on the
    960				 * last window
    961				 */
    962				axi_addr = ALIGN_DOWN(axi_addr, window_size);
    963				pci_addr = ALIGN_DOWN(pci_addr, window_size);
    964				size = window_size;
    965			}
    966
    967			if (!IS_ALIGNED(axi_addr, window_size) ||
    968			    !IS_ALIGNED(pci_addr, window_size)) {
    969				dev_err(dev,
    970					"axi %pap or pci %pap not aligned\n",
    971					&axi_addr, &pci_addr);
    972				return -EINVAL;
    973			}
    974
    975			/*
    976			 * Match found!  Program both OARR and OMAP and mark
    977			 * them as a valid entry.
    978			 */
    979			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
    980						  axi_addr, pci_addr);
    981			if (ret)
    982				goto err_ob;
    983
    984			size -= window_size;
    985			if (size == 0)
    986				return 0;
    987
    988			/*
    989			 * If we are here, we are done with the current window,
    990			 * but not yet finished all mappings.  Need to move on
    991			 * to the next window.
    992			 */
    993			axi_addr += window_size;
    994			pci_addr += window_size;
    995			break;
    996		}
    997	}
    998
    999err_ob:
   1000	dev_err(dev, "unable to configure outbound mapping\n");
   1001	dev_err(dev,
   1002		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
   1003		&axi_addr, &ob->axi_offset, &pci_addr, &size);
   1004
   1005	return ret;
   1006}
   1007
   1008static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
   1009				 struct list_head *resources)
   1010{
   1011	struct device *dev = pcie->dev;
   1012	struct resource_entry *window;
   1013	int ret;
   1014
   1015	resource_list_for_each_entry(window, resources) {
   1016		struct resource *res = window->res;
   1017		u64 res_type = resource_type(res);
   1018
   1019		switch (res_type) {
   1020		case IORESOURCE_IO:
   1021		case IORESOURCE_BUS:
   1022			break;
   1023		case IORESOURCE_MEM:
   1024			ret = iproc_pcie_setup_ob(pcie, res->start,
   1025						  res->start - window->offset,
   1026						  resource_size(res));
   1027			if (ret)
   1028				return ret;
   1029			break;
   1030		default:
   1031			dev_err(dev, "invalid resource %pR\n", res);
   1032			return -EINVAL;
   1033		}
   1034	}
   1035
   1036	return 0;
   1037}
   1038
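/*
 * The low nr_sizes bits of an IARR register hold the programmed region size
 * (iproc_pcie_ib_write() sets BIT(size_idx) there), so a nonzero value in
 * that field means the region is already in use.
 */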
   1039static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
   1040					   int region_idx)
   1041{
   1042	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
   1043	u32 val;
   1044
   1045	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
   1046
   1047	return !!(val & (BIT(ib_map->nr_sizes) - 1));
   1048}
   1049
   1050static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
   1051					    enum iproc_pcie_ib_map_type type)
   1052{
   1053	return !!(ib_map->type == type);
   1054}
   1055
   1056static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
   1057			       int size_idx, int nr_windows, u64 axi_addr,
   1058			       u64 pci_addr, resource_size_t size)
   1059{
   1060	struct device *dev = pcie->dev;
   1061	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
   1062	u16 iarr_offset, imap_offset;
   1063	u32 val;
   1064	int window_idx;
   1065
   1066	iarr_offset = iproc_pcie_reg_offset(pcie,
   1067				MAP_REG(IPROC_PCIE_IARR0, region_idx));
   1068	imap_offset = iproc_pcie_reg_offset(pcie,
   1069				MAP_REG(IPROC_PCIE_IMAP0, region_idx));
   1070	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
   1071	    iproc_pcie_reg_is_invalid(imap_offset))
   1072		return -EINVAL;
   1073
   1074	dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
   1075		region_idx, iarr_offset, &axi_addr, &pci_addr);
   1076
   1077	/*
   1078	 * Program the IARR registers.  The upper 32-bit IARR register is
   1079	 * always right after the lower 32-bit IARR register.
   1080	 */
   1081	writel(lower_32_bits(pci_addr) | BIT(size_idx),
   1082	       pcie->base + iarr_offset);
   1083	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
   1084
   1085	dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
   1086		readl(pcie->base + iarr_offset),
   1087		readl(pcie->base + iarr_offset + 4));
   1088
   1089	/*
   1090	 * Now program the IMAP registers.  Each IARR region may have one or
   1091	 * more IMAP windows.
   1092	 */
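	/*
	 * The region is split evenly across its IMAP windows; nr_windows is
	 * a power of two (1 or 8 in paxb_v2_ib_map), so the shift below
	 * divides the size exactly.
	 */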
   1093	size >>= ilog2(nr_windows);
   1094	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
   1095		val = readl(pcie->base + imap_offset);
   1096		val |= lower_32_bits(axi_addr) | IMAP_VALID;
   1097		writel(val, pcie->base + imap_offset);
   1098		writel(upper_32_bits(axi_addr),
   1099		       pcie->base + imap_offset + ib_map->imap_addr_offset);
   1100
   1101		dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
   1102			window_idx, readl(pcie->base + imap_offset),
   1103			readl(pcie->base + imap_offset +
   1104			      ib_map->imap_addr_offset));
   1105
   1106		imap_offset += ib_map->imap_window_offset;
   1107		axi_addr += size;
   1108	}
   1109
   1110	return 0;
   1111}
   1112
   1113static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
   1114			       struct resource_entry *entry,
   1115			       enum iproc_pcie_ib_map_type type)
   1116{
   1117	struct device *dev = pcie->dev;
   1118	struct iproc_pcie_ib *ib = &pcie->ib;
   1119	int ret;
   1120	unsigned int region_idx, size_idx;
   1121	u64 axi_addr = entry->res->start;
   1122	u64 pci_addr = entry->res->start - entry->offset;
   1123	resource_size_t size = resource_size(entry->res);
   1124
   1125	/* iterate through all IARR mapping regions */
   1126	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
   1127		const struct iproc_pcie_ib_map *ib_map =
   1128			&pcie->ib_map[region_idx];
   1129
   1130		/*
   1131		 * If current inbound region is already in use or not a
   1132		 * compatible type, move on to the next.
   1133		 */
   1134		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
   1135		    !iproc_pcie_ib_check_type(ib_map, type))
   1136			continue;
   1137
   1138		/* iterate through all supported region sizes to find a match */
   1139		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
   1140			resource_size_t region_size =
   1141			ib_map->region_sizes[size_idx] * ib_map->size_unit;
   1142
   1143			if (size != region_size)
   1144				continue;
   1145
   1146			if (!IS_ALIGNED(axi_addr, region_size) ||
   1147			    !IS_ALIGNED(pci_addr, region_size)) {
   1148				dev_err(dev,
   1149					"axi %pap or pci %pap not aligned\n",
   1150					&axi_addr, &pci_addr);
   1151				return -EINVAL;
   1152			}
   1153
   1154			/* Match found!  Program IARR and all IMAP windows. */
   1155			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
   1156						  ib_map->nr_windows, axi_addr,
   1157						  pci_addr, size);
   1158			if (ret)
   1159				goto err_ib;
   1160			else
   1161				return 0;
   1162
   1163		}
   1164	}
   1165	ret = -EINVAL;
   1166
   1167err_ib:
   1168	dev_err(dev, "unable to configure inbound mapping\n");
   1169	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
   1170		&axi_addr, &pci_addr, &size);
   1171
   1172	return ret;
   1173}
   1174
   1175static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
   1176{
   1177	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
   1178	struct resource_entry *entry;
   1179	int ret = 0;
   1180
   1181	resource_list_for_each_entry(entry, &host->dma_ranges) {
   1182		/* Each range entry corresponds to an inbound mapping region */
   1183		ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
   1184		if (ret)
   1185			break;
   1186	}
   1187
   1188	return ret;
   1189}
   1190
   1191static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
   1192{
   1193	struct iproc_pcie_ib *ib = &pcie->ib;
   1194	struct iproc_pcie_ob *ob = &pcie->ob;
   1195	int idx;
   1196
   1197	if (pcie->ep_is_internal)
   1198		return;
   1199
   1200	if (pcie->need_ob_cfg) {
   1201		/* iterate through all OARR mapping regions */
   1202		for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
   1203			iproc_pcie_write_reg(pcie,
   1204					     MAP_REG(IPROC_PCIE_OARR0, idx), 0);
   1205		}
   1206	}
   1207
   1208	if (pcie->need_ib_cfg) {
   1209		/* iterate through all IARR mapping regions */
   1210		for (idx = 0; idx < ib->nr_regions; idx++) {
   1211			iproc_pcie_write_reg(pcie,
   1212					     MAP_REG(IPROC_PCIE_IARR0, idx), 0);
   1213		}
   1214	}
   1215}
   1216
    1217static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
    1218			      struct device_node *msi_node,
    1219			      u64 *msi_addr)
   1220{
   1221	struct device *dev = pcie->dev;
   1222	int ret;
   1223	struct resource res;
   1224
   1225	/*
    1226	 * Check if the MSI node points to an ARM GICv3 ITS, which is the only
   1227	 * supported external MSI controller that requires steering.
   1228	 */
   1229	if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
   1230		dev_err(dev, "unable to find compatible MSI controller\n");
   1231		return -ENODEV;
   1232	}
   1233
   1234	/* derive GITS_TRANSLATER address from GICv3 */
   1235	ret = of_address_to_resource(msi_node, 0, &res);
   1236	if (ret < 0) {
   1237		dev_err(dev, "unable to obtain MSI controller resources\n");
   1238		return ret;
   1239	}
   1240
   1241	*msi_addr = res.start + GITS_TRANSLATER;
   1242	return 0;
   1243}
   1244
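/*
 * For PAXB v2, MSI writes from endpoint devices are steered to the GIC ITS
 * by programming a 32 KB inbound I/O mapping (IARR0/IMAP0 above) that covers
 * the GITS_TRANSLATER register.
 */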
   1245static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
   1246{
   1247	int ret;
   1248	struct resource_entry entry;
   1249
   1250	memset(&entry, 0, sizeof(entry));
   1251	entry.res = &entry.__res;
   1252
   1253	msi_addr &= ~(SZ_32K - 1);
   1254	entry.res->start = msi_addr;
   1255	entry.res->end = msi_addr + SZ_32K - 1;
   1256
   1257	ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
   1258	return ret;
   1259}
   1260
   1261static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
   1262					 bool enable)
   1263{
   1264	u32 val;
   1265
   1266	if (!enable) {
   1267		/*
   1268		 * Disable PAXC MSI steering. All write transfers will be
   1269		 * treated as non-MSI transfers
   1270		 */
   1271		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
   1272		val &= ~MSI_ENABLE_CFG;
   1273		iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
   1274		return;
   1275	}
   1276
   1277	/*
   1278	 * Program bits [43:13] of address of GITS_TRANSLATER register into
   1279	 * bits [30:0] of the MSI base address register.  In fact, in all iProc
   1280	 * based SoCs, all I/O register bases are well below the 32-bit
   1281	 * boundary, so we can safely assume bits [43:32] are always zeros.
   1282	 */
   1283	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
   1284			     (u32)(msi_addr >> 13));
   1285
   1286	/* use a default 8K window size */
   1287	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
   1288
   1289	/* steering MSI to GICv3 ITS */
   1290	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
   1291	val |= GIC_V3_CFG;
   1292	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
   1293
   1294	/*
   1295	 * Program bits [43:2] of address of GITS_TRANSLATER register into the
   1296	 * iProc MSI address registers.
   1297	 */
   1298	msi_addr >>= 2;
   1299	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
   1300			     upper_32_bits(msi_addr));
   1301	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
   1302			     lower_32_bits(msi_addr));
   1303
   1304	/* enable MSI */
   1305	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
   1306	val |= MSI_ENABLE_CFG;
   1307	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
   1308}
   1309
   1310static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
   1311				struct device_node *msi_node)
   1312{
   1313	struct device *dev = pcie->dev;
   1314	int ret;
   1315	u64 msi_addr;
   1316
    1317	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
   1318	if (ret < 0) {
   1319		dev_err(dev, "msi steering failed\n");
   1320		return ret;
   1321	}
   1322
   1323	switch (pcie->type) {
   1324	case IPROC_PCIE_PAXB_V2:
   1325		ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
   1326		if (ret)
   1327			return ret;
   1328		break;
   1329	case IPROC_PCIE_PAXC_V2:
   1330		iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
   1331		break;
   1332	default:
   1333		return -EINVAL;
   1334	}
   1335
   1336	return 0;
   1337}
   1338
   1339static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
   1340{
   1341	struct device_node *msi_node;
   1342	int ret;
   1343
   1344	/*
   1345	 * Either the "msi-parent" or the "msi-map" phandle needs to exist
   1346	 * for us to obtain the MSI node.
   1347	 */
   1348
   1349	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
   1350	if (!msi_node) {
   1351		const __be32 *msi_map = NULL;
   1352		int len;
   1353		u32 phandle;
   1354
   1355		msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
   1356		if (!msi_map)
   1357			return -ENODEV;
   1358
   1359		phandle = be32_to_cpup(msi_map + 1);
   1360		msi_node = of_find_node_by_phandle(phandle);
   1361		if (!msi_node)
   1362			return -ENODEV;
   1363	}
   1364
   1365	/*
   1366	 * Certain revisions of the iProc PCIe controller require additional
   1367	 * configurations to steer the MSI writes towards an external MSI
   1368	 * controller.
   1369	 */
   1370	if (pcie->need_msi_steer) {
   1371		ret = iproc_pcie_msi_steer(pcie, msi_node);
   1372		if (ret)
   1373			goto out_put_node;
   1374	}
   1375
   1376	/*
   1377	 * If another MSI controller is being used, the call below should fail
   1378	 * but that is okay
   1379	 */
   1380	ret = iproc_msi_init(pcie, msi_node);
   1381
   1382out_put_node:
   1383	of_node_put(msi_node);
   1384	return ret;
   1385}
   1386
   1387static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
   1388{
   1389	iproc_msi_exit(pcie);
   1390}
   1391
   1392static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
   1393{
   1394	struct device *dev = pcie->dev;
   1395	unsigned int reg_idx;
   1396	const u16 *regs;
   1397
   1398	switch (pcie->type) {
   1399	case IPROC_PCIE_PAXB_BCMA:
   1400		regs = iproc_pcie_reg_paxb_bcma;
   1401		break;
   1402	case IPROC_PCIE_PAXB:
   1403		regs = iproc_pcie_reg_paxb;
   1404		pcie->has_apb_err_disable = true;
   1405		if (pcie->need_ob_cfg) {
   1406			pcie->ob_map = paxb_ob_map;
   1407			pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
   1408		}
   1409		break;
   1410	case IPROC_PCIE_PAXB_V2:
   1411		regs = iproc_pcie_reg_paxb_v2;
   1412		pcie->iproc_cfg_read = true;
   1413		pcie->has_apb_err_disable = true;
   1414		if (pcie->need_ob_cfg) {
   1415			pcie->ob_map = paxb_v2_ob_map;
   1416			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
   1417		}
   1418		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
   1419		pcie->ib_map = paxb_v2_ib_map;
   1420		pcie->need_msi_steer = true;
   1421		dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
   1422			 CFG_RETRY_STATUS);
   1423		break;
   1424	case IPROC_PCIE_PAXC:
   1425		regs = iproc_pcie_reg_paxc;
   1426		pcie->ep_is_internal = true;
   1427		pcie->iproc_cfg_read = true;
   1428		pcie->rej_unconfig_pf = true;
   1429		break;
   1430	case IPROC_PCIE_PAXC_V2:
   1431		regs = iproc_pcie_reg_paxc_v2;
   1432		pcie->ep_is_internal = true;
   1433		pcie->iproc_cfg_read = true;
   1434		pcie->rej_unconfig_pf = true;
   1435		pcie->need_msi_steer = true;
   1436		break;
   1437	default:
   1438		dev_err(dev, "incompatible iProc PCIe interface\n");
   1439		return -EINVAL;
   1440	}
   1441
   1442	pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
   1443					 sizeof(*pcie->reg_offsets),
   1444					 GFP_KERNEL);
   1445	if (!pcie->reg_offsets)
   1446		return -ENOMEM;
   1447
   1448	/* go through the register table and populate all valid registers */
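	/*
	 * In the register tables above an offset of 0 means "not
	 * implemented", except for IPROC_PCIE_CLK_CTRL (enum value 0) whose
	 * offset genuinely is 0x000.  PAXC v2 is the only variant without a
	 * CLK_CTRL register, hence the special case for index 0 below.
	 */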
   1449	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
   1450		IPROC_PCIE_REG_INVALID : regs[0];
   1451	for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
   1452		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
   1453			regs[reg_idx] : IPROC_PCIE_REG_INVALID;
   1454
   1455	return 0;
   1456}
   1457
   1458int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
   1459{
   1460	struct device *dev;
   1461	int ret;
   1462	struct pci_dev *pdev;
   1463	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
   1464
   1465	dev = pcie->dev;
   1466
   1467	ret = iproc_pcie_rev_init(pcie);
   1468	if (ret) {
   1469		dev_err(dev, "unable to initialize controller parameters\n");
   1470		return ret;
   1471	}
   1472
   1473	ret = phy_init(pcie->phy);
   1474	if (ret) {
   1475		dev_err(dev, "unable to initialize PCIe PHY\n");
   1476		return ret;
   1477	}
   1478
   1479	ret = phy_power_on(pcie->phy);
   1480	if (ret) {
   1481		dev_err(dev, "unable to power on PCIe PHY\n");
   1482		goto err_exit_phy;
   1483	}
   1484
   1485	iproc_pcie_perst_ctrl(pcie, true);
   1486	iproc_pcie_perst_ctrl(pcie, false);
   1487
   1488	iproc_pcie_invalidate_mapping(pcie);
   1489
   1490	if (pcie->need_ob_cfg) {
   1491		ret = iproc_pcie_map_ranges(pcie, res);
   1492		if (ret) {
   1493			dev_err(dev, "map failed\n");
   1494			goto err_power_off_phy;
   1495		}
   1496	}
   1497
   1498	if (pcie->need_ib_cfg) {
   1499		ret = iproc_pcie_map_dma_ranges(pcie);
   1500		if (ret && ret != -ENOENT)
   1501			goto err_power_off_phy;
   1502	}
   1503
   1504	ret = iproc_pcie_check_link(pcie);
   1505	if (ret) {
   1506		dev_err(dev, "no PCIe EP device detected\n");
   1507		goto err_power_off_phy;
   1508	}
   1509
   1510	iproc_pcie_enable(pcie);
   1511
   1512	if (IS_ENABLED(CONFIG_PCI_MSI))
   1513		if (iproc_pcie_msi_enable(pcie))
   1514			dev_info(dev, "not using iProc MSI\n");
   1515
   1516	host->ops = &iproc_pcie_ops;
   1517	host->sysdata = pcie;
   1518	host->map_irq = pcie->map_irq;
   1519
   1520	ret = pci_host_probe(host);
   1521	if (ret < 0) {
   1522		dev_err(dev, "failed to scan host: %d\n", ret);
   1523		goto err_power_off_phy;
   1524	}
   1525
   1526	for_each_pci_bridge(pdev, host->bus) {
   1527		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
   1528			pcie_print_link_status(pdev);
   1529	}
   1530
   1531	return 0;
   1532
   1533err_power_off_phy:
   1534	phy_power_off(pcie->phy);
   1535err_exit_phy:
   1536	phy_exit(pcie->phy);
   1537	return ret;
   1538}
   1539EXPORT_SYMBOL(iproc_pcie_setup);
   1540
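/*
 * Illustrative sketch (simplified, not part of this driver) of how a bus
 * glue front end such as pcie-iproc-platform.c is expected to drive this
 * common core; the real front ends also set up the PHY, outbound/inbound
 * configuration, etc.:
 *
 *	struct pci_host_bridge *bridge;
 *	struct iproc_pcie *pcie;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 *	pcie = pci_host_bridge_priv(bridge);
 *	pcie->dev = dev;
 *	pcie->type = IPROC_PCIE_PAXB_V2;
 *	pcie->base = devm_platform_ioremap_resource(pdev, 0);
 *	pcie->map_irq = of_irq_parse_and_map_pci;
 *	ret = iproc_pcie_setup(pcie, &bridge->windows);
 */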
   1541int iproc_pcie_remove(struct iproc_pcie *pcie)
   1542{
   1543	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
   1544
   1545	pci_stop_root_bus(host->bus);
   1546	pci_remove_root_bus(host->bus);
   1547
   1548	iproc_pcie_msi_disable(pcie);
   1549
   1550	phy_power_off(pcie->phy);
   1551	phy_exit(pcie->phy);
   1552
   1553	return 0;
   1554}
   1555EXPORT_SYMBOL(iproc_pcie_remove);
   1556
   1557/*
    1558 * The MSI parsing logic in certain revisions of the Broadcom PAXC-based
    1559 * root complex does not work and needs to be disabled.
   1560 */
   1561static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
   1562{
   1563	struct iproc_pcie *pcie = iproc_data(pdev->bus);
   1564
   1565	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
   1566		iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
   1567}
   1568DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
   1569			quirk_paxc_disable_msi_parsing);
   1570DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
   1571			quirk_paxc_disable_msi_parsing);
   1572DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
   1573			quirk_paxc_disable_msi_parsing);
   1574
   1575static void quirk_paxc_bridge(struct pci_dev *pdev)
   1576{
   1577	/*
   1578	 * The PCI config space is shared with the PAXC root port and the first
    1579	 * Ethernet device.  So, we need to work around this by telling the PCI
   1580	 * code that the bridge is not an Ethernet device.
   1581	 */
   1582	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
   1583		pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
   1584
   1585	/*
   1586	 * MPSS is not being set properly (as it is currently 0).  This is
   1587	 * because that area of the PCI config space is hard coded to zero, and
    1588	 * is not modifiable by firmware.  Set this to 2 (i.e., a 512-byte MPS)
   1589	 * so that the MPS can be set to the real max value.
   1590	 */
   1591	pdev->pcie_mpss = 2;
   1592}
   1593DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
   1594DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
   1595DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
   1596DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
   1597DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
   1598
   1599MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
   1600MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
   1601MODULE_LICENSE("GPL v2");