cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pcie-designware.c (20867B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
				  u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_readw_dbi(pci, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);

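/*
 * Usage sketch (added for illustration, not part of the original file):
 * callers typically cache a capability offset once and then access
 * registers relative to it over the DBI, e.g.:
 *
 *	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 *	u32 lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
 *
 * dw_pcie_link_set_max_speed() below follows exactly this pattern.
 */
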
static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
					    u8 cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	header = dw_pcie_readl_dbi(pci, pos);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(pci, pos);
	}

	return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

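/*
 * Usage sketch (added for illustration, not part of the original file):
 * these helpers back config-space accessors where the access size comes
 * from the PCI core, which is why the alignment check matters. A bridge
 * ->read() implementation would end in something like:
 *
 *	return dw_pcie_read(base + where, size, val);
 *
 * with "base", "where", "size" and "val" taken from the struct pci_ops
 * callback arguments; a misaligned (offset, size) pair is rejected with
 * PCIBIOS_BAD_REGISTER_NUMBER instead of being silently split up.
 */
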
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	int ret;
	u32 val;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
	if (ret)
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "Write DBI address failed\n");
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
	int ret;
	u32 val;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

	ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static inline u32 dw_pcie_enable_ecrc(u32 val)
{
	/*
	 * DesignWare core version 4.90A has a design issue where the 'TD'
	 * bit in the Control register-1 of the ATU outbound region acts
	 * like an override for the ECRC setting, i.e., the presence of TLP
	 * Digest (ECRC) in the outgoing TLPs is solely determined by this
	 * bit. This is contrary to the PCIe spec which says that the
	 * enablement of the ECRC is solely determined by the AER
	 * registers.
	 *
	 * Because of this, even when the ECRC is enabled through AER
	 * registers, the transactions going through ATU won't have TLP
	 * Digest as there is no way the PCI core AER code could program
	 * the TD bit which is specific to the DesignWare core.
	 *
	 * The best way to handle this scenario is to program the TD bit
	 * always. It affects only the traffic from root port to downstream
	 * devices.
	 *
	 * At this point,
	 * When ECRC is enabled in AER registers, everything works normally
	 * When ECRC is NOT enabled in AER registers, then,
	 * on Root Port:- TLP Digest (DWord size) gets appended to each packet
	 *                even though it is not required. Since downstream
	 *                TLPs are mostly for configuration accesses and BAR
	 *                accesses, they are not in critical path and won't
	 *                have much negative effect on the performance.
	 * on End Point:- TLP Digest is received for some/all the packets coming
	 *                from the root port. TLP Digest is ignored because,
	 *                as per the PCIe Spec r5.0 v1.0 section 2.2.3
	 *                "TLP Digest Rules", when an endpoint receives TLP
	 *                Digest when its ECRC check functionality is disabled
	 *                in AER registers, received TLP Digest is just ignored.
	 * Since there is no issue or error reported either side, best way to
	 * handle the scenario is to program TD bit by default.
	 */

	return val | PCIE_ATU_TD;
}

static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u64 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	val = type | PCIE_ATU_FUNC_NUM(func_no);
	val = upper_32_bits(size - 1) ?
		val | PCIE_ATU_INCREASE_REGION_SIZE : val;
	if (pci->version == 0x490A)
		val = dw_pcie_enable_ecrc(val);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
					int index, int type, u64 cpu_addr,
					u64 pci_addr, u64 size)
{
	u32 retries, val;

	if (pci->ops && pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
						 cpu_addr, pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	if (pci->version >= 0x460A)
		dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
				   upper_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	val = type | PCIE_ATU_FUNC_NUM(func_no);
	val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
		val | PCIE_ATU_INCREASE_REGION_SIZE : val;
	if (pci->version == 0x490A)
		val = dw_pcie_enable_ecrc(val);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
			       u64 cpu_addr, u64 pci_addr, u64 size)
{
	__dw_pcie_prog_outbound_atu(pci, 0, index, type,
				    cpu_addr, pci_addr, size);
}

void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				  int type, u64 cpu_addr, u64 pci_addr,
				  u64 size)
{
	__dw_pcie_prog_outbound_atu(pci, func_no, index, type,
				    cpu_addr, pci_addr, size);
}

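/*
 * Usage sketch (added for illustration, not part of the original file):
 * the host side uses an outbound window to reach a child device's
 * config space, roughly as in pcie-designware-host.c:
 *
 *	busdev = PCIE_ATU_BUS(bus->number) |
 *		 PCIE_ATU_DEV(PCI_SLOT(devfn)) |
 *		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
 *	dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_CFG0,
 *				  pp->cfg0_base, busdev, pp->cfg0_size);
 *
 * after which MMIO accesses to the window at pp->va_cfg0_base are
 * translated into config TLPs targeting busdev.
 */
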
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	dw_pcie_writel_atu(pci, offset + reg, val);
}

static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					   int index, int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
				 PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_FUNC_NUM_MATCH_EN |
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
			     int bar, u64 cpu_addr,
			     enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_FUNC_NUM_MATCH_EN |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

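/*
 * Usage sketch (added for illustration, not part of the original file):
 * the endpoint core (pcie-designware-ep.c) programs an inbound
 * BAR-match window when a BAR is set up, roughly:
 *
 *	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar,
 *				       epf_bar->phys_addr, as_type);
 *
 * so host accesses that hit the BAR are translated to the local
 * phys_addr backing the endpoint function's buffer.
 */
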
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
			 enum dw_pcie_region_type type)
{
	int region;

	switch (type) {
	case DW_PCIE_REGION_INBOUND:
		region = PCIE_ATU_REGION_INBOUND;
		break;
	case DW_PCIE_REGION_OUTBOUND:
		region = PCIE_ATU_REGION_OUTBOUND;
		break;
	default:
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci)) {
			dev_info(pci->dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_info(pci->dev, "Phy link never came up\n");

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

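/*
 * Usage sketch (added for illustration, not part of the original file):
 * glue drivers typically start link training via their dw_pcie_ops
 * ->start_link() hook and then poll with this helper:
 *
 *	if (pci->ops && pci->ops->start_link)
 *		pci->ops->start_link(pci);
 *	if (dw_pcie_wait_for_link(pci))
 *		dev_warn(pci->dev, "link may still come up later\n");
 */
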
int dw_pcie_link_up(struct dw_pcie *pci)
{
	u32 val;

	if (pci->ops && pci->ops->link_up)
		return pci->ops->link_up(pci);

	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
EXPORT_SYMBOL_GPL(dw_pcie_link_up);

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
	val |= PORT_MLTI_UPCFG_SUPPORT;
	dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
	u32 cap, ctrl2, link_speed;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[link_gen]) {
	case PCIE_SPEED_2_5GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

	cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

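/*
 * Note (added for clarity, not part of the original file): controllers
 * with the "unrolled" iATU layout do not implement the viewport
 * register at all, so the PCIE_ATU_VIEWPORT read above comes back as
 * all ones; that is what the 0xffffffff check keys on.
 */
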
static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
{
	int max_region, i, ob = 0, ib = 0;
	u32 val;

	max_region = min((int)pci->atu_size / 512, 256);

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
					 0x11110000);

		val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
		if (val == 0x11110000)
			ob++;
		else
			break;
	}

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
					 0x11110000);

		val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
		if (val == 0x11110000)
			ib++;
		else
			break;
	}

	pci->num_ib_windows = ib;
	pci->num_ob_windows = ob;
}

static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
	int max_region, i, ob = 0, ib = 0;
	u32 val;

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
	max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
		if (val == 0x11110000)
			ob++;
		else
			break;
	}

	for (i = 0; i < max_region; i++) {
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
		dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
		if (val == 0x11110000)
			ib++;
		else
			break;
	}

	pci->num_ib_windows = ib;
	pci->num_ob_windows = ob;
}

void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);

	if (pci->version >= 0x480A || (!pci->version &&
				       dw_pcie_iatu_unroll_enabled(pci))) {
		pci->iatu_unroll_enabled = true;
		if (!pci->atu_base) {
			struct resource *res =
				platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
			if (res) {
				pci->atu_size = resource_size(res);
				pci->atu_base = devm_ioremap_resource(dev, res);
			}
			if (!pci->atu_base || IS_ERR(pci->atu_base))
				pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
		}

		if (!pci->atu_size)
			/* Pick a minimal default, enough for 8 in and 8 out windows */
			pci->atu_size = SZ_4K;

		dw_pcie_iatu_detect_regions_unroll(pci);
	} else {
		dw_pcie_iatu_detect_regions(pci);
	}

	dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
		"enabled" : "disabled");

	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound\n",
		 pci->num_ob_windows, pci->num_ib_windows);
}

void dw_pcie_setup(struct dw_pcie *pci)
{
	u32 val;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;

	if (pci->link_gen > 0)
		dw_pcie_link_set_max_speed(pci, pci->link_gen);

	/* Configure Gen1 N_FTS */
	if (pci->n_fts[0]) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
		val |= PORT_AFR_N_FTS(pci->n_fts[0]);
		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
	}

	/* Configure Gen2+ N_FTS */
	if (pci->n_fts[1]) {
		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		val &= ~PORT_LOGIC_N_FTS_MASK;
		val |= pci->n_fts[pci->link_gen - 1];
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
	}

	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val |= PORT_LINK_DLL_LINK_EN;
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	of_property_read_u32(np, "num-lanes", &pci->num_lanes);
	if (!pci->num_lanes) {
		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
		return;
	}

	/* Set the number of lanes */
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val &= ~PORT_LINK_MODE_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pci->num_lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}
}