cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qcom-ebi2.c (10768B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm External Bus Interface 2 (EBI2) driver
 * an older version of the Qualcomm Parallel Interface Controller (QPIC)
 *
 * Copyright (C) 2016 Linaro Ltd.
 *
 * Author: Linus Walleij <linus.walleij@linaro.org>
 *
 * See the device tree bindings for this block for more details on the
 * hardware.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>

/*
 * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
 */
#define EBI2_CS0_ENABLE_MASK (BIT(0) | BIT(1))
#define EBI2_CS1_ENABLE_MASK (BIT(2) | BIT(3))
#define EBI2_CS2_ENABLE_MASK BIT(4)
#define EBI2_CS3_ENABLE_MASK BIT(5)
#define EBI2_CS4_ENABLE_MASK (BIT(6) | BIT(7))
#define EBI2_CS5_ENABLE_MASK (BIT(8) | BIT(9))
#define EBI2_CSN_MASK GENMASK(9, 0)

#define EBI2_XMEM_CFG 0x0000 /* Power management etc */

/*
 * SLOW CSn CFG
 *
 * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
 *             memory continues to drive the data bus after OE is de-asserted.
 *             Inserted when reading one CS and switching to another CS or read
 *             followed by write on the same CS. Valid values 0 thru 15.
 * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
 *             every write minimum 1. The data out is driven from the time WE is
 *             asserted until CS is asserted. With a hold of 1, the CS stays
 *             active for 1 extra cycle etc. Valid values 0 thru 15.
 * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
 *             write to a page or burst memory
 * Bits 15-8:  RD_DELTA initial latency for read cycles inserted for the first
 *             read to a page or burst memory
 * Bits 7-4:   WR_WAIT number of wait cycles for every write access, 0=1 cycle
 *             so 1 thru 16 cycles.
 * Bits 3-0:   RD_WAIT number of wait cycles for every read access, 0=1 cycle
 *             so 1 thru 16 cycles.
 */
#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
#define EBI2_XMEM_CS5_SLOW_CFG 0x001C

#define EBI2_XMEM_RECOVERY_SHIFT	28
#define EBI2_XMEM_WR_HOLD_SHIFT		24
#define EBI2_XMEM_WR_DELTA_SHIFT	16
#define EBI2_XMEM_RD_DELTA_SHIFT	8
#define EBI2_XMEM_WR_WAIT_SHIFT		4
#define EBI2_XMEM_RD_WAIT_SHIFT		0

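/*
 * A minimal illustration, not part of the original driver: assuming
 * hypothetical field values of RECOVERY = 2, WR_HOLD = 1 and
 * WR_WAIT = RD_WAIT = 3, a SLOW CFG word would be composed from the
 * shifts above as below (the symbol name exists only for the example).
 */
#if 0	/* illustrative sketch only, never compiled */
static const u32 example_slowcfg =
	(2 << EBI2_XMEM_RECOVERY_SHIFT) |	/* RECOVERY field = 2 */
	(1 << EBI2_XMEM_WR_HOLD_SHIFT) |	/* WR_HOLD field = 1 */
	(3 << EBI2_XMEM_WR_WAIT_SHIFT) |	/* WR_WAIT field = 3 */
	(3 << EBI2_XMEM_RD_WAIT_SHIFT);		/* RD_WAIT field = 3 */
	/* == 0x21000033 */
#endif
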
/*
 * FAST CSn CFG
 * Bits 31-28: ?
 * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
 *             transfer. For a single read transfer this will be the time
 *             from CS assertion to OE assertion.
 * Bits 23-18: ?
 * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
 *             assertion, with respect to the cycle where ADV is asserted.
 *             2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
 * Bit 5:      ADDR_HOLD_ENA, the address is held for an extra cycle to meet
 *             hold time requirements with ADV assertion.
 *
 * The manual mentions "write precharge cycles" and "precharge cycles".
 * We have not been able to figure out which bit fields these correspond to
 * in the hardware, or what valid values exist. The current hypothesis is that
 * this is something just used on the FAST chip selects. There is also a "byte
 * device enable" flag somewhere for 8bit memories.
 */
#define EBI2_XMEM_CS0_FAST_CFG 0x0028
#define EBI2_XMEM_CS1_FAST_CFG 0x002C
#define EBI2_XMEM_CS2_FAST_CFG 0x0030
#define EBI2_XMEM_CS3_FAST_CFG 0x0034
#define EBI2_XMEM_CS4_FAST_CFG 0x0038
#define EBI2_XMEM_CS5_FAST_CFG 0x003C

#define EBI2_XMEM_RD_HOLD_SHIFT		24
#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT	16
#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT	5

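/*
 * Likewise an illustrative sketch, not part of the original driver: a
 * hypothetical FAST CFG word with RD_HOLD = 2, ADV_OE_RECOVERY = 1 and
 * the address hold flag set would be built as below.
 */
#if 0	/* illustrative sketch only, never compiled */
static const u32 example_fastcfg =
	(2 << EBI2_XMEM_RD_HOLD_SHIFT) |		/* RD_HOLD field = 2 */
	(1 << EBI2_XMEM_ADV_OE_RECOVERY_SHIFT) |	/* ADV_OE_RECOVERY = 1 */
	BIT(EBI2_XMEM_ADDR_HOLD_ENA_SHIFT);		/* ADDR_HOLD_ENA set */
	/* == 0x02010020 */
#endif
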
/**
 * struct cs_data - struct with info on a chipselect setting
 * @enable_mask: mask to enable the chipselect in the EBI2 config
 * @slow_cfg: offset to XMEMC slow CS config
 * @fast_cfg: offset to XMEMC fast CS config
 */
struct cs_data {
	u32 enable_mask;
	u16 slow_cfg;
	u16 fast_cfg;
};

static const struct cs_data cs_info[] = {
	{
		/* CS0 */
		.enable_mask = EBI2_CS0_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
	},
	{
		/* CS1 */
		.enable_mask = EBI2_CS1_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
	},
	{
		/* CS2 */
		.enable_mask = EBI2_CS2_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
	},
	{
		/* CS3 */
		.enable_mask = EBI2_CS3_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
	},
	{
		/* CS4 */
		.enable_mask = EBI2_CS4_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
	},
	{
		/* CS5 */
		.enable_mask = EBI2_CS5_ENABLE_MASK,
		.slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
		.fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
	},
};

/**
 * struct ebi2_xmem_prop - describes an XMEM config property
 * @prop: the device tree binding name
 * @max: maximum value for the property
 * @slowreg: true if this property is in the SLOW CS config register
 * else it is assumed to be in the FAST config register
 * @shift: the bit field start in the SLOW or FAST register for this
 * property
 */
struct ebi2_xmem_prop {
	const char *prop;
	u32 max;
	bool slowreg;
	u16 shift;
};

static const struct ebi2_xmem_prop xmem_props[] = {
	{
		.prop = "qcom,xmem-recovery-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-hold-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_HOLD_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-delta-cycles",
		.max = 255,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_DELTA_SHIFT,
	},
	{
		.prop = "qcom,xmem-write-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_WR_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-wait-cycles",
		.max = 15,
		.slowreg = true,
		.shift = EBI2_XMEM_RD_WAIT_SHIFT,
	},
	{
		.prop = "qcom,xmem-address-hold-enable",
		.max = 1, /* boolean prop */
		.slowreg = false,
		.shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
	},
	{
		.prop = "qcom,xmem-adv-to-oe-recovery-cycles",
		.max = 3,
		.slowreg = false,
		.shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
	},
	{
		.prop = "qcom,xmem-read-hold-cycles",
		.max = 15,
		.slowreg = false,
		.shift = EBI2_XMEM_RD_HOLD_SHIFT,
	},
};

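/*
 * For orientation, a hedged sketch of how these properties appear in a
 * child node of the EBI2. The node name, chipselect number and timing
 * values are made up for the example; see the device tree bindings for
 * the authoritative format:
 *
 *	ethernet@4 {
 *		reg = <4>;
 *		qcom,xmem-recovery-cycles = <0>;
 *		qcom,xmem-write-hold-cycles = <3>;
 *		qcom,xmem-write-wait-cycles = <9>;
 *		qcom,xmem-read-wait-cycles = <9>;
 *		qcom,xmem-address-hold-enable = <1>;
 *	};
 *
 * The "reg" value selects the chipselect (0-5) and each listed property
 * programs the matching bit field in the SLOW or FAST CFG register.
 */
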
static void qcom_ebi2_setup_chipselect(struct device_node *np,
				       struct device *dev,
				       void __iomem *ebi2_base,
				       void __iomem *ebi2_xmem,
				       u32 csindex)
{
	const struct cs_data *csd;
	u32 slowcfg, fastcfg;
	u32 val;
	int ret;
	int i;

	csd = &cs_info[csindex];
	val = readl(ebi2_base);
	val |= csd->enable_mask;
	writel(val, ebi2_base);
	dev_dbg(dev, "enabled CS%u\n", csindex);

	/* Next set up the XMEMC */
	slowcfg = 0;
	fastcfg = 0;

	for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
		const struct ebi2_xmem_prop *xp = &xmem_props[i];

		/* All are regular u32 values */
		ret = of_property_read_u32(np, xp->prop, &val);
		if (ret) {
			dev_dbg(dev, "could not read %s for CS%u\n",
				xp->prop, csindex);
			continue;
		}

		/* First check boolean props */
		if (xp->max == 1 && val) {
			if (xp->slowreg)
				slowcfg |= BIT(xp->shift);
			else
				fastcfg |= BIT(xp->shift);
			dev_dbg(dev, "set %s flag\n", xp->prop);
			continue;
		}

		/* We're dealing with an u32 */
		if (val > xp->max) {
			dev_err(dev,
				"too high value for %s: %u, capped at %u\n",
				xp->prop, val, xp->max);
			val = xp->max;
		}
		if (xp->slowreg)
			slowcfg |= (val << xp->shift);
		else
			fastcfg |= (val << xp->shift);
		dev_dbg(dev, "set %s to %u\n", xp->prop, val);
	}

	dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
		 csindex, slowcfg, fastcfg);

	if (slowcfg)
		writel(slowcfg, ebi2_xmem + csd->slow_cfg);
	if (fastcfg)
		writel(fastcfg, ebi2_xmem + csd->fast_cfg);
}

static int qcom_ebi2_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *child;
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *ebi2_base;
	void __iomem *ebi2_xmem;
	struct clk *ebi2xclk;
	struct clk *ebi2clk;
	bool have_children = false;
	u32 val;
	int ret;

	ebi2xclk = devm_clk_get(dev, "ebi2x");
	if (IS_ERR(ebi2xclk))
		return PTR_ERR(ebi2xclk);

	ret = clk_prepare_enable(ebi2xclk);
	if (ret) {
		dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
		return ret;
	}

	ebi2clk = devm_clk_get(dev, "ebi2");
	if (IS_ERR(ebi2clk)) {
		ret = PTR_ERR(ebi2clk);
		goto err_disable_2x_clk;
	}

	ret = clk_prepare_enable(ebi2clk);
	if (ret) {
		dev_err(dev, "could not enable EBI2 clk\n");
		goto err_disable_2x_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ebi2_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_base)) {
		ret = PTR_ERR(ebi2_base);
		goto err_disable_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ebi2_xmem = devm_ioremap_resource(dev, res);
	if (IS_ERR(ebi2_xmem)) {
		ret = PTR_ERR(ebi2_xmem);
		goto err_disable_clk;
	}

	/* Allegedly this turns the power save mode off */
	writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);

	/* Disable all chipselects */
	val = readl(ebi2_base);
	val &= ~EBI2_CSN_MASK;
	writel(val, ebi2_base);

	/* Walk over the child nodes and see what chipselects we use */
	for_each_available_child_of_node(np, child) {
		u32 csindex;

		/* Figure out the chipselect */
		ret = of_property_read_u32(child, "reg", &csindex);
		if (ret) {
			of_node_put(child);
			goto err_disable_clk;
		}

		if (csindex > 5) {
			dev_err(dev,
				"invalid chipselect %u, we only support 0-5\n",
				csindex);
			continue;
		}

		qcom_ebi2_setup_chipselect(child,
					   dev,
					   ebi2_base,
					   ebi2_xmem,
					   csindex);

		/* We have at least one child */
		have_children = true;
	}

	if (have_children)
		return of_platform_default_populate(np, NULL, dev);
	return 0;

err_disable_clk:
	clk_disable_unprepare(ebi2clk);
err_disable_2x_clk:
	clk_disable_unprepare(ebi2xclk);

	return ret;
}

static const struct of_device_id qcom_ebi2_of_match[] = {
	{ .compatible = "qcom,msm8660-ebi2", },
	{ .compatible = "qcom,apq8060-ebi2", },
	{ }
};

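/*
 * A hedged sketch of the parent node the driver expects; register
 * addresses, sizes and clock phandles are placeholders, not taken from a
 * real board file. It needs one of the compatibles above, two register
 * windows (the chipselect enable register first, then the XMEM
 * configuration block) and the "ebi2x" and "ebi2" clocks:
 *
 *	external-bus@1a100000 {
 *		compatible = "qcom,msm8660-ebi2";
 *		reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
 *		clocks = <&ebi2x_clk>, <&ebi2_clk>;
 *		clock-names = "ebi2x", "ebi2";
 *		...child nodes, one per chipselect in use...
 *	};
 */
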
static struct platform_driver qcom_ebi2_driver = {
	.probe = qcom_ebi2_probe,
	.driver = {
		.name = "qcom-ebi2",
		.of_match_table = qcom_ebi2_of_match,
	},
};
module_platform_driver(qcom_ebi2_driver);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Qualcomm EBI2 driver");
MODULE_LICENSE("GPL");