cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sram.c (11719B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic on-chip SRAM allocation driver
 *
 * Copyright (C) 2012 Philipp Zabel, Pengutronix
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>

#include "sram.h"

#define SRAM_GRANULARITY	32

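/*
 * sysfs accessors for exported partitions: reads and writes go through
 * the MMIO-safe memcpy_fromio()/memcpy_toio() helpers and are
 * serialized on the per-partition lock.
 */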
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *attr,
			 char *buf, loff_t pos, size_t count)
{
	struct sram_partition *part;

	part = container_of(attr, struct sram_partition, battr);

	mutex_lock(&part->lock);
	memcpy_fromio(buf, part->base + pos, count);
	mutex_unlock(&part->lock);

	return count;
}

static ssize_t sram_write(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *attr,
			  char *buf, loff_t pos, size_t count)
{
	struct sram_partition *part;

	part = container_of(attr, struct sram_partition, battr);

	mutex_lock(&part->lock);
	memcpy_toio(part->base + pos, buf, count);
	mutex_unlock(&part->lock);

	return count;
}

static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
			 phys_addr_t start, struct sram_partition *part)
{
	int ret;

	part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
					  NUMA_NO_NODE, block->label);
	if (IS_ERR(part->pool))
		return PTR_ERR(part->pool);

	ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
				block->size, NUMA_NO_NODE);
	if (ret < 0) {
		dev_err(sram->dev, "failed to register subpool: %d\n", ret);
		return ret;
	}

	return 0;
}
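
/*
 * Illustrative consumer sketch (not part of this file): a client driver
 * can look up a labeled pool through an "sram" phandle property and
 * carve allocations out of it via genalloc. The property name and
 * lengths here are hypothetical.
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *	unsigned long vaddr = gen_pool_alloc(pool, len);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 *	...
 *	gen_pool_free(pool, vaddr, len);
 */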

static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
			   phys_addr_t start, struct sram_partition *part)
{
	sysfs_bin_attr_init(&part->battr);
	part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
					       "%llx.sram",
					       (unsigned long long)start);
	if (!part->battr.attr.name)
		return -ENOMEM;

	part->battr.attr.mode = S_IRUSR | S_IWUSR;
	part->battr.read = sram_read;
	part->battr.write = sram_write;
	part->battr.size = block->size;

	return device_create_bin_file(sram->dev, &part->battr);
}
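
/*
 * The exported partition shows up as a root-only (0600) binary file
 * named "<physaddr>.sram" in the device's sysfs directory, sized to
 * the reserved block.
 */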

static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
			      phys_addr_t start)
{
	int ret;
	struct sram_partition *part = &sram->partition[sram->partitions];

	mutex_init(&part->lock);

	if (sram->config && sram->config->map_only_reserved) {
		void __iomem *virt_base;

		if (sram->no_memory_wc)
			virt_base = devm_ioremap_resource(sram->dev, &block->res);
		else
			virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);

		if (IS_ERR(virt_base)) {
			dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
			return PTR_ERR(virt_base);
		}

		part->base = virt_base;
	} else {
		part->base = sram->virt_base + block->start;
	}

	if (block->pool) {
		ret = sram_add_pool(sram, block, start, part);
		if (ret)
			return ret;
	}
	if (block->export) {
		ret = sram_add_export(sram, block, start, part);
		if (ret)
			return ret;
	}
	if (block->protect_exec) {
		ret = sram_check_protect_exec(sram, block, part);
		if (ret)
			return ret;

		ret = sram_add_pool(sram, block, start, part);
		if (ret)
			return ret;

		sram_add_protect_exec(part);
	}

	sram->partitions++;

	return 0;
}

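/*
 * Tear down partitions in reverse creation order; warn if a subpool
 * still has outstanding allocations (gen_pool_avail() below
 * gen_pool_size() means something was never freed).
 */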
static void sram_free_partitions(struct sram_dev *sram)
{
	struct sram_partition *part;

	if (!sram->partitions)
		return;

	part = &sram->partition[sram->partitions - 1];
	for (; sram->partitions; sram->partitions--, part--) {
		if (part->battr.size)
			device_remove_bin_file(sram->dev, &part->battr);

		if (part->pool &&
		    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
			dev_err(sram->dev, "removed pool while SRAM allocated\n");
	}
}

static int sram_reserve_cmp(void *priv, const struct list_head *a,
					const struct list_head *b)
{
	struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);

	return ra->start - rb->start;
}

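/*
 * Parse the reserved child nodes from the device tree, sort them by
 * start offset, and register the gaps between them with the device's
 * main genalloc pool in a single pass.
 */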
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
	struct device_node *np = sram->dev->of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks, exports = 0;
	const char *label;
	int ret = 0;

	INIT_LIST_HEAD(&reserve_list);

	size = resource_size(res);

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks)
		return -ENOMEM;

	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(sram->dev,
				"could not get address for node %pOF\n",
				child);
			goto err_chunks;
		}

		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(sram->dev,
				"reserved block %pOF outside the sram area\n",
				child);
			ret = -EINVAL;
			goto err_chunks;
		}

		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		block->res = child_res;
		list_add_tail(&block->list, &reserve_list);

		if (of_find_property(child, "export", NULL))
			block->export = true;

		if (of_find_property(child, "pool", NULL))
			block->pool = true;

		if (of_find_property(child, "protect-exec", NULL))
			block->protect_exec = true;

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			exports++;

			label = NULL;
			ret = of_property_read_string(child, "label", &label);
			if (ret && ret != -EINVAL) {
				dev_err(sram->dev,
					"%pOF has invalid label name\n",
					child);
				goto err_chunks;
			}
			if (!label)
				label = child->name;

			block->label = devm_kstrdup(sram->dev,
						    label, GFP_KERNEL);
			if (!block->label) {
				ret = -ENOMEM;
				goto err_chunks;
			}

			dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
				block->export ? "exported " : "", block->label,
				block->start, block->start + block->size);
		} else {
			dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
				block->start, block->start + block->size);
		}

		block++;
	}
	child = NULL;

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	list_sort(NULL, &reserve_list, sram_reserve_cmp);

	if (exports) {
		sram->partition = devm_kcalloc(sram->dev,
				       exports, sizeof(*sram->partition),
				       GFP_KERNEL);
		if (!sram->partition) {
			ret = -ENOMEM;
			goto err_chunks;
		}
	}

	cur_start = 0;
	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(sram->dev,
				"block at 0x%x starts before current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			sram_free_partitions(sram);
			goto err_chunks;
		}

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			ret = sram_add_partition(sram, block,
						 res->start + block->start);
			if (ret) {
				sram_free_partitions(sram);
				goto err_chunks;
			}
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		if (sram->pool) {
			dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
				cur_start, cur_start + cur_size);

			ret = gen_pool_add_virt(sram->pool,
					(unsigned long)sram->virt_base + cur_start,
					res->start + cur_start, cur_size, NUMA_NO_NODE);
			if (ret < 0) {
				sram_free_partitions(sram);
				goto err_chunks;
			}
		}

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

err_chunks:
	of_node_put(child);
	kfree(rblocks);

	return ret;
}
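
/*
 * Illustrative device-tree sketch (hypothetical addresses and labels,
 * not from this repository), matching the properties parsed above:
 *
 *	sram: sram@20000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x20000000 0x20000>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges = <0 0x20000000 0x20000>;
 *
 *		reserved@1000 {
 *			reg = <0x1000 0x1000>;
 *			pool;
 *			export;
 *			label = "fancy-sram";
 *		};
 *	};
 */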

static int atmel_securam_wait(void)
{
	struct regmap *regmap;
	u32 val;

	regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
	if (IS_ERR(regmap))
		return -ENODEV;

	return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
					val & AT91_SECUMOD_RAMRDY_READY,
					10000, 500000);
}

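/*
 * Optional per-compatible init hook: for SAMA5D2 SECURAM, poll the
 * security module's RAMRDY flag (up to 500 ms) before declaring the
 * SRAM usable.
 */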
static const struct sram_config atmel_securam_config = {
	.init = atmel_securam_wait,
};

/*
 * SYSRAM contains areas that are not accessible by the
 * kernel, such as the first 256K that is reserved for TZ.
 * Accesses to those areas (including speculative accesses)
 * trigger SErrors. As such we must map only the areas of
 * SYSRAM specified in the device tree.
 */
static const struct sram_config tegra_sysram_config = {
	.map_only_reserved = true,
};

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
	{ .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
	{ .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
	{ .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
	{}
};

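/*
 * probe: the matched entry's .data (if any) supplies the sram_config,
 * so SoC-specific behavior such as map_only_reserved or an init hook
 * is selected by the compatible string alone.
 */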
static int sram_probe(struct platform_device *pdev)
{
	const struct sram_config *config;
	struct sram_dev *sram;
	int ret;
	struct resource *res;

	config = of_device_get_match_data(&pdev->dev);

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->dev = &pdev->dev;
	sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc");
	sram->config = config;

	if (!config || !config->map_only_reserved) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (sram->no_memory_wc)
			sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
		else
			sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
		if (IS_ERR(sram->virt_base)) {
			dev_err(&pdev->dev, "could not map SRAM registers\n");
			return PTR_ERR(sram->virt_base);
		}

		sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
						  NUMA_NO_NODE, NULL);
		if (IS_ERR(sram->pool))
			return PTR_ERR(sram->pool);
	}

	sram->clk = devm_clk_get(sram->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	ret = sram_reserve_regions(sram,
			platform_get_resource(pdev, IORESOURCE_MEM, 0));
	if (ret)
		goto err_disable_clk;

	platform_set_drvdata(pdev, sram);

	if (config && config->init) {
		ret = config->init();
		if (ret)
			goto err_free_partitions;
	}

	if (sram->pool)
		dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
			gen_pool_size(sram->pool) / 1024, sram->virt_base);

	return 0;

err_free_partitions:
	sram_free_partitions(sram);
err_disable_clk:
	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return ret;
}

static int sram_remove(struct platform_device *pdev)
{
	struct sram_dev *sram = platform_get_drvdata(pdev);

	sram_free_partitions(sram);

	if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		dev_err(sram->dev, "removed while SRAM allocated\n");

	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return 0;
}

static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = sram_dt_ids,
	},
	.probe = sram_probe,
	.remove = sram_remove,
};

static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

postcore_initcall(sram_init);
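
/*
 * Registered at postcore_initcall level rather than module_init so the
 * SRAM pools exist before most other drivers probe and try to allocate
 * from them.
 */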