cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drmem.c (11792B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/drmem.h>

static int n_root_addr_cells, n_root_size_cells;

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
static bool in_drmem_update;

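/* Return the first address past the end of the last (highest) LMB. */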
u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

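/*
 * Allocate a copy of @prop with the same name and a zeroed value
 * buffer of @prop_sz bytes; returns NULL on allocation failure.
 */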
static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

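/*
 * Rebuild the ibm,dynamic-memory (v1) property from the in-kernel LMB
 * array: the LMB count followed by one of_drconf_cell_v1 entry per LMB.
 */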
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

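/*
 * Rebuild the ibm,dynamic-memory-v2 property from the in-kernel LMB
 * array. Consecutive LMBs that share the same aa_index and flags are
 * folded into a single of_drconf_cell_v2 set.
 */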
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

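/*
 * Regenerate the dynamic memory property (v1 or v2, whichever the
 * device tree carries) under /ibm,dynamic-reconfiguration-memory
 * from the current LMB tree.
 */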
int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	/*
	 * Set in_drmem_update to prevent the notifier callback from
	 * processing the DT property again, since the change originates
	 * from the LMB tree.
	 */
	in_drmem_update = true;
	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}
	in_drmem_update = false;

	of_node_put(memory);
	return rc;
}

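/* Parse one ibm,dynamic-memory (v1) LMB cell, advancing *prop past it. */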
static void read_drconf_v1_cell(struct drmem_lmb *lmb,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

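/*
 * Walk an ibm,dynamic-memory (v1) property, invoking @func on a
 * temporary LMB for each entry; stops early if @func returns non-zero.
 */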
static int
__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;
	int ret = 0;

	n_lmbs = of_read_number(prop++, 1);
	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}

	return ret;
}

static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				       const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

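/*
 * Walk an ibm,dynamic-memory-v2 property, expanding each LMB set into
 * its seq_lmbs individual LMBs and invoking @func on each one.
 */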
static int
__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;
	int ret = 0;

	lmb_sets = of_read_number(prop++, 1);
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			ret = func(&lmb, &usm, data);
			if (ret)
				break;
		}
	}

	return ret;
}

#ifdef CONFIG_PPC_PSERIES
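/*
 * Early boot variant: walk the LMBs described by the flattened device
 * tree node, before the unflattened tree is available.
 */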
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
		int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}

/*
 * Update the LMB associativity index.
 */
static int update_lmb(struct drmem_lmb *updated_lmb,
		      __maybe_unused const __be32 **usm,
		      __maybe_unused void *data)
{
	struct drmem_lmb *lmb;

	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index != updated_lmb->drc_index)
			continue;

		lmb->aa_index = updated_lmb->aa_index;
		break;
	}
	return 0;
}

/*
 * Update the LMB associativity index.
 *
 * This needs to be called when the hypervisor is updating the
 * dynamic-reconfiguration-memory node property.
 */
void drmem_update_lmbs(struct property *prop)
{
	/*
	 * Don't update the LMBs if triggered by the update done in
	 * drmem_update_dt(); in that case the LMB values have already
	 * been used to update the DT property.
	 */
	if (in_drmem_update)
		return;
	if (!strcmp(prop->name, "ibm,dynamic-memory"))
		__walk_drmem_v1_lmbs(prop->value, NULL, NULL, update_lmb);
	else if (!strcmp(prop->name, "ibm,dynamic-memory-v2"))
		__walk_drmem_v2_lmbs(prop->value, NULL, NULL, update_lmb);
}
#endif

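/* Read ibm,lmb-size from @dn and cache it, if not already known. */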
static int init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

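/*
 * Walk the LMBs described by @dn using the unflattened device tree,
 * invoking @func on each one.
 */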
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}

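/* Allocate and fill the LMB array from an ibm,dynamic-memory (v1) property. */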
static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb)
		read_drconf_v1_cell(lmb, &prop);
}

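/*
 * Allocate and fill the LMB array from an ibm,dynamic-memory-v2
 * property: count the LMBs in a first pass, then expand each set.
 */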
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;
		}
	}
}

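/*
 * Late initcall: locate /ibm,dynamic-reconfiguration-memory and build
 * the in-kernel LMB array from whichever dynamic-memory property exists.
 */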
static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);