cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hotplug-memory.c (22016B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

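/*
 * Return the size of a hotpluggable memory block (LMB): the ibm,lmb-size
 * value from the dynamic-reconfiguration-memory node, or the size of the
 * memory node following memory@0 as a fallback.
 */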
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		int len;
		int size_cells;
		const __be32 *prop;

		size_cells = of_n_size_cells(np);

		prop = of_get_property(np, "ibm,lmb-size", &len);
		if (prop && len >= size_cells * sizeof(__be32))
			memblock_size = of_read_number(prop, size_cells);
		of_node_put(np);

	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));

	mem_block = find_memory_block(section_nr);
	return mem_block;
}

static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *prop;
	unsigned long base;
	unsigned long lmb_size;
	int ret = -EINVAL;
	int addr_cells, size_cells;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	prop = of_get_property(np, "reg", NULL);
	if (!prop)
		return ret;

	addr_cells = of_n_addr_cells(np);
	size_cells = of_n_size_cells(np);

	/*
	 * "reg" property represents (addr,size) tuple.
	 */
	base = of_read_number(prop, addr_cells);
	prop += addr_cells;
	lmb_size = of_read_number(prop, size_cells);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
		!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

static int dlpar_add_lmb(struct drmem_lmb *);

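/*
 * Offline and remove the memory backing a single LMB, drop it from
 * memblock, and clear its associativity index and ASSIGNED flag.
 */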
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	__remove_memory(lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_reserved = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_remove)
			break;
	}

	if (lmbs_reserved != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_debug("Failed to hot-remove memory at %llx\n",
			 lmb->base_addr);
	else
		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/*
	 * Validate that all LMBs in range are not reserved. Note that it
	 * is ok if they are !ASSIGNED since our goal here is to remove the
	 * LMB range, regardless of whether some LMBs were already removed
	 * by any other reason.
	 *
	 * This is a contrast to what is done in remove_by_count() where we
	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
	 * because we want to remove a fixed amount of LMBs in that function.
	 */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/*
		 * dlpar_remove_lmb() will error out if the LMB is already
		 * !ASSIGNED, but this case is a no-op for us.
		 */
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the LMB removal failed.
			 */
			dlpar_unisolate_drc(lmb->drc_index);

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

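/*
 * Add and online the memory backing a single LMB, marking it
 * DRCONF_MEM_ASSIGNED on success.
 */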
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_reserved = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			continue;

		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);
		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_add)
			break;
	}

	if (lmbs_reserved != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Fail immediately if the whole range can't be hot-added */
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

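/*
 * Entry point for memory DLPAR requests: dispatch hot-add and hot-remove
 * operations by count, drc-index or indexed-count, then update the
 * device tree on success.
 */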
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const __be32 *prop;
	unsigned long base;
	unsigned long lmb_size;
	int ret = -EINVAL;
	int addr_cells, size_cells;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	prop = of_get_property(np, "reg", NULL);
	if (!prop)
		return ret;

	addr_cells = of_n_addr_cells(np);
	size_cells = of_n_size_cells(np);
	/*
	 * "reg" property represents (addr,size) tuple.
	 */
	base = of_read_number(prop, addr_cells);
	prop += addr_cells;
	lmb_size = of_read_number(prop, size_cells);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->dn->name,
			    "ibm,dynamic-reconfiguration-memory"))
			drmem_update_lmbs(rd->prop);
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);