cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

numa.c (24954B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* Common code for 32 and 64-bit NUMA */
      3#include <linux/acpi.h>
      4#include <linux/kernel.h>
      5#include <linux/mm.h>
      6#include <linux/string.h>
      7#include <linux/init.h>
      8#include <linux/memblock.h>
      9#include <linux/mmzone.h>
     10#include <linux/ctype.h>
     11#include <linux/nodemask.h>
     12#include <linux/sched.h>
     13#include <linux/topology.h>
     14
     15#include <asm/e820/api.h>
     16#include <asm/proto.h>
     17#include <asm/dma.h>
     18#include <asm/amd_nb.h>
     19
     20#include "numa_internal.h"
     21
     22int numa_off;
     23nodemask_t numa_nodes_parsed __initdata;
     24
     25struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
     26EXPORT_SYMBOL(node_data);
     27
     28static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
     29static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;
     30
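       /*
        * The node distance table is a flat cnt x cnt matrix of u8 values,
        * indexed as numa_distance[from * numa_distance_cnt + to].  It is
        * allocated on demand by numa_alloc_distance() and freed again by
        * numa_reset_distance().
        */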
     31static int numa_distance_cnt;
     32static u8 *numa_distance;
     33
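       /*
        * Parse the early "numa=" kernel parameter: "off" turns NUMA off,
        * "fake=<spec>" hands the rest of the option to the NUMA emulation
        * code, and "noacpi"/"nohmat" disable SRAT/HMAT parsing respectively.
        */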
     34static __init int numa_setup(char *opt)
     35{
     36	if (!opt)
     37		return -EINVAL;
     38	if (!strncmp(opt, "off", 3))
     39		numa_off = 1;
     40	if (!strncmp(opt, "fake=", 5))
     41		return numa_emu_cmdline(opt + 5);
     42	if (!strncmp(opt, "noacpi", 6))
     43		disable_srat();
     44	if (!strncmp(opt, "nohmat", 6))
     45		disable_hmat();
     46	return 0;
     47}
     48early_param("numa", numa_setup);
     49
     50/*
     51 * apicid, cpu, node mappings
     52 */
     53s16 __apicid_to_node[MAX_LOCAL_APIC] = {
     54	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
     55};
     56
     57int numa_cpu_node(int cpu)
     58{
     59	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
     60
     61	if (apicid != BAD_APICID)
     62		return __apicid_to_node[apicid];
     63	return NUMA_NO_NODE;
     64}
     65
     66cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
     67EXPORT_SYMBOL(node_to_cpumask_map);
     68
     69/*
     70 * Map cpu index to node index
     71 */
     72DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
     73EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
     74
     75void numa_set_node(int cpu, int node)
     76{
     77	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
     78
     79	/* early setting, no percpu area yet */
     80	if (cpu_to_node_map) {
     81		cpu_to_node_map[cpu] = node;
     82		return;
     83	}
     84
     85#ifdef CONFIG_DEBUG_PER_CPU_MAPS
     86	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
     87		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
     88		dump_stack();
     89		return;
     90	}
     91#endif
     92	per_cpu(x86_cpu_to_node_map, cpu) = node;
     93
     94	set_cpu_numa_node(cpu, node);
     95}
     96
     97void numa_clear_node(int cpu)
     98{
     99	numa_set_node(cpu, NUMA_NO_NODE);
    100}
    101
    102/*
    103 * Allocate node_to_cpumask_map based on number of available nodes
    104 * Requires node_possible_map to be valid.
    105 *
    106 * Note: cpumask_of_node() is not valid until after this is done.
    107 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
    108 */
    109void __init setup_node_to_cpumask_map(void)
    110{
    111	unsigned int node;
    112
    113	/* setup nr_node_ids if not done yet */
    114	if (nr_node_ids == MAX_NUMNODES)
    115		setup_nr_node_ids();
    116
    117	/* allocate the map */
    118	for (node = 0; node < nr_node_ids; node++)
    119		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
    120
    121	/* cpumask_of_node() will now work */
    122	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
    123}
    124
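       /*
        * Append a [start, end) range for node @nid to @mi.  Zero-length ranges
        * are silently ignored, malformed ones are warned about and dropped, and
        * the add fails only when the NR_NODE_MEMBLKS-entry array is already
        * full.  Platform parsers (e.g. the ACPI SRAT and AMD northbridge code)
        * feed their affinity entries in through numa_add_memblk() below.
        */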
    125static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
    126				     struct numa_meminfo *mi)
    127{
    128	/* ignore zero length blks */
    129	if (start == end)
    130		return 0;
    131
    132	/* whine about and ignore invalid blks */
    133	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
    134		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
    135			nid, start, end - 1);
    136		return 0;
    137	}
    138
    139	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
    140		pr_err("too many memblk ranges\n");
    141		return -EINVAL;
    142	}
    143
    144	mi->blk[mi->nr_blks].start = start;
    145	mi->blk[mi->nr_blks].end = end;
    146	mi->blk[mi->nr_blks].nid = nid;
    147	mi->nr_blks++;
    148	return 0;
    149}
    150
    151/**
    152 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
    153 * @idx: Index of memblk to remove
    154 * @mi: numa_meminfo to remove memblk from
    155 *
    156 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
    157 * decrementing @mi->nr_blks.
    158 */
    159void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
    160{
    161	mi->nr_blks--;
    162	memmove(&mi->blk[idx], &mi->blk[idx + 1],
    163		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
    164}
    165
    166/**
    167 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
    168 * @dst: numa_meminfo to append block to
    169 * @idx: Index of memblk to remove
    170 * @src: numa_meminfo to remove memblk from
    171 */
    172static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
    173					 struct numa_meminfo *src)
    174{
    175	dst->blk[dst->nr_blks++] = src->blk[idx];
    176	numa_remove_memblk_from(idx, src);
    177}
    178
    179/**
    180 * numa_add_memblk - Add one numa_memblk to numa_meminfo
    181 * @nid: NUMA node ID of the new memblk
    182 * @start: Start address of the new memblk
    183 * @end: End address of the new memblk
    184 *
    185 * Add a new memblk to the default numa_meminfo.
    186 *
    187 * RETURNS:
    188 * 0 on success, -errno on failure.
    189 */
    190int __init numa_add_memblk(int nid, u64 start, u64 end)
    191{
    192	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
    193}
    194
    195/* Allocate NODE_DATA for a node on the local memory */
    196static void __init alloc_node_data(int nid)
    197{
    198	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
    199	u64 nd_pa;
    200	void *nd;
    201	int tnid;
    202
    203	/*
    204	 * Allocate node data.  Try node-local memory and then any node.
    205	 * Never allocate in DMA zone.
    206	 */
    207	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
    208	if (!nd_pa) {
    209		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
    210		       nd_size, nid);
    211		return;
    212	}
    213	nd = __va(nd_pa);
    214
    215	/* report and initialize */
    216	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
    217	       nd_pa, nd_pa + nd_size - 1);
    218	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
    219	if (tnid != nid)
    220		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
    221
    222	node_data[nid] = nd;
    223	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
    224
    225	node_set_online(nid);
    226}
    227
    228/**
    229 * numa_cleanup_meminfo - Cleanup a numa_meminfo
    230 * @mi: numa_meminfo to clean up
    231 *
    232 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
    233 * conflicts and clear unused memblks.
    234 *
    235 * RETURNS:
    236 * 0 on success, -errno on failure.
    237 */
    238int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
    239{
    240	const u64 low = 0;
    241	const u64 high = PFN_PHYS(max_pfn);
    242	int i, j, k;
    243
    244	/* first, trim all entries */
    245	for (i = 0; i < mi->nr_blks; i++) {
    246		struct numa_memblk *bi = &mi->blk[i];
    247
    248		/* move / save reserved memory ranges */
    249		if (!memblock_overlaps_region(&memblock.memory,
    250					bi->start, bi->end - bi->start)) {
    251			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
    252			continue;
    253		}
    254
    255		/* make sure all non-reserved blocks are inside the limits */
    256		bi->start = max(bi->start, low);
    257
    258		/* preserve info for non-RAM areas above 'max_pfn': */
    259		if (bi->end > high) {
    260			numa_add_memblk_to(bi->nid, high, bi->end,
    261					   &numa_reserved_meminfo);
    262			bi->end = high;
    263		}
    264
    265		/* and there's no empty block */
    266		if (bi->start >= bi->end)
    267			numa_remove_memblk_from(i--, mi);
    268	}
    269
    270	/* merge neighboring / overlapping entries */
    271	for (i = 0; i < mi->nr_blks; i++) {
    272		struct numa_memblk *bi = &mi->blk[i];
    273
    274		for (j = i + 1; j < mi->nr_blks; j++) {
    275			struct numa_memblk *bj = &mi->blk[j];
    276			u64 start, end;
    277
    278			/*
    279			 * See whether there are overlapping blocks.  Whine
    280			 * about but allow overlaps of the same nid.  They
    281			 * will be merged below.
    282			 */
    283			if (bi->end > bj->start && bi->start < bj->end) {
    284				if (bi->nid != bj->nid) {
    285					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
    286					       bi->nid, bi->start, bi->end - 1,
    287					       bj->nid, bj->start, bj->end - 1);
    288					return -EINVAL;
    289				}
    290				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
    291					bi->nid, bi->start, bi->end - 1,
    292					bj->start, bj->end - 1);
    293			}
    294
    295			/*
    296			 * Join together blocks on the same node, holes
    297			 * between which don't overlap with memory on other
    298			 * nodes.
    299			 */
    300			if (bi->nid != bj->nid)
    301				continue;
    302			start = min(bi->start, bj->start);
    303			end = max(bi->end, bj->end);
    304			for (k = 0; k < mi->nr_blks; k++) {
    305				struct numa_memblk *bk = &mi->blk[k];
    306
    307				if (bi->nid == bk->nid)
    308					continue;
    309				if (start < bk->end && end > bk->start)
    310					break;
    311			}
    312			if (k < mi->nr_blks)
    313				continue;
    314			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
    315			       bi->nid, bi->start, bi->end - 1, bj->start,
    316			       bj->end - 1, start, end - 1);
    317			bi->start = start;
    318			bi->end = end;
    319			numa_remove_memblk_from(j--, mi);
    320		}
    321	}
    322
    323	/* clear unused ones */
    324	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
    325		mi->blk[i].start = mi->blk[i].end = 0;
    326		mi->blk[i].nid = NUMA_NO_NODE;
    327	}
    328
    329	return 0;
    330}
    331
    332/*
    333 * Set nodes, which have memory in @mi, in *@nodemask.
    334 */
    335static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
    336					      const struct numa_meminfo *mi)
    337{
    338	int i;
    339
    340	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
    341		if (mi->blk[i].start != mi->blk[i].end &&
    342		    mi->blk[i].nid != NUMA_NO_NODE)
    343			node_set(mi->blk[i].nid, *nodemask);
    344}
    345
    346/**
    347 * numa_reset_distance - Reset NUMA distance table
    348 *
    349 * The current table is freed.  The next numa_set_distance() call will
    350 * create a new one.
    351 */
    352void __init numa_reset_distance(void)
    353{
    354	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
    355
    356	/* numa_distance could be 1LU marking allocation failure, test cnt */
    357	if (numa_distance_cnt)
    358		memblock_free(numa_distance, size);
    359	numa_distance_cnt = 0;
    360	numa_distance = NULL;	/* enable table creation */
    361}
    362
    363static int __init numa_alloc_distance(void)
    364{
    365	nodemask_t nodes_parsed;
    366	size_t size;
    367	int i, j, cnt = 0;
    368	u64 phys;
    369
    370	/* size the new table and allocate it */
    371	nodes_parsed = numa_nodes_parsed;
    372	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
    373
    374	for_each_node_mask(i, nodes_parsed)
    375		cnt = i;
    376	cnt++;
    377	size = cnt * cnt * sizeof(numa_distance[0]);
    378
    379	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
    380					 PFN_PHYS(max_pfn_mapped));
    381	if (!phys) {
    382		pr_warn("Warning: can't allocate distance table!\n");
    383		/* don't retry until explicitly reset */
    384		numa_distance = (void *)1LU;
    385		return -ENOMEM;
    386	}
    387
    388	numa_distance = __va(phys);
    389	numa_distance_cnt = cnt;
    390
    391	/* fill with the default distances */
    392	for (i = 0; i < cnt; i++)
    393		for (j = 0; j < cnt; j++)
    394			numa_distance[i * cnt + j] = i == j ?
    395				LOCAL_DISTANCE : REMOTE_DISTANCE;
    396	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
    397
    398	return 0;
    399}
    400
    401/**
    402 * numa_set_distance - Set NUMA distance from one NUMA node to another
    403 * @from: the 'from' node to set distance
    404 * @to: the 'to' node to set distance
    405 * @distance: NUMA distance
    406 *
    407 * Set the distance from node @from to @to to @distance.  If the distance
    408 * table doesn't exist, one large enough to accommodate all the currently
    409 * known nodes will be created.
    410 *
    411 * If such table cannot be allocated, a warning is printed and further
    412 * calls are ignored until the distance table is reset with
    413 * numa_reset_distance().
    414 *
    415 * If @from or @to is higher than the highest known node or lower than zero
    416 * at the time of table creation or @distance doesn't make sense, the call
    417 * is ignored.
    418 * This is to allow simplification of specific NUMA config implementations.
    419 */
    420void __init numa_set_distance(int from, int to, int distance)
    421{
    422	if (!numa_distance && numa_alloc_distance() < 0)
    423		return;
    424
    425	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
    426			from < 0 || to < 0) {
    427		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
    428			     from, to, distance);
    429		return;
    430	}
    431
    432	if ((u8)distance != distance ||
    433	    (from == to && distance != LOCAL_DISTANCE)) {
    434		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
    435			     from, to, distance);
    436		return;
    437	}
    438
    439	numa_distance[from * numa_distance_cnt + to] = distance;
    440}
    441
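       /*
        * Runtime distance lookup.  Out-of-range node ids (including the case
        * where the table was never allocated and numa_distance_cnt is still 0)
        * fall back to the default LOCAL_DISTANCE/REMOTE_DISTANCE values.
        */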
    442int __node_distance(int from, int to)
    443{
    444	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
    445		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
    446	return numa_distance[from * numa_distance_cnt + to];
    447}
    448EXPORT_SYMBOL(__node_distance);
    449
    450/*
    451 * Sanity check to catch more bad NUMA configurations (they are amazingly
    452 * common).  Make sure the nodes cover all memory.
    453 */
    454static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
    455{
    456	u64 numaram, e820ram;
    457	int i;
    458
    459	numaram = 0;
    460	for (i = 0; i < mi->nr_blks; i++) {
    461		u64 s = mi->blk[i].start >> PAGE_SHIFT;
    462		u64 e = mi->blk[i].end >> PAGE_SHIFT;
    463		numaram += e - s;
    464		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
    465		if ((s64)numaram < 0)
    466			numaram = 0;
    467	}
    468
    469	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
    470
    471	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
    472	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
    473		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
    474		       (numaram << PAGE_SHIFT) >> 20,
    475		       (e820ram << PAGE_SHIFT) >> 20);
    476		return false;
    477	}
    478	return true;
    479}
    480
    481/*
    482 * Mark all currently memblock-reserved physical memory (which covers the
    483 * kernel's own memory ranges) as hot-unpluggable.
    484 */
    485static void __init numa_clear_kernel_node_hotplug(void)
    486{
    487	nodemask_t reserved_nodemask = NODE_MASK_NONE;
    488	struct memblock_region *mb_region;
    489	int i;
    490
    491	/*
    492	 * We have to do some preprocessing of memblock regions, to
    493	 * make them suitable for reservation.
    494	 *
    495	 * At this time, all memory regions reserved by memblock are
    496	 * used by the kernel, but those regions are not split up
    497	 * along node boundaries yet, and don't necessarily have their
    498	 * node ID set yet either.
    499	 *
    500	 * So iterate over all memory known to the x86 architecture,
    501	 * and use those ranges to set the nid in memblock.reserved.
    502	 * This will split up the memblock regions along node
    503	 * boundaries and will set the node IDs as well.
    504	 */
    505	for (i = 0; i < numa_meminfo.nr_blks; i++) {
    506		struct numa_memblk *mb = numa_meminfo.blk + i;
    507		int ret;
    508
    509		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
    510		WARN_ON_ONCE(ret);
    511	}
    512
    513	/*
    514	 * Now go over all reserved memblock regions, to construct a
    515	 * node mask of all kernel reserved memory areas.
    516	 *
    517	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
    518	 *   numa_meminfo might not include all memblock.reserved
    519	 *   memory ranges, because quirks such as trim_snb_memory()
    520	 *   reserve specific pages for Sandy Bridge graphics. ]
    521	 */
    522	for_each_reserved_mem_region(mb_region) {
    523		int nid = memblock_get_region_node(mb_region);
    524
    525		if (nid != MAX_NUMNODES)
    526			node_set(nid, reserved_nodemask);
    527	}
    528
    529	/*
    530	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
    531	 * belonging to the reserved node mask.
    532	 *
    533	 * Note that this will include memory regions that reside
    534	 * on nodes that contain kernel memory - entire nodes
    535	 * become hot-unpluggable:
    536	 */
    537	for (i = 0; i < numa_meminfo.nr_blks; i++) {
    538		struct numa_memblk *mb = numa_meminfo.blk + i;
    539
    540		if (!node_isset(mb->nid, reserved_nodemask))
    541			continue;
    542
    543		memblock_clear_hotplug(mb->start, mb->end - mb->start);
    544	}
    545}
    546
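       /*
        * Final registration step: derive node_possible_map from the parsed
        * nodes plus @mi, push the per-range node ids into memblock.memory,
        * make nodes holding kernel memory non-hotpluggable, sanity-check
        * section granularity and e820 coverage, and allocate NODE_DATA() for
        * every node that has at least NODE_MIN_SIZE of memory.
        */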
    547static int __init numa_register_memblks(struct numa_meminfo *mi)
    548{
    549	int i, nid;
    550
    551	/* Account for nodes with cpus and no memory */
    552	node_possible_map = numa_nodes_parsed;
    553	numa_nodemask_from_meminfo(&node_possible_map, mi);
    554	if (WARN_ON(nodes_empty(node_possible_map)))
    555		return -EINVAL;
    556
    557	for (i = 0; i < mi->nr_blks; i++) {
    558		struct numa_memblk *mb = &mi->blk[i];
    559		memblock_set_node(mb->start, mb->end - mb->start,
    560				  &memblock.memory, mb->nid);
    561	}
    562
    563	/*
    564	 * At very early time, the kernel has to use some memory, such as for
    565	 * loading the kernel image. We cannot prevent this anyway. So any
    566	 * node the kernel resides in should be un-hotpluggable.
    567	 *
    568	 * And when we come here, allocating the node data won't fail.
    569	 */
    570	numa_clear_kernel_node_hotplug();
    571
    572	/*
    573	 * If sections array is gonna be used for pfn -> nid mapping, check
    574	 * If the sections array is going to be used for pfn -> nid mapping, check
    575	 */
    576	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
    577		unsigned long pfn_align = node_map_pfn_alignment();
    578
    579		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
    580			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
    581				PFN_PHYS(pfn_align) >> 20,
    582				PFN_PHYS(PAGES_PER_SECTION) >> 20);
    583			return -EINVAL;
    584		}
    585	}
    586	if (!numa_meminfo_cover_memory(mi))
    587		return -EINVAL;
    588
    589	/* Finally register nodes. */
    590	for_each_node_mask(nid, node_possible_map) {
    591		u64 start = PFN_PHYS(max_pfn);
    592		u64 end = 0;
    593
    594		for (i = 0; i < mi->nr_blks; i++) {
    595			if (nid != mi->blk[i].nid)
    596				continue;
    597			start = min(mi->blk[i].start, start);
    598			end = max(mi->blk[i].end, end);
    599		}
    600
    601		if (start >= end)
    602			continue;
    603
    604		/*
    605		 * Don't confuse VM with a node that doesn't have the
    606		 * minimum amount of memory:
    607		 */
    608		if (end && (end - start) < NODE_MIN_SIZE)
    609			continue;
    610
    611		alloc_node_data(nid);
    612	}
    613
    614	/* Dump memblock with node info and return. */
    615	memblock_dump_all();
    616	return 0;
    617}
    618
    619/*
    620 * There are unfortunately some poorly designed mainboards around that
    621 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
    622 * mapping. To avoid this, fill in the mapping for all possible CPUs,
    623 * as the number of CPUs is not known yet. We round robin the existing
    624 * nodes.
    625 */
    626static void __init numa_init_array(void)
    627{
    628	int rr, i;
    629
    630	rr = first_node(node_online_map);
    631	for (i = 0; i < nr_cpu_ids; i++) {
    632		if (early_cpu_to_node(i) != NUMA_NO_NODE)
    633			continue;
    634		numa_set_node(i, rr);
    635		rr = next_node_in(rr, node_online_map);
    636	}
    637}
    638
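       /*
        * Common driver for one NUMA detection method: reset all apicid/node
        * and memblock node state, run @init_func (ACPI, AMD or dummy), sanitize
        * the resulting numa_meminfo, apply NUMA emulation if requested, register
        * the memblks, and finally map any CPU left without a node onto an
        * online node via numa_init_array().
        */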
    639static int __init numa_init(int (*init_func)(void))
    640{
    641	int i;
    642	int ret;
    643
    644	for (i = 0; i < MAX_LOCAL_APIC; i++)
    645		set_apicid_to_node(i, NUMA_NO_NODE);
    646
    647	nodes_clear(numa_nodes_parsed);
    648	nodes_clear(node_possible_map);
    649	nodes_clear(node_online_map);
    650	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
    651	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
    652				  MAX_NUMNODES));
    653	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
    654				  MAX_NUMNODES));
    655	/* In case that parsing SRAT failed. */
    656	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
    657	numa_reset_distance();
    658
    659	ret = init_func();
    660	if (ret < 0)
    661		return ret;
    662
    663	/*
    664	 * We reset memblock back to the top-down direction
    665	 * here because if we configured ACPI_NUMA, we have
    666	 * parsed SRAT in init_func(). It is ok to have the
    667	 * reset here even if we didn't configure ACPI_NUMA
    668	 * or ACPI NUMA init fails and falls back to dummy
    669	 * NUMA init.
    670	 */
    671	memblock_set_bottom_up(false);
    672
    673	ret = numa_cleanup_meminfo(&numa_meminfo);
    674	if (ret < 0)
    675		return ret;
    676
    677	numa_emulation(&numa_meminfo, numa_distance_cnt);
    678
    679	ret = numa_register_memblks(&numa_meminfo);
    680	if (ret < 0)
    681		return ret;
    682
    683	for (i = 0; i < nr_cpu_ids; i++) {
    684		int nid = early_cpu_to_node(i);
    685
    686		if (nid == NUMA_NO_NODE)
    687			continue;
    688		if (!node_online(nid))
    689			numa_clear_node(i);
    690	}
    691	numa_init_array();
    692
    693	return 0;
    694}
    695
    696/**
    697 * dummy_numa_init - Fallback dummy NUMA init
    698 *
    699 * Used if there's no underlying NUMA architecture, NUMA initialization
    700 * fails, or NUMA is disabled on the command line.
    701 *
    702 * Must online at least one node and add memory blocks that cover all
    703 * allowed memory.  This function must not fail.
    704 */
    705static int __init dummy_numa_init(void)
    706{
    707	printk(KERN_INFO "%s\n",
    708	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
    709	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
    710	       0LLU, PFN_PHYS(max_pfn) - 1);
    711
    712	node_set(0, numa_nodes_parsed);
    713	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
    714
    715	return 0;
    716}
    717
    718/**
    719 * x86_numa_init - Initialize NUMA
    720 *
    721 * Try each configured NUMA initialization method until one succeeds.  The
    722 * last fallback is dummy single node config encompassing whole memory and
    723 * never fails.
    724 */
    725void __init x86_numa_init(void)
    726{
    727	if (!numa_off) {
    728#ifdef CONFIG_ACPI_NUMA
    729		if (!numa_init(x86_acpi_numa_init))
    730			return;
    731#endif
    732#ifdef CONFIG_AMD_NUMA
    733		if (!numa_init(amd_numa_init))
    734			return;
    735#endif
    736	}
    737
    738	numa_init(dummy_numa_init);
    739}
    740
    741
    742/*
    743 * A node may exist which has one or more Generic Initiators but no CPUs and no
    744 * memory.
    745 *
    746 * This function must be called after init_cpu_to_node(), to ensure that any
    747 * memoryless CPU nodes have already been brought online, and before the
    748 * node_data[nid] is needed for zone list setup in build_all_zonelists().
    749 *
    750 * When this function is called, any nodes containing either memory and/or CPUs
    751 * will already be online and there is no need to do anything extra, even if
    752 * they also contain one or more Generic Initiators.
    753 */
    754void __init init_gi_nodes(void)
    755{
    756	int nid;
    757
    758	/*
    759	 * Exclude this node from
    760	 * bringup_nonboot_cpus
    761	 *  cpu_up
    762	 *   __try_online_node
    763	 *    register_one_node
    764	 * because node_subsys is not initialized yet.
    765	 * TODO remove dependency on node_online
    766	 */
    767	for_each_node_state(nid, N_GENERIC_INITIATOR)
    768		if (!node_online(nid))
    769			node_set_online(nid);
    770}
    771
    772/*
    773 * Setup early cpu_to_node.
    774 *
    775 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
    776 * and apicid_to_node[] tables have valid entries for a CPU.
    777 * This means we skip cpu_to_node[] initialisation for NUMA
    778 * emulation and the fake-node case (when running a kernel compiled
    779 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
    780 * is already initialized in a round-robin manner in numa_init_array(),
    781 * prior to this call, and this initialization is good enough
    782 * for the fake NUMA cases.
    783 *
    784 * Called before the per_cpu areas are set up.
    785 */
    786void __init init_cpu_to_node(void)
    787{
    788	int cpu;
    789	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
    790
    791	BUG_ON(cpu_to_apicid == NULL);
    792
    793	for_each_possible_cpu(cpu) {
    794		int node = numa_cpu_node(cpu);
    795
    796		if (node == NUMA_NO_NODE)
    797			continue;
    798
    799		/*
    800		 * Exclude this node from
    801		 * bringup_nonboot_cpus
    802		 *  cpu_up
    803		 *   __try_online_node
    804		 *    register_one_node
    805		 * because node_subsys is not initialized yet.
    806		 * TODO remove dependency on node_online
    807		 */
    808		if (!node_online(node))
    809			node_set_online(node);
    810
    811		numa_set_node(cpu, node);
    812	}
    813}
    814
    815#ifndef CONFIG_DEBUG_PER_CPU_MAPS
    816
    817# ifndef CONFIG_NUMA_EMU
    818void numa_add_cpu(int cpu)
    819{
    820	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
    821}
    822
    823void numa_remove_cpu(int cpu)
    824{
    825	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
    826}
    827# endif	/* !CONFIG_NUMA_EMU */
    828
    829#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
    830
    831int __cpu_to_node(int cpu)
    832{
    833	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
    834		printk(KERN_WARNING
    835			"cpu_to_node(%d): usage too early!\n", cpu);
    836		dump_stack();
    837		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
    838	}
    839	return per_cpu(x86_cpu_to_node_map, cpu);
    840}
    841EXPORT_SYMBOL(__cpu_to_node);
    842
    843/*
    844 * Same function as cpu_to_node() but used if called before the
    845 * per_cpu areas are set up.
    846 */
    847int early_cpu_to_node(int cpu)
    848{
    849	if (early_per_cpu_ptr(x86_cpu_to_node_map))
    850		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
    851
    852	if (!cpu_possible(cpu)) {
    853		printk(KERN_WARNING
    854			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
    855		dump_stack();
    856		return NUMA_NO_NODE;
    857	}
    858	return per_cpu(x86_cpu_to_node_map, cpu);
    859}
    860
    861void debug_cpumask_set_cpu(int cpu, int node, bool enable)
    862{
    863	struct cpumask *mask;
    864
    865	if (node == NUMA_NO_NODE) {
    866		/* early_cpu_to_node() already emits a warning and trace */
    867		return;
    868	}
    869	mask = node_to_cpumask_map[node];
    870	if (!mask) {
    871		pr_err("node_to_cpumask_map[%i] NULL\n", node);
    872		dump_stack();
    873		return;
    874	}
    875
    876	if (enable)
    877		cpumask_set_cpu(cpu, mask);
    878	else
    879		cpumask_clear_cpu(cpu, mask);
    880
    881	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
    882		enable ? "numa_add_cpu" : "numa_remove_cpu",
    883		cpu, node, cpumask_pr_args(mask));
    884	return;
    885}
    886
    887# ifndef CONFIG_NUMA_EMU
    888static void numa_set_cpumask(int cpu, bool enable)
    889{
    890	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
    891}
    892
    893void numa_add_cpu(int cpu)
    894{
    895	numa_set_cpumask(cpu, true);
    896}
    897
    898void numa_remove_cpu(int cpu)
    899{
    900	numa_set_cpumask(cpu, false);
    901}
    902# endif	/* !CONFIG_NUMA_EMU */
    903
    904/*
    905 * Returns a pointer to the bitmask of CPUs on Node 'node'.
    906 */
    907const struct cpumask *cpumask_of_node(int node)
    908{
    909	if ((unsigned)node >= nr_node_ids) {
    910		printk(KERN_WARNING
    911			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
    912			node, nr_node_ids);
    913		dump_stack();
    914		return cpu_none_mask;
    915	}
    916	if (node_to_cpumask_map[node] == NULL) {
    917		printk(KERN_WARNING
    918			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
    919			node);
    920		dump_stack();
    921		return cpu_online_mask;
    922	}
    923	return node_to_cpumask_map[node];
    924}
    925EXPORT_SYMBOL(cpumask_of_node);
    926
    927#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
    928
    929#ifdef CONFIG_NUMA_KEEP_MEMINFO
    930static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
    931{
    932	int i;
    933
    934	for (i = 0; i < mi->nr_blks; i++)
    935		if (mi->blk[i].start <= start && mi->blk[i].end > start)
    936			return mi->blk[i].nid;
    937	return NUMA_NO_NODE;
    938}
    939
    940int phys_to_target_node(phys_addr_t start)
    941{
    942	int nid = meminfo_to_nid(&numa_meminfo, start);
    943
    944	/*
    945	 * Prefer online nodes, but if reserved memory might be
    946	 * hot-added, continue the search with reserved ranges.
    947	 */
    948	if (nid != NUMA_NO_NODE)
    949		return nid;
    950
    951	return meminfo_to_nid(&numa_reserved_meminfo, start);
    952}
    953EXPORT_SYMBOL_GPL(phys_to_target_node);
    954
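       /*
        * Unlike phys_to_target_node(), hot-added memory must always end up on
        * some node, so fall back to the node of the first parsed memblk when
        * @start is not covered by any known range.
        */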
    955int memory_add_physaddr_to_nid(u64 start)
    956{
    957	int nid = meminfo_to_nid(&numa_meminfo, start);
    958
    959	if (nid == NUMA_NO_NODE)
    960		nid = numa_meminfo.blk[0].nid;
    961	return nid;
    962}
    963EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
    964#endif