cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

memblock.c (62080B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Procedures for maintaining information about logical memory blocks.
      4 *
      5 * Peter Bergner, IBM Corp.	June 2001.
      6 * Copyright (C) 2001 Peter Bergner.
      7 */
      8
      9#include <linux/kernel.h>
     10#include <linux/slab.h>
     11#include <linux/init.h>
     12#include <linux/bitops.h>
     13#include <linux/poison.h>
     14#include <linux/pfn.h>
     15#include <linux/debugfs.h>
     16#include <linux/kmemleak.h>
     17#include <linux/seq_file.h>
     18#include <linux/memblock.h>
     19
     20#include <asm/sections.h>
     21#include <linux/io.h>
     22
     23#include "internal.h"
     24
     25#define INIT_MEMBLOCK_REGIONS			128
     26#define INIT_PHYSMEM_REGIONS			4
     27
     28#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
     29# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
     30#endif
     31
     32/**
     33 * DOC: memblock overview
     34 *
     35 * Memblock is a method of managing memory regions during the early
     36 * boot period when the usual kernel memory allocators are not up and
     37 * running.
     38 *
     39 * Memblock views the system memory as collections of contiguous
     40 * regions. There are several types of these collections:
     41 *
     42 * * ``memory`` - describes the physical memory available to the
     43 *   kernel; this may differ from the actual physical memory installed
     44 *   in the system, for instance when the memory is restricted with
     45 *   ``mem=`` command line parameter
     46 * * ``reserved`` - describes the regions that were allocated
     47 * * ``physmem`` - describes the actual physical memory available during
     48 *   boot regardless of the possible restrictions and memory hot(un)plug;
     49 *   the ``physmem`` type is only available on some architectures.
     50 *
     51 * Each region is represented by struct memblock_region that
     52 * defines the region extents, its attributes and NUMA node id on NUMA
     53 * systems. Every memory type is described by the struct memblock_type
     54 * which contains an array of memory regions along with
     55 * the allocator metadata. The "memory" and "reserved" types are nicely
     56 * wrapped with struct memblock. This structure is statically
     57 * initialized at build time. The region arrays are initially sized to
     58 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
     59 * for "reserved". The region array for "physmem" is initially sized to
     60 * %INIT_PHYSMEM_REGIONS.
     61 * The memblock_allow_resize() enables automatic resizing of the region
     62 * arrays during addition of new regions. This feature should be used
     63 * with care so that memory allocated for the region array will not
     64 * overlap with areas that should be reserved, for example initrd.
     65 *
     66 * The early architecture setup should tell memblock what the physical
     67 * memory layout is by using memblock_add() or memblock_add_node()
     68 * functions. The first function does not assign the region to a NUMA
     69 * node and it is appropriate for UMA systems. Yet, it is possible to
     70 * use it on NUMA systems as well and assign the region to a NUMA node
     71 * later in the setup process using memblock_set_node(). The
     72 * memblock_add_node() performs such an assignment directly.
     73 *
     74 * Once memblock is setup the memory can be allocated using one of the
     75 * API variants:
     76 *
     77 * * memblock_phys_alloc*() - these functions return the **physical**
     78 *   address of the allocated memory
     79 * * memblock_alloc*() - these functions return the **virtual** address
     80 *   of the allocated memory.
     81 *
     82 * Note, that both API variants use implicit assumptions about allowed
     83 * memory ranges and the fallback methods. Consult the documentation
     84 * of memblock_alloc_internal() and memblock_alloc_range_nid()
     85 * functions for more elaborate description.
     86 *
     87 * As the system boot progresses, the architecture specific mem_init()
     88 * function frees all the memory to the buddy page allocator.
     89 *
     90 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
     91 * memblock data structures (except "physmem") will be discarded after the
     92 * system initialization completes.
     93 */
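
       /*
        * Illustrative usage (a sketch, not part of this file): a typical
        * early-boot sequence with the APIs described above. The function
        * names are real memblock interfaces; the addresses, sizes and the
        * initrd_* variables are made-up examples.
        *
        *	memblock_add(0x80000000, SZ_512M);	// describe RAM
        *	memblock_reserve(initrd_start, initrd_size); // protect initrd
        *	void *tbl = memblock_alloc(SZ_4K, SMP_CACHE_BYTES); // virtual
        *	phys_addr_t pa = memblock_phys_alloc(SZ_4K, SMP_CACHE_BYTES);
        */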
     94
     95#ifndef CONFIG_NUMA
     96struct pglist_data __refdata contig_page_data;
     97EXPORT_SYMBOL(contig_page_data);
     98#endif
     99
    100unsigned long max_low_pfn;
    101unsigned long min_low_pfn;
    102unsigned long max_pfn;
    103unsigned long long max_possible_pfn;
    104
    105static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
    106static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
    107#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    108static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
    109#endif
    110
    111struct memblock memblock __initdata_memblock = {
    112	.memory.regions		= memblock_memory_init_regions,
    113	.memory.cnt		= 1,	/* empty dummy entry */
    114	.memory.max		= INIT_MEMBLOCK_REGIONS,
    115	.memory.name		= "memory",
    116
    117	.reserved.regions	= memblock_reserved_init_regions,
    118	.reserved.cnt		= 1,	/* empty dummy entry */
    119	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
    120	.reserved.name		= "reserved",
    121
    122	.bottom_up		= false,
    123	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
    124};
    125
    126#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    127struct memblock_type physmem = {
    128	.regions		= memblock_physmem_init_regions,
    129	.cnt			= 1,	/* empty dummy entry */
    130	.max			= INIT_PHYSMEM_REGIONS,
    131	.name			= "physmem",
    132};
    133#endif
    134
    135/*
    136 * keep a pointer to &memblock.memory in the text section to use it in
    137 * __next_mem_range() and its helpers.
    138 * For architectures that do not keep memblock data after init, this
    139 * pointer will be reset to NULL at memblock_discard().
    140 */
    141static __refdata struct memblock_type *memblock_memory = &memblock.memory;
    142
    143#define for_each_memblock_type(i, memblock_type, rgn)			\
    144	for (i = 0, rgn = &memblock_type->regions[0];			\
    145	     i < memblock_type->cnt;					\
    146	     i++, rgn = &memblock_type->regions[i])
    147
    148#define memblock_dbg(fmt, ...)						\
    149	do {								\
    150		if (memblock_debug)					\
    151			pr_info(fmt, ##__VA_ARGS__);			\
    152	} while (0)
    153
    154static int memblock_debug __initdata_memblock;
    155static bool system_has_some_mirror __initdata_memblock = false;
    156static int memblock_can_resize __initdata_memblock;
    157static int memblock_memory_in_slab __initdata_memblock = 0;
    158static int memblock_reserved_in_slab __initdata_memblock = 0;
    159
    160static enum memblock_flags __init_memblock choose_memblock_flags(void)
    161{
    162	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
    163}
    164
    165/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
    166static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
    167{
    168	return *size = min(*size, PHYS_ADDR_MAX - base);
    169}
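
       /*
        * Worked example (sketch): if @base is PHYS_ADDR_MAX - 0x1000 and
        * *@size is 0x2000, @base + *@size would wrap around, so the size
        * is capped to min(0x2000, PHYS_ADDR_MAX - base) = 0x1000, which
        * is both stored back through @size and returned.
        */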
    170
    171/*
    172 * Address comparison utilities
    173 */
    174static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
    175				       phys_addr_t base2, phys_addr_t size2)
    176{
    177	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
    178}
    179
    180bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
    181					phys_addr_t base, phys_addr_t size)
    182{
    183	unsigned long i;
    184
    185	memblock_cap_size(base, &size);
    186
    187	for (i = 0; i < type->cnt; i++)
    188		if (memblock_addrs_overlap(base, size, type->regions[i].base,
    189					   type->regions[i].size))
    190			break;
    191	return i < type->cnt;
    192}
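
       /*
        * Worked example (sketch): [0x1000, 0x2000) and [0x1800, 0x2800)
        * overlap, since 0x1000 < 0x2800 and 0x1800 < 0x2000. Merely
        * adjacent ranges such as [0x1000, 0x2000) and [0x2000, 0x3000)
        * do not: the strict '<' makes 0x2000 < 0x2000 false, so the
        * half-open intervals share no byte.
        */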
    193
    194/**
    195 * __memblock_find_range_bottom_up - find free area utility in bottom-up
    196 * @start: start of candidate range
    197 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
    198 *       %MEMBLOCK_ALLOC_ACCESSIBLE
    199 * @size: size of free area to find
    200 * @align: alignment of free area to find
    201 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
    202 * @flags: pick from blocks based on memory attributes
    203 *
    204 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
    205 *
    206 * Return:
    207 * Found address on success, 0 on failure.
    208 */
    209static phys_addr_t __init_memblock
    210__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
    211				phys_addr_t size, phys_addr_t align, int nid,
    212				enum memblock_flags flags)
    213{
    214	phys_addr_t this_start, this_end, cand;
    215	u64 i;
    216
    217	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
    218		this_start = clamp(this_start, start, end);
    219		this_end = clamp(this_end, start, end);
    220
    221		cand = round_up(this_start, align);
    222		if (cand < this_end && this_end - cand >= size)
    223			return cand;
    224	}
    225
    226	return 0;
    227}
    228
    229/**
    230 * __memblock_find_range_top_down - find free area utility, in top-down
    231 * @start: start of candidate range
    232 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
    233 *       %MEMBLOCK_ALLOC_ACCESSIBLE
    234 * @size: size of free area to find
    235 * @align: alignment of free area to find
    236 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
    237 * @flags: pick from blocks based on memory attributes
    238 *
    239 * Utility called from memblock_find_in_range_node(), find free area top-down.
    240 *
    241 * Return:
    242 * Found address on success, 0 on failure.
    243 */
    244static phys_addr_t __init_memblock
    245__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
    246			       phys_addr_t size, phys_addr_t align, int nid,
    247			       enum memblock_flags flags)
    248{
    249	phys_addr_t this_start, this_end, cand;
    250	u64 i;
    251
    252	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
    253					NULL) {
    254		this_start = clamp(this_start, start, end);
    255		this_end = clamp(this_end, start, end);
    256
    257		if (this_end < size)
    258			continue;
    259
    260		cand = round_down(this_end - size, align);
    261		if (cand >= this_start)
    262			return cand;
    263	}
    264
    265	return 0;
    266}
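
       /*
        * Worked example (sketch): with a free range clamped to
        * [0x1000, 0x9000), size = 0x3000 and align = 0x1000, the
        * candidate is round_down(0x9000 - 0x3000, 0x1000) = 0x6000.
        * That is >= this_start, so 0x6000 is returned: the highest
        * aligned base at which the request still fits.
        */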
    267
    268/**
    269 * memblock_find_in_range_node - find free area in given range and node
    270 * @size: size of free area to find
    271 * @align: alignment of free area to find
    272 * @start: start of candidate range
    273 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
    274 *       %MEMBLOCK_ALLOC_ACCESSIBLE
    275 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
    276 * @flags: pick from blocks based on memory attributes
    277 *
    278 * Find @size free area aligned to @align in the specified range and node.
    279 *
    280 * Return:
    281 * Found address on success, 0 on failure.
    282 */
    283static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
    284					phys_addr_t align, phys_addr_t start,
    285					phys_addr_t end, int nid,
    286					enum memblock_flags flags)
    287{
    288	/* pump up @end */
    289	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
    290	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
    291		end = memblock.current_limit;
    292
    293	/* avoid allocating the first page */
    294	start = max_t(phys_addr_t, start, PAGE_SIZE);
    295	end = max(start, end);
    296
    297	if (memblock_bottom_up())
    298		return __memblock_find_range_bottom_up(start, end, size, align,
    299						       nid, flags);
    300	else
    301		return __memblock_find_range_top_down(start, end, size, align,
    302						      nid, flags);
    303}
    304
    305/**
    306 * memblock_find_in_range - find free area in given range
    307 * @start: start of candidate range
    308 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
    309 *       %MEMBLOCK_ALLOC_ACCESSIBLE
    310 * @size: size of free area to find
    311 * @align: alignment of free area to find
    312 *
    313 * Find @size free area aligned to @align in the specified range.
    314 *
    315 * Return:
    316 * Found address on success, 0 on failure.
    317 */
    318static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
    319					phys_addr_t end, phys_addr_t size,
    320					phys_addr_t align)
    321{
    322	phys_addr_t ret;
    323	enum memblock_flags flags = choose_memblock_flags();
    324
    325again:
    326	ret = memblock_find_in_range_node(size, align, start, end,
    327					    NUMA_NO_NODE, flags);
    328
    329	if (!ret && (flags & MEMBLOCK_MIRROR)) {
    330		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
    331			&size);
    332		flags &= ~MEMBLOCK_MIRROR;
    333		goto again;
    334	}
    335
    336	return ret;
    337}
    338
    339static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
    340{
    341	type->total_size -= type->regions[r].size;
    342	memmove(&type->regions[r], &type->regions[r + 1],
    343		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
    344	type->cnt--;
    345
    346	/* Special case for empty arrays */
    347	if (type->cnt == 0) {
    348		WARN_ON(type->total_size != 0);
    349		type->cnt = 1;
    350		type->regions[0].base = 0;
    351		type->regions[0].size = 0;
    352		type->regions[0].flags = 0;
    353		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
    354	}
    355}
    356
    357#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
    358/**
    359 * memblock_discard - discard memory and reserved arrays if they were allocated
    360 */
    361void __init memblock_discard(void)
    362{
    363	phys_addr_t addr, size;
    364
    365	if (memblock.reserved.regions != memblock_reserved_init_regions) {
    366		addr = __pa(memblock.reserved.regions);
    367		size = PAGE_ALIGN(sizeof(struct memblock_region) *
    368				  memblock.reserved.max);
    369		if (memblock_reserved_in_slab)
    370			kfree(memblock.reserved.regions);
    371		else
    372			memblock_free_late(addr, size);
    373	}
    374
    375	if (memblock.memory.regions != memblock_memory_init_regions) {
    376		addr = __pa(memblock.memory.regions);
    377		size = PAGE_ALIGN(sizeof(struct memblock_region) *
    378				  memblock.memory.max);
    379		if (memblock_memory_in_slab)
    380			kfree(memblock.memory.regions);
    381		else
    382			memblock_free_late(addr, size);
    383	}
    384
    385	memblock_memory = NULL;
    386}
    387#endif
    388
    389/**
    390 * memblock_double_array - double the size of the memblock regions array
    391 * @type: memblock type of the regions array being doubled
    392 * @new_area_start: starting address of memory range to avoid overlap with
    393 * @new_area_size: size of memory range to avoid overlap with
    394 *
    395 * Double the size of the @type regions array. If memblock is being used to
    396 * allocate memory for a new reserved regions array and there is a previously
    397 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
    398 * waiting to be reserved, ensure the memory used by the new array does
    399 * not overlap.
    400 *
    401 * Return:
    402 * 0 on success, -1 on failure.
    403 */
    404static int __init_memblock memblock_double_array(struct memblock_type *type,
    405						phys_addr_t new_area_start,
    406						phys_addr_t new_area_size)
    407{
    408	struct memblock_region *new_array, *old_array;
    409	phys_addr_t old_alloc_size, new_alloc_size;
    410	phys_addr_t old_size, new_size, addr, new_end;
    411	int use_slab = slab_is_available();
    412	int *in_slab;
    413
    414	/* We don't allow resizing until we know about the reserved regions
    415	 * of memory that aren't suitable for allocation
    416	 */
    417	if (!memblock_can_resize)
    418		return -1;
    419
    420	/* Calculate new doubled size */
    421	old_size = type->max * sizeof(struct memblock_region);
    422	new_size = old_size << 1;
    423	/*
    424	 * We need to allocate the new array aligned to PAGE_SIZE,
    425	 * so that we can free it completely later.
    426	 */
    427	old_alloc_size = PAGE_ALIGN(old_size);
    428	new_alloc_size = PAGE_ALIGN(new_size);
    429
    430	/* Retrieve the slab flag */
    431	if (type == &memblock.memory)
    432		in_slab = &memblock_memory_in_slab;
    433	else
    434		in_slab = &memblock_reserved_in_slab;
    435
    436	/* Try to find some space for it */
    437	if (use_slab) {
    438		new_array = kmalloc(new_size, GFP_KERNEL);
    439		addr = new_array ? __pa(new_array) : 0;
    440	} else {
    441		/* only exclude range when trying to double reserved.regions */
    442		if (type != &memblock.reserved)
    443			new_area_start = new_area_size = 0;
    444
    445		addr = memblock_find_in_range(new_area_start + new_area_size,
    446						memblock.current_limit,
    447						new_alloc_size, PAGE_SIZE);
    448		if (!addr && new_area_size)
    449			addr = memblock_find_in_range(0,
    450				min(new_area_start, memblock.current_limit),
    451				new_alloc_size, PAGE_SIZE);
    452
    453		new_array = addr ? __va(addr) : NULL;
    454	}
    455	if (!addr) {
    456		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
    457		       type->name, type->max, type->max * 2);
    458		return -1;
    459	}
    460
    461	new_end = addr + new_size - 1;
    462	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
    463			type->name, type->max * 2, &addr, &new_end);
    464
    465	/*
    466	 * Found space, we now need to move the array over before we add the
    467	 * reserved region since it may be our reserved array itself that is
    468	 * full.
    469	 */
    470	memcpy(new_array, type->regions, old_size);
    471	memset(new_array + type->max, 0, old_size);
    472	old_array = type->regions;
    473	type->regions = new_array;
    474	type->max <<= 1;
    475
    476	/* Free old array. We needn't free it if the array is the static one */
    477	if (*in_slab)
    478		kfree(old_array);
    479	else if (old_array != memblock_memory_init_regions &&
    480		 old_array != memblock_reserved_init_regions)
    481		memblock_free(old_array, old_alloc_size);
    482
    483	/*
    484	 * Reserve the new array if that comes from the memblock.  Otherwise, we
    485	 * needn't do it
    486	 */
    487	if (!use_slab)
    488		BUG_ON(memblock_reserve(addr, new_alloc_size));
    489
    490	/* Update slab flag */
    491	*in_slab = use_slab;
    492
    493	return 0;
    494}
    495
    496/**
    497 * memblock_merge_regions - merge neighboring compatible regions
    498 * @type: memblock type to scan
    499 *
    500 * Scan @type and merge neighboring compatible regions.
    501 */
    502static void __init_memblock memblock_merge_regions(struct memblock_type *type)
    503{
    504	int i = 0;
    505
    506	/* cnt never goes below 1 */
    507	while (i < type->cnt - 1) {
    508		struct memblock_region *this = &type->regions[i];
    509		struct memblock_region *next = &type->regions[i + 1];
    510
    511		if (this->base + this->size != next->base ||
    512		    memblock_get_region_node(this) !=
    513		    memblock_get_region_node(next) ||
    514		    this->flags != next->flags) {
    515			BUG_ON(this->base + this->size > next->base);
    516			i++;
    517			continue;
    518		}
    519
    520		this->size += next->size;
    521		/* move forward from next + 1, index of which is i + 2 */
    522		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
    523		type->cnt--;
    524	}
    525}
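
       /*
        * Example (sketch): neighbours [0x1000, 0x2000) and
        * [0x2000, 0x3000) with identical node ids and flags satisfy
        * this->base + this->size == next->base, so they collapse into a
        * single [0x1000, 0x3000) entry and type->cnt drops by one. A gap
        * between them, or differing flags, keeps them separate.
        */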
    526
    527/**
    528 * memblock_insert_region - insert new memblock region
    529 * @type:	memblock type to insert into
    530 * @idx:	index for the insertion point
    531 * @base:	base address of the new region
    532 * @size:	size of the new region
    533 * @nid:	node id of the new region
    534 * @flags:	flags of the new region
    535 *
    536 * Insert new memblock region [@base, @base + @size) into @type at @idx.
    537 * @type must already have extra room to accommodate the new region.
    538 */
    539static void __init_memblock memblock_insert_region(struct memblock_type *type,
    540						   int idx, phys_addr_t base,
    541						   phys_addr_t size,
    542						   int nid,
    543						   enum memblock_flags flags)
    544{
    545	struct memblock_region *rgn = &type->regions[idx];
    546
    547	BUG_ON(type->cnt >= type->max);
    548	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
    549	rgn->base = base;
    550	rgn->size = size;
    551	rgn->flags = flags;
    552	memblock_set_region_node(rgn, nid);
    553	type->cnt++;
    554	type->total_size += size;
    555}
    556
    557/**
    558 * memblock_add_range - add new memblock region
    559 * @type: memblock type to add new region into
    560 * @base: base address of the new region
    561 * @size: size of the new region
    562 * @nid: nid of the new region
    563 * @flags: flags of the new region
    564 *
    565 * Add new memblock region [@base, @base + @size) into @type.  The new region
    566 * is allowed to overlap with existing ones - overlaps don't affect already
    567 * existing regions.  @type is guaranteed to be minimal (all neighbouring
    568 * compatible regions are merged) after the addition.
    569 *
    570 * Return:
    571 * 0 on success, -errno on failure.
    572 */
    573static int __init_memblock memblock_add_range(struct memblock_type *type,
    574				phys_addr_t base, phys_addr_t size,
    575				int nid, enum memblock_flags flags)
    576{
    577	bool insert = false;
    578	phys_addr_t obase = base;
    579	phys_addr_t end = base + memblock_cap_size(base, &size);
    580	int idx, nr_new;
    581	struct memblock_region *rgn;
    582
    583	if (!size)
    584		return 0;
    585
    586	/* special case for empty array */
    587	if (type->regions[0].size == 0) {
    588		WARN_ON(type->cnt != 1 || type->total_size);
    589		type->regions[0].base = base;
    590		type->regions[0].size = size;
    591		type->regions[0].flags = flags;
    592		memblock_set_region_node(&type->regions[0], nid);
    593		type->total_size = size;
    594		return 0;
    595	}
    596repeat:
    597	/*
    598	 * The following is executed twice.  Once with %false @insert and
    599	 * then with %true.  The first counts the number of regions needed
    600	 * to accommodate the new area.  The second actually inserts them.
    601	 */
    602	base = obase;
    603	nr_new = 0;
    604
    605	for_each_memblock_type(idx, type, rgn) {
    606		phys_addr_t rbase = rgn->base;
    607		phys_addr_t rend = rbase + rgn->size;
    608
    609		if (rbase >= end)
    610			break;
    611		if (rend <= base)
    612			continue;
    613		/*
    614		 * @rgn overlaps.  If it separates the lower part of new
    615		 * area, insert that portion.
    616		 */
    617		if (rbase > base) {
    618#ifdef CONFIG_NUMA
    619			WARN_ON(nid != memblock_get_region_node(rgn));
    620#endif
    621			WARN_ON(flags != rgn->flags);
    622			nr_new++;
    623			if (insert)
    624				memblock_insert_region(type, idx++, base,
    625						       rbase - base, nid,
    626						       flags);
    627		}
    628		/* area below @rend is dealt with, forget about it */
    629		base = min(rend, end);
    630	}
    631
    632	/* insert the remaining portion */
    633	if (base < end) {
    634		nr_new++;
    635		if (insert)
    636			memblock_insert_region(type, idx, base, end - base,
    637					       nid, flags);
    638	}
    639
    640	if (!nr_new)
    641		return 0;
    642
    643	/*
    644	 * If this was the first round, resize array and repeat for actual
    645	 * insertions; otherwise, merge and return.
    646	 */
    647	if (!insert) {
    648		while (type->cnt + nr_new > type->max)
    649			if (memblock_double_array(type, obase, size) < 0)
    650				return -ENOMEM;
    651		insert = true;
    652		goto repeat;
    653	} else {
    654		memblock_merge_regions(type);
    655		return 0;
    656	}
    657}
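
       /*
        * Worked example (sketch): with an existing region [0x2000, 0x3000),
        * adding [0x1000, 0x4000) first counts two new pieces, the part
        * below the overlap ([0x1000, 0x2000)) and the part above it
        * ([0x3000, 0x4000)), resizing the array if needed; the second
        * pass inserts them, and memblock_merge_regions() then folds all
        * three entries into a single [0x1000, 0x4000) region.
        */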
    658
    659/**
    660 * memblock_add_node - add new memblock region within a NUMA node
    661 * @base: base address of the new region
    662 * @size: size of the new region
    663 * @nid: nid of the new region
    664 * @flags: flags of the new region
    665 *
    666 * Add new memblock region [@base, @base + @size) to the "memory"
    667 * type. See memblock_add_range() description for more details.
    668 *
    669 * Return:
    670 * 0 on success, -errno on failure.
    671 */
    672int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
    673				      int nid, enum memblock_flags flags)
    674{
    675	phys_addr_t end = base + size - 1;
    676
    677	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
    678		     &base, &end, nid, flags, (void *)_RET_IP_);
    679
    680	return memblock_add_range(&memblock.memory, base, size, nid, flags);
    681}
    682
    683/**
    684 * memblock_add - add new memblock region
    685 * @base: base address of the new region
    686 * @size: size of the new region
    687 *
    688 * Add new memblock region [@base, @base + @size) to the "memory"
    689 * type. See memblock_add_range() description for more details.
    690 *
    691 * Return:
    692 * 0 on success, -errno on failure.
    693 */
    694int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
    695{
    696	phys_addr_t end = base + size - 1;
    697
    698	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
    699		     &base, &end, (void *)_RET_IP_);
    700
    701	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
    702}
    703
    704/**
    705 * memblock_isolate_range - isolate given range into disjoint memblocks
    706 * @type: memblock type to isolate range for
    707 * @base: base of range to isolate
    708 * @size: size of range to isolate
    709 * @start_rgn: out parameter for the start of isolated region
    710 * @end_rgn: out parameter for the end of isolated region
    711 *
    712 * Walk @type and ensure that regions don't cross the boundaries defined by
    713 * [@base, @base + @size).  Crossing regions are split at the boundaries,
    714 * which may create at most two more regions.  The index of the first
    715 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
    716 *
    717 * Return:
    718 * 0 on success, -errno on failure.
    719 */
    720static int __init_memblock memblock_isolate_range(struct memblock_type *type,
    721					phys_addr_t base, phys_addr_t size,
    722					int *start_rgn, int *end_rgn)
    723{
    724	phys_addr_t end = base + memblock_cap_size(base, &size);
    725	int idx;
    726	struct memblock_region *rgn;
    727
    728	*start_rgn = *end_rgn = 0;
    729
    730	if (!size)
    731		return 0;
    732
    733	/* we'll create at most two more regions */
    734	while (type->cnt + 2 > type->max)
    735		if (memblock_double_array(type, base, size) < 0)
    736			return -ENOMEM;
    737
    738	for_each_memblock_type(idx, type, rgn) {
    739		phys_addr_t rbase = rgn->base;
    740		phys_addr_t rend = rbase + rgn->size;
    741
    742		if (rbase >= end)
    743			break;
    744		if (rend <= base)
    745			continue;
    746
    747		if (rbase < base) {
    748			/*
    749			 * @rgn intersects from below.  Split and continue
    750			 * to process the next region - the new top half.
    751			 */
    752			rgn->base = base;
    753			rgn->size -= base - rbase;
    754			type->total_size -= base - rbase;
    755			memblock_insert_region(type, idx, rbase, base - rbase,
    756					       memblock_get_region_node(rgn),
    757					       rgn->flags);
    758		} else if (rend > end) {
    759			/*
    760			 * @rgn intersects from above.  Split and redo the
    761			 * current region - the new bottom half.
    762			 */
    763			rgn->base = end;
    764			rgn->size -= end - rbase;
    765			type->total_size -= end - rbase;
    766			memblock_insert_region(type, idx--, rbase, end - rbase,
    767					       memblock_get_region_node(rgn),
    768					       rgn->flags);
    769		} else {
    770			/* @rgn is fully contained, record it */
    771			if (!*end_rgn)
    772				*start_rgn = idx;
    773			*end_rgn = idx + 1;
    774		}
    775	}
    776
    777	return 0;
    778}
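
       /*
        * Worked example (sketch): isolating [0x1800, 0x2800) inside a
        * single region [0x1000, 0x3000) splits it into [0x1000, 0x1800),
        * [0x1800, 0x2800) and [0x2800, 0x3000), and *start_rgn/*end_rgn
        * are set so that a loop over [start_rgn, end_rgn) visits exactly
        * the middle region.
        */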
    779
    780static int __init_memblock memblock_remove_range(struct memblock_type *type,
    781					  phys_addr_t base, phys_addr_t size)
    782{
    783	int start_rgn, end_rgn;
    784	int i, ret;
    785
    786	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
    787	if (ret)
    788		return ret;
    789
    790	for (i = end_rgn - 1; i >= start_rgn; i--)
    791		memblock_remove_region(type, i);
    792	return 0;
    793}
    794
    795int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
    796{
    797	phys_addr_t end = base + size - 1;
    798
    799	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
    800		     &base, &end, (void *)_RET_IP_);
    801
    802	return memblock_remove_range(&memblock.memory, base, size);
    803}
    804
    805/**
    806 * memblock_free - free boot memory allocation
    807 * @ptr: starting address of the boot memory allocation
    808 * @size: size of the boot memory block in bytes
    809 *
    810 * Free boot memory block previously allocated by memblock_alloc_xx() API.
    811 * The memory being freed will not be released to the buddy allocator.
    812 */
    813void __init_memblock memblock_free(void *ptr, size_t size)
    814{
    815	if (ptr)
    816		memblock_phys_free(__pa(ptr), size);
    817}
    818
    819/**
    820 * memblock_phys_free - free boot memory block
    821 * @base: phys starting address of the boot memory block
    822 * @size: size of the boot memory block in bytes
    823 *
    824 * Free boot memory block previously allocated by memblock_alloc_xx() API.
    825 * The memory being freed will not be released to the buddy allocator.
    826 */
    827int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
    828{
    829	phys_addr_t end = base + size - 1;
    830
    831	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
    832		     &base, &end, (void *)_RET_IP_);
    833
    834	kmemleak_free_part_phys(base, size);
    835	return memblock_remove_range(&memblock.reserved, base, size);
    836}
    837
    838int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
    839{
    840	phys_addr_t end = base + size - 1;
    841
    842	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
    843		     &base, &end, (void *)_RET_IP_);
    844
    845	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
    846}
    847
    848#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
    849int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
    850{
    851	phys_addr_t end = base + size - 1;
    852
    853	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
    854		     &base, &end, (void *)_RET_IP_);
    855
    856	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
    857}
    858#endif
    859
    860/**
    861 * memblock_setclr_flag - set or clear flag for a memory region
    862 * @base: base address of the region
    863 * @size: size of the region
    864 * @set: set or clear the flag
    865 * @flag: the flag to update
    866 *
    867 * This function isolates region [@base, @base + @size), and sets/clears the flag.
    868 *
    869 * Return: 0 on success, -errno on failure.
    870 */
    871static int __init_memblock memblock_setclr_flag(phys_addr_t base,
    872				phys_addr_t size, int set, int flag)
    873{
    874	struct memblock_type *type = &memblock.memory;
    875	int i, ret, start_rgn, end_rgn;
    876
    877	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
    878	if (ret)
    879		return ret;
    880
    881	for (i = start_rgn; i < end_rgn; i++) {
    882		struct memblock_region *r = &type->regions[i];
    883
    884		if (set)
    885			r->flags |= flag;
    886		else
    887			r->flags &= ~flag;
    888	}
    889
    890	memblock_merge_regions(type);
    891	return 0;
    892}
    893
    894/**
    895 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
    896 * @base: the base phys addr of the region
    897 * @size: the size of the region
    898 *
    899 * Return: 0 on success, -errno on failure.
    900 */
    901int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
    902{
    903	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
    904}
    905
    906/**
    907 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
    908 * @base: the base phys addr of the region
    909 * @size: the size of the region
    910 *
    911 * Return: 0 on success, -errno on failure.
    912 */
    913int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
    914{
    915	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
    916}
    917
    918/**
    919 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
    920 * @base: the base phys addr of the region
    921 * @size: the size of the region
    922 *
    923 * Return: 0 on success, -errno on failure.
    924 */
    925int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
    926{
    927	system_has_some_mirror = true;
    928
    929	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
    930}
    931
    932/**
    933 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
    934 * @base: the base phys addr of the region
    935 * @size: the size of the region
    936 *
    937 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
    938 * direct mapping of the physical memory. These regions will still be
    939 * covered by the memory map. The struct page representing NOMAP memory
    940 * frames in the memory map will be PageReserved()
    941 *
    942 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
    943 * memblock, the caller must inform kmemleak to ignore that memory
    944 *
    945 * Return: 0 on success, -errno on failure.
    946 */
    947int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
    948{
    949	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
    950}
    951
    952/**
    953 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
    954 * @base: the base phys addr of the region
    955 * @size: the size of the region
    956 *
    957 * Return: 0 on success, -errno on failure.
    958 */
    959int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
    960{
    961	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
    962}
    963
    964static bool should_skip_region(struct memblock_type *type,
    965			       struct memblock_region *m,
    966			       int nid, int flags)
    967{
    968	int m_nid = memblock_get_region_node(m);
    969
    970	/* we never skip regions when iterating memblock.reserved or physmem */
    971	if (type != memblock_memory)
    972		return false;
    973
    974	/* only memory regions are associated with nodes, check it */
    975	if (nid != NUMA_NO_NODE && nid != m_nid)
    976		return true;
    977
    978	/* skip hotpluggable memory regions if needed */
    979	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
    980	    !(flags & MEMBLOCK_HOTPLUG))
    981		return true;
    982
    983	/* if we want mirror memory skip non-mirror memory regions */
    984	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
    985		return true;
    986
    987	/* skip nomap memory unless we were asked for it explicitly */
    988	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
    989		return true;
    990
    991	/* skip driver-managed memory unless we were asked for it explicitly */
    992	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
    993		return true;
    994
    995	return false;
    996}
    997
    998/**
    999 * __next_mem_range - next function for for_each_free_mem_range() etc.
   1000 * @idx: pointer to u64 loop variable
   1001 * @nid: node selector, %NUMA_NO_NODE for all nodes
   1002 * @flags: pick from blocks based on memory attributes
   1003 * @type_a: pointer to memblock_type from where the range is taken
   1004 * @type_b: pointer to memblock_type which excludes memory from being taken
   1005 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
   1006 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
   1007 * @out_nid: ptr to int for nid of the range, can be %NULL
   1008 *
   1009 * Find the first area from *@idx which matches @nid, fill the out
   1010 * parameters, and update *@idx for the next iteration.  The lower 32bit of
   1011 * *@idx contains the index into type_a and the upper 32bit indexes the
   1012 * areas before each region in type_b.	For example, if type_b regions
   1013 * look like the following,
   1014 *
   1015 *	0:[0-16), 1:[32-48), 2:[128-130)
   1016 *
   1017 * The upper 32bit indexes the following regions.
   1018 *
   1019 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
   1020 *
   1021 * As both region arrays are sorted, the function advances the two indices
   1022 * in lockstep and returns each intersection.
   1023 */
   1024void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
   1025		      struct memblock_type *type_a,
   1026		      struct memblock_type *type_b, phys_addr_t *out_start,
   1027		      phys_addr_t *out_end, int *out_nid)
   1028{
   1029	int idx_a = *idx & 0xffffffff;
   1030	int idx_b = *idx >> 32;
   1031
   1032	if (WARN_ONCE(nid == MAX_NUMNODES,
   1033	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
   1034		nid = NUMA_NO_NODE;
   1035
   1036	for (; idx_a < type_a->cnt; idx_a++) {
   1037		struct memblock_region *m = &type_a->regions[idx_a];
   1038
   1039		phys_addr_t m_start = m->base;
   1040		phys_addr_t m_end = m->base + m->size;
   1041		int	    m_nid = memblock_get_region_node(m);
   1042
   1043		if (should_skip_region(type_a, m, nid, flags))
   1044			continue;
   1045
   1046		if (!type_b) {
   1047			if (out_start)
   1048				*out_start = m_start;
   1049			if (out_end)
   1050				*out_end = m_end;
   1051			if (out_nid)
   1052				*out_nid = m_nid;
   1053			idx_a++;
   1054			*idx = (u32)idx_a | (u64)idx_b << 32;
   1055			return;
   1056		}
   1057
   1058		/* scan areas before each reservation */
   1059		for (; idx_b < type_b->cnt + 1; idx_b++) {
   1060			struct memblock_region *r;
   1061			phys_addr_t r_start;
   1062			phys_addr_t r_end;
   1063
   1064			r = &type_b->regions[idx_b];
   1065			r_start = idx_b ? r[-1].base + r[-1].size : 0;
   1066			r_end = idx_b < type_b->cnt ?
   1067				r->base : PHYS_ADDR_MAX;
   1068
   1069			/*
   1070			 * if idx_b advanced past idx_a,
   1071			 * break out to advance idx_a
   1072			 */
   1073			if (r_start >= m_end)
   1074				break;
   1075			/* if the two regions intersect, we're done */
   1076			if (m_start < r_end) {
   1077				if (out_start)
   1078					*out_start =
   1079						max(m_start, r_start);
   1080				if (out_end)
   1081					*out_end = min(m_end, r_end);
   1082				if (out_nid)
   1083					*out_nid = m_nid;
   1084				/*
   1085				 * The region which ends first is
   1086				 * advanced for the next iteration.
   1087				 */
   1088				if (m_end <= r_end)
   1089					idx_a++;
   1090				else
   1091					idx_b++;
   1092				*idx = (u32)idx_a | (u64)idx_b << 32;
   1093				return;
   1094			}
   1095		}
   1096	}
   1097
   1098	/* signal end of iteration */
   1099	*idx = ULLONG_MAX;
   1100}
   1101
   1102/**
   1103 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
   1104 *
   1105 * @idx: pointer to u64 loop variable
   1106 * @nid: node selector, %NUMA_NO_NODE for all nodes
   1107 * @flags: pick from blocks based on memory attributes
   1108 * @type_a: pointer to memblock_type from where the range is taken
   1109 * @type_b: pointer to memblock_type which excludes memory from being taken
   1110 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
   1111 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
   1112 * @out_nid: ptr to int for nid of the range, can be %NULL
   1113 *
   1114 * Finds the next range from type_a which is not marked as unsuitable
   1115 * in type_b.
   1116 *
   1117 * Reverse of __next_mem_range().
   1118 */
   1119void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
   1120					  enum memblock_flags flags,
   1121					  struct memblock_type *type_a,
   1122					  struct memblock_type *type_b,
   1123					  phys_addr_t *out_start,
   1124					  phys_addr_t *out_end, int *out_nid)
   1125{
   1126	int idx_a = *idx & 0xffffffff;
   1127	int idx_b = *idx >> 32;
   1128
   1129	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
   1130		nid = NUMA_NO_NODE;
   1131
   1132	if (*idx == (u64)ULLONG_MAX) {
   1133		idx_a = type_a->cnt - 1;
   1134		if (type_b != NULL)
   1135			idx_b = type_b->cnt;
   1136		else
   1137			idx_b = 0;
   1138	}
   1139
   1140	for (; idx_a >= 0; idx_a--) {
   1141		struct memblock_region *m = &type_a->regions[idx_a];
   1142
   1143		phys_addr_t m_start = m->base;
   1144		phys_addr_t m_end = m->base + m->size;
   1145		int m_nid = memblock_get_region_node(m);
   1146
   1147		if (should_skip_region(type_a, m, nid, flags))
   1148			continue;
   1149
   1150		if (!type_b) {
   1151			if (out_start)
   1152				*out_start = m_start;
   1153			if (out_end)
   1154				*out_end = m_end;
   1155			if (out_nid)
   1156				*out_nid = m_nid;
   1157			idx_a--;
   1158			*idx = (u32)idx_a | (u64)idx_b << 32;
   1159			return;
   1160		}
   1161
   1162		/* scan areas before each reservation */
   1163		for (; idx_b >= 0; idx_b--) {
   1164			struct memblock_region *r;
   1165			phys_addr_t r_start;
   1166			phys_addr_t r_end;
   1167
   1168			r = &type_b->regions[idx_b];
   1169			r_start = idx_b ? r[-1].base + r[-1].size : 0;
   1170			r_end = idx_b < type_b->cnt ?
   1171				r->base : PHYS_ADDR_MAX;
   1172			/*
   1173			 * if idx_b advanced past idx_a,
   1174			 * break out to advance idx_a
   1175			 */
   1176
   1177			if (r_end <= m_start)
   1178				break;
   1179			/* if the two regions intersect, we're done */
   1180			if (m_end > r_start) {
   1181				if (out_start)
   1182					*out_start = max(m_start, r_start);
   1183				if (out_end)
   1184					*out_end = min(m_end, r_end);
   1185				if (out_nid)
   1186					*out_nid = m_nid;
   1187				if (m_start >= r_start)
   1188					idx_a--;
   1189				else
   1190					idx_b--;
   1191				*idx = (u32)idx_a | (u64)idx_b << 32;
   1192				return;
   1193			}
   1194		}
   1195	}
   1196	/* signal end of iteration */
   1197	*idx = ULLONG_MAX;
   1198}
   1199
   1200/*
   1201 * Common iterator interface used to define for_each_mem_pfn_range().
   1202 */
   1203void __init_memblock __next_mem_pfn_range(int *idx, int nid,
   1204				unsigned long *out_start_pfn,
   1205				unsigned long *out_end_pfn, int *out_nid)
   1206{
   1207	struct memblock_type *type = &memblock.memory;
   1208	struct memblock_region *r;
   1209	int r_nid;
   1210
   1211	while (++*idx < type->cnt) {
   1212		r = &type->regions[*idx];
   1213		r_nid = memblock_get_region_node(r);
   1214
   1215		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
   1216			continue;
   1217		if (nid == MAX_NUMNODES || nid == r_nid)
   1218			break;
   1219	}
   1220	if (*idx >= type->cnt) {
   1221		*idx = -1;
   1222		return;
   1223	}
   1224
   1225	if (out_start_pfn)
   1226		*out_start_pfn = PFN_UP(r->base);
   1227	if (out_end_pfn)
   1228		*out_end_pfn = PFN_DOWN(r->base + r->size);
   1229	if (out_nid)
   1230		*out_nid = r_nid;
   1231}
   1232
   1233/**
   1234 * memblock_set_node - set node ID on memblock regions
   1235 * @base: base of area to set node ID for
   1236 * @size: size of area to set node ID for
   1237 * @type: memblock type to set node ID for
   1238 * @nid: node ID to set
   1239 *
   1240 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
   1241 * Regions which cross the area boundaries are split as necessary.
   1242 *
   1243 * Return:
   1244 * 0 on success, -errno on failure.
   1245 */
   1246int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
   1247				      struct memblock_type *type, int nid)
   1248{
   1249#ifdef CONFIG_NUMA
   1250	int start_rgn, end_rgn;
   1251	int i, ret;
   1252
   1253	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
   1254	if (ret)
   1255		return ret;
   1256
   1257	for (i = start_rgn; i < end_rgn; i++)
   1258		memblock_set_region_node(&type->regions[i], nid);
   1259
   1260	memblock_merge_regions(type);
   1261#endif
   1262	return 0;
   1263}
   1264
   1265#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
   1266/**
   1267 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
   1268 *
   1269 * @idx: pointer to u64 loop variable
   1270 * @zone: zone in which all of the memory blocks reside
   1271 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
   1272 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
   1273 *
   1274 * This function is meant to be a zone/pfn specific wrapper for the
   1275 * for_each_mem_range type iterators. They are used in the deferred
   1276 * memory init routines, which used to duplicate much of this logic
   1277 * throughout the code. Instead of keeping that logic in multiple
   1278 * locations, it is centralized here in one iterator that does
   1279 * everything those callers need.
   1280 */
   1281void __init_memblock
   1282__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
   1283			     unsigned long *out_spfn, unsigned long *out_epfn)
   1284{
   1285	int zone_nid = zone_to_nid(zone);
   1286	phys_addr_t spa, epa;
   1287
   1288	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
   1289			 &memblock.memory, &memblock.reserved,
   1290			 &spa, &epa, NULL);
   1291
   1292	while (*idx != U64_MAX) {
   1293		unsigned long epfn = PFN_DOWN(epa);
   1294		unsigned long spfn = PFN_UP(spa);
   1295
   1296		/*
   1297		 * Verify the end is at least past the start of the zone and
   1298		 * that we have at least one PFN to initialize.
   1299		 */
   1300		if (zone->zone_start_pfn < epfn && spfn < epfn) {
   1301			/* if we went too far just stop searching */
   1302			if (zone_end_pfn(zone) <= spfn) {
   1303				*idx = U64_MAX;
   1304				break;
   1305			}
   1306
   1307			if (out_spfn)
   1308				*out_spfn = max(zone->zone_start_pfn, spfn);
   1309			if (out_epfn)
   1310				*out_epfn = min(zone_end_pfn(zone), epfn);
   1311
   1312			return;
   1313		}
   1314
   1315		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
   1316				 &memblock.memory, &memblock.reserved,
   1317				 &spa, &epa, NULL);
   1318	}
   1319
   1320	/* signal end of iteration */
   1321	if (out_spfn)
   1322		*out_spfn = ULONG_MAX;
   1323	if (out_epfn)
   1324		*out_epfn = 0;
   1325}
   1326
   1327#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
   1328
   1329/**
   1330 * memblock_alloc_range_nid - allocate boot memory block
   1331 * @size: size of memory block to be allocated in bytes
   1332 * @align: alignment of the region and block's size
   1333 * @start: the lower bound of the memory region to allocate (phys address)
   1334 * @end: the upper bound of the memory region to allocate (phys address)
   1335 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1336 * @exact_nid: control whether the allocation can fall back to other nodes
   1337 *
   1338 * The allocation is performed from memory region limited by
   1339 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
   1340 *
   1341 * If the specified node can not hold the requested memory and @exact_nid
   1342 * is false, the allocation falls back to any node in the system.
   1343 *
   1344 * For systems with memory mirroring, the allocation is attempted first
   1345 * from the regions with mirroring enabled and then retried from any
   1346 * memory region.
   1347 *
   1348 * In addition, this function sets the min_count to 0 using kmemleak_alloc_phys
   1349 * for the allocated boot memory block, so that it is never reported as a leak.
   1350 *
   1351 * Return:
   1352 * Physical address of allocated memory block on success, %0 on failure.
   1353 */
   1354phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
   1355					phys_addr_t align, phys_addr_t start,
   1356					phys_addr_t end, int nid,
   1357					bool exact_nid)
   1358{
   1359	enum memblock_flags flags = choose_memblock_flags();
   1360	phys_addr_t found;
   1361
   1362	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
   1363		nid = NUMA_NO_NODE;
   1364
   1365	if (!align) {
   1366		/* Can't use WARNs this early in boot on powerpc */
   1367		dump_stack();
   1368		align = SMP_CACHE_BYTES;
   1369	}
   1370
   1371again:
   1372	found = memblock_find_in_range_node(size, align, start, end, nid,
   1373					    flags);
   1374	if (found && !memblock_reserve(found, size))
   1375		goto done;
   1376
   1377	if (nid != NUMA_NO_NODE && !exact_nid) {
   1378		found = memblock_find_in_range_node(size, align, start,
   1379						    end, NUMA_NO_NODE,
   1380						    flags);
   1381		if (found && !memblock_reserve(found, size))
   1382			goto done;
   1383	}
   1384
   1385	if (flags & MEMBLOCK_MIRROR) {
   1386		flags &= ~MEMBLOCK_MIRROR;
   1387		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
   1388			&size);
   1389		goto again;
   1390	}
   1391
   1392	return 0;
   1393
   1394done:
   1395	/*
   1396	 * Skip kmemleak for those places like kasan_init() and
   1397	 * early_pgtable_alloc() due to high volume.
   1398	 */
   1399	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
   1400		/*
   1401		 * The min_count is set to 0 so that memblock allocated
   1402		 * blocks are never reported as leaks. This is because many
   1403		 * of these blocks are only referred via the physical
   1404		 * address which is not looked up by kmemleak.
   1405		 */
   1406		kmemleak_alloc_phys(found, size, 0, 0);
   1407
   1408	return found;
   1409}
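
       /*
        * Illustrative call (a sketch; the values are made up): allocate
        * 1MiB, 4KiB aligned, anywhere below memblock.current_limit on
        * any node, letting the function fall back from mirrored to plain
        * memory on its own:
        *
        *	phys_addr_t pa = memblock_alloc_range_nid(SZ_1M, SZ_4K, 0,
        *				MEMBLOCK_ALLOC_ACCESSIBLE,
        *				NUMA_NO_NODE, false);
        */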
   1410
   1411/**
   1412 * memblock_phys_alloc_range - allocate a memory block inside specified range
   1413 * @size: size of memory block to be allocated in bytes
   1414 * @align: alignment of the region and block's size
   1415 * @start: the lower bound of the memory region to allocate (physical address)
   1416 * @end: the upper bound of the memory region to allocate (physical address)
   1417 *
   1418 * Allocate @size bytes in the range between @start and @end.
   1419 *
   1420 * Return: physical address of the allocated memory block on success,
   1421 * %0 on failure.
   1422 */
   1423phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
   1424					     phys_addr_t align,
   1425					     phys_addr_t start,
   1426					     phys_addr_t end)
   1427{
   1428	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
   1429		     __func__, (u64)size, (u64)align, &start, &end,
   1430		     (void *)_RET_IP_);
   1431	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
   1432					false);
   1433}
   1434
   1435/**
   1436 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
   1437 * @size: size of memory block to be allocated in bytes
   1438 * @align: alignment of the region and block's size
   1439 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1440 *
   1441 * Allocates a memory block from the specified NUMA node. If the node
   1442 * has no available memory, attempts to allocate from any node in the
   1443 * system.
   1444 *
   1445 * Return: physical address of the allocated memory block on success,
   1446 * %0 on failure.
   1447 */
   1448phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
   1449{
   1450	return memblock_alloc_range_nid(size, align, 0,
   1451					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
   1452}
   1453
   1454/**
   1455 * memblock_alloc_internal - allocate boot memory block
   1456 * @size: size of memory block to be allocated in bytes
   1457 * @align: alignment of the region and block's size
   1458 * @min_addr: the lower bound of the memory region to allocate (phys address)
   1459 * @max_addr: the upper bound of the memory region to allocate (phys address)
   1460 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1461 * @exact_nid: control whether the allocation can fall back to other nodes
   1462 *
   1463 * Allocates a memory block using memblock_alloc_range_nid() and
   1464 * converts the returned physical address to virtual.
   1465 *
   1466 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
   1467 * will fall back to memory below @min_addr. Other constraints, such
   1468 * as node and mirrored memory will be handled again in
   1469 * memblock_alloc_range_nid().
   1470 *
   1471 * Return:
   1472 * Virtual address of allocated memory block on success, NULL on failure.
   1473 */
   1474static void * __init memblock_alloc_internal(
   1475				phys_addr_t size, phys_addr_t align,
   1476				phys_addr_t min_addr, phys_addr_t max_addr,
   1477				int nid, bool exact_nid)
   1478{
   1479	phys_addr_t alloc;
   1480
   1481	/*
   1482	 * Detect any accidental use of these APIs after slab is ready, as at
   1483	 * this moment memblock may be deinitialized already and its
   1484	 * internal data may be destroyed (after execution of memblock_free_all)
   1485	 */
   1486	if (WARN_ON_ONCE(slab_is_available()))
   1487		return kzalloc_node(size, GFP_NOWAIT, nid);
   1488
   1489	if (max_addr > memblock.current_limit)
   1490		max_addr = memblock.current_limit;
   1491
   1492	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
   1493					exact_nid);
   1494
   1495	/* retry allocation without lower limit */
   1496	if (!alloc && min_addr)
   1497		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
   1498						exact_nid);
   1499
   1500	if (!alloc)
   1501		return NULL;
   1502
   1503	return phys_to_virt(alloc);
   1504}
   1505
   1506/**
   1507 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
   1508 * without zeroing memory
   1509 * @size: size of memory block to be allocated in bytes
   1510 * @align: alignment of the region and block's size
   1511 * @min_addr: the lower bound of the memory region from where the allocation
   1512 *	  is preferred (phys address)
   1513 * @max_addr: the upper bound of the memory region from where the allocation
   1514 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
   1515 *	      allocate only from memory limited by memblock.current_limit value
   1516 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1517 *
   1518 * Public function, provides additional debug information (including caller
   1519 * info), if enabled. Does not zero allocated memory.
   1520 *
   1521 * Return:
   1522 * Virtual address of allocated memory block on success, NULL on failure.
   1523 */
   1524void * __init memblock_alloc_exact_nid_raw(
   1525			phys_addr_t size, phys_addr_t align,
   1526			phys_addr_t min_addr, phys_addr_t max_addr,
   1527			int nid)
   1528{
   1529	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
   1530		     __func__, (u64)size, (u64)align, nid, &min_addr,
   1531		     &max_addr, (void *)_RET_IP_);
   1532
   1533	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
   1534				       true);
   1535}
   1536
   1537/**
   1538 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
   1539 * memory and without panicking
   1540 * @size: size of memory block to be allocated in bytes
   1541 * @align: alignment of the region and block's size
   1542 * @min_addr: the lower bound of the memory region from where the allocation
   1543 *	  is preferred (phys address)
   1544 * @max_addr: the upper bound of the memory region from where the allocation
   1545 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
   1546 *	      allocate only from memory limited by memblock.current_limit value
   1547 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1548 *
   1549 * Public function, provides additional debug information (including caller
   1550 * info), if enabled. Does not zero allocated memory, does not panic if request
   1551 * cannot be satisfied.
   1552 *
   1553 * Return:
   1554 * Virtual address of allocated memory block on success, NULL on failure.
   1555 */
   1556void * __init memblock_alloc_try_nid_raw(
   1557			phys_addr_t size, phys_addr_t align,
   1558			phys_addr_t min_addr, phys_addr_t max_addr,
   1559			int nid)
   1560{
   1561	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
   1562		     __func__, (u64)size, (u64)align, nid, &min_addr,
   1563		     &max_addr, (void *)_RET_IP_);
   1564
   1565	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
   1566				       false);
   1567}
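       
       /*
        * Illustrative example: the _raw variants return uninitialized memory,
        * suiting callers that overwrite the whole buffer anyway and want to
        * skip a redundant memset(); the initializer below is hypothetical:
        *
        *	void *map = memblock_alloc_try_nid_raw(size, PAGE_SIZE, 0,
        *					       MEMBLOCK_ALLOC_ACCESSIBLE,
        *					       nid);
        *	if (map)
        *		init_whole_buffer(map, size);	// hypothetical helper
        */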
   1568
   1569/**
   1570 * memblock_alloc_try_nid - allocate boot memory block
   1571 * @size: size of memory block to be allocated in bytes
   1572 * @align: alignment of the region and block's size
   1573 * @min_addr: the lower bound of the memory region from where the allocation
   1574 *	  is preferred (phys address)
   1575 * @max_addr: the upper bound of the memory region from where the allocation
   1576 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
   1577 *	      allocate only from memory limited by memblock.current_limit value
   1578 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
   1579 *
   1580 * Public function, provides additional debug information (including caller
   1581 * info), if enabled. This function zeroes the allocated memory.
   1582 *
   1583 * Return:
   1584 * Virtual address of allocated memory block on success, NULL on failure.
   1585 */
   1586void * __init memblock_alloc_try_nid(
   1587			phys_addr_t size, phys_addr_t align,
   1588			phys_addr_t min_addr, phys_addr_t max_addr,
   1589			int nid)
   1590{
   1591	void *ptr;
   1592
   1593	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
   1594		     __func__, (u64)size, (u64)align, nid, &min_addr,
   1595		     &max_addr, (void *)_RET_IP_);
   1596	ptr = memblock_alloc_internal(size, align,
   1597					   min_addr, max_addr, nid, false);
   1598	if (ptr)
   1599		memset(ptr, 0, size);
   1600
   1601	return ptr;
   1602}
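       
       /*
        * Illustrative example: the common memblock_alloc() wrapper in
        * <linux/memblock.h> boils down to a call like this, returning zeroed
        * memory with no address constraints from any node:
        *
        *	void *buf = memblock_alloc_try_nid(PAGE_SIZE, SMP_CACHE_BYTES,
        *					   MEMBLOCK_LOW_LIMIT,
        *					   MEMBLOCK_ALLOC_ACCESSIBLE,
        *					   NUMA_NO_NODE);
        */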
   1603
   1604/**
   1605 * memblock_free_late - free pages directly to buddy allocator
   1606 * @base: phys starting address of the boot memory block
   1607 * @size: size of the boot memory block in bytes
   1608 *
   1609 * This is only useful when the memblock allocator has already been torn
   1610 * down, but we are still initializing the system.  Pages are released directly
   1611 * to the buddy allocator.
   1612 */
   1613void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
   1614{
   1615	phys_addr_t cursor, end;
   1616
   1617	end = base + size - 1;
   1618	memblock_dbg("%s: [%pa-%pa] %pS\n",
   1619		     __func__, &base, &end, (void *)_RET_IP_);
   1620	kmemleak_free_part_phys(base, size);
   1621	cursor = PFN_UP(base);
   1622	end = PFN_DOWN(base + size);
   1623
   1624	for (; cursor < end; cursor++) {
   1625		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
   1626		totalram_pages_inc();
   1627	}
   1628}
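       
       /*
        * Illustrative example: returning a boot-time reservation to the buddy
        * allocator once memblock itself can no longer be used; the region
        * names are hypothetical:
        *
        *	memblock_free_late(early_table_base, early_table_size);
        */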
   1629
   1630/*
   1631 * Remaining API functions
   1632 */
   1633
   1634phys_addr_t __init_memblock memblock_phys_mem_size(void)
   1635{
   1636	return memblock.memory.total_size;
   1637}
   1638
   1639phys_addr_t __init_memblock memblock_reserved_size(void)
   1640{
   1641	return memblock.reserved.total_size;
   1642}
   1643
   1644/* lowest address */
   1645phys_addr_t __init_memblock memblock_start_of_DRAM(void)
   1646{
   1647	return memblock.memory.regions[0].base;
   1648}
   1649
   1650phys_addr_t __init_memblock memblock_end_of_DRAM(void)
   1651{
   1652	int idx = memblock.memory.cnt - 1;
   1653
   1654	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
   1655}
   1656
   1657static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
   1658{
   1659	phys_addr_t max_addr = PHYS_ADDR_MAX;
   1660	struct memblock_region *r;
   1661
   1662	/*
   1663	 * Translate the memory @limit size into the max address within one of
   1664	 * the memory regions. If @limit exceeds the total size of those
   1665	 * regions, max_addr keeps its original value of PHYS_ADDR_MAX.
   1666	 */
   1667	for_each_mem_region(r) {
   1668		if (limit <= r->size) {
   1669			max_addr = r->base + limit;
   1670			break;
   1671		}
   1672		limit -= r->size;
   1673	}
   1674
   1675	return max_addr;
   1676}
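       
       /*
        * Worked example (illustrative): with memory regions
        * [0x00000000, 0x80000000) and [0x100000000, 0x180000000), a @limit of
        * 3G consumes all 2G of the first region plus 1G of the second, so the
        * result is 0x100000000 + SZ_1G = 0x140000000. A @limit above the 4G
        * total leaves max_addr at PHYS_ADDR_MAX.
        */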
   1677
   1678void __init memblock_enforce_memory_limit(phys_addr_t limit)
   1679{
   1680	phys_addr_t max_addr;
   1681
   1682	if (!limit)
   1683		return;
   1684
   1685	max_addr = __find_max_addr(limit);
   1686
   1687	/* @limit exceeds the total size of the memory, do nothing */
   1688	if (max_addr == PHYS_ADDR_MAX)
   1689		return;
   1690
   1691	/* truncate both memory and reserved regions */
   1692	memblock_remove_range(&memblock.memory, max_addr,
   1693			      PHYS_ADDR_MAX);
   1694	memblock_remove_range(&memblock.reserved, max_addr,
   1695			      PHYS_ADDR_MAX);
   1696}
   1697
   1698void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
   1699{
   1700	int start_rgn, end_rgn;
   1701	int i, ret;
   1702
   1703	if (!size)
   1704		return;
   1705
   1706	if (!memblock_memory->total_size) {
   1707		pr_warn("%s: No memory registered yet\n", __func__);
   1708		return;
   1709	}
   1710
   1711	ret = memblock_isolate_range(&memblock.memory, base, size,
   1712						&start_rgn, &end_rgn);
   1713	if (ret)
   1714		return;
   1715
   1716	/* remove all the MAP regions */
   1717	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
   1718		if (!memblock_is_nomap(&memblock.memory.regions[i]))
   1719			memblock_remove_region(&memblock.memory, i);
   1720
   1721	for (i = start_rgn - 1; i >= 0; i--)
   1722		if (!memblock_is_nomap(&memblock.memory.regions[i]))
   1723			memblock_remove_region(&memblock.memory, i);
   1724
   1725	/* truncate the reserved regions */
   1726	memblock_remove_range(&memblock.reserved, 0, base);
   1727	memblock_remove_range(&memblock.reserved,
   1728			base + size, PHYS_ADDR_MAX);
   1729}
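       
       /*
        * Illustrative behaviour: memblock_cap_memory_range(SZ_1G, SZ_2G)
        * keeps only memory inside [1G, 3G) (plus any NOMAP regions outside
        * that window) and clips the reserved regions to the same range.
        */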
   1730
   1731void __init memblock_mem_limit_remove_map(phys_addr_t limit)
   1732{
   1733	phys_addr_t max_addr;
   1734
   1735	if (!limit)
   1736		return;
   1737
   1738	max_addr = __find_max_addr(limit);
   1739
   1740	/* @limit exceeds the total size of the memory, do nothing */
   1741	if (max_addr == PHYS_ADDR_MAX)
   1742		return;
   1743
   1744	memblock_cap_memory_range(0, max_addr);
   1745}
   1746
   1747static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
   1748{
   1749	unsigned int left = 0, right = type->cnt;
   1750
   1751	do {
   1752		unsigned int mid = (right + left) / 2;
   1753
   1754		if (addr < type->regions[mid].base)
   1755			right = mid;
   1756		else if (addr >= (type->regions[mid].base +
   1757				  type->regions[mid].size))
   1758			left = mid + 1;
   1759		else
   1760			return mid;
   1761	} while (left < right);
   1762	return -1;
   1763}
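       
       /*
        * Worked example (illustrative): with regions [0x1000, 0x2000) and
        * [0x4000, 0x5000), memblock_search() returns 1 for addr 0x4800, and
        * -1 for both 0x3000 (inside the hole) and 0x2000 (region end
        * addresses are exclusive).
        */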
   1764
   1765bool __init_memblock memblock_is_reserved(phys_addr_t addr)
   1766{
   1767	return memblock_search(&memblock.reserved, addr) != -1;
   1768}
   1769
   1770bool __init_memblock memblock_is_memory(phys_addr_t addr)
   1771{
   1772	return memblock_search(&memblock.memory, addr) != -1;
   1773}
   1774
   1775bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
   1776{
   1777	int i = memblock_search(&memblock.memory, addr);
   1778
   1779	if (i == -1)
   1780		return false;
   1781	return !memblock_is_nomap(&memblock.memory.regions[i]);
   1782}
   1783
   1784int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
   1785			 unsigned long *start_pfn, unsigned long *end_pfn)
   1786{
   1787	struct memblock_type *type = &memblock.memory;
   1788	int mid = memblock_search(type, PFN_PHYS(pfn));
   1789
   1790	if (mid == -1)
   1791		return -1;
   1792
   1793	*start_pfn = PFN_DOWN(type->regions[mid].base);
   1794	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
   1795
   1796	return memblock_get_region_node(&type->regions[mid]);
   1797}
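       
       /*
        * Illustrative example: looking up the node and pfn span of the
        * region covering a given pfn:
        *
        *	unsigned long start_pfn, end_pfn;
        *	int nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
        *
        *	if (nid != -1)
        *		pr_debug("pfn %lx: node %d, span [%lx, %lx)\n",
        *			 pfn, nid, start_pfn, end_pfn);
        */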
   1798
   1799/**
   1800 * memblock_is_region_memory - check if a region is a subset of memory
   1801 * @base: base of region to check
   1802 * @size: size of region to check
   1803 *
   1804 * Check if the region [@base, @base + @size) is a subset of a memory block.
   1805 *
   1806 * Return:
   1807 * true if the region is a subset of a memory block, false otherwise.
   1808 */
   1809bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
   1810{
   1811	int idx = memblock_search(&memblock.memory, base);
   1812	phys_addr_t end = base + memblock_cap_size(base, &size);
   1813
   1814	if (idx == -1)
   1815		return false;
   1816	return (memblock.memory.regions[idx].base +
   1817		 memblock.memory.regions[idx].size) >= end;
   1818}
   1819
   1820/**
   1821 * memblock_is_region_reserved - check if a region intersects reserved memory
   1822 * @base: base of region to check
   1823 * @size: size of region to check
   1824 *
   1825 * Check if the region [@base, @base + @size) intersects a reserved
   1826 * memory block.
   1827 *
   1828 * Return:
   1829 * True if they intersect, false if not.
   1830 */
   1831bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
   1832{
   1833	return memblock_overlaps_region(&memblock.reserved, base, size);
   1834}
   1835
   1836void __init_memblock memblock_trim_memory(phys_addr_t align)
   1837{
   1838	phys_addr_t start, end, orig_start, orig_end;
   1839	struct memblock_region *r;
   1840
   1841	for_each_mem_region(r) {
   1842		orig_start = r->base;
   1843		orig_end = r->base + r->size;
   1844		start = round_up(orig_start, align);
   1845		end = round_down(orig_end, align);
   1846
   1847		if (start == orig_start && end == orig_end)
   1848			continue;
   1849
   1850		if (start < end) {
   1851			r->base = start;
   1852			r->size = end - start;
   1853		} else {
   1854			memblock_remove_region(&memblock.memory,
   1855					       r - memblock.memory.regions);
   1856			r--;
   1857		}
   1858	}
   1859}
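       
       /*
        * Worked example (illustrative): with align = SZ_2M, a region
        * [0x1ff000, 0x601000) is trimmed to [0x200000, 0x600000), while a
        * region smaller than the alignment, e.g. [0x1ff000, 0x201000),
        * rounds to an empty range and is removed outright.
        */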
   1860
   1861void __init_memblock memblock_set_current_limit(phys_addr_t limit)
   1862{
   1863	memblock.current_limit = limit;
   1864}
   1865
   1866phys_addr_t __init_memblock memblock_get_current_limit(void)
   1867{
   1868	return memblock.current_limit;
   1869}
   1870
   1871static void __init_memblock memblock_dump(struct memblock_type *type)
   1872{
   1873	phys_addr_t base, end, size;
   1874	enum memblock_flags flags;
   1875	int idx;
   1876	struct memblock_region *rgn;
   1877
   1878	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
   1879
   1880	for_each_memblock_type(idx, type, rgn) {
   1881		char nid_buf[32] = "";
   1882
   1883		base = rgn->base;
   1884		size = rgn->size;
   1885		end = base + size - 1;
   1886		flags = rgn->flags;
   1887#ifdef CONFIG_NUMA
   1888		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
   1889			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
   1890				 memblock_get_region_node(rgn));
   1891#endif
   1892		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
   1893			type->name, idx, &base, &end, &size, nid_buf, flags);
   1894	}
   1895}
   1896
   1897static void __init_memblock __memblock_dump_all(void)
   1898{
   1899	pr_info("MEMBLOCK configuration:\n");
   1900	pr_info(" memory size = %pa reserved size = %pa\n",
   1901		&memblock.memory.total_size,
   1902		&memblock.reserved.total_size);
   1903
   1904	memblock_dump(&memblock.memory);
   1905	memblock_dump(&memblock.reserved);
   1906#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
   1907	memblock_dump(&physmem);
   1908#endif
   1909}
   1910
   1911void __init_memblock memblock_dump_all(void)
   1912{
   1913	if (memblock_debug)
   1914		__memblock_dump_all();
   1915}
   1916
   1917void __init memblock_allow_resize(void)
   1918{
   1919	memblock_can_resize = 1;
   1920}
   1921
   1922static int __init early_memblock(char *p)
   1923{
   1924	if (p && strstr(p, "debug"))
   1925		memblock_debug = 1;
   1926	return 0;
   1927}
   1928early_param("memblock", early_memblock);
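       
       /*
        * Usage note: booting with "memblock=debug" on the kernel command
        * line sets memblock_debug, which enables the memblock_dbg() traces
        * and lets memblock_dump_all() below print the full region tables.
        */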
   1929
   1930static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
   1931{
   1932	struct page *start_pg, *end_pg;
   1933	phys_addr_t pg, pgend;
   1934
   1935	/*
   1936	 * Convert start_pfn/end_pfn to struct page pointers.
   1937	 */
   1938	start_pg = pfn_to_page(start_pfn - 1) + 1;
   1939	end_pg = pfn_to_page(end_pfn - 1) + 1;
   1940
   1941	/*
   1942	 * Convert to physical addresses, and round start upwards and end
   1943	 * downwards.
   1944	 */
   1945	pg = PAGE_ALIGN(__pa(start_pg));
   1946	pgend = __pa(end_pg) & PAGE_MASK;
   1947
   1948	/*
   1949	 * If there are free pages between these, free the section of the
   1950	 * memmap array.
   1951	 */
   1952	if (pg < pgend)
   1953		memblock_phys_free(pg, pgend - pg);
   1954}
   1955
   1956/*
   1957 * The mem_map array can get very big.  Free the unused area of the memory map.
   1958 */
   1959static void __init free_unused_memmap(void)
   1960{
   1961	unsigned long start, end, prev_end = 0;
   1962	int i;
   1963
   1964	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
   1965	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
   1966		return;
   1967
   1968	/*
   1969	 * This relies on each bank being in address order.
   1970	 * The banks are sorted previously in bootmem_init().
   1971	 */
   1972	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
   1973#ifdef CONFIG_SPARSEMEM
   1974		/*
   1975		 * Take care not to free memmap entries that don't exist
   1976		 * due to SPARSEMEM sections which aren't present.
   1977		 */
   1978		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
   1979#endif
   1980		/*
   1981		 * Align down here since many operations in the VM subsystem
   1982		 * presume that there are no holes in the memory map inside
   1983		 * a pageblock.
   1984		 */
   1985		start = round_down(start, pageblock_nr_pages);
   1986
   1987		/*
   1988		 * If we had a previous bank, and there is a space
   1989		 * between the current bank and the previous, free it.
   1990		 */
   1991		if (prev_end && prev_end < start)
   1992			free_memmap(prev_end, start);
   1993
   1994		/*
   1995		 * Align up here since many operations in the VM subsystem
   1996		 * presume that there are no holes in the memory map inside
   1997		 * a pageblock.
   1998		 */
   1999		prev_end = ALIGN(end, pageblock_nr_pages);
   2000	}
   2001
   2002#ifdef CONFIG_SPARSEMEM
   2003	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
   2004		prev_end = ALIGN(end, pageblock_nr_pages);
   2005		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
   2006	}
   2007#endif
   2008}
   2009
   2010static void __init __free_pages_memory(unsigned long start, unsigned long end)
   2011{
   2012	int order;
   2013
   2014	while (start < end) {
   2015		order = min(MAX_ORDER - 1UL, __ffs(start));
   2016
   2017		while (start + (1UL << order) > end)
   2018			order--;
   2019
   2020		memblock_free_pages(pfn_to_page(start), start, order);
   2021
   2022		start += (1UL << order);
   2023	}
   2024}
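       
       /*
        * Worked example (illustrative): freeing pfns [12, 40). The order is
        * bounded by the alignment of @start (__ffs) and trimmed so the block
        * does not overshoot @end:
        *
        *	start 12 (0b001100): order 2, 4 pages  -> start 16
        *	start 16 (0b010000): order 4, 16 pages -> start 32
        *	start 32 (0b100000): order 5 overshoots, reduced to 3, 8 pages
        */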
   2025
   2026static unsigned long __init __free_memory_core(phys_addr_t start,
   2027				 phys_addr_t end)
   2028{
   2029	unsigned long start_pfn = PFN_UP(start);
   2030	unsigned long end_pfn = min_t(unsigned long,
   2031				      PFN_DOWN(end), max_low_pfn);
   2032
   2033	if (start_pfn >= end_pfn)
   2034		return 0;
   2035
   2036	__free_pages_memory(start_pfn, end_pfn);
   2037
   2038	return end_pfn - start_pfn;
   2039}
   2040
   2041static void __init memmap_init_reserved_pages(void)
   2042{
   2043	struct memblock_region *region;
   2044	phys_addr_t start, end;
   2045	u64 i;
   2046
   2047	/* initialize struct pages for the reserved regions */
   2048	for_each_reserved_mem_range(i, &start, &end)
   2049		reserve_bootmem_region(start, end);
   2050
   2051	/* and also treat struct pages for the NOMAP regions as PageReserved */
   2052	for_each_mem_region(region) {
   2053		if (memblock_is_nomap(region)) {
   2054			start = region->base;
   2055			end = start + region->size;
   2056			reserve_bootmem_region(start, end);
   2057		}
   2058	}
   2059}
   2060
   2061static unsigned long __init free_low_memory_core_early(void)
   2062{
   2063	unsigned long count = 0;
   2064	phys_addr_t start, end;
   2065	u64 i;
   2066
   2067	memblock_clear_hotplug(0, -1);
   2068
   2069	memmap_init_reserved_pages();
   2070
   2071	/*
   2072	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
   2073	 * because in some cases, e.g. when Node 0 has no RAM installed,
   2074	 * low RAM will be on Node 1.
   2075	 */
   2076	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
   2077				NULL)
   2078		count += __free_memory_core(start, end);
   2079
   2080	return count;
   2081}
   2082
   2083static int reset_managed_pages_done __initdata;
   2084
   2085void reset_node_managed_pages(pg_data_t *pgdat)
   2086{
   2087	struct zone *z;
   2088
   2089	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
   2090		atomic_long_set(&z->managed_pages, 0);
   2091}
   2092
   2093void __init reset_all_zones_managed_pages(void)
   2094{
   2095	struct pglist_data *pgdat;
   2096
   2097	if (reset_managed_pages_done)
   2098		return;
   2099
   2100	for_each_online_pgdat(pgdat)
   2101		reset_node_managed_pages(pgdat);
   2102
   2103	reset_managed_pages_done = 1;
   2104}
   2105
   2106/**
   2107 * memblock_free_all - release free pages to the buddy allocator
   2108 */
   2109void __init memblock_free_all(void)
   2110{
   2111	unsigned long pages;
   2112
   2113	free_unused_memmap();
   2114	reset_all_zones_managed_pages();
   2115
   2116	pages = free_low_memory_core_early();
   2117	totalram_pages_add(pages);
   2118}
   2119
   2120#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
   2121
   2122static int memblock_debug_show(struct seq_file *m, void *private)
   2123{
   2124	struct memblock_type *type = m->private;
   2125	struct memblock_region *reg;
   2126	int i;
   2127	phys_addr_t end;
   2128
   2129	for (i = 0; i < type->cnt; i++) {
   2130		reg = &type->regions[i];
   2131		end = reg->base + reg->size - 1;
   2132
   2133		seq_printf(m, "%4d: ", i);
   2134		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
   2135	}
   2136	return 0;
   2137}
   2138DEFINE_SHOW_ATTRIBUTE(memblock_debug);
   2139
   2140static int __init memblock_init_debugfs(void)
   2141{
   2142	struct dentry *root = debugfs_create_dir("memblock", NULL);
   2143
   2144	debugfs_create_file("memory", 0444, root,
   2145			    &memblock.memory, &memblock_debug_fops);
   2146	debugfs_create_file("reserved", 0444, root,
   2147			    &memblock.reserved, &memblock_debug_fops);
   2148#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
   2149	debugfs_create_file("physmem", 0444, root, &physmem,
   2150			    &memblock_debug_fops);
   2151#endif
   2152
   2153	return 0;
   2154}
   2155__initcall(memblock_init_debugfs);
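       
       /*
        * Usage note: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK set,
        * the region tables are readable at runtime; sample output (addresses
        * are system dependent):
        *
        *	# cat /sys/kernel/debug/memblock/memory
        *	   0: 0x0000000080000000..0x00000000ffffffff
        */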
   2156
   2157#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */