cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bootmem_info.c (3390B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>

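/*
 * Tag a bootmem-allocated page as carrying boot metadata: the bootmem
 * type goes in page->index, the caller-supplied info (in this file
 * always a section or node number) in page private, and an extra
 * reference keeps the page alive while it is registered.
 */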
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
	page->index = type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

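/*
 * Drop the reference taken by get_page_bootmem(). Once only the base
 * reference of the reserved page remains, clear the bootmem metadata
 * and release the page via free_reserved_page().
 */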
void put_page_bootmem(struct page *page)
{
	unsigned long type = page->index;

	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->index = 0;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
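/*
 * Without vmemmap, a section's memmap is backed by ordinary bootmem
 * pages: pin every page backing the memmap itself (SECTION_INFO) and
 * the mem_section_usage structure (MIX_SECTION_INFO).
 */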
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
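	/* mapsize is now the count of pages backing this section's memmap. */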

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
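/*
 * With vmemmap, the pages backing the memmap are registered by the
 * arch-provided register_page_bootmem_memmap(); only the usage
 * structure still needs pinning here.
 */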
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

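/*
 * Register all bootmem metadata for one node: first the pages holding
 * the pglist_data itself (NODE_INFO), then every memory section that
 * actually belongs to this node.
 */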
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN.  To avoid registering a pfn against
		 * multiple nodes, we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}