cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

init.c (4484B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

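/* Allocate, zero and reserve the page(s) backing ZERO_PAGE(). */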
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

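/*
 * Copy one user page to another, e.g. on a copy-on-write fault; both
 * pages are temporarily mapped with kmap_atomic() for the copy.
 */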
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        vfrom = kmap_atomic(from);
        copy_page(vto, vfrom);
        kunmap_atomic(vfrom);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

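/* Report whether a page frame is usable RAM: memblock memory that is not reserved. */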
int __ref page_is_ram(unsigned long pfn)
{
        unsigned long addr = PFN_PHYS(pfn);

        return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
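/* Record the maximum PFN of each memory zone and initialize the zone data. */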
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

        free_area_init(max_zone_pfns);
}

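/* Hand all memblock-managed memory over to the page allocator and set up the zero page. */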
void __init mem_init(void)
{
        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages.  */
}
#endif /* !CONFIG_NUMA */

void __ref free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
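/* Memory hotplug: add the pages of a newly plugged physical range. */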
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(nid, start_pfn, nr_pages, params);

        if (ret)
                pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
                                __func__,  ret);

        return ret;
}

#ifdef CONFIG_NUMA
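/* Resolve the NUMA node that a hot-plugged physical address belongs to. */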
int memory_add_physaddr_to_nid(u64 start)
{
        int nid;

        nid = pa_to_nid(start);
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
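/* Memory hot-remove: tear down the pages of a previously added range. */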
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(start_pfn);

        /* With altmap the first mapped page is offset from @start */
        if (altmap)
                page += vmem_altmap_offset(altmap);
        __remove_pages(start_pfn, nr_pages, altmap);
}
#endif
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

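/*
 * Statically allocated dummy page tables: empty entries at one level
 * can point at the invalid table of the next lower level, so page
 * table walks always reach a valid (but empty) table.
 */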
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);