cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

init.c (12014B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
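/* Clear the given bits in the cached CP15 control register value and return it. */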
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
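/* ATAG_INITRD passes a virtual address, so convert it to a physical one. */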
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

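/* ATAG_INITRD2 already carries a physical address. */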
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

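/* Derive the minimum, lowmem and highmem PFN limits from the memblock layout. */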
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

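/* Derive the DMA zone limit from the machine descriptor's dma_zone_size, if any. */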
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

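/* Hand the per-zone upper PFN bounds to the core mm via free_area_init(). */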
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
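/*
 * A PFN is valid if the pageblock containing it overlaps a region that is
 * present in memblock.memory.
 */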
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry for
	 * it, because the freed memory map is rounded to pageblock
	 * boundaries.
	 */
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

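/*
 * Permanently remove a block of memory from the kernel's view.  Only
 * permitted until arm_memblock_init() has finished.
 */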
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_phys_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
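/* Read this CPU's I-cache line size from CTR and keep the smallest value seen. */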
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

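/* Reserve the kernel image, initrd, page tables and platform regions in memblock. */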
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	reserve_initrd_mem();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

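/* Establish the PFN limits, run the early memory test and initialise the zones. */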
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

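/* Release every highmem page (PFNs above max_low_pfn) to the page allocator. */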
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

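/* Apply (set) or revert (clear) the given section permissions in one mm. */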
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/**
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all other
 * CPUs will spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

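/* Apply the NX permissions (nx_perms) to every mm via stop_machine(). */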
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

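/* Make kernel text and rodata read-only (ro_perms) in every mm via stop_machine(). */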
static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

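/* Lock down kernel permissions, then poison and free the init sections. */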
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
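/* Poison the initrd area and return its pages to the page allocator. */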
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif