cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

early_ioremap.c (6870B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
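
/*
 * Typical boot-time usage (a sketch, not part of the original file; the
 * physical address and size below are made-up examples):
 *
 *	void __iomem *regs = early_ioremap(0xfed40000, 0x1000);
 *	if (regs) {
 *		u32 val = readl(regs);
 *		early_iounmap(regs, 0x1000);
 *	}
 *
 * Any mapping still live at late_initcall time is reported as a leak
 * by check_early_ioremap_leak() below.
 */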
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>
#include "internal.h"

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

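/*
 * Default no-op; architectures may override this __weak hook to adjust
 * the protection bits used for early memremap() mappings (x86 does this
 * for memory encryption, for instance).
 */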
pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

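/*
 * Called once by arch setup code: warn if any slot is unexpectedly in
 * use already, then record the fixed virtual address of each BTMAP slot.
 */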
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

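/*
 * Runs at late_initcall time, long after all early mappings should have
 * been torn down; any slot still in use is a leak and gets reported.
 */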
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

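/*
 * Common worker: claim a free BTMAP slot, map the page-aligned range
 * covering [phys_addr, phys_addr + size) into that slot's fixmap
 * entries, and return the virtual address of phys_addr within it.
 */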
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

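/*
 * Undo an early_ioremap(): look up which slot the mapping came from,
 * sanity-check the size against what was mapped, and clear the fixmap
 * entries again.
 */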
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx) not found slot\n",
		  __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size not consistent %08lx\n",
		  __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	      __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

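/*
 * Copy a physically addressed buffer into @dest before the normal
 * memory mapping is available.  Maps at most MAX_MAP_CHUNK bytes at a
 * time (the capacity of one slot) and handles a @src that is not
 * page-aligned.
 */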
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}

#else /* CONFIG_MMU */

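/*
 * Without an MMU there is nothing to map: physical addresses are
 * usable directly, so these stubs just cast them through.
 */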
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

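/*
 * Defined outside the #ifdef so both the MMU and !MMU builds share it;
 * it simply forwards to the matching early_iounmap() implementation.
 */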
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}