cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ioremap.c (12845B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
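/*
 * Illustrative sketch (not part of the upstream file): a driver would
 * typically pair ioremap() with the readl()/writel() accessors and release
 * the mapping with iounmap().  The base address and register offsets below
 * are made up for the example:
 *
 *	void __iomem *regs;
 *	u32 dev_id;
 *
 *	regs = ioremap(0x10000000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);	// enable the (hypothetical) device
 *	dev_id = readl(regs + 0x00);	// read its ID register
 *	iounmap(regs);
 */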
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is in ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

void __check_vmalloc_seq(struct mm_struct *mm)
{
	int seq;

	do {
		seq = atomic_read(&init_mm.context.vmalloc_seq);
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		/*
		 * Use a store-release so that other CPUs that observe the
		 * counter's new value are guaranteed to see the results of the
		 * memcpy as well.
		 */
		atomic_set_release(&mm->context.vmalloc_seq, seq);
	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
}
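/*
 * Illustrative note (not part of the upstream file): the consumer side,
 * check_vmalloc_seq() in <asm/mmu_context.h>, roughly compares
 * mm->context.vmalloc_seq against init_mm.context.vmalloc_seq and calls
 * __check_vmalloc_seq() when they differ, e.g.:
 *
 *	if (atomic_read(&mm->context.vmalloc_seq) !=
 *	    atomic_read(&init_mm.context.vmalloc_seq))
 *		__check_vmalloc_seq(mm);
 *
 * so the store-release above pairs with that read of the counter.
 */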

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
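/*
 * Worked example (illustrative, not part of the upstream file): a 1MB
 * section mapping ends up with vm->size == SZ_1M + PAGE_SIZE because of
 * the guard page, so (size & ~(SZ_1M - 1)) masks it back to SZ_1M before
 * the section-sized loop below walks the region.
 */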
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			atomic_inc_return_release(&init_mm.context.vmalloc_seq);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

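	/*
	 * Descriptive note (added, not upstream): on classic (non-LPAE) ARM,
	 * each Linux PMD covers 2MB and folds two 1MB hardware section
	 * descriptors, which is why the loop writes pmd[0] and pmd[1] and
	 * advances the pointer by two entries per PMD_SIZE step.
	 */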
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

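		/*
		 * Descriptive note (added, not upstream): a supersection maps
		 * 16MB and carries physical address bits [35:32] in descriptor
		 * bits [23:20]; (pfn >> (32 - PAGE_SHIFT)) & 0xf extracts those
		 * bits for large-physical-address (36-bit) mappings.
		 */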
		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
		    mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
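/*
 * Worked example (illustrative, not part of the upstream file): for
 * phys_addr = 0x20001004 and size = 8, offset = 0x004 and pfn = 0x20001;
 * __arm_ioremap_pfn_caller() then page-aligns the mapping to one 4K page
 * and the returned cookie points 4 bytes into that mapping.
 */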

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
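/*
 * Illustrative sketch (not part of the upstream file): the three variants
 * above differ only in memory type - MT_DEVICE, MT_DEVICE_CACHED and
 * MT_DEVICE_WC.  A framebuffer, for instance, is commonly mapped
 * write-combined; the address and size below are made up:
 *
 *	void __iomem *fb = ioremap_wc(0x4c000000, SZ_8M);
 *
 *	if (fb)
 *		memset_io(fb, 0, SZ_8M);	// clear the screen
 */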

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory, for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
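/*
 * Illustrative sketch (not part of the upstream file; the SRAM address and
 * my_reprogram_clocks() are made up): platform code typically maps on-chip
 * SRAM executable and copies a self-contained routine into it with fncpy()
 * from <asm/fncpy.h> before calling it, roughly:
 *
 *	void __iomem *sram = __arm_ioremap_exec(0x40300000, SZ_64K, false);
 *	void (*sram_fn)(void);
 *
 *	if (sram) {
 *		sram_fn = fncpy(sram, &my_reprogram_clocks, SZ_4K);
 *		sram_fn();
 *	}
 */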

void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
{
	set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

	if (!(res->flags & IORESOURCE_IO))
		return -EINVAL;

	if (res->end > IO_SPACE_LIMIT)
		return -EINVAL;

	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL(pci_remap_iospace);
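/*
 * Illustrative sketch (not part of the upstream file, values made up): a
 * PCI host bridge driver with a 64K I/O window would typically hand the
 * bus-side resource and the CPU-side physical base to this helper:
 *
 *	struct resource io_res = {
 *		.name  = "PCI I/O",
 *		.start = 0,
 *		.end   = SZ_64K - 1,
 *		.flags = IORESOURCE_IO,
 *	};
 *
 *	err = pci_remap_iospace(&io_res, 0x5f800000);
 */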

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(pfn);
}