cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ioremap.c (2522B)


// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

/* Map a physical MMIO range with a non-cacheable mapping. */
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	/* Let the I/O workarounds layer provide the mapping if it is active. */
	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);

/* Map a physical MMIO range with a non-cached, write-combining mapping. */
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);

/* Map a physical range with a cacheable (coherent) mapping. */
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

/* Map a physical range using caller-supplied protection flags, sanitized below. */
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);

/* Map size bytes at effective address ea to physical address pa, one page at a time. */
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))  /* Should clean up */
			return err;
	}

	return 0;
}

/* Allocate a vmalloc area in the ioremap region and map the physical range into it. */
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;
	unsigned long va;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	va = (unsigned long)area->addr;

	ret = ioremap_page_range(va, va + size, pa, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	/* Mapping failed: undo any partial page-table entries and release the area. */
	vunmap_range(va, va + size);
	free_vm_area(area);

	return NULL;
}
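
For context, the typical consumer of these helpers is a driver that maps a device's MMIO registers with ioremap() and then accesses them through the readl()/writel() accessors instead of plain pointer dereferences. The module below is a minimal sketch of that pattern, not part of this file; the physical base address, window size, register offset, and all "example" names are made-up placeholders.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

/* Hypothetical MMIO window of an imaginary device (all values are placeholders). */
#define EXAMPLE_MMIO_BASE	0xfe000000UL
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_STATUS_REG	0x04

static void __iomem *example_regs;

static int __init example_init(void)
{
	/* Map the device registers uncached; ioremap() returns NULL on failure. */
	example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;

	/* Always go through the MMIO accessors for reads and writes. */
	pr_info("example: status = 0x%08x\n",
		readl(example_regs + EXAMPLE_STATUS_REG));
	return 0;
}

static void __exit example_exit(void)
{
	/* Tear down the mapping created by ioremap(). */
	iounmap(example_regs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

A driver needing write-combining or cacheable semantics would call ioremap_wc() or ioremap_coherent() in the same way; on this architecture all three funnel into __ioremap_caller() (or the I/O workarounds layer), which ultimately reaches do_ioremap() above.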