cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ioremap.c (3653B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC ioremap.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <asm/tlbflush.h>

extern int mem_init_done;

static unsigned int fixmaps_used __initdata;

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t p;
	unsigned long v;
	unsigned long offset, last_addr;
	struct vm_struct *area = NULL;

	/* Don't allow wraparound or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = addr & ~PAGE_MASK;
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - p;
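	/*
	 * Worked example (assuming OpenRISC's 8 KiB pages, PAGE_SHIFT == 13):
	 * addr = 0x91000042 and size = 0x10 give last_addr = 0x91000051, so
	 * offset = 0x42, p = 0x91000000, and size rounds up to 0x2000; the
	 * caller ultimately gets back v + 0x42.
	 */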

	if (likely(mem_init_done)) {
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;
		v = (unsigned long)area->addr;
	} else {
		if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
			return NULL;
		v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
		fixmaps_used += (size >> PAGE_SHIFT);
	}

	if (ioremap_page_range(v, v + size, p,
			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
		if (likely(mem_init_done))
			vfree(area->addr);
		else
			fixmaps_used -= (size >> PAGE_SHIFT);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)v);
}
EXPORT_SYMBOL(ioremap);
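
/*
 * Illustrative caller, not part of the original file: a minimal sketch
 * (guarded out of the build) showing that the cookie returned by
 * ioremap() already carries the sub-page offset, so callers never
 * page-align addresses themselves. The register address and length
 * below are made up.
 */
#if 0
static int example_read_reg(u32 *val)
{
	void __iomem *regs = ioremap(0x91000042, 0x10); /* hypothetical */

	if (!regs)
		return -ENOMEM;
	*val = ioread32(regs);	/* reads physical 0x91000042 directly */
	iounmap(regs);
	return 0;
}
#endif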

void iounmap(void __iomem *addr)
{
	/* If the page is from the fixmap pool then we just clear out
	 * the fixmap mapping.
	 */
	if (unlikely((unsigned long)addr > FIXADDR_START)) {
		/* This is a bit broken... we don't really know
		 * how big the area is so it's difficult to know
		 * how many fixed pages to invalidate...
		 * just flush tlb and hope for the best...
		 * consider this a FIXME
		 *
		 * Really we should be clearing out one or more page
		 * table entries for these virtual addresses so that
		 * future references cause a page fault... for now, we
		 * rely on two things:
		 *   i)  this code never gets called on known boards
		 *   ii) invalid accesses to the freed areas aren't made
		 */
		flush_tlb_all();
		return;
	}

	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);
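
/*
 * Sketch of the teardown the FIXME above asks for, illustrative only:
 * it assumes per-mapping size bookkeeping (which this file does not
 * implement) and a kernel recent enough to provide vunmap_range(),
 * which clears the page table entries and flushes the TLB for just
 * that range, so stale accesses fault instead of hitting freed pages.
 */
#if 0
static void example_fixmap_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long va = PAGE_MASK & (unsigned long)addr;

	vunmap_range(va, va + PAGE_ALIGN(size));
}
#endif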

/*
 * OK, this one's a bit tricky... ioremap can get called before memory is
 * initialized (early serial console does this) and will want to alloc a page
 * for its mapping.  No userspace pages will ever get allocated before memory
 * is initialized so this applies only to kernel pages.  In the event that
 * this is called before memory is initialized we allocate the page using
 * the memblock infrastructure.
 */

pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	if (likely(mem_init_done)) {
		/* Normal case: the page allocator is up, so use it. */
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	} else {
		/* Early boot: fall back to memblock for the PTE page. */
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
	}

	return pte;
}
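
/*
 * Sketch of the early-boot path this enables, not from the original
 * file: an ioremap() issued before mem_init_done is set takes the
 * fixmap branch above, and any PTE page that mapping needs comes from
 * memblock via pte_alloc_one_kernel(). The UART base is made up.
 */
#if 0
static void __init example_early_console_setup(void)
{
	void __iomem *uart = ioremap(0x90000000, PAGE_SIZE); /* hypothetical */

	if (uart)
		iowrite8('A', uart);	/* poke the imaginary UART */
}
#endif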