cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmap.c (2428B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	return 0;
}
arch_initcall(adjust_protection_map);

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
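
The PROT_MTE path handled by vm_get_page_prot() above can be exercised from userspace. Below is a minimal, hypothetical sketch (not part of mmap.c, and not specific to this fork) that assumes an arm64 kernel and CPU with MTE support: passing PROT_MTE to mmap() on an anonymous mapping satisfies both conditions named in the comment, the explicit PROT_MTE request and a vma that allows MTE, so the mapping gets the Normal Tagged memory type.

/*
 * Hypothetical userspace sketch: request a Normal Tagged mapping via
 * PROT_MTE. Assumes an arm64 system with MTE; on other systems the
 * mmap() call is expected to fail.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20	/* arm64-specific value from <asm/mman.h> */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p;

	/*
	 * Condition (1): PROT_MTE is passed to mmap().
	 * Condition (2): anonymous private mappings may carry MTE, so the
	 * kernel can honour the request and return a tagged mapping.
	 */
	p = mmap(NULL, page, PROT_READ | PROT_WRITE | PROT_MTE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE)");	/* e.g. kernel/CPU without MTE */
		return EXIT_FAILURE;
	}

	memset(p, 0, page);
	munmap(p, page);
	return EXIT_SUCCESS;
}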