cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pageattr.c (6028B)
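This is arch/arm64/mm/pageattr.c from the kernel tree: helpers for changing the permissions (read-only, writable, executable, valid/invalid) of page-granular kernel mappings on arm64.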


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

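/*
 * Bits to set and clear in each PTE visited: filled in by the callers
 * below and passed to apply_to_page_range() through its opaque data
 * pointer.
 */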
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

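/*
 * When true, the linear map is mapped at page granularity, so the
 * permissions of individual pages (and their linear aliases) can be
 * changed safely.
 */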
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

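/*
 * Both rodata_full and debug_pagealloc force the linear map to be built
 * from page mappings, which is what makes modifying it tractable here.
 */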
bool can_set_direct_map(void)
{
	return rodata_full || debug_pagealloc_enabled();
}

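/*
 * apply_to_page_range() callback: rewrite one PTE in place. No TLB
 * maintenance is done here; callers flush the affected range afterwards.
 */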
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

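/*
 * Common helper for the set_memory_*() calls below: validate that the
 * range lies entirely within one page-mapped vmalloc area, mirror
 * read-only changes into the linear map if required, then apply the
 * change and flush.
 */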
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}

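/*
 * Usage sketch for set_memory_ro()/set_memory_rw() (hypothetical caller,
 * not part of this file): vmalloc() memory is page-mapped and has
 * VM_ALLOC set, so it passes the checks in change_memory_common() above:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	if (buf) {
 *		set_memory_ro((unsigned long)buf, 1);	// writes now fault
 *		...
 *		set_memory_rw((unsigned long)buf, 1);	// writable again
 *		vfree(buf);
 *	}
 */
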
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

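/*
 * Remove a page's linear-map entry by clearing PTE_VALID. As the _noflush
 * suffix indicates, TLB maintenance is left to the caller.
 */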
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

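/*
 * Restore a page's linear-map entry to its default state: valid and
 * writable. Again, no TLB flush is performed here.
 */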
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}

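/*
 * With CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the linear map as
 * they are freed and remapped on allocation, so stray accesses to freed
 * memory fault immediately.
 */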
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!can_set_direct_map())
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
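	/* A section (block) mapping on the linear map implies presence. */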
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}