cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb.c (7658B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

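/*
 * Local flush primitives. invtlb_all() invalidates TLB entries on the
 * current core: INVTLB_CURRENT_ALL drops every entry,
 * INVTLB_CURRENT_GFALSE only non-global (user) entries, and
 * INVTLB_CURRENT_GTRUE only global (kernel) entries.
 */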
void local_flush_tlb_all(void)
{
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_user(void)
{
	invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_user);

void local_flush_tlb_kernel(void)
{
	invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);

/*
 * All entries common to a mm share an asid. To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (asid_valid(mm, cpu))
		drop_mmu_context(mm, cpu);
	else
		cpumask_clear_cpu(cpu, mm_cpumask(mm));

	preempt_enable();
}

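/*
 * Flush a user address range. Each TLB entry maps an even/odd pair of
 * pages, so the range is rounded to double-page granularity and walked
 * in PAGE_SIZE << 1 steps. Ranges covering more entries than the size
 * threshold (tlbsize / 8 when the STLB reports sets, tlbsize / 2
 * otherwise) are handled by dropping the whole context instead, which
 * lets the mm take a fresh ASID.
 */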
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (asid_valid(mm, cpu)) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizestlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int asid = cpu_asid(cpu, mm);

			while (start < end) {
				invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
				start += (PAGE_SIZE << 1);
			}
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
	}
}

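/*
 * Kernel mappings are global, so the per-page path uses
 * INVTLB_ADDR_GTRUE_OR_ASID, which matches global entries irrespective
 * of the (zero) ASID argument; above the same size threshold, all
 * kernel entries are flushed at once.
 */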
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizestlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
			start += (PAGE_SIZE << 1);
		}
	} else {
		local_flush_tlb_kernel();
	}
	local_irq_restore(flags);
}

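/* Flush a single user page, matched by the owning mm's current ASID. */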
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (asid_valid(vma->vm_mm, cpu)) {
		int newpid;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	page &= (PAGE_MASK << 1);
	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
}

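/*
 * Install a huge-page TLB entry for a faulting address: probe for an
 * existing entry, switch the page size to PS_HUGE_SIZE, and point
 * entrylo0/entrylo1 at the two halves of the huge page. A matching
 * entry is overwritten in place (indexed write); otherwise a random
 * slot is used.
 */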
static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_HUGETLB_PAGE
	int idx;
	unsigned long lo;
	unsigned long flags;

	local_irq_save(flags);

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_HUGE_SIZE);
	lo = pmd_to_entrylo(pte_val(*ptep));
	write_csr_entrylo0(lo);
	write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	write_csr_pagesize(PS_DEFAULT_SIZE);

	local_irq_restore(flags);
#endif
}

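/*
 * Reload the TLB entry for an address after its PTE has been updated,
 * writing the even/odd PTE pair into entrylo0/entrylo1.
 */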
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	int idx;
	unsigned long flags;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	if (pte_val(*ptep) & _PAGE_HUGE) {
		__update_hugetlb(vma, address, ptep);
		return;
	}

	local_irq_save(flags);

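	/* Align ptep to the even PTE of the pair that one TLB entry maps. */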
	if ((unsigned long)ptep & sizeof(pte_t))
		ptep--;

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_entrylo0(pte_val(*ptep++));
	write_csr_entrylo1(pte_val(*ptep));
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	local_irq_restore(flags);
}

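/*
 * Program the page-table walk CSRs: PWCTL0/PWCTL1 encode the bit index
 * and width of each page-table level for the TLB refill walker. PGDH
 * points kernel (high) addresses at swapper_pg_dir, PGDL points user
 * (low) addresses at invalid_pg_dir until a user mm is activated, and
 * the CPU id is recorded in TMID.
 */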
static void setup_ptwalker(void)
{
	unsigned long pwctl0, pwctl1;
	unsigned long pgd_i = 0, pgd_w = 0;
	unsigned long pud_i = 0, pud_w = 0;
	unsigned long pmd_i = 0, pmd_w = 0;
	unsigned long pte_i = 0, pte_w = 0;

	pgd_i = PGDIR_SHIFT;
	pgd_w = PAGE_SHIFT - 3;
#if CONFIG_PGTABLE_LEVELS > 3
	pud_i = PUD_SHIFT;
	pud_w = PAGE_SHIFT - 3;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_i = PMD_SHIFT;
	pmd_w = PAGE_SHIFT - 3;
#endif
	pte_i = PAGE_SHIFT;
	pte_w = PAGE_SHIFT - 3;

	pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
	pwctl1 = pgd_i | pgd_w << 6;

	csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
	csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
	csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
	csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
	csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}

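/*
 * Emit the page-table bit layout as pr_debug lines formatted as C
 * defines, a debugging aid for the assembly TLB handlers.
 */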
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)					\
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
	pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
	pr_debug("\n");
}

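/* Node-local copies of the exception vector area, indexed by CPU. */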
#ifdef CONFIG_NUMA
static unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];

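/*
 * Install the TLB exception handlers. They are generated once, on the
 * boot CPU; with CONFIG_NUMA, every other CPU instead points its
 * EENTRY/MERRENTRY/TLBRENTRY CSRs at a node-local copy of the whole
 * exception vector area.
 */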
void setup_tlb_handler(int cpu)
{
	setup_ptwalker();
	output_pgtable_bits_defines();

	/* The tlb handlers are generated only once */
	if (cpu == 0) {
		memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
		local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
		set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
		set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
		set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
		set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
		set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
		set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
		set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
	}
#ifdef CONFIG_NUMA
	else {
		void *addr;
		struct page *page;
		const int vec_sz = sizeof(exception_handlers);

		if (pcpu_handlers[cpu])
			return;

		page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
		if (!page)
			return;

		addr = page_address(page);
		pcpu_handlers[cpu] = (unsigned long)addr;
		memcpy((void *)addr, (void *)eentry, vec_sz);
		local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
	}
#endif
}

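/* Per-CPU TLB bring-up: set default page sizes, install handlers, flush. */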
void tlb_init(int cpu)
{
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_stlbpgsize(PS_DEFAULT_SIZE);
	write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
	setup_tlb_handler(cpu);
	local_flush_tlb_all();
}