cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb.c (7011B)


/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

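/*
 * Note (editorial addition): in the loops below, the entry address handed to
 * the invalidate_*tlb_entry_no_isync() helpers encodes the way number in its
 * low bits and the entry index in the VPN-field position (hence the
 * "i << PAGE_SHIFT"); batching the no-isync invalidations and issuing a
 * single isync afterwards keeps the full flush cheap.
 */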

static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}


void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is some other task's user
 * mapping, we invalidate its context, so that when that mapping is next
 * swapped in, a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}
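
/*
 * Illustration (editorial addition): after the reset above, the next
 * activate_context() for this mm hands it a fresh ASID, so stale TLB
 * entries tagged with the old ASID can never match again; the invalidation
 * is lazy and no TLB entry is touched here.
 */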


#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
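
/*
 * Worked example (editorial addition, assuming a hypothetical configuration
 * with 4 ways of 4 entries each): _TLB_ENTRIES = 4 << 2 = 16, so
 * local_flush_tlb_range() below invalidates page by page only for ranges of
 * up to 16 pages; anything larger is cheaper to handle by dropping the
 * whole context.
 */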

void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}
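
/*
 * Note (editorial addition): invalidate_itlb_mapping() and
 * invalidate_dtlb_mapping() probe the TLB against the ASIDs currently held
 * in the RASID register, which is why the loop above temporarily installs
 * the target mm's ASID and restores the previous RASID value afterwards.
 */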

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}
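
/*
 * Note (editorial addition): the DTLB entry is dropped unconditionally, but
 * the ITLB can only hold translations for pages that are executed from, so
 * it is probed only for VM_EXEC mappings.
 */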

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}
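
/*
 * Note (editorial addition): page-wise invalidation is attempted only for
 * ranges that lie above user space (>= TASK_SIZE) but below the kernel's
 * static map (<= PAGE_OFFSET), i.e. page-table-mapped kernel memory such as
 * the vmalloc area, and that are smaller than the TLB itself; everything
 * else takes the full-flush fallback.
 */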

void update_mmu_tlb(struct vm_area_struct *vma,
		    unsigned long address, pte_t *ptep)
{
	local_flush_tlb_page(vma, address);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);	/* release the mapping taken by pte_offset_map() */
	return pteval;
}
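
/*
 * Note (editorial addition): kernel threads run with task->mm == NULL and
 * borrow the last user mm through active_mm, hence the fallback at the top
 * of get_pte_for_vaddr().
 */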

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked as non-present. A non-present PTE for a page with
 * non-zero refcount and zero mapcount is normal for a batched TLB flush
 * operation. Zero refcount means that the page was freed prematurely.
 * Non-zero mapcount is unusual, but does not necessarily mean an error, and
 * is therefore only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
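
/*
 * Worked example (editorial addition, illustrative values assuming 4 KiB
 * pages, so PAGE_MASK == 0xfffff000): a TLB translation r1 = 0x12345007
 * checked against a PTE of 0x12345003 passes, since
 * (pte ^ r1) & PAGE_MASK == 0 and only attribute bits differ; a PTE of
 * 0x12346003 fails, meaning the TLB points at a different physical frame
 * than the page table.
 */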

void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */