cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb-v6.S (2522B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-v6.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 6 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

#define HARVARD_TLB

/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vm_area_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v6wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mov	ip, #0
	mmid	r3, r3				@ get vm_mm->context.id
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
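	@ (ARMv6 invalidate-single-entry ops take the ASID in MVA bits
	@ [7:0], so r0 now holds the page address ORed with the ASID)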
	mov	r1, r1, lsl #PAGE_SHIFT
	vma_vm_flags r2, r2			@ get vma->vm_flags
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
	tst	r2, #VM_EXEC			@ Executable area ?
	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA (was 1)
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
	ret	lr

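For orientation, the routine above can be read as the following C sketch. This is illustrative only, not kernel code: PAGE_SHIFT_V6, ASID_MASK, drain_write_buffer(), invalidate_dtlb_mva() and invalidate_itlb_mva() are hypothetical stand-ins for PAGE_SHIFT and the cp15 operations. Like the assembly, it rounds both bounds down to page boundaries and keeps the ASID in the low bits of the address passed to the invalidate operations.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_V6 12u    /* 4 KiB pages; stand-in for PAGE_SHIFT */
    #define ASID_MASK     0xffu  /* the ASID occupies MVA bits [7:0] */

    /* Hypothetical stand-ins for the cp15 operations used above. */
    static void drain_write_buffer(void) { /* mcr p15, 0, rX, c7, c10, 4 */ }
    static void invalidate_dtlb_mva(uint32_t mva)  /* mcr p15, 0, rX, c8, c6, 1 */
    { printf("D-TLB inv %#x\n", (unsigned)mva); }
    static void invalidate_itlb_mva(uint32_t mva)  /* mcr p15, 0, rX, c8, c5, 1 */
    { printf("I-TLB inv %#x\n", (unsigned)mva); }

    static void flush_user_tlb_range_model(uint32_t start, uint32_t end,
                                           uint32_t asid, int executable)
    {
        /* Page-align both bounds; fold the ASID into the MVA. */
        uint32_t mva = ((start >> PAGE_SHIFT_V6) << PAGE_SHIFT_V6)
                     | (asid & ASID_MASK);
        end = (end >> PAGE_SHIFT_V6) << PAGE_SHIFT_V6;

        drain_write_buffer();
        while (mva < end) {                    /* cmp r0, r1; blo 1b */
            invalidate_dtlb_mva(mva);          /* D side: always */
            if (executable)
                invalidate_itlb_mva(mva);      /* I side: VM_EXEC only */
            mva += 1u << PAGE_SHIFT_V6;
        }
        drain_write_buffer();                  /* same op as the final DSB */
    }

    int main(void)
    {
        /* e.g. flush two pages of a VM_EXEC mapping, ASID 5 */
        flush_user_tlb_range_model(0x8000, 0xa000, 5, 1);
        return 0;
    }

The kernel-range variant below follows the same loop, but invalidates both TLB sides unconditionally (kernel mappings carry no per-process ASID) and finishes with an extra prefetch flush.
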
/*
 *	v6wbi_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v6wbi_flush_kern_tlb_range)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
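	@ (kernel-range flushes may touch mappings of currently executing
	@ code, so stale prefetched instructions are flushed here as well)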
	ret	lr

	__INIT

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v6wbi, v6wbi_tlb_flags
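
For completeness, here is a rough C-side view of what define_tlb_functions v6wbi, v6wbi_tlb_flags provides. The struct cpu_tlb_fns shape is abridged from <asm/tlbflush.h> and may differ in detail across kernel versions; the _model/_MODEL names below are illustrative placeholders, not real kernel symbols.

    struct vm_area_struct;  /* opaque for this sketch */

    /* Abridged from <asm/tlbflush.h>: the per-CPU-type TLB function table. */
    struct cpu_tlb_fns {
        void (*flush_user_range)(unsigned long start, unsigned long end,
                                 struct vm_area_struct *vma);
        void (*flush_kern_range)(unsigned long start, unsigned long end);
        unsigned long tlb_flags;
    };

    /* The two entry points implemented in this file, as seen from C. */
    extern void v6wbi_flush_user_tlb_range(unsigned long, unsigned long,
                                           struct vm_area_struct *);
    extern void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);

    #define V6WBI_TLB_FLAGS_MODEL 0UL  /* placeholder for v6wbi_tlb_flags (TLB_* bits) */

    /* Roughly what the assembler macro emits: a table that generic code
     * dispatches through on multi-TLB kernels via cpu_tlb.flush_user_range(...)
     * and cpu_tlb.flush_kern_range(...). */
    const struct cpu_tlb_fns v6wbi_tlb_fns_model = {
        .flush_user_range = v6wbi_flush_user_tlb_range,
        .flush_kern_range = v6wbi_flush_kern_tlb_range,
        .tlb_flags        = V6WBI_TLB_FLAGS_MODEL,
    };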