cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cache-v4wb.S (6163B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
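/*
 * With the cache sizes above this works out to 64 KiB on the SA-110 and
 * 32 KiB on the SA-1100/SA-1110: ranges at least this large are handled
 * by branching to __flush_whole_cache below rather than by iterating
 * over individual lines.
 */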

	.data
	.align	2
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
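/*
 * The write-back D cache is cleaned here by loading CACHE_DSIZE bytes
 * from a dedicated flush area: each load allocates a line, evicting
 * (and writing back) whatever was cached there before.  flush_base is
 * toggled between two adjacent CACHE_DSIZE windows (the eor below) so
 * that the next whole-cache flush misses in the cache instead of
 * hitting the lines just loaded.
 */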
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
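/*
 * The SA-1100/SA-1110 additionally have a small minicache; when a flush
 * window for it is configured, read its 512 bytes as well so that any
 * dirty minicache lines are also written back.
 */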
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */
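	/*
	 * The add above turns the (addr, size) pair into the (start, end)
	 * pair expected by the coherent range code below, which this entry
	 * point falls through into.
	 */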

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
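/*
 * Note: the mov r0, #0 below supplies the operand for the cp15
 * operations and, on kernels where coherent_user_range returns an int,
 * also serves as the "success" return value.
 */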
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr


/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
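/*
 * The tst/mcrne pairs below clean a partially covered first or last
 * line before the invalidate loop runs, so that valid data sharing
 * those lines with the range is not discarded.
 */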
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
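/*
 * The direction dispatch below relies on the enum dma_data_direction
 * ordering (DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2):
 * beq takes DMA_TO_DEVICE to the clean-only path, bcs takes
 * DMA_FROM_DEVICE to the invalidate-only path, and DMA_BIDIRECTIONAL
 * falls through to a full clean+invalidate.
 */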
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
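/*
 * Nothing to do here: all cache maintenance for this CPU is performed
 * in v4wb_dma_map_area above, so unmap is a no-op.
 */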
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

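/*
 * The "louis" (Level of Unification Inner Shareable) flush is only
 * meaningful on multi-level cache hierarchies; with a single cache
 * level it is simply aliased to the full flush_kern_cache_all.
 */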
	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb
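
	@ The define_cache_functions macro in proc-macros.S emits the
	@ v4wb_cache_fns table of the entry points defined above
	@ (flush_icache_all, flush_kern_cache_all, flush_kern_cache_louis,
	@ flush_user_cache_all, flush_user_cache_range, coherent_kern_range,
	@ coherent_user_range, flush_kern_dcache_area, dma_map_area,
	@ dma_unmap_area and dma_flush_range), which the generic ARM cache
	@ glue calls through struct cpu_cache_fns.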