cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

proc-arm922.S (10529B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2001 Altera Corporation
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM922.
 *
 *  CONFIG_CPU_ARM922_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	4

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  (I think this should
 * be 32768).
 */
#define CACHE_DLIMIT	8192
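
/*
 * Illustration (not from the original source): the clean+invalidate
 * loops below drive the CP15 "clean+invalidate D cache index" op
 * (c7, c14, 2) with an index word built from the constants above:
 * segment number in bits [6:5], line index in bits [31:26].  A
 * minimal C sketch of the same iteration, assuming a hypothetical
 * clean_inv_dindex() wrapper around the mcr:
 *
 *	for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
 *		for (int idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
 *			clean_inv_dindex((idx << 26) | (seg << 5));
 */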


	.text
/*
 * cpu_arm922_proc_init()
 */
ENTRY(cpu_arm922_proc_init)
	ret	lr

/*
 * cpu_arm922_proc_fin()
 */
ENTRY(cpu_arm922_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm922_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm922_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm922_reset)
	.popsection

/*
 * cpu_arm922_do_idle()
 */
	.align	5
ENTRY(cpu_arm922_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr


#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm922_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm922_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm922_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm922_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm922_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
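
/*
 * Illustration (not from the original source): the CACHE_DLIMIT test
 * above amounts to the following C sketch, with flush_whole_cache(),
 * clean_inv_dline() and inv_iline() as hypothetical stand-ins for
 * __flush_whole_cache and the per-line mcr ops:
 *
 *	if (end - start >= CACHE_DLIMIT) {
 *		flush_whole_cache();		// cheaper beyond 8 KiB
 *	} else {
 *		for (addr = start; addr < end; addr += CACHE_DLINESIZE) {
 *			clean_inv_dline(addr);
 *			if (flags & VM_EXEC)
 *				inv_iline(addr);	// keep I-side in step
 *		}
 *	}
 */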

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
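
/*
 * Illustration (not from the original source): this is the classic
 * I/D-coherency sequence for a non-snooping Harvard core, used e.g.
 * after writing instructions into memory.  Hedged C sketch, with
 * hypothetical clean_dline()/inv_iline() wrappers for the mcr ops:
 *
 *	for (addr = start & ~(CACHE_DLINESIZE - 1); addr < end;
 *	     addr += CACHE_DLINESIZE) {
 *		clean_dline(addr);	// push written data to memory
 *		inv_iline(addr);	// force I-fetch from memory
 *	}
 *	drain_write_buffer();
 *	return 0;			// user variant reports success
 */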

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm922_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm922_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
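
/*
 * Illustration (not from the original source): the two mcrne ops above
 * protect data sharing a cache line with the buffer edges; a partially
 * covered line is cleaned (written back) before the loop discards it.
 * Hedged C sketch with hypothetical clean_dline()/inv_dline() wrappers:
 *
 *	if (start & (CACHE_DLINESIZE - 1))
 *		clean_dline(start);	// save the leading partial line
 *	if (end & (CACHE_DLINESIZE - 1))
 *		clean_dline(end);	// save the trailing partial line
 *	for (addr = start & ~(CACHE_DLINESIZE - 1); addr < end;
 *	     addr += CACHE_DLINESIZE)
 *		inv_dline(addr);	// now safe to discard
 */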

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm922_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm922_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm922_dma_clean_range
	bcs	arm922_dma_inv_range
	b	arm922_dma_flush_range
ENDPROC(arm922_dma_map_area)
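
/*
 * Illustration (not from the original source): the cmp/beq/bcs pair
 * above dispatches on enum dma_data_direction (DMA_BIDIRECTIONAL=0,
 * DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2).  Equivalent C sketch:
 *
 *	if (dir == DMA_TO_DEVICE)
 *		dma_clean_range(start, end);	// CPU wrote; device reads
 *	else if (dir > DMA_TO_DEVICE)
 *		dma_inv_range(start, end);	// device wrote; CPU reads
 *	else
 *		dma_flush_range(start, end);	// bidirectional: both
 */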

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm922_dma_unmap_area)
	ret	lr
ENDPROC(arm922_dma_unmap_area)

	.globl	arm922_flush_kern_cache_louis
	.equ	arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm922
#endif
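
/*
 * Illustration (not from the original source): define_cache_functions
 * (proc-macros.S) emits an arm922_cache_fns table of the entry points
 * above, called through struct cpu_cache_fns.  Roughly, assuming the
 * field layout of <asm/cacheflush.h> in this tree:
 *
 *	struct cpu_cache_fns arm922_cache_fns = {
 *		.flush_icache_all	= arm922_flush_icache_all,
 *		.flush_kern_all		= arm922_flush_kern_cache_all,
 *		.flush_user_all		= arm922_flush_user_cache_all,
 *		.flush_user_range	= arm922_flush_user_cache_range,
 *		.dma_map_area		= arm922_dma_map_area,
 *		...
 *	};
 */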


ENTRY(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm922_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
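
/*
 * Illustration (not from the original source): on a VIVT core such as
 * the ARM922T, a context switch must scrub the caches before the new
 * page tables take effect.  The sequence above, as C-like pseudo-steps:
 *
 *	clean_inv_whole_dcache();	// write back dirty lines (c7,c14,2)
 *	invalidate_icache();		// c7,c5,0
 *	drain_write_buffer();		// c7,c10,4
 *	write_ttbr(pgd);		// c2,c0,0: new translation base
 *	invalidate_tlbs();		// c8,c7,0: drop stale translations
 */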

/*
 * cpu_arm922_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm922_setup, #function
__arm922_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm922_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__arm922_setup, . - __arm922_setup

	/*
	 * CP15 control register bit layout for the crval masks below:
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 */
	.type	arm922_crval, #object
arm922_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
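
/*
 * Illustration (not from the original source): __arm922_setup applies
 * the crval masks to the CP15 control register and returns the result
 * in r0 for the common startup code to write back.  In C terms:
 *
 *	ctrl = read_cp15_control();
 *	ctrl &= ~0x00003f3f;	// crval 'clear' mask
 *	ctrl |=  0x00003135;	// 'mmuset' ('ucset' when !CONFIG_MMU)
 *	return ctrl;
 */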

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm922_name, "ARM922T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm922_proc_info,#object
__arm922_proc_info:
	.long	0x41009220
	.long	0xff00fff0
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm922_setup, __arm922_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm922_name
	.long	arm922_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm922_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm922_proc_info, . - __arm922_proc_info
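
/*
 * Illustration (not from the original source): the first two words of
 * __arm922_proc_info are the CPU ID value and mask.  At boot the kernel
 * selects this record when the masked main ID register matches:
 *
 *	if ((read_cpuid_id() & 0xff00fff0) == 0x41009220)
 *		// implementer 0x41 (ARM Ltd.), part number 0x922
 *		use(__arm922_proc_info);
 */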