cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

proc-macros.S (9328B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * We need constants.h for:
 *  VMA_VM_MM
 *  VMA_VM_FLAGS
 *  VM_EXEC
 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#ifdef CONFIG_CPU_V7M
#include <asm/v7m.h>
#endif

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * vma_vm_flags - get vma->vm_flags
 */
	.macro	vma_vm_flags, rd, rn
	ldr	\rd, [\rn, #VMA_VM_FLAGS]
	.endm

/*
 * act_mm - get current->active_mm
 */
	.macro	act_mm, rd
	get_current \rd
	.if (TSK_ACTIVE_MM > IMM12_MASK)
	@ offset exceeds a 12-bit ldr immediate: add the high bits first
	add	\rd, \rd, #TSK_ACTIVE_MM & ~IMM12_MASK
	.endif
	ldr	\rd, [\rd, #TSK_ACTIVE_MM & IMM12_MASK]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 * Note: this field is 64-bit, so in big-endian the two words are swapped too.
 */
	.macro	mmid, rd, rn
#ifdef __ARMEB__
	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4]
#else
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
#endif
	.endm

/*
 * asid - mask the 8-bit ASID out of the context ID
 */
	.macro	asid, rd, rn
	and	\rd, \rn, #255
	.endm
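/*
 * Usage sketch (not part of this file): how a switch_mm-style sequence
 * combines these helpers, loosely modeled on proc-v7-2level.S; register
 * choices are illustrative only.
 *
 *	mmid	r1, r1			@ r1 = mm->context.id (low word)
 *	asid	r2, r1			@ r2 = hardware ASID (low 8 bits)
 */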

	.macro	crval, clear, mmuset, ucset
#ifdef CONFIG_MMU
	.word	\clear
	.word	\mmuset
#else
	.word	\clear
	.word	\ucset
#endif
	.endm
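/*
 * The two words crval emits are consumed by the per-CPU setup code: a
 * mask of control register (SCTLR) bits to clear, then the bits to set,
 * with the MMU variant used under CONFIG_MMU and the uncached variant
 * otherwise.
 */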

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register
 * on ARMv7.
 */
	.macro	dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
	movw	\tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	movt	\tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	ldr	\tmp, [\tmp]
#else
	mrc	p15, 0, \tmp, c0, c0, 1		@ read ctr
#endif
	lsr	\tmp, \tmp, #16
	and	\tmp, \tmp, #0xf		@ cache line size encoding
	mov	\reg, #4			@ bytes per word
	mov	\reg, \reg, lsl \tmp		@ actual cache line size
	.endm
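/*
 * Usage sketch (not part of this file): a clean-D-cache-by-range loop
 * built on dcache_line_size, modeled on the pattern used by cache-v7.S;
 * r0 = start, r1 = end, r2/r3 are scratch.
 *
 *	dcache_line_size r2, r3
 *	sub	r3, r2, #1
 *	bic	r0, r0, r3		@ align start to a cache line
 * 1:	mcr	p15, 0, r0, c7, c10, 1	@ DCCMVAC: clean D-line by MVA to PoC
 *	add	r0, r0, r2
 *	cmp	r0, r1
 *	blo	1b
 *	dsb	st			@ complete the cleans
 */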

/*
 * icache_line_size - get the minimum I-cache line size from the CTR register
 * on ARMv7.
 */
	.macro	icache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
	movw	\tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	movt	\tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
	ldr	\tmp, [\tmp]
#else
	mrc	p15, 0, \tmp, c0, c0, 1		@ read ctr
#endif
	and	\tmp, \tmp, #0xf		@ cache line size encoding
	mov	\reg, #4			@ bytes per word
	mov	\reg, \reg, lsl \tmp		@ actual cache line size
	.endm

/*
 * Sanity check the PTE configuration for the code below - which makes
 * certain assumptions about how these bits are laid out.
 */
#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if !defined (CONFIG_ARM_LPAE) && \
	(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
	 L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif	/* CONFIG_MMU */

/*
 * The ARMv6 and ARMv7 set_pte_ext translation function.
 *
 * Permission translation:
 *  YUWD  APX AP1 AP0	SVC	User
 *  0xxx   0   0   0	no acc	no acc
 *  100x   1   0   1	r/o	no acc
 *  10x0   1   0   1	r/o	no acc
 *  1011   0   0   1	r/w	no acc
 *  110x   1   1   1	r/o	r/o
 *  11x0   1   1   1	r/o	r/o
 *  1111   0   1   1	r/w	r/w
 */
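/*
 * Worked example of the table above: a young, user, writable, dirty PTE
 * (YUWD = 1111) yields APX=0 AP1=1 AP0=1, i.e. read/write for kernel and
 * user. Clearing only the dirty bit (110x) sets APX=1, making the page
 * read-only in hardware so the first write faults and Linux can mark the
 * PTE dirty.
 */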
	.macro	armv6_mt_table pfx
\pfx\()_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX	@ L_PTE_MT_VECTORS
	.endm

	.macro	armv6_set_pte_ext pfx
	str	r1, [r0], #2048			@ linux version

	bic	r3, r1, #0x000003fc		@ drop Linux-only PTE bits
	bic	r3, r3, #PTE_TYPE_MASK
	orr	r3, r3, r2			@ merge caller's extension bits
	orr	r3, r3, #PTE_EXT_AP0 | 2	@ small page type, AP0 set

	adr	ip, \pfx\()_mt_table
	and	r2, r1, #L_PTE_MT_MASK
	ldr	r2, [ip, r2]			@ look up memory-type bits

	eor	r1, r1, #L_PTE_DIRTY
	tst	r1, #L_PTE_DIRTY|L_PTE_RDONLY
	orrne	r3, r3, #PTE_EXT_APX		@ clean or read-only -> write protect

	tst	r1, #L_PTE_USER
	orrne	r3, r3, #PTE_EXT_AP1
	tstne	r3, #PTE_EXT_APX

	@ user read-only -> kernel read-only
	bicne	r3, r3, #PTE_EXT_AP0

	tst	r1, #L_PTE_XN
	orrne	r3, r3, #PTE_EXT_XN

	eor	r3, r3, r2			@ apply memory type (eor lets the
						@ table toggle bits such as APX)

	tst	r1, #L_PTE_YOUNG
	tstne	r1, #L_PTE_PRESENT
	moveq	r3, #0				@ not young or not present -> fault
	tstne	r1, #L_PTE_NONE
	movne	r3, #0				@ PROT_NONE -> fault

	str	r3, [r0]
	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
	.endm


/*
 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
 * covering most CPUs except Xscale and Xscale 3.
 *
 * Permission translation:
 *  YUWD   AP	SVC	User
 *  0xxx  0x00	no acc	no acc
 *  100x  0x00	r/o	no acc
 *  10x0  0x00	r/o	no acc
 *  1011  0x55	r/w	no acc
 *  110x  0xaa	r/w	r/o
 *  11x0  0xaa	r/w	r/o
 *  1111  0xff	r/w	r/w
 */
	.macro	armv3_set_pte_ext wc_disable=1
	str	r1, [r0], #2048			@ linux version

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0

	.if	\wc_disable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r2, #PTE_CACHEABLE
	bicne	r2, r2, #PTE_BUFFERABLE
#endif
	.endif
	str	r2, [r0]		@ hardware version
	.endm


/*
 * Xscale set_pte_ext translation, split into two halves to cope
 * with work-arounds.  r3 must be preserved by code between these
 * two macros.
 *
 * Permission translation:
 *  YUWD  AP	SVC	User
 *  0xxx  00	no acc	no acc
 *  100x  00	r/o	no acc
 *  10x0  00	r/o	no acc
 *  1011  01	r/w	no acc
 *  110x  10	r/w	r/o
 *  11x0  10	r/w	r/o
 *  1111  11	r/w	r/w
 */
	.macro	xscale_set_pte_ext_prologue
	str	r1, [r0]			@ linux version

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	tst	r3, #L_PTE_USER			@ user?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w
	.endm

	.macro	xscale_set_pte_ext_epilogue
	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0, #2048]!		@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	.endm
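/*
 * Usage sketch (schematic, not from this file): the split lets a
 * CPU-specific workaround run between the two halves while r3 still
 * holds the permission-test value.
 *
 *	xscale_set_pte_ext_prologue
 *	@ ... erratum workaround; must preserve r3 ...
 *	xscale_set_pte_ext_epilogue
 */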

.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
/*
 * If we are building for big.LITTLE with branch predictor hardening,
 * we need the processor function tables to remain available after boot.
 */
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
	.section ".rodata"
#endif
	.type	\name\()_processor_functions, #object
	.align 2
ENTRY(\name\()_processor_functions)
	.word	\dabort
	.word	\pabort
	.word	cpu_\name\()_proc_init
	.word	\bugs
	.word	cpu_\name\()_proc_fin
	.word	cpu_\name\()_reset
	.word	cpu_\name\()_do_idle
	.word	cpu_\name\()_dcache_clean_area
	.word	cpu_\name\()_switch_mm

	.if \nommu
	.word	0
	.else
	.word	cpu_\name\()_set_pte_ext
	.endif

	.if \suspend
	.word	cpu_\name\()_suspend_size
#ifdef CONFIG_ARM_CPU_SUSPEND
	.word	cpu_\name\()_do_suspend
	.word	cpu_\name\()_do_resume
#else
	.word	0
	.word	0
#endif
	.else
	.word	0
	.word	0
	.word	0
	.endif

	.size	\name\()_processor_functions, . - \name\()_processor_functions
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
	.previous
#endif
.endm
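/*
 * Usage example (as in proc-v7.S): declare the v7 struct processor,
 * naming the CPU-specific abort handlers and enabling suspend/resume.
 *
 *	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 */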

.macro define_cache_functions name:req
	.align 2
	.type	\name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
	.long	\name\()_flush_icache_all
	.long	\name\()_flush_kern_cache_all
	.long	\name\()_flush_kern_cache_louis
	.long	\name\()_flush_user_cache_all
	.long	\name\()_flush_user_cache_range
	.long	\name\()_coherent_kern_range
	.long	\name\()_coherent_user_range
	.long	\name\()_flush_kern_dcache_area
	.long	\name\()_dma_map_area
	.long	\name\()_dma_unmap_area
	.long	\name\()_dma_flush_range
	.size	\name\()_cache_fns, . - \name\()_cache_fns
.endm
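/*
 * Usage example (as in cache-v7.S): one line declares the whole
 * struct cpu_cache_fns, provided the \name\()_* functions exist.
 *
 *	define_cache_functions v7
 */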

.macro define_tlb_functions name:req, flags_up:req, flags_smp
	.type	\name\()_tlb_fns, #object
	.align 2
ENTRY(\name\()_tlb_fns)
	.long	\name\()_flush_user_tlb_range
	.long	\name\()_flush_kern_tlb_range
	.ifnb \flags_smp
		ALT_SMP(.long	\flags_smp )
		ALT_UP(.long	\flags_up )
	.else
		.long	\flags_up
	.endif
	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
.endm
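/*
 * Usage example (as in tlb-v7.S): the SMP flags word is selected at
 * runtime by the ALT_SMP/ALT_UP alternatives above.
 *
 *	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
 */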

.macro globl_equ x, y
	.globl	\x
	.equ	\x, \y
.endm

.macro	initfn, func, base
	.long	\func - \base
.endm

	/*
	 * Macro to calculate the log2 size for the protection region
	 * registers. This calculates rd = log2(size) - 1.  tmp must
	 * not be the same register as rd.
	 */
.macro	pr_sz, rd, size, tmp
	mov	\tmp, \size, lsr #12
	mov	\rd, #11
1:	movs	\tmp, \tmp, lsr #1
	addne	\rd, \rd, #1
	bne	1b
.endm
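	/*
	 * Worked example: for \size = 8KB, \tmp starts at 2 (8192 >> 12);
	 * one non-zero shift bumps \rd from 11 to 12 = log2(8192) - 1.
	 * In the region registers N encodes a size of 2^(N+1) bytes, so
	 * the macro's floor of N = 11 corresponds to a 4KB region.
	 */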

	/*
	 * Macro to generate a protection region register value
	 * given a pre-masked address, size, and enable bit.
	 * Corrupts size.
	 */
.macro	pr_val, dest, addr, size, enable
	pr_sz	\dest, \size, \size		@ calculate log2(size) - 1
	orr	\dest, \addr, \dest, lsl #1	@ mask in the region size
	orr	\dest, \dest, \enable
.endm
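	/*
	 * Usage sketch (illustrative only; the base address, size and the
	 * destination coprocessor register below are hypothetical):
	 *
	 *	ldr	r0, =0x10000		@ pre-masked region base
	 *	mov	r1, #SZ_8K		@ region size (corrupted)
	 *	pr_val	r2, r0, r1, #1		@ r2 = base | size << 1 | enable
	 *	mcr	p15, 0, r2, c6, c1, 0	@ hypothetical: program region 1
	 */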