cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

syscall-abi-asm.S (7284B)


// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test
//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking.  The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0:	SVE VL, 0 for FP only
// x1:	SME VL
//
//	GPRs:	gpr_in, gpr_out
//	FPRs:	fpr_in, fpr_out
//	Zn:	z_in, z_out
//	Pn:	p_in, p_out
//	FFR:	ffr_in, ffr_out
//	ZA:	za_in, za_out
//	SVCR:	svcr_in, svcr_out
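//
// (A sketch of a C driver that sets up these buffers follows the listing.)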

#include "syscall-abi.h"

.arch_extension sve

/*
 * LDR (vector to ZA array):
 *	LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _ldr_za nw, nxbase, offset=0
	.inst	0xe1000000			\
		| (((\nw) & 3) << 13)		\
		| ((\nxbase) << 5)		\
		| ((\offset) & 7)
.endm

/*
 * STR (vector from ZA array):
 *	STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _str_za nw, nxbase, offset=0
	.inst	0xe1200000			\
		| (((\nw) & 3) << 13)		\
		| ((\nxbase) << 5)		\
		| ((\offset) & 7)
.endm
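
// The two macros above hand-encode the SME LDR/STR (ZA array vector)
// instructions as raw .inst words, presumably so this file still assembles
// with toolchains that predate SME support: \nw & 3 selects one of w12-w15
// as the vector select register and \nxbase the X base register. For
// example, "_ldr_za 12, 2" emits .inst 0xe1000040, i.e. LDR ZA[W12, #0], [X2].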

.globl do_syscall
do_syscall:
	// Store callee saved registers x19-x30 plus x0 and x1 (112 bytes)
	stp	x29, x30, [sp, #-112]!
	mov	x29, sp
	stp	x0, x1, [sp, #16]
	stp	x19, x20, [sp, #32]
	stp	x21, x22, [sp, #48]
	stp	x23, x24, [sp, #64]
	stp	x25, x26, [sp, #80]
	stp	x27, x28, [sp, #96]

	// Set SVCR if we're doing SME
	cbz	x1, 1f
	adrp	x2, svcr_in
	ldr	x2, [x2, :lo12:svcr_in]
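	// (S3_3_C4_C2_2 is the encoding of SVCR, spelled out numerically so
	// that assemblers without SME support accept it)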
	msr	S3_3_C4_C2_2, x2

	// Load ZA if it's enabled - uses x12 as scratch due to SME LDR.
	// x2 still holds svcr_in here; when x1 is 0 the cbz above skips
	// this block too, since ZA can only be enabled when SME is tested
	// (otherwise the tbz would test a stale x2).
	tbz	x2, #SVCR_ZA_SHIFT, 1f
	mov	w12, #0
	ldr	x2, =za_in
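	// ZA is a VL x VL byte array and x1 holds the vector length in
	// bytes, so w12 indexes the rows: each pass loads one horizontal
	// slice ZA[w12] from x2, then advances x2 by one row of VL bytes.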
2:	_ldr_za 12, 2
	add	x2, x2, x1
	add	x12, x12, #1
	cmp	x1, x12
	bne	2b
1:

	// Load GPRs x8-x28, and save our FP/LR for later comparison
	ldr	x2, =gpr_in
	add	x2, x2, #64
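	// (the #64 skips the first eight 8-byte slots, x0-x7, which carry
	// syscall arguments and are not checked)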
	ldp	x8, x9, [x2], #16
	ldp	x10, x11, [x2], #16
	ldp	x12, x13, [x2], #16
	ldp	x14, x15, [x2], #16
	ldp	x16, x17, [x2], #16
	ldp	x18, x19, [x2], #16
	ldp	x20, x21, [x2], #16
	ldp	x22, x23, [x2], #16
	ldp	x24, x25, [x2], #16
	ldp	x26, x27, [x2], #16
	ldr	x28, [x2], #8
	str	x29, [x2], #8		// FP
	str	x30, [x2], #8		// LR

	// Load FPRs if we're not doing SVE
	cbnz	x0, 1f
	ldr	x2, =fpr_in
	ldp	q0, q1, [x2]
	ldp	q2, q3, [x2, #16 * 2]
	ldp	q4, q5, [x2, #16 * 4]
	ldp	q6, q7, [x2, #16 * 6]
	ldp	q8, q9, [x2, #16 * 8]
	ldp	q10, q11, [x2, #16 * 10]
	ldp	q12, q13, [x2, #16 * 12]
	ldp	q14, q15, [x2, #16 * 14]
	ldp	q16, q17, [x2, #16 * 16]
	ldp	q18, q19, [x2, #16 * 18]
	ldp	q20, q21, [x2, #16 * 20]
	ldp	q22, q23, [x2, #16 * 22]
	ldp	q24, q25, [x2, #16 * 24]
	ldp	q26, q27, [x2, #16 * 26]
	ldp	q28, q29, [x2, #16 * 28]
	ldp	q30, q31, [x2, #16 * 30]
1:

	// Load the SVE registers if we're doing SVE/SME
	cbz	x0, 1f

	ldr	x2, =z_in
	ldr	z0, [x2, #0, MUL VL]
	ldr	z1, [x2, #1, MUL VL]
	ldr	z2, [x2, #2, MUL VL]
	ldr	z3, [x2, #3, MUL VL]
	ldr	z4, [x2, #4, MUL VL]
	ldr	z5, [x2, #5, MUL VL]
	ldr	z6, [x2, #6, MUL VL]
	ldr	z7, [x2, #7, MUL VL]
	ldr	z8, [x2, #8, MUL VL]
	ldr	z9, [x2, #9, MUL VL]
	ldr	z10, [x2, #10, MUL VL]
	ldr	z11, [x2, #11, MUL VL]
	ldr	z12, [x2, #12, MUL VL]
	ldr	z13, [x2, #13, MUL VL]
	ldr	z14, [x2, #14, MUL VL]
	ldr	z15, [x2, #15, MUL VL]
	ldr	z16, [x2, #16, MUL VL]
	ldr	z17, [x2, #17, MUL VL]
	ldr	z18, [x2, #18, MUL VL]
	ldr	z19, [x2, #19, MUL VL]
	ldr	z20, [x2, #20, MUL VL]
	ldr	z21, [x2, #21, MUL VL]
	ldr	z22, [x2, #22, MUL VL]
	ldr	z23, [x2, #23, MUL VL]
	ldr	z24, [x2, #24, MUL VL]
	ldr	z25, [x2, #25, MUL VL]
	ldr	z26, [x2, #26, MUL VL]
	ldr	z27, [x2, #27, MUL VL]
	ldr	z28, [x2, #28, MUL VL]
	ldr	z29, [x2, #29, MUL VL]
	ldr	z30, [x2, #30, MUL VL]
	ldr	z31, [x2, #31, MUL VL]

	// Only set a non-zero FFR; test patterns must be zero since the
	// syscall should clear it. Skipping the write for a zero pattern
	// lets us handle streaming mode without FA64, where FFR is not
	// accessible.
	ldr	x2, =ffr_in
	ldr	p0, [x2, #0]
	ldr	x2, [x2, #0]
	cbz	x2, 2f
	wrffr	p0.b
2:

	ldr	x2, =p_in
	ldr	p0, [x2, #0, MUL VL]
	ldr	p1, [x2, #1, MUL VL]
	ldr	p2, [x2, #2, MUL VL]
	ldr	p3, [x2, #3, MUL VL]
	ldr	p4, [x2, #4, MUL VL]
	ldr	p5, [x2, #5, MUL VL]
	ldr	p6, [x2, #6, MUL VL]
	ldr	p7, [x2, #7, MUL VL]
	ldr	p8, [x2, #8, MUL VL]
	ldr	p9, [x2, #9, MUL VL]
	ldr	p10, [x2, #10, MUL VL]
	ldr	p11, [x2, #11, MUL VL]
	ldr	p12, [x2, #12, MUL VL]
	ldr	p13, [x2, #13, MUL VL]
	ldr	p14, [x2, #14, MUL VL]
	ldr	p15, [x2, #15, MUL VL]
1:

	// Do the syscall
	svc	#0
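	// (x8, loaded from gpr_in above, selects the syscall per the arm64
	// Linux convention; the kernel returns the result in x0)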

	// Save GPRs x8-x30
	ldr	x2, =gpr_out
	add	x2, x2, #64
	stp	x8, x9, [x2], #16
	stp	x10, x11, [x2], #16
	stp	x12, x13, [x2], #16
	stp	x14, x15, [x2], #16
	stp	x16, x17, [x2], #16
	stp	x18, x19, [x2], #16
	stp	x20, x21, [x2], #16
	stp	x22, x23, [x2], #16
	stp	x24, x25, [x2], #16
	stp	x26, x27, [x2], #16
	stp	x28, x29, [x2], #16
	str	x30, [x2]

	// Restore x0 and x1 for feature checks
	ldp	x0, x1, [sp, #16]

	// Save FPSIMD state
	ldr	x2, =fpr_out
	stp	q0, q1, [x2]
	stp	q2, q3, [x2, #16 * 2]
	stp	q4, q5, [x2, #16 * 4]
	stp	q6, q7, [x2, #16 * 6]
	stp	q8, q9, [x2, #16 * 8]
	stp	q10, q11, [x2, #16 * 10]
	stp	q12, q13, [x2, #16 * 12]
	stp	q14, q15, [x2, #16 * 14]
	stp	q16, q17, [x2, #16 * 16]
	stp	q18, q19, [x2, #16 * 18]
	stp	q20, q21, [x2, #16 * 20]
	stp	q22, q23, [x2, #16 * 22]
	stp	q24, q25, [x2, #16 * 24]
	stp	q26, q27, [x2, #16 * 26]
	stp	q28, q29, [x2, #16 * 28]
	stp	q30, q31, [x2, #16 * 30]

	// Save SVCR if we're doing SME
	cbz	x1, 1f
	mrs	x2, S3_3_C4_C2_2
	adrp	x3, svcr_out
	str	x2, [x3, :lo12:svcr_out]

	// Save ZA if it's enabled - uses x12 as scratch due to SME STR.
	// x2 holds the SVCR value just read back; as on the load side, the
	// cbz above skips this block too when x1 is 0, so the tbz never
	// tests a stale x2.
	tbz	x2, #SVCR_ZA_SHIFT, 1f
	mov	w12, #0
	ldr	x2, =za_out
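	// Mirror of the ZA load loop above: store each horizontal slice
	// ZA[w12] out to za_out, one row of VL (x1) bytes at a time.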
2:	_str_za 12, 2
	add	x2, x2, x1
	add	x12, x12, #1
	cmp	x1, x12
	bne	2b
1:

	// Save the SVE state if we have some
	cbz	x0, 1f

	ldr	x2, =z_out
	str	z0, [x2, #0, MUL VL]
	str	z1, [x2, #1, MUL VL]
	str	z2, [x2, #2, MUL VL]
	str	z3, [x2, #3, MUL VL]
	str	z4, [x2, #4, MUL VL]
	str	z5, [x2, #5, MUL VL]
	str	z6, [x2, #6, MUL VL]
	str	z7, [x2, #7, MUL VL]
	str	z8, [x2, #8, MUL VL]
	str	z9, [x2, #9, MUL VL]
	str	z10, [x2, #10, MUL VL]
	str	z11, [x2, #11, MUL VL]
	str	z12, [x2, #12, MUL VL]
	str	z13, [x2, #13, MUL VL]
	str	z14, [x2, #14, MUL VL]
	str	z15, [x2, #15, MUL VL]
	str	z16, [x2, #16, MUL VL]
	str	z17, [x2, #17, MUL VL]
	str	z18, [x2, #18, MUL VL]
	str	z19, [x2, #19, MUL VL]
	str	z20, [x2, #20, MUL VL]
	str	z21, [x2, #21, MUL VL]
	str	z22, [x2, #22, MUL VL]
	str	z23, [x2, #23, MUL VL]
	str	z24, [x2, #24, MUL VL]
	str	z25, [x2, #25, MUL VL]
	str	z26, [x2, #26, MUL VL]
	str	z27, [x2, #27, MUL VL]
	str	z28, [x2, #28, MUL VL]
	str	z29, [x2, #29, MUL VL]
	str	z30, [x2, #30, MUL VL]
	str	z31, [x2, #31, MUL VL]

	ldr	x2, =p_out
	str	p0, [x2, #0, MUL VL]
	str	p1, [x2, #1, MUL VL]
	str	p2, [x2, #2, MUL VL]
	str	p3, [x2, #3, MUL VL]
	str	p4, [x2, #4, MUL VL]
	str	p5, [x2, #5, MUL VL]
	str	p6, [x2, #6, MUL VL]
	str	p7, [x2, #7, MUL VL]
	str	p8, [x2, #8, MUL VL]
	str	p9, [x2, #9, MUL VL]
	str	p10, [x2, #10, MUL VL]
	str	p11, [x2, #11, MUL VL]
	str	p12, [x2, #12, MUL VL]
	str	p13, [x2, #13, MUL VL]
	str	p14, [x2, #14, MUL VL]
	str	p15, [x2, #15, MUL VL]

	// Only save FFR if we wrote a value to it above (i.e. if ffr_in was
	// non-zero; see the FA64 handling there)
	ldr	x2, =ffr_in
	ldr	x2, [x2, #0]
	cbz	x2, 1f
	ldr	x2, =ffr_out
	rdffr	p0.b
	str	p0, [x2, #0]
1:

	// Restore callee saved registers x19-x30
	ldp	x19, x20, [sp, #32]
	ldp	x21, x22, [sp, #48]
	ldp	x23, x24, [sp, #64]
	ldp	x25, x26, [sp, #80]
	ldp	x27, x28, [sp, #96]
	ldp	x29, x30, [sp], #112
	// Clear SVCR if we were doing SME so future tests don't have ZA
	// enabled
	cbz	x1, 1f
	msr	S3_3_C4_C2_2, xzr
1:

	ret
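

For reference, here is a minimal sketch of how a C harness might drive
do_syscall. The real driver is syscall-abi.c in the same selftest directory;
the NUM_GPR/NUM_FPR values and the buffer sizes below (sized for the maximum
architectural VL of 256 bytes) are assumptions made for illustration, not
copied from syscall-abi.h.

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>

#define NUM_GPR 31	/* x0-x30, one 8-byte slot each (assumed) */
#define NUM_FPR 32	/* q0-q31, 16 bytes each (assumed) */

/* Buffers referenced by name from the assembly; all must be defined for
 * linking even when a given test configuration never touches them. */
uint64_t gpr_in[NUM_GPR], gpr_out[NUM_GPR];
uint64_t fpr_in[NUM_FPR * 2], fpr_out[NUM_FPR * 2];
uint64_t z_in[32 * 256 / 8], z_out[32 * 256 / 8];
uint64_t p_in[16 * 256 / 64], p_out[16 * 256 / 64];
uint64_t ffr_in[256 / 64], ffr_out[256 / 64];
uint64_t za_in[256 * 256 / 8], za_out[256 * 256 / 8];
uint64_t svcr_in, svcr_out;

extern void do_syscall(int sve_vl, int sme_vl);

int main(void)
{
	int i, errors = 0;

	/* Recognisable patterns in x9-x30; x8 carries the syscall number
	 * and x0-x7 are argument registers, so neither is checked. */
	for (i = 9; i < NUM_GPR; i++)
		gpr_in[i] = 0x8000000080000000ULL | i;
	gpr_in[8] = __NR_getpid;	/* a harmless syscall */

	do_syscall(0, 0);		/* FPSIMD only: no SVE, no SME */

	/* x9-x30 should be preserved across the syscall (the assembly
	 * records the live FP/LR into gpr_in before the svc) */
	for (i = 9; i < NUM_GPR; i++)
		if (gpr_in[i] != gpr_out[i])
			errors++;

	printf("%d GPR mismatches\n", errors);
	return errors != 0;
}

Built as e.g. "gcc -o demo demo.c syscall-abi-asm.S", this exercises only the
GPR path; the real harness also fills and checks the FPR/Z/P/FFR/ZA buffers
and iterates over the supported vector lengths.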