cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

memcpy.S (8952B)


/* SPDX-License-Identifier: GPL-2.0 */
/* memcpy.S: Sparc optimized memcpy and memmove code
 * Hand optimized from GNU libc's memcpy and memmove
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/export.h>
#define FUNC(x) 		\
	.globl	x;		\
	.type	x,@function;	\
	.align	4;		\
x:

/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c]; \
	st	%t4, [%dst + (offset) + 0x10]; \
	st	%t5, [%dst + (offset) + 0x14]; \
	st	%t6, [%dst + (offset) + 0x18]; \
	st	%t7, [%dst + (offset) + 0x1c];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	std	%t0, [%dst + (offset) + 0x00]; \
	std	%t2, [%dst + (offset) + 0x08]; \
	std	%t4, [%dst + (offset) + 0x10]; \
	std	%t6, [%dst + (offset) + 0x18];

#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	st	%t0, [%dst - (offset) - 0x10]; \
	st	%t1, [%dst - (offset) - 0x0c]; \
	st	%t2, [%dst - (offset) - 0x08]; \
	st	%t3, [%dst - (offset) - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	std	%t0, [%dst - (offset) - 0x10]; \
	std	%t2, [%dst - (offset) - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - (offset) - 0x02], %t0; \
	ldub	[%src - (offset) - 0x01], %t1; \
	stb	%t0, [%dst - (offset) - 0x02]; \
	stb	%t1, [%dst - (offset) - 0x01];

	.text
	.align	4

FUNC(memmove)
EXPORT_SYMBOL(memmove)
	cmp		%o0, %o1
	mov		%o0, %g7
	bleu		9f
	 sub		%o0, %o1, %o4

	add		%o1, %o2, %o3
	cmp		%o3, %o0
	bleu		0f
	 andcc		%o4, 3, %o5

	add		%o1, %o2, %o1
	add		%o0, %o2, %o0
	sub		%o1, 1, %o1
	sub		%o0, 1, %o0

1:	/* reverse_bytes */

	ldub		[%o1], %o4
	subcc		%o2, 1, %o2
	stb		%o4, [%o0]
	sub		%o1, 1, %o1
	bne		1b
	 sub		%o0, 1, %o0

	retl
	 mov		%g7, %o0

/* NOTE: This code is executed just for the cases,
         where %src (=%o1) & 3 is != 0.
	 We need to align it to 4. So, for (%src & 3)
	 1 we need to do ldub,lduh
	 2 lduh
	 3 just ldub
         so even if it looks weird, the branches
         are correct here. -jj
 */
78:	/* dword_align */

	andcc		%o1, 1, %g0
	be		4f
	 andcc		%o1, 2, %g0

	ldub		[%o1], %g2
	add		%o1, 1, %o1
	stb		%g2, [%o0]
	sub		%o2, 1, %o2
	bne		3f
	 add		%o0, 1, %o0
4:
	lduh		[%o1], %g2
	add		%o1, 2, %o1
	sth		%g2, [%o0]
	sub		%o2, 2, %o2
	b		3f
	 add		%o0, 2, %o0

FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
EXPORT_SYMBOL(memcpy)

	sub		%o0, %o1, %o4
	mov		%o0, %g7
9:
	andcc		%o4, 3, %o5
0:
	bne		86f
	 cmp		%o2, 15

	bleu		90f
	 andcc		%o1, 3, %g0

	bne		78b
3:
	 andcc		%o1, 4, %g0

	be		2f
	 mov		%o2, %g1

	ld		[%o1], %o4
	sub		%g1, 4, %g1
	st		%o4, [%o0]
	add		%o1, 4, %o1
	add		%o0, 4, %o0
2:
	andcc		%g1, 0xffffff80, %g0
	be		3f
	 andcc		%o0, 4, %g0

	be		82f + 4
5:
	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	sub		%g1, 128, %g1
	add		%o1, 128, %o1
	cmp		%g1, 128
	bge		5b
	 add		%o0, 128, %o0
3:
	andcc		%g1, 0x70, %g4
	be		80f
	 andcc		%g1, 8, %g0

	sethi		%hi(80f), %o5
	srl		%g4, 1, %o4
	add		%g4, %o4, %o4
	add		%o1, %g4, %o1
	sub		%o5, %o4, %o5
	jmpl		%o5 + %lo(80f), %g0
	 add		%o0, %g4, %o0

79:	/* memcpy_table */

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:	/* memcpy_table_end */
	be		81f
	 andcc		%g1, 4, %g0

	ldd		[%o1], %g2
	add		%o0, 8, %o0
	st		%g2, [%o0 - 0x08]
	add		%o1, 8, %o1
	st		%g3, [%o0 - 0x04]

81:	/* memcpy_last7 */

	be		1f
	 andcc		%g1, 2, %g0

	ld		[%o1], %g2
	add		%o1, 4, %o1
	st		%g2, [%o0]
	add		%o0, 4, %o0
1:
	be		1f
	 andcc		%g1, 1, %g0

	lduh		[%o1], %g2
	add		%o1, 2, %o1
	sth		%g2, [%o0]
	add		%o0, 2, %o0
1:
	be		1f
	 nop

	ldub		[%o1], %g2
	stb		%g2, [%o0]
1:
	retl
	 mov		%g7, %o0

82:	/* ldd_std */
	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc		%g1, 128, %g1
	add		%o1, 128, %o1
	cmp		%g1, 128
	bge		82b
	 add		%o0, 128, %o0

	andcc		%g1, 0x70, %g4
	be		84f
	 andcc		%g1, 8, %g0

	sethi		%hi(84f), %o5
	add		%o1, %g4, %o1
	sub		%o5, %g4, %o5
	jmpl		%o5 + %lo(84f), %g0
	 add		%o0, %g4, %o0

83:	/* amemcpy_table */

	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:	/* amemcpy_table_end */
	be		85f
	 andcc		%g1, 4, %g0

	ldd		[%o1], %g2
	add		%o0, 8, %o0
	std		%g2, [%o0 - 0x08]
	add		%o1, 8, %o1
85:	/* amemcpy_last7 */
	be		1f
	 andcc		%g1, 2, %g0

	ld		[%o1], %g2
	add		%o1, 4, %o1
	st		%g2, [%o0]
	add		%o0, 4, %o0
1:
	be		1f
	 andcc		%g1, 1, %g0

	lduh		[%o1], %g2
	add		%o1, 2, %o1
	sth		%g2, [%o0]
	add		%o0, 2, %o0
1:
	be		1f
	 nop

	ldub		[%o1], %g2
	stb		%g2, [%o0]
1:
	retl
	 mov		%g7, %o0

86:	/* non_aligned */
	cmp		%o2, 6
	bleu		88f
	 nop

	save		%sp, -96, %sp
	andcc		%i0, 3, %g0
	be		61f
	 andcc		%i0, 1, %g0
	be		60f
	 andcc		%i0, 2, %g0

	ldub		[%i1], %g5
	add		%i1, 1, %i1
	stb		%g5, [%i0]
	sub		%i2, 1, %i2
	bne		61f
	 add		%i0, 1, %i0
60:
	ldub		[%i1], %g3
	add		%i1, 2, %i1
	stb		%g3, [%i0]
	sub		%i2, 2, %i2
	ldub		[%i1 - 1], %g3
	add		%i0, 2, %i0
	stb		%g3, [%i0 - 1]
61:
	and		%i1, 3, %g2
	and		%i2, 0xc, %g3
	and		%i1, -4, %i1
	cmp		%g3, 4
	sll		%g2, 3, %g4
	mov		32, %g2
	be		4f
	 sub		%g2, %g4, %l0

	blu		3f
	 cmp		%g3, 0x8

	be		2f
	 srl		%i2, 2, %g3

	ld		[%i1], %i3
	add		%i0, -8, %i0
	ld		[%i1 + 4], %i4
	b		8f
	 add		%g3, 1, %g3
2:
	ld		[%i1], %i4
	add		%i0, -12, %i0
	ld		[%i1 + 4], %i5
	add		%g3, 2, %g3
	b		9f
	 add		%i1, -4, %i1
3:
	ld		[%i1], %g1
	add		%i0, -4, %i0
	ld		[%i1 + 4], %i3
	srl		%i2, 2, %g3
	b		7f
	 add		%i1, 4, %i1
4:
	ld		[%i1], %i5
	cmp		%i2, 7
	ld		[%i1 + 4], %g1
	srl		%i2, 2, %g3
	bleu		10f
	 add		%i1, 8, %i1

	ld		[%i1], %i3
	add		%g3, -1, %g3
5:
	sll		%i5, %g4, %g2
	srl		%g1, %l0, %g5
	or		%g2, %g5, %g2
	st		%g2, [%i0]
7:
	ld		[%i1 + 4], %i4
	sll		%g1, %g4, %g2
	srl		%i3, %l0, %g5
	or		%g2, %g5, %g2
	st		%g2, [%i0 + 4]
8:
	ld		[%i1 + 8], %i5
	sll		%i3, %g4, %g2
	srl		%i4, %l0, %g5
	or		%g2, %g5, %g2
	st		%g2, [%i0 + 8]
9:
	ld		[%i1 + 12], %g1
	sll		%i4, %g4, %g2
	srl		%i5, %l0, %g5
	addcc		%g3, -4, %g3
	or		%g2, %g5, %g2
	add		%i1, 16, %i1
	st		%g2, [%i0 + 12]
	add		%i0, 16, %i0
	bne,a		5b
	 ld		[%i1], %i3
10:
	sll		%i5, %g4, %g2
	srl		%g1, %l0, %g5
	srl		%l0, 3, %g3
	or		%g2, %g5, %g2
	sub		%i1, %g3, %i1
	andcc		%i2, 2, %g0
	st		%g2, [%i0]
	be		1f
	 andcc		%i2, 1, %g0

	ldub		[%i1], %g2
	add		%i1, 2, %i1
	stb		%g2, [%i0 + 4]
	add		%i0, 2, %i0
	ldub		[%i1 - 1], %g2
	stb		%g2, [%i0 + 3]
1:
	be		1f
	 nop
	ldub		[%i1], %g2
	stb		%g2, [%i0 + 4]
1:
	ret
	 restore	%g7, %g0, %o0

88:	/* short_end */

	and		%o2, 0xe, %o3
20:
	sethi		%hi(89f), %o5
	sll		%o3, 3, %o4
	add		%o0, %o3, %o0
	sub		%o5, %o4, %o5
	add		%o1, %o3, %o1
	jmpl		%o5 + %lo(89f), %g0
	 andcc		%o2, 1, %g0

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89:	/* short_table_end */

	be		1f
	 nop

	ldub		[%o1], %g2
	stb		%g2, [%o0]
1:
	retl
	 mov		%g7, %o0

90:	/* short_aligned_end */
	bne		88b
	 andcc		%o2, 8, %g0

	be		1f
	 andcc		%o2, 4, %g0

	ld		[%o1 + 0x00], %g2
	ld		[%o1 + 0x04], %g3
	add		%o1, 8, %o1
	st		%g2, [%o0 + 0x00]
	st		%g3, [%o0 + 0x04]
	add		%o0, 8, %o0
1:
	b		81b
	 mov		%o2, %g1
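
The "non_aligned" path above (labels 5/7/8/9) handles copies where source and
destination are misaligned relative to each other: it loads word-aligned 32-bit
values from the source and merges each pair of neighbouring words with
shift-and-or (sll/srl/or) so that whole aligned words can be stored to the
destination. Below is a minimal C sketch of that merge idea; it is only an
illustration, not part of this file. The name merge_copy and its exact bounds
handling are invented for the example, which assumes (src & 3) != 0 and the
big-endian word order used on SPARC.

	#include <stdint.h>
	#include <stddef.h>

	/* Copy nwords 32-bit words from a misaligned src to a word-aligned dst
	 * by loading aligned words and merging adjacent ones, mirroring the
	 * sll/srl/or sequence in the assembly above. Assumes (src & 3) != 0.
	 * Like the assembly, it may read a few bytes past the logical end of
	 * the source, but only within the last aligned word. */
	static void merge_copy(uint32_t *dst, const unsigned char *src, size_t nwords)
	{
		const uint32_t *s = (const uint32_t *)((uintptr_t)src & ~(uintptr_t)3);
		unsigned int lshift = ((uintptr_t)src & 3) * 8;	/* %g4 in the asm */
		unsigned int rshift = 32 - lshift;		/* %l0 in the asm */
		uint32_t prev = *s++;
		size_t i;

		for (i = 0; i < nwords; i++) {
			uint32_t next = *s++;
			/* big-endian: high bytes come from prev, low bytes from next */
			dst[i] = (prev << lshift) | (next >> rshift);
			prev = next;
		}
	}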