cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

memcpy.S (1945B)


/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include "sysdep.h"
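
/*
 * PRE_BNEZAD/BNEZAD (from sysdep.h) decrement a loop counter register
 * and branch back while it is nonzero; LABLE_ALIGN aligns the loop head.
 */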

ENTRY(__memcpy)
ENTRY(memcpy)
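	/*
	 * Arguments per the C-SKY ABI: r0 = dest, r1 = src, r2 = len.
	 * r12 saves dest so it can be returned in r0.
	 */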
	/* Test if len is less than 4 bytes.  */
	mov	r12, r0
	cmplti	r2, 4
	bt	.L_copy_by_byte

	andi	r13, r0, 3
	movi	r19, 4
	/* Test if dest is not 4-byte aligned.  */
	bnez	r13, .L_dest_not_aligned

/* Hardware can handle unaligned access directly.  */
.L_dest_aligned:
	/* If dest is aligned, then copy.  */
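	/* r18 = len >> 4: number of 16-byte blocks to copy.  */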
	zext	r18, r2, 31, 4

	/* Test if len is less than 16 bytes.  */
	bez	r18, .L_len_less_16bytes
	movi	r19, 0

	LABLE_ALIGN
.L_len_larger_16bytes:
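	/*
	 * CK860 interleaves single-register loads and stores; other
	 * cores batch four word loads into r20-r23 before storing.
	 */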
#if defined(__CK860__)
	ldw	r3, (r1, 0)
	stw	r3, (r0, 0)
	ldw	r3, (r1, 4)
	stw	r3, (r0, 4)
	ldw	r3, (r1, 8)
	stw	r3, (r0, 8)
	ldw	r3, (r1, 12)
	addi	r1, 16
	stw	r3, (r0, 12)
	addi	r0, 16
#else
	ldw	r20, (r1, 0)
	ldw	r21, (r1, 4)
	ldw	r22, (r1, 8)
	ldw	r23, (r1, 12)
	stw	r20, (r0, 0)
	stw	r21, (r0, 4)
	stw	r22, (r0, 8)
	stw	r23, (r0, 12)
	PRE_BNEZAD (r18)
	addi	r1, 16
	addi	r0, 16
#endif
	BNEZAD (r18, .L_len_larger_16bytes)

.L_len_less_16bytes:
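	/* r18 = bits [3:2] of len: remaining whole words.  */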
	zext	r18, r2, 3, 2
	bez	r18, .L_copy_by_byte
.L_len_less_16bytes_loop:
	ldw	r3, (r1, 0)
	PRE_BNEZAD (r18)
	addi	r1, 4
	stw	r3, (r0, 0)
	addi	r0, 4
	BNEZAD (r18, .L_len_less_16bytes_loop)

/* Copy the remaining bytes (len is now less than 4).  */
.L_copy_by_byte:
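	/* r18 = len & 3: trailing bytes to copy.  */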
	zext	r18, r2, 1, 0
	bez	r18, .L_return
.L_copy_by_byte_loop:
	ldb	r3, (r1, 0)
	PRE_BNEZAD (r18)
	addi	r1, 1
	stb	r3, (r0, 0)
	addi	r0, 1
	BNEZAD (r18, .L_copy_by_byte_loop)

.L_return:
	mov	r0, r12
	rts

/*
 * If dest is not aligned, copy the 4 - (dest & 3) leading bytes that
 * make it aligned.
 */
.L_dest_not_aligned:
	sub	r13, r19, r13
	sub	r2, r13

/* Byte-copy until dest is aligned.  */
.L_dest_not_aligned_loop:
	ldb	r3, (r1, 0)
	PRE_BNEZAD (r13)
	addi	r1, 1
	stb	r3, (r0, 0)
	addi	r0, 1
	BNEZAD (r13, .L_dest_not_aligned_loop)
	cmplti	r2, 4
	bt	.L_copy_by_byte

	/* Dest is now aligned; rejoin the word-copy path.  */
	jbr	.L_dest_aligned
ENDPROC(__memcpy)
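
For readers who prefer C, the following is a minimal sketch of the copy
strategy the assembly implements: byte-copy until dest is 4-byte aligned,
move 16-byte blocks, then a word tail and a byte tail. The function name
csky_memcpy_sketch is hypothetical, and memcpy() stands in for the word
moves because portable C cannot dereference a possibly unaligned src
(the C-SKY hardware can, per the comment in the assembly). It is an
illustration, not the kernel's implementation.

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Hypothetical sketch of the copy strategy above; illustration only. */
	void *csky_memcpy_sketch(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		if (len >= 4) {
			/* .L_dest_not_aligned: align dest to 4 bytes. */
			while (((uintptr_t)d & 3) && len) {
				*d++ = *s++;
				len--;
			}
			/* .L_len_larger_16bytes: 16 bytes per iteration. */
			for (; len >= 16; len -= 16, d += 16, s += 16)
				memcpy(d, s, 16); /* four word loads/stores */
			/* .L_len_less_16bytes: remaining whole words. */
			for (; len >= 4; len -= 4, d += 4, s += 4)
				memcpy(d, s, 4);
		}
		/* .L_copy_by_byte: trailing bytes. */
		while (len) {
			*d++ = *s++;
			len--;
		}
		return dst;
	}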