cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

misc.S (7456B)


      1/*
      2 * arch/xtensa/mm/misc.S
      3 *
      4 * Miscellaneous assembly functions.
      5 *
      6 * This file is subject to the terms and conditions of the GNU General Public
      7 * License.  See the file "COPYING" in the main directory of this archive
      8 * for more details.
      9 *
     10 * Copyright (C) 2001 - 2007 Tensilica Inc.
     11 *
     12 * Chris Zankel	<chris@zankel.net>
     13 */
     14
     15
     16#include <linux/linkage.h>
     17#include <linux/pgtable.h>
     18#include <asm/page.h>
     19#include <asm/asmmacro.h>
     20#include <asm/cacheasm.h>
     21#include <asm/tlbflush.h>
     22
     23
     24/*
     25 * clear_page and clear_user_page are the same for non-cache-aliased configs.
     26 *
     27 * clear_page (unsigned long page)
     28 *                    a2
     29 */
     30
ENTRY(clear_page)

	abi_entry_default

	/* a2 = page address.  Zero the page 32 bytes (8 words) per
	 * __loopi iteration; a3 holds the zero, a7 is the loop counter. */
	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	abi_ret_default

ENDPROC(clear_page)
     50
     51/*
     52 * copy_page and copy_user_page are the same for non-cache-aliased configs.
     53 *
     54 * copy_page (void *to, void *from)
     55 *               a2          a3
     56 */
     57
ENTRY(copy_page)

	abi_entry_default

	/* a2 = dst, a3 = src.  Copy 32 bytes per iteration, moving two
	 * words at a time through the scratch registers a8/a9. */
	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	/* Advance both pointers to the next 32-byte chunk. */
	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	abi_ret_default

ENDPROC(copy_page)
     92
     93#ifdef CONFIG_MMU
     94/*
     95 * If we have to deal with cache aliasing, we use temporary memory mappings
     96 * to ensure that the source and destination pages have the same color as
     97 * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
     98 *
     99 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
    100 * flushed by preemptive task switches. Special code in the 
     101 * fast_second_level_miss handler re-establishes the temporary mapping.
    102 * It requires that the PPNs for the destination and source addresses are
    103 * in a6, and a7, respectively.
    104 */
    105
    106/* TLB miss exceptions are treated special in the following region */
    107
    108ENTRY(__tlbtemp_mapping_start)
    109
    110#if (DCACHE_WAY_SIZE > PAGE_SIZE)
    111
    112/*
    113 * clear_page_alias(void *addr, unsigned long paddr)
    114 *                     a2              a3
    115 */
    116
ENTRY(clear_page_alias)

	abi_entry_default

	/* a2 = aliased virtual address, a3 = physical address.
	 * Build the PTE value (paddr | PAGE_KERNEL | _PAGE_HW_WRITE) in a6
	 * and install it as a temporary DTLB mapping for a2.  a4 preserves
	 * the virtual address for the idtlb below.  Per the region comment,
	 * fast_second_level_miss expects the destination PPN in a6.
	 * NOTE(review): a5 = PAGE_OFFSET appears unused in this function;
	 * presumably consumed by the miss handler — confirm. */
	movi	a5, PAGE_OFFSET
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	/* Zero the page through the temporary mapping, 32 bytes at a time. */
	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	/* We need to invalidate the temporary dtlb entry. */

	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(clear_page_alias)
    147
    148/*
    149 * copy_page_alias(void *to, void *from,
    150 *			a2	  a3
    151 *                 unsigned long to_paddr, unsigned long from_paddr)
    152 *	        		 a4			 a5
    153 */
    154
ENTRY(copy_page_alias)

	abi_entry_default

	/* a2 = dst vaddr, a3 = src vaddr, a4 = dst paddr, a5 = src paddr.
	 * Per the region comment, fast_second_level_miss requires the
	 * destination and source PPNs to stay in a6 and a7 respectively. */

	/* Setup a temporary DTLB for destination. */

	addi	a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
	wdtlb	a6, a2
	dsync

	/* Setup a temporary DTLB for source. */

	addi	a7, a5, PAGE_KERNEL
	addi	a8, a3, 1				# way1

	wdtlb	a7, a8
	dsync

	/* Copy loop: 32 bytes per iteration via scratch regs a8/a9. */
1:	__loopi a2, a4, PAGE_SIZE, 32

	l32i    a8, a3, 0
	l32i    a9, a3, 4
	s32i    a8, a2, 0
	s32i    a9, a2, 4

	l32i    a8, a3, 8
	l32i    a9, a3, 12
	s32i    a8, a2, 8
	s32i    a9, a2, 12

	l32i    a8, a3, 16
	l32i    a9, a3, 20
	s32i    a8, a2, 16
	s32i    a9, a2, 20

	l32i    a8, a3, 24
	l32i    a9, a3, 28
	s32i    a8, a2, 24
	s32i    a9, a2, 28

	addi    a2, a2, 32
	addi    a3, a3, 32

	__endl  a2, a4

	/* We need to invalidate any temporary mapping! */

	/* The loop advanced a2/a3 one page past the start; step back to
	 * the mapped addresses (the extra +1 on a3 re-selects way 1, as
	 * when the source entry was written). */
	addi	a2, a2, -PAGE_SIZE
	idtlb	a2
	dsync

	addi	a3, a3, -PAGE_SIZE+1
	idtlb	a3
	dsync

	abi_ret_default

ENDPROC(copy_page_alias)
    213
    214#endif
    215
    216#if (DCACHE_WAY_SIZE > PAGE_SIZE)
    217
    218/*
    219 * void __flush_invalidate_dcache_page_alias (addr, phys)
    220 *                                             a2    a3
    221 */
    222
ENTRY(__flush_invalidate_dcache_page_alias)

	abi_entry_default

	/* a2 = aliased vaddr, a3 = paddr.  Map the page temporarily via a
	 * way-0 DTLB entry (PTE in a6), keeping the vaddr in a4 so the
	 * entry can be dropped afterwards. */
	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	/* Write back and invalidate the page through the temp mapping. */
	___flush_invalidate_dcache_page a2 a3

	/* Invalidate the temporary DTLB entry again. */
	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page_alias)
    241
    242/*
    243 * void __invalidate_dcache_page_alias (addr, phys)
    244 *                                       a2    a3
    245 */
    246
ENTRY(__invalidate_dcache_page_alias)

	abi_entry_default

	/* a2 = aliased vaddr, a3 = paddr.  Same temporary-DTLB scheme as
	 * __flush_invalidate_dcache_page_alias, but invalidate only (no
	 * writeback). */
	movi	a7, 0			# required for exception handler
	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
	mov	a4, a2
	wdtlb	a6, a2
	dsync

	___invalidate_dcache_page a2 a3

	/* Invalidate the temporary DTLB entry again. */
	idtlb	a4
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page_alias)
    265#endif
    266
    267ENTRY(__tlbtemp_mapping_itlb)
    268
    269#if (ICACHE_WAY_SIZE > PAGE_SIZE)
    270	
ENTRY(__invalidate_icache_page_alias)

	abi_entry_default

	/* a2 = aliased vaddr, a3 = paddr.  Install a temporary ITLB entry
	 * (executable PTE in a6), keeping the vaddr in a4 for removal. */
	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
	mov	a4, a2
	witlb	a6, a2
	isync

	___invalidate_icache_page a2 a3

	/* Invalidate the temporary ITLB entry again. */
	iitlb	a4
	isync
	abi_ret_default

ENDPROC(__invalidate_icache_page_alias)
    287
    288#endif
    289
    290/* End of special treatment in tlb miss exception */
    291
ENTRY(__tlbtemp_mapping_end)

#endif /* CONFIG_MMU */
    295
    296/*
    297 * void __invalidate_icache_page(ulong start)
    298 */
    299
ENTRY(__invalidate_icache_page)

	abi_entry_default

	/* a2 = page start; a3 is passed uninitialized, i.e. used as a
	 * scratch register by the macro. */
	___invalidate_icache_page a2 a3
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_page)
    310
    311/*
    312 * void __invalidate_dcache_page(ulong start)
    313 */
    314
ENTRY(__invalidate_dcache_page)

	abi_entry_default

	/* a2 = page start; a3 is macro scratch. */
	___invalidate_dcache_page a2 a3
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_page)
    325
    326/*
    327 * void __flush_invalidate_dcache_page(ulong start)
    328 */
    329
ENTRY(__flush_invalidate_dcache_page)

	abi_entry_default

	/* a2 = page start; a3 is macro scratch.  Writeback + invalidate. */
	___flush_invalidate_dcache_page a2 a3

	dsync
	abi_ret_default

ENDPROC(__flush_invalidate_dcache_page)
    340
    341/*
    342 * void __flush_dcache_page(ulong start)
    343 */
    344
ENTRY(__flush_dcache_page)

	abi_entry_default

	/* a2 = page start; a3 is macro scratch.  Writeback only. */
	___flush_dcache_page a2 a3

	dsync
	abi_ret_default

ENDPROC(__flush_dcache_page)
    355
    356/*
    357 * void __invalidate_icache_range(ulong start, ulong size)
    358 */
    359
ENTRY(__invalidate_icache_range)

	abi_entry_default

	/* a2 = start, a3 = size; a4 is macro scratch. */
	___invalidate_icache_range a2 a3 a4
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_range)
    370
    371/*
    372 * void __flush_invalidate_dcache_range(ulong start, ulong size)
    373 */
    374
ENTRY(__flush_invalidate_dcache_range)

	abi_entry_default

	/* a2 = start, a3 = size; a4 is macro scratch. */
	___flush_invalidate_dcache_range a2 a3 a4
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_range)
    385
    386/*
     387 * void __flush_dcache_range(ulong start, ulong size)
    388 */
    389
ENTRY(__flush_dcache_range)

	abi_entry_default

	/* a2 = start, a3 = size; a4 is macro scratch.  Writeback only. */
	___flush_dcache_range a2 a3 a4
	dsync

	abi_ret_default

ENDPROC(__flush_dcache_range)
    400
    401/*
     402 * void __invalidate_dcache_range(ulong start, ulong size)
    403 */
    404
ENTRY(__invalidate_dcache_range)

	abi_entry_default

	/* a2 = start, a3 = size; a4 is macro scratch.
	 * NOTE(review): unlike the other dcache entry points there is no
	 * dsync before returning — confirm this is intentional. */
	___invalidate_dcache_range a2 a3 a4

	abi_ret_default

ENDPROC(__invalidate_dcache_range)
    414
    415/*
     416 * void __invalidate_icache_all(void)
    417 */
    418
ENTRY(__invalidate_icache_all)

	abi_entry_default

	/* No arguments; a2/a3 serve as macro scratch registers. */
	___invalidate_icache_all a2 a3
	isync

	abi_ret_default

ENDPROC(__invalidate_icache_all)
    429
    430/*
     431 * void __flush_invalidate_dcache_all(void)
    432 */
    433
ENTRY(__flush_invalidate_dcache_all)

	abi_entry_default

	/* No arguments; a2/a3 serve as macro scratch registers. */
	___flush_invalidate_dcache_all a2 a3
	dsync

	abi_ret_default

ENDPROC(__flush_invalidate_dcache_all)
    444
    445/*
     446 * void __invalidate_dcache_all(void)
    447 */
    448
ENTRY(__invalidate_dcache_all)

	abi_entry_default

	/* No arguments; a2/a3 serve as macro scratch registers. */
	___invalidate_dcache_all a2 a3
	dsync

	abi_ret_default

ENDPROC(__invalidate_dcache_all)