cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

assembler.h (16923B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 *  arch/arm/include/asm/assembler.h
      4 *
      5 *  Copyright (C) 1996-2000 Russell King
      6 *
      7 *  This file contains arm architecture specific defines
      8 *  for the different processors.
      9 *
     10 *  Do not include any C declarations in this file - it is included by
     11 *  assembler source.
     12 */
     13#ifndef __ASM_ASSEMBLER_H__
     14#define __ASM_ASSEMBLER_H__
     15
     16#ifndef __ASSEMBLY__
     17#error "Only include this from assembly code"
     18#endif
     19
     20#include <asm/ptrace.h>
     21#include <asm/opcodes-virt.h>
     22#include <asm/asm-offsets.h>
     23#include <asm/page.h>
     24#include <asm/thread_info.h>
     25#include <asm/uaccess-asm.h>
     26
     27#define IOMEM(x)	(x)
     28
     29/*
     30 * Endian independent macros for shifting bytes within registers.
     31 */
     32#ifndef __ARMEB__
     33#define lspull          lsr
     34#define lspush          lsl
     35#define get_byte_0      lsl #0
     36#define get_byte_1	lsr #8
     37#define get_byte_2	lsr #16
     38#define get_byte_3	lsr #24
     39#define put_byte_0      lsl #0
     40#define put_byte_1	lsl #8
     41#define put_byte_2	lsl #16
     42#define put_byte_3	lsl #24
     43#else
     44#define lspull          lsl
     45#define lspush          lsr
     46#define get_byte_0	lsr #24
     47#define get_byte_1	lsr #16
     48#define get_byte_2	lsr #8
     49#define get_byte_3      lsl #0
     50#define put_byte_0	lsl #24
     51#define put_byte_1	lsl #16
     52#define put_byte_2	lsl #8
     53#define put_byte_3      lsl #0
     54#endif
     55
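/*
 * Example usage (illustrative sketch): extract the byte that sits at memory
 * offset 1 of a loaded word in r0 into r1, independent of the configured
 * endianness:
 *
 *	mov	r1, r0, get_byte_1
 *	and	r1, r1, #0xff
 */
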
     56/* Select code for any configuration running in BE8 mode */
     57#ifdef CONFIG_CPU_ENDIAN_BE8
     58#define ARM_BE8(code...) code
     59#else
     60#define ARM_BE8(code...)
     61#endif
     62
     63/*
     64 * Data preload for architectures that support it
     65 */
     66#if __LINUX_ARM_ARCH__ >= 5
     67#define PLD(code...)	code
     68#else
     69#define PLD(code...)
     70#endif
     71
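/*
 * Example usage (illustrative sketch): prefetch ahead of a copy loop; the
 * wrapper makes the instruction disappear on pre-ARMv5 builds:
 *
 *	PLD(	pld	[r1, #32]	)
 */
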
     72/*
     73 * This can be used to enable code to cacheline align the destination
     74 * pointer when bulk writing to memory.  Experiments on StrongARM and
      75 * XScale didn't show this to be worthwhile when the cache is not
     76 * set to write-allocate (this would need further testing on XScale when WA
     77 * is used).
     78 *
     79 * On Feroceon there is much to gain however, regardless of cache mode.
     80 */
     81#ifdef CONFIG_CPU_FEROCEON
     82#define CALGN(code...) code
     83#else
     84#define CALGN(code...)
     85#endif
     86
     87#define IMM12_MASK 0xfff
     88
     89/* the frame pointer used for stack unwinding */
     90ARM(	fpreg	.req	r11	)
     91THUMB(	fpreg	.req	r7	)
     92
     93/*
     94 * Enable and disable interrupts
     95 */
     96#if __LINUX_ARM_ARCH__ >= 6
     97	.macro	disable_irq_notrace
     98	cpsid	i
     99	.endm
    100
    101	.macro	enable_irq_notrace
    102	cpsie	i
    103	.endm
    104#else
    105	.macro	disable_irq_notrace
    106	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
    107	.endm
    108
    109	.macro	enable_irq_notrace
    110	msr	cpsr_c, #SVC_MODE
    111	.endm
    112#endif
    113
    114#if __LINUX_ARM_ARCH__ < 7
    115	.macro	dsb, args
    116	mcr	p15, 0, r0, c7, c10, 4
    117	.endm
    118
    119	.macro	isb, args
    120	mcr	p15, 0, r0, c7, c5, 4
    121	.endm
    122#endif
    123
    124	.macro asm_trace_hardirqs_off, save=1
    125#if defined(CONFIG_TRACE_IRQFLAGS)
    126	.if \save
    127	stmdb   sp!, {r0-r3, ip, lr}
    128	.endif
    129	bl	trace_hardirqs_off
    130	.if \save
    131	ldmia	sp!, {r0-r3, ip, lr}
    132	.endif
    133#endif
    134	.endm
    135
    136	.macro asm_trace_hardirqs_on, cond=al, save=1
    137#if defined(CONFIG_TRACE_IRQFLAGS)
    138	/*
    139	 * actually the registers should be pushed and pop'd conditionally, but
    140	 * after bl the flags are certainly clobbered
    141	 */
    142	.if \save
    143	stmdb   sp!, {r0-r3, ip, lr}
    144	.endif
    145	bl\cond	trace_hardirqs_on
    146	.if \save
    147	ldmia	sp!, {r0-r3, ip, lr}
    148	.endif
    149#endif
    150	.endm
    151
    152	.macro disable_irq, save=1
    153	disable_irq_notrace
    154	asm_trace_hardirqs_off \save
    155	.endm
    156
    157	.macro enable_irq
    158	asm_trace_hardirqs_on
    159	enable_irq_notrace
    160	.endm
    161/*
    162 * Save the current IRQ state and disable IRQs.  Note that this macro
    163 * assumes FIQs are enabled, and that the processor is in SVC mode.
    164 */
    165	.macro	save_and_disable_irqs, oldcpsr
    166#ifdef CONFIG_CPU_V7M
    167	mrs	\oldcpsr, primask
    168#else
    169	mrs	\oldcpsr, cpsr
    170#endif
    171	disable_irq
    172	.endm
    173
    174	.macro	save_and_disable_irqs_notrace, oldcpsr
    175#ifdef CONFIG_CPU_V7M
    176	mrs	\oldcpsr, primask
    177#else
    178	mrs	\oldcpsr, cpsr
    179#endif
    180	disable_irq_notrace
    181	.endm
    182
    183/*
    184 * Restore interrupt state previously stored in a register.  We don't
    185 * guarantee that this will preserve the flags.
    186 */
    187	.macro	restore_irqs_notrace, oldcpsr
    188#ifdef CONFIG_CPU_V7M
    189	msr	primask, \oldcpsr
    190#else
    191	msr	cpsr_c, \oldcpsr
    192#endif
    193	.endm
    194
    195	.macro restore_irqs, oldcpsr
    196	tst	\oldcpsr, #PSR_I_BIT
    197	asm_trace_hardirqs_on cond=eq
    198	restore_irqs_notrace \oldcpsr
    199	.endm
    200
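/*
 * Example usage (illustrative sketch): bracket a short critical section,
 * restoring whatever IRQ mask state the caller had:
 *
 *	save_and_disable_irqs r9
 *	@ ... code that must not be interrupted ...
 *	restore_irqs r9
 */
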
    201/*
    202 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
    203 * reference local symbols in the same assembly file which are to be
    204 * resolved by the assembler.  Other usage is undefined.
    205 */
    206	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
    207	.macro	badr\c, rd, sym
    208#ifdef CONFIG_THUMB2_KERNEL
    209	adr\c	\rd, \sym + 1
    210#else
    211	adr\c	\rd, \sym
    212#endif
    213	.endm
    214	.endr
    215
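/*
 * Example usage (illustrative sketch): take the address of a local label,
 * with the Thumb bit set automatically on CONFIG_THUMB2_KERNEL builds so a
 * later exception return or 'bx' lands in the right instruction set state:
 *
 *	badr	lr, 1f
 */
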
    216/*
    217 * Get current thread_info.
    218 */
    219	.macro	get_thread_info, rd
    220	/* thread_info is the first member of struct task_struct */
    221	get_current \rd
    222	.endm
    223
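/*
 * Example usage (illustrative sketch): fetch the current task and test its
 * thread flags, as the exception return paths do (TI_FLAGS comes from
 * asm-offsets):
 *
 *	get_thread_info	r9
 *	ldr	r1, [r9, #TI_FLAGS]
 */
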
    224/*
    225 * Increment/decrement the preempt count.
    226 */
    227#ifdef CONFIG_PREEMPT_COUNT
    228	.macro	inc_preempt_count, ti, tmp
    229	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
    230	add	\tmp, \tmp, #1			@ increment it
    231	str	\tmp, [\ti, #TI_PREEMPT]
    232	.endm
    233
    234	.macro	dec_preempt_count, ti, tmp
    235	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
    236	sub	\tmp, \tmp, #1			@ decrement it
    237	str	\tmp, [\ti, #TI_PREEMPT]
    238	.endm
    239
    240	.macro	dec_preempt_count_ti, ti, tmp
    241	get_thread_info \ti
    242	dec_preempt_count \ti, \tmp
    243	.endm
    244#else
    245	.macro	inc_preempt_count, ti, tmp
    246	.endm
    247
    248	.macro	dec_preempt_count, ti, tmp
    249	.endm
    250
    251	.macro	dec_preempt_count_ti, ti, tmp
    252	.endm
    253#endif
    254
    255#define USERL(l, x...)				\
    2569999:	x;					\
    257	.pushsection __ex_table,"a";		\
    258	.align	3;				\
    259	.long	9999b,l;			\
    260	.popsection
    261
    262#define USER(x...)	USERL(9001f, x)
    263
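/*
 * Example usage (illustrative sketch): a user-space store; if it faults,
 * control is diverted to the 9001f fixup label supplied by the surrounding
 * uaccess routine instead of oopsing:
 *
 * USER(	strt	r1, [r0])
 */
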
    264#ifdef CONFIG_SMP
    265#define ALT_SMP(instr...)					\
    2669998:	instr
    267/*
    268 * Note: if you get assembler errors from ALT_UP() when building with
    269 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
    270 * ALT_SMP( W(instr) ... )
    271 */
    272#define ALT_UP(instr...)					\
    273	.pushsection ".alt.smp.init", "a"			;\
    274	.align	2						;\
    275	.long	9998b - .					;\
    2769997:	instr							;\
    277	.if . - 9997b == 2					;\
    278		nop						;\
    279	.endif							;\
    280	.if . - 9997b != 4					;\
    281		.error "ALT_UP() content must assemble to exactly 4 bytes";\
    282	.endif							;\
    283	.popsection
    284#define ALT_UP_B(label)					\
    285	.pushsection ".alt.smp.init", "a"			;\
    286	.align	2						;\
    287	.long	9998b - .					;\
    288	W(b)	. + (label - 9998b)					;\
    289	.popsection
    290#else
    291#define ALT_SMP(instr...)
    292#define ALT_UP(instr...) instr
    293#define ALT_UP_B(label) b label
    294#endif
    295
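/*
 * Example usage (illustrative sketch): emit a barrier on SMP kernels and
 * have it patched to a NOP at boot when running on a uniprocessor system;
 * the W() forms keep both alternatives 4 bytes wide under Thumb-2:
 *
 *	ALT_SMP(W(dmb)	ish)
 *	ALT_UP(W(nop))
 */
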
    296	/*
    297	 * this_cpu_offset - load the per-CPU offset of this CPU into
    298	 * 		     register 'rd'
    299	 */
    300	.macro		this_cpu_offset, rd:req
    301#ifdef CONFIG_SMP
    302ALT_SMP(mrc		p15, 0, \rd, c13, c0, 4)
    303#ifdef CONFIG_CPU_V6
    304ALT_UP_B(.L1_\@)
    305.L0_\@:
    306	.subsection	1
    307.L1_\@: ldr_va		\rd, __per_cpu_offset
    308	b		.L0_\@
    309	.previous
    310#endif
    311#else
    312	mov		\rd, #0
    313#endif
    314	.endm
    315
    316	/*
    317	 * set_current - store the task pointer of this CPU's current task
    318	 */
    319	.macro		set_current, rn:req, tmp:req
    320#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
    3219998:	mcr		p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
    322#ifdef CONFIG_CPU_V6
    323ALT_UP_B(.L0_\@)
    324	.subsection	1
    325.L0_\@: str_va		\rn, __current, \tmp
    326	b		.L1_\@
    327	.previous
    328.L1_\@:
    329#endif
    330#else
    331	str_va		\rn, __current, \tmp
    332#endif
    333	.endm
    334
    335	/*
    336	 * get_current - load the task pointer of this CPU's current task
    337	 */
    338	.macro		get_current, rd:req
    339#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
    3409998:	mrc		p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
    341#ifdef CONFIG_CPU_V6
    342ALT_UP_B(.L0_\@)
    343	.subsection	1
    344.L0_\@: ldr_va		\rd, __current
    345	b		.L1_\@
    346	.previous
    347.L1_\@:
    348#endif
    349#else
    350	ldr_va		\rd, __current
    351#endif
    352	.endm
    353
    354	/*
    355	 * reload_current - reload the task pointer of this CPU's current task
    356	 *		    into the TLS register
    357	 */
    358	.macro		reload_current, t1:req, t2:req
    359#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
    360#ifdef CONFIG_CPU_V6
    361ALT_SMP(nop)
    362ALT_UP_B(.L0_\@)
    363#endif
    364	ldr_this_cpu	\t1, __entry_task, \t1, \t2
    365	mcr		p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
    366.L0_\@:
    367#endif
    368	.endm
    369
    370/*
    371 * Instruction barrier
    372 */
    373	.macro	instr_sync
    374#if __LINUX_ARM_ARCH__ >= 7
    375	isb
    376#elif __LINUX_ARM_ARCH__ == 6
    377	mcr	p15, 0, r0, c7, c5, 4
    378#endif
    379	.endm
    380
    381/*
    382 * SMP data memory barrier
    383 */
    384	.macro	smp_dmb mode
    385#ifdef CONFIG_SMP
    386#if __LINUX_ARM_ARCH__ >= 7
    387	.ifeqs "\mode","arm"
    388	ALT_SMP(dmb	ish)
    389	.else
    390	ALT_SMP(W(dmb)	ish)
    391	.endif
    392#elif __LINUX_ARM_ARCH__ == 6
    393	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
    394#else
    395#error Incompatible SMP platform
    396#endif
    397	.ifeqs "\mode","arm"
    398	ALT_UP(nop)
    399	.else
    400	ALT_UP(W(nop))
    401	.endif
    402#endif
    403	.endm
    404
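/*
 * Example usage (illustrative sketch): a barrier in ARM-only code; this
 * expands to nothing on !CONFIG_SMP and is patched to a NOP on UP systems:
 *
 *	smp_dmb	arm
 */
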
    405#if defined(CONFIG_CPU_V7M)
    406	/*
     407	 * setmode is used to assert that the CPU is in SVC mode during boot. For v7-M
    408	 * this is done in __v7m_setup, so setmode can be empty here.
    409	 */
    410	.macro	setmode, mode, reg
    411	.endm
    412#elif defined(CONFIG_THUMB2_KERNEL)
    413	.macro	setmode, mode, reg
    414	mov	\reg, #\mode
    415	msr	cpsr_c, \reg
    416	.endm
    417#else
    418	.macro	setmode, mode, reg
    419	msr	cpsr_c, #\mode
    420	.endm
    421#endif
    422
    423/*
    424 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
    425 * a scratch register for the macro to overwrite.
    426 *
    427 * This macro is intended for forcing the CPU into SVC mode at boot time.
     428 * You cannot return to the original mode.
    429 */
    430.macro safe_svcmode_maskall reg:req
    431#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
    432	mrs	\reg , cpsr
    433	eor	\reg, \reg, #HYP_MODE
    434	tst	\reg, #MODE_MASK
    435	bic	\reg , \reg , #MODE_MASK
    436	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
    437THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
    438	bne	1f
    439	orr	\reg, \reg, #PSR_A_BIT
    440	badr	lr, 2f
    441	msr	spsr_cxsf, \reg
    442	__MSR_ELR_HYP(14)
    443	__ERET
    4441:	msr	cpsr_c, \reg
    4452:
    446#else
    447/*
    448 * workaround for possibly broken pre-v6 hardware
    449 * (akita, Sharp Zaurus C-1000, PXA270-based)
    450 */
    451	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
    452#endif
    453.endm
    454
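/*
 * Example usage (illustrative sketch of boot-time use): drop from HYP to
 * SVC if necessary, with IRQs and FIQs masked, before touching any
 * mode-dependent state; r9 is simply a free scratch register here:
 *
 *	safe_svcmode_maskall r9
 */
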
    455/*
    456 * STRT/LDRT access macros with ARM and Thumb-2 variants
    457 */
    458#ifdef CONFIG_THUMB2_KERNEL
    459
    460	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
    4619999:
    462	.if	\inc == 1
    463	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
    464	.elseif	\inc == 4
    465	\instr\t\cond\().w \reg, [\ptr, #\off]
    466	.else
    467	.error	"Unsupported inc macro argument"
    468	.endif
    469
    470	.pushsection __ex_table,"a"
    471	.align	3
    472	.long	9999b, \abort
    473	.popsection
    474	.endm
    475
    476	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
    477	@ explicit IT instruction needed because of the label
    478	@ introduced by the USER macro
    479	.ifnc	\cond,al
    480	.if	\rept == 1
    481	itt	\cond
    482	.elseif	\rept == 2
    483	ittt	\cond
    484	.else
    485	.error	"Unsupported rept macro argument"
    486	.endif
    487	.endif
    488
    489	@ Slightly optimised to avoid incrementing the pointer twice
    490	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
    491	.if	\rept == 2
    492	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
    493	.endif
    494
    495	add\cond \ptr, #\rept * \inc
    496	.endm
    497
    498#else	/* !CONFIG_THUMB2_KERNEL */
    499
    500	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
    501	.rept	\rept
    5029999:
    503	.if	\inc == 1
    504	\instr\()b\t\cond \reg, [\ptr], #\inc
    505	.elseif	\inc == 4
    506	\instr\t\cond \reg, [\ptr], #\inc
    507	.else
    508	.error	"Unsupported inc macro argument"
    509	.endif
    510
    511	.pushsection __ex_table,"a"
    512	.align	3
    513	.long	9999b, \abort
    514	.popsection
    515	.endr
    516	.endm
    517
    518#endif	/* CONFIG_THUMB2_KERNEL */
    519
    520	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
    521	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
    522	.endm
    523
    524	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
    525	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
    526	.endm
    527
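/*
 * Example usage (illustrative sketch): store r2 to the user pointer in r0
 * and post-increment it by 4; a faulting access branches to the default
 * 9001f fixup label:
 *
 *	strusr	r2, r0, 4
 */
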
    528/* Utility macro for declaring string literals */
    529	.macro	string name:req, string
    530	.type \name , #object
    531\name:
    532	.asciz "\string"
    533	.size \name , . - \name
    534	.endm
    535
    536	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
    537	.macro	ret\c, reg
    538#if __LINUX_ARM_ARCH__ < 6
    539	mov\c	pc, \reg
    540#else
    541	.ifeqs	"\reg", "lr"
    542	bx\c	\reg
    543	.else
    544	mov\c	pc, \reg
    545	.endif
    546#endif
    547	.endm
    548	.endr
    549
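/*
 * Example usage (illustrative sketch): a plain function return; assembles
 * to 'bx lr' on ARMv6+ and to 'mov pc, lr' on older cores:
 *
 *	ret	lr
 */
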
    550	.macro	ret.w, reg
    551	ret	\reg
    552#ifdef CONFIG_THUMB2_KERNEL
    553	nop
    554#endif
    555	.endm
    556
    557	.macro	bug, msg, line
    558#ifdef CONFIG_THUMB2_KERNEL
    5591:	.inst	0xde02
    560#else
    5611:	.inst	0xe7f001f2
    562#endif
    563#ifdef CONFIG_DEBUG_BUGVERBOSE
    564	.pushsection .rodata.str, "aMS", %progbits, 1
    5652:	.asciz	"\msg"
    566	.popsection
    567	.pushsection __bug_table, "aw"
    568	.align	2
    569	.word	1b, 2b
    570	.hword	\line
    571	.popsection
    572#endif
    573	.endm
    574
    575#ifdef CONFIG_KPROBES
    576#define _ASM_NOKPROBE(entry)				\
    577	.pushsection "_kprobe_blacklist", "aw" ;	\
    578	.balign 4 ;					\
    579	.long entry;					\
    580	.popsection
    581#else
    582#define _ASM_NOKPROBE(entry)
    583#endif
    584
    585	.macro		__adldst_l, op, reg, sym, tmp, c
    586	.if		__LINUX_ARM_ARCH__ < 7
    587	ldr\c		\tmp, .La\@
    588	.subsection	1
    589	.align		2
    590.La\@:	.long		\sym - .Lpc\@
    591	.previous
    592	.else
    593	.ifnb		\c
    594 THUMB(	ittt		\c			)
    595	.endif
    596	movw\c		\tmp, #:lower16:\sym - .Lpc\@
    597	movt\c		\tmp, #:upper16:\sym - .Lpc\@
    598	.endif
    599
    600#ifndef CONFIG_THUMB2_KERNEL
    601	.set		.Lpc\@, . + 8			// PC bias
    602	.ifc		\op, add
    603	add\c		\reg, \tmp, pc
    604	.else
    605	\op\c		\reg, [pc, \tmp]
    606	.endif
    607#else
    608.Lb\@:	add\c		\tmp, \tmp, pc
    609	/*
    610	 * In Thumb-2 builds, the PC bias depends on whether we are currently
    611	 * emitting into a .arm or a .thumb section. The size of the add opcode
    612	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
    613	 * emitting in ARM mode, so let's use this to account for the bias.
    614	 */
    615	.set		.Lpc\@, . + (. - .Lb\@)
    616
    617	.ifnc		\op, add
    618	\op\c		\reg, [\tmp]
    619	.endif
    620#endif
    621	.endm
    622
    623	/*
    624	 * mov_l - move a constant value or [relocated] address into a register
    625	 */
    626	.macro		mov_l, dst:req, imm:req, cond
    627	.if		__LINUX_ARM_ARCH__ < 7
    628	ldr\cond	\dst, =\imm
    629	.else
    630	movw\cond	\dst, #:lower16:\imm
    631	movt\cond	\dst, #:upper16:\imm
    632	.endif
    633	.endm
    634
    635	/*
    636	 * adr_l - adr pseudo-op with unlimited range
    637	 *
    638	 * @dst: destination register
    639	 * @sym: name of the symbol
    640	 * @cond: conditional opcode suffix
    641	 */
    642	.macro		adr_l, dst:req, sym:req, cond
    643	__adldst_l	add, \dst, \sym, \dst, \cond
    644	.endm
    645
    646	/*
    647	 * ldr_l - ldr <literal> pseudo-op with unlimited range
    648	 *
    649	 * @dst: destination register
    650	 * @sym: name of the symbol
    651	 * @cond: conditional opcode suffix
    652	 */
    653	.macro		ldr_l, dst:req, sym:req, cond
    654	__adldst_l	ldr, \dst, \sym, \dst, \cond
    655	.endm
    656
    657	/*
    658	 * str_l - str <literal> pseudo-op with unlimited range
    659	 *
    660	 * @src: source register
    661	 * @sym: name of the symbol
    662	 * @tmp: mandatory scratch register
    663	 * @cond: conditional opcode suffix
    664	 */
    665	.macro		str_l, src:req, sym:req, tmp:req, cond
    666	__adldst_l	str, \src, \sym, \tmp, \cond
    667	.endm
    668
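/*
 * Example usage (illustrative sketch; __example_sym is a hypothetical
 * symbol): load a 32-bit constant and a far-away symbol's address without
 * relying on the limited range of a plain adr:
 *
 *	mov_l	r0, 0x12345678
 *	adr_l	r1, __example_sym
 */
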
    669	.macro		__ldst_va, op, reg, tmp, sym, cond, offset
    670#if __LINUX_ARM_ARCH__ >= 7 || \
    671    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    672    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
    673	mov_l		\tmp, \sym, \cond
    674#else
    675	/*
    676	 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
    677	 * with the appropriate relocations. The combined sequence has a range
    678	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
    679	 * for modules loaded into the module region.
    680	 */
    681	.globl		\sym
    682	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
    683	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
    684	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
    685.L0_\@: sub\cond	\tmp, pc, #8 - \offset
    686.L1_\@: sub\cond	\tmp, \tmp, #4 - \offset
    687.L2_\@:
    688#endif
    689	\op\cond	\reg, [\tmp, #\offset]
    690	.endm
    691
    692	/*
    693	 * ldr_va - load a 32-bit word from the virtual address of \sym
    694	 */
    695	.macro		ldr_va, rd:req, sym:req, cond, tmp, offset=0
    696	.ifnb		\tmp
    697	__ldst_va	ldr, \rd, \tmp, \sym, \cond, \offset
    698	.else
    699	__ldst_va	ldr, \rd, \rd, \sym, \cond, \offset
    700	.endif
    701	.endm
    702
    703	/*
    704	 * str_va - store a 32-bit word to the virtual address of \sym
    705	 */
    706	.macro		str_va, rn:req, sym:req, tmp:req, cond
    707	__ldst_va	str, \rn, \tmp, \sym, \cond, 0
    708	.endm
    709
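/*
 * Example usage (illustrative sketch; __example_var is a hypothetical
 * symbol): load the 32-bit word stored at the symbol's virtual address
 * without a literal pool entry:
 *
 *	ldr_va	r0, __example_var
 */
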
    710	/*
    711	 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
    712	 *			without using a temp register. Supported in ARM mode
    713	 *			only.
    714	 */
    715	.macro		ldr_this_cpu_armv6, rd:req, sym:req
    716	this_cpu_offset	\rd
    717	.globl		\sym
    718	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
    719	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
    720	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
    721	add		\rd, \rd, pc
    722.L0_\@: sub		\rd, \rd, #4
    723.L1_\@: sub		\rd, \rd, #0
    724.L2_\@: ldr		\rd, [\rd, #4]
    725	.endm
    726
    727	/*
    728	 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
    729	 *		  into register 'rd', which may be the stack pointer,
    730	 *		  using 't1' and 't2' as general temp registers. These
    731	 *		  are permitted to overlap with 'rd' if != sp
    732	 */
    733	.macro		ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
    734#ifndef CONFIG_SMP
    735	ldr_va		\rd, \sym, tmp=\t1
    736#elif __LINUX_ARM_ARCH__ >= 7 || \
    737      !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    738      (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
    739	this_cpu_offset	\t1
    740	mov_l		\t2, \sym
    741	ldr		\rd, [\t1, \t2]
    742#else
    743	ldr_this_cpu_armv6 \rd, \sym
    744#endif
    745	.endm
    746
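/*
 * Example usage (illustrative sketch, mirroring reload_current above):
 * fetch this CPU's __entry_task pointer, using r1 and r2 as scratch:
 *
 *	ldr_this_cpu	r1, __entry_task, r1, r2
 */
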
    747	/*
    748	 * rev_l - byte-swap a 32-bit value
    749	 *
    750	 * @val: source/destination register
    751	 * @tmp: scratch register
    752	 */
    753	.macro		rev_l, val:req, tmp:req
    754	.if		__LINUX_ARM_ARCH__ < 6
    755	eor		\tmp, \val, \val, ror #16
    756	bic		\tmp, \tmp, #0x00ff0000
    757	mov		\val, \val, ror #8
    758	eor		\val, \val, \tmp, lsr #8
    759	.else
    760	rev		\val, \val
    761	.endif
    762	.endm
    763
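/*
 * Example usage (illustrative sketch): byte-swap the value in r0 in place;
 * r3 is scratch and is only clobbered on the pre-ARMv6 fallback path:
 *
 *	rev_l	r0, r3
 */
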
    764	/*
    765	 * bl_r - branch and link to register
    766	 *
    767	 * @dst: target to branch to
    768	 * @c: conditional opcode suffix
    769	 */
    770	.macro		bl_r, dst:req, c
    771	.if		__LINUX_ARM_ARCH__ < 6
    772	mov\c		lr, pc
    773	mov\c		pc, \dst
    774	.else
    775	blx\c		\dst
    776	.endif
    777	.endm
    778
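/*
 * Example usage (illustrative sketch): an indirect call through r4 that
 * works both on cores with BLX (ARMv6+) and on older ones:
 *
 *	bl_r	r4
 */
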
    779#endif /* __ASM_ASSEMBLER_H__ */