cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mca_asm.h (7356B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * File:	mca_asm.h
 * Purpose:	Machine check handling specific defines
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
 * Copyright (C) 2005 Silicon Graphics, Inc
 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
 */
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H

#include <asm/percpu.h>

#define PSR_IC		13
#define PSR_I		14
#define	PSR_DT		17
#define PSR_RT		27
#define PSR_MC		35
#define PSR_IT		36
#define PSR_BN		44
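/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): the
 * PSR_* values above are bit positions inside the 64-bit processor status
 * register and are consumed by the ia64 "dep" (deposit) instruction in the
 * macros below.  "dep r1 = r2, r3, pos, len" overwrites len bits of r3
 * starting at bit pos with the low bits of r2.  A rough C model, assuming
 * unsigned long is 64 bits as on ia64:
 */
static inline unsigned long psr_deposit(unsigned long psr, unsigned long val,
					unsigned int pos, unsigned int len)
{
	unsigned long mask = ((1UL << len) - 1) << pos;	/* assumes len < 64 */

	/* clear the len-bit field at pos, then insert the low bits of val */
	return (psr & ~mask) | ((val << pos) & mask);
}
/*
 * e.g. "dep temp2 = 0, temp2, PSR_IC, 2" in the macros below corresponds to
 * psr_deposit(temp2, 0, PSR_IC, 2), clearing psr.ic and psr.i together.
 */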

/*
 * This macro converts an instruction virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define INST_VA_TO_PA(addr)							\
	dep	addr	= 0, addr, 61, 3
/*
 * This macro converts a data virtual address to a physical address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define DATA_VA_TO_PA(addr)							\
	tpa	addr	= addr
/*
 * This macro converts a data physical address to a virtual address.
 * Right now for simulation purposes the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Put 0x7 in bits 61 thru 63.
 */
#define DATA_PA_TO_VA(addr,temp)							\
	mov	temp	= 0x7	;;							\
	dep	addr	= temp, addr, 61, 3
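/*
 * Illustrative sketch (hypothetical helpers, not used by the kernel): what
 * the address-conversion macros above compute under the direct mapping
 * described in their comments.  Bits 63:61 of an ia64 address select the
 * region; INST_VA_TO_PA clears them to obtain a physical address, and
 * DATA_PA_TO_VA deposits 0x7 to map back into region 7.  (DATA_VA_TO_PA
 * instead uses the "tpa" instruction, i.e. a real hardware translation.)
 */
static inline unsigned long mca_inst_va_to_pa(unsigned long va)
{
	return va & ~(0x7UL << 61);	/* dep addr = 0, addr, 61, 3 */
}

static inline unsigned long mca_data_pa_to_va(unsigned long pa)
{
	return pa | (0x7UL << 61);	/* dep addr = 0x7, addr, 61, 3 */
}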

#define GET_THIS_PADDR(reg, var)		\
	mov	reg = IA64_KR(PER_CPU_DATA);;	\
        addl	reg = THIS_CPU(var), reg

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in physical mode with all the address
 * translations turned off.
 *	1.	Save the current psr
 *	2.	Make sure that all the upper 32 bits are off
 *
 *	3.	Clear the interrupt enable and interrupt state collection bits
 *		in the psr before updating the ipsr and iip.
 *
 *	4.	Turn off the instruction, data and rse translation bits of the psr
 *		and store the new value into ipsr
 *		Also make sure that the interrupts are disabled.
 *		Ensure that we are in little endian mode.
 *		[psr.{rt, it, dt, i, be} = 0]
 *
 *	5.	Get the physical address corresponding to the virtual address
 *		of the next instruction bundle and put it in iip.
 *		(Using magic numbers 24 and 40 in the deposit instruction since
 *		 the IA64_SDK code directly maps to lower 24 bits as physical address
 *		 from a virtual address).
 *
 *	6.	Do an rfi to move the values from ipsr to psr and iip to ip.
 */
#define  PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)				\
	mov	old_psr = psr;								\
	;;										\
	dep	old_psr = 0, old_psr, 32, 32;						\
											\
	mov	ar.rsc = 0 ;								\
	;;										\
	srlz.d;										\
	mov	temp2 = ar.bspstore;							\
	;;										\
	DATA_VA_TO_PA(temp2);								\
	;;										\
	mov	temp1 = ar.rnat;							\
	;;										\
	mov	ar.bspstore = temp2;							\
	;;										\
	mov	ar.rnat = temp1;							\
	mov	temp1 = psr;								\
	mov	temp2 = psr;								\
	;;										\
											\
	dep	temp2 = 0, temp2, PSR_IC, 2;						\
	;;										\
	mov	psr.l = temp2;								\
	;;										\
	srlz.d;										\
	dep	temp1 = 0, temp1, 32, 32;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_IT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_DT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_RT, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_I, 1;						\
	;;										\
	dep	temp1 = 0, temp1, PSR_IC, 1;						\
	;;										\
	dep	temp1 = -1, temp1, PSR_MC, 1;						\
	;;										\
	mov	cr.ipsr = temp1;							\
	;;										\
	LOAD_PHYSICAL(p0, temp2, start_addr);						\
	;;										\
	mov	cr.iip = temp2;								\
	mov	cr.ifs = r0;								\
	DATA_VA_TO_PA(sp);								\
	DATA_VA_TO_PA(gp);								\
	;;										\
	srlz.i;										\
	;;										\
	nop	1;									\
	nop	2;									\
	nop	1;									\
	nop	2;									\
	rfi;										\
	;;
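/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): the psr
 * image that PHYSICAL_MODE_ENTER builds in temp1 and loads into cr.ipsr
 * before the rfi, written as plain C over a 64-bit psr value.
 */
static inline unsigned long physical_mode_ipsr(unsigned long psr)
{
	psr &= 0xffffffffUL;		/* dep temp1 = 0, temp1, 32, 32 */
	psr &= ~(1UL << PSR_IT);	/* instruction translation off */
	psr &= ~(1UL << PSR_DT);	/* data translation off */
	psr &= ~(1UL << PSR_RT);	/* rse translation off */
	psr &= ~(1UL << PSR_I);		/* interrupts disabled */
	psr &= ~(1UL << PSR_IC);	/* interrupt state collection off */
	psr |= 1UL << PSR_MC;		/* mask further machine check aborts */
	return psr;
}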

/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in virtual mode with all the address
 * translations turned on.
 *	1.	Get the old saved psr
 *
 *	2.	Clear the interrupt state collection bit in the current psr.
 *
 *	3.	Set the instruction translation bit back in the old psr
 *		Note we have to do this since we are right now saving only the
 *		lower 32 bits of old psr. (Also the old psr has the data and
 *		rse translation bits on)
 *
 *	4.	Set ipsr to this old_psr with "it" bit set and "bn" = 1.
 *
 *	5.	Reset the current thread pointer (r13).
 *
 *	6.	Set iip to the virtual address of the next instruction bundle.
 *
 *	7.	Do an rfi to move ipsr to psr and iip to ip.
 */

#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr)	\
	mov	temp2 = psr;					\
	;;							\
	mov	old_psr = temp2;				\
	;;							\
	dep	temp2 = 0, temp2, PSR_IC, 2;			\
	;;							\
	mov	psr.l = temp2;					\
	mov	ar.rsc = 0;					\
	;;							\
	srlz.d;							\
	mov	r13 = ar.k6;					\
	mov	temp2 = ar.bspstore;				\
	;;							\
	DATA_PA_TO_VA(temp2,temp1);				\
	;;							\
	mov	temp1 = ar.rnat;				\
	;;							\
	mov	ar.bspstore = temp2;				\
	;;							\
	mov	ar.rnat = temp1;				\
	;;							\
	mov	temp1 = old_psr;				\
	;;							\
	mov	temp2 = 1;					\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IC, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_IT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_DT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_RT, 1;		\
	;;							\
	dep	temp1 = temp2, temp1, PSR_BN, 1;		\
	;;							\
								\
	mov     cr.ipsr = temp1;				\
	movl	temp2 = start_addr;				\
	;;							\
	mov	cr.iip = temp2;					\
	movl	gp = __gp					\
	;;							\
	DATA_PA_TO_VA(sp, temp1);				\
	srlz.i;							\
	;;							\
	nop	1;						\
	nop	2;						\
	nop	1;						\
	rfi							\
	;;
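/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): the psr
 * image that VIRTUAL_MODE_ENTER loads into cr.ipsr, i.e. steps 3 and 4 of
 * the comment above applied to the saved old_psr.
 */
static inline unsigned long virtual_mode_ipsr(unsigned long old_psr)
{
	old_psr |= 1UL << PSR_IC;	/* interrupt state collection on */
	old_psr |= 1UL << PSR_IT;	/* instruction translation on */
	old_psr |= 1UL << PSR_DT;	/* data translation on */
	old_psr |= 1UL << PSR_RT;	/* rse translation on */
	old_psr |= 1UL << PSR_BN;	/* switch to register bank 1 */
	return old_psr;
}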

/*
 * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
 * stacks, except that the SAL/OS state and a switch_stack are stored near the
 * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
 * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
 * are 16 byte aligned.
 *
 *      +---------------------------+
 *      |          pt_regs          |
 *      +---------------------------+
 *      |        switch_stack       |
 *      +---------------------------+
 *      |        SAL/OS state       |
 *      +---------------------------+
 *      |    16 byte scratch area   |
 *      +---------------------------+ <-------- SP at start of C MCA handler
 *      |           .....           |
 *      +---------------------------+
 *      | RBS for MCA/INIT handler  |
 *      +---------------------------+
 *      | struct task for MCA/INIT  |
 *      +---------------------------+ <-------- Bottom of MCA/INIT stack
 */

#define ALIGN16(x)			((x)&~15)
#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)

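/*
 * Illustrative sketch (hypothetical helper and struct, not used by the
 * kernel): how the offsets above carve the MCA/INIT stack, growing down from
 * the top of a KERNEL_STACK_SIZE area.  The real sizes come from the build's
 * asm-offsets; this only shows the ALIGN16 chaining that produces the
 * handler's initial stack pointer.
 */
struct mca_stack_layout {
	unsigned long pt_regs_off;	/* MCA_PT_REGS_OFFSET */
	unsigned long switch_stack_off;	/* MCA_SWITCH_STACK_OFFSET */
	unsigned long sos_off;		/* MCA_SOS_OFFSET */
	unsigned long sp_off;		/* MCA_SP_OFFSET: sos minus 16-byte scratch */
};

static inline struct mca_stack_layout mca_layout(unsigned long stack_size,
						 unsigned long pt_regs_size,
						 unsigned long switch_stack_size,
						 unsigned long sos_size)
{
	struct mca_stack_layout l;

	l.pt_regs_off      = (stack_size - pt_regs_size) & ~15UL;
	l.switch_stack_off = (l.pt_regs_off - switch_stack_size) & ~15UL;
	l.sos_off          = (l.switch_stack_off - sos_size) & ~15UL;
	l.sp_off           = (l.sos_off - 16) & ~15UL;
	return l;
}
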
#endif /* _ASM_IA64_MCA_ASM_H */