cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

book3s_32_sr.S (3477B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
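
	/* Note: "MSR = ~IR|DR" above means MSR[IR] and MSR[DR] are both
	 * clear, i.e. we are in real mode with instruction and data
	 * translation disabled, so the loads below use physical addresses.
	 */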

#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
			mtsr	n, r9

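	/* For reference, XCHG_SR(0) expands to
	 *
	 *	lwz	r9, (SVCPU_SR+0)(r3)
	 *	mtsr	0, r9
	 *
	 * i.e. fetch the guest's value of segment register 0 from the
	 * shadow vcpu and install it. Book3S 32 has 16 segment registers,
	 * each covering 256MB of the 4GB effective address space, hence
	 * the 16 invocations below.
	 */
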
	XCHG_SR(0)
	XCHG_SR(1)
	XCHG_SR(2)
	XCHG_SR(3)
	XCHG_SR(4)
	XCHG_SR(5)
	XCHG_SR(6)
	XCHG_SR(7)
	XCHG_SR(8)
	XCHG_SR(9)
	XCHG_SR(10)
	XCHG_SR(11)
	XCHG_SR(12)
	XCHG_SR(13)
	XCHG_SR(14)
	XCHG_SR(15)

	/* Clear BATs. */

#define KVM_KILL_BAT(n, reg)		\
	mtspr	SPRN_IBAT##n##U,reg;	\
	mtspr	SPRN_IBAT##n##L,reg;	\
	mtspr	SPRN_DBAT##n##U,reg;	\
	mtspr	SPRN_DBAT##n##L,reg

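	/* Writing zero to a BAT's upper half clears its Vs/Vp valid bits,
	 * disabling that block translation (the lower half is cleared as
	 * well). This removes the host's BAT mappings so the guest cannot
	 * be entered with stale host translations still active.
	 */
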
	li	r9, 0
	KVM_KILL_BAT(0, r9)
	KVM_KILL_BAT(1, r9)
	KVM_KILL_BAT(2, r9)
	KVM_KILL_BAT(3, r9)

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore BATs */

	/* Guest entry cleared both the upper and lower halves of each BAT,
	   so restore both halves from the host's saved values. */
#define KVM_LOAD_BAT(n, reg, RA, RB)	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB

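	/* BATS is the host kernel's saved copy of its BAT values; the
	 * (n*16) indexing above assumes 16 bytes per BAT pair, laid out as
	 * { IBATnU, IBATnL, DBATnU, DBATnL }. tophys() converts r9 to a
	 * physical address because data translation is still disabled at
	 * this point.
	 */
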
	lis	r9, BATS@ha
	addi	r9, r9, BATS@l
	tophys(r9, r9)
	KVM_LOAD_BAT(0, r9, r10, r11)
	KVM_LOAD_BAT(1, r9, r10, r11)
	KVM_LOAD_BAT(2, r9, r10, r11)
	KVM_LOAD_BAT(3, r9, r10, r11)

	/* Restore Segment Registers */

	/* 0xc - 0xf */

	li	r0, 4
	mtctr	r0
	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
	lis	r4, 0xc000
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111		/* increment VSID */
	addis	r4, r4, 0x1000		/* address of next segment */
	bdnz	3b
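
	/* Loop walkthrough: r3 starts as 0x20000000 | 0xccc (VSID 0x111*n
	 * for segment n = 0xc, plus what appears to be the SR_KP key bit),
	 * r4 as 0xc0000000. mtsrin picks the segment register from the top
	 * 4 bits of r4, so the four iterations program SRs 12-15, stepping
	 * the VSID by 0x111 and the address by 256MB each time.
	 */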

	/* 0x0 - 0xb */

	/* switch_mmu_context() needs paging, let's enable it */
	mfmsr	r9
	ori	r11, r9, MSR_DR
	mtmsr	r11
	sync
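
	/* Only MSR[DR] is set; MSR[IR] stays clear, so execution continues
	 * at physical addresses while switch_mmu_context() can safely
	 * dereference kernel virtual data pointers.
	 */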

	/* switch_mmu_context() clobbers r12, rescue it */
	SAVE_GPR(12, r1)

	/* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
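	/* On 32-bit Book3S, r2 holds the host's "current" task pointer,
	 * so MM(r2) yields current->mm for the second argument. The first
	 * and third arguments ("<inv>") are ignored by the callee, so r3
	 * and r5 are left as they are.
	 */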
	lwz	r4, MM(r2)
	bl	switch_mmu_context

	/* restore r12 */
	REST_GPR(12, r1)

	/* Disable paging again */
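	/* andc with MSR_DR clears just the DR bit and leaves the rest of
	 * the MSR, including the still-clear IR bit, untouched. */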
	mfmsr	r9
	li	r6, MSR_DR
	andc	r9, r9, r6
	mtmsr	r9
	sync

.endm