trampoline_64.S (3651B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec trampoline
 *
 * Runs between the old kernel and the next stage (new kernel or another
 * purgatory): verifies nothing here itself, but saves boot parameters,
 * optionally copies the crashed kernel's low-memory backup region,
 * patches the device tree, and finally branches to the next kernel.
 *
 * Based on code taken from kexec-tools and kexec-lite.
 *
 * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
 * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
 * Copyright (C) 2013, Anton Blanchard, IBM Corporation
 */

#include <asm/asm-compat.h>
#include <asm/crashdump-ppc64.h>

	/*
	 * Layout of the first 0x100 bytes is ABI: fixed offsets that
	 * kexec tooling and secondary CPUs (slaves) rely on.  Hence the
	 * .org directives below instead of natural placement.
	 */
	.balign 256
	.globl purgatory_start
purgatory_start:
	b	master			/* boot CPU jumps over the ABI area */

	/* ABI: possible run_at_load flag at 0x5c */
	.org purgatory_start + 0x5c
	.globl run_at_load
run_at_load:
	.long 0				/* patched externally; copied into the kernel at 0x5c below */
	.size run_at_load, . - run_at_load

	/* ABI: slaves start at 60 with r3=phys */
	.org purgatory_start + 0x60
slave:
	b	.			/* secondary CPUs spin here until this area is overwritten */
	/* ABI: end of copied region */
	.org purgatory_start + 0x100
	.size purgatory_start, . - purgatory_start

/*
 * The above 0x100 bytes at purgatory_start are replaced with the
 * code from the kernel (or next stage) by setup_purgatory().
 */

master:
	or	%r1,%r1,%r1		/* low priority to let other threads catchup */
	isync				/* ensure the priority change takes effect before continuing */
	mr	%r17,%r3		/* save cpu id to r17 */
	mr	%r15,%r4		/* save physical address in reg15 */

	/*
	 * Work out where we're running: bcl 20,31 is the standard
	 * PC-discovery idiom (branch-always-and-link that link-stack
	 * predictors ignore).  r18 = runtime address of label 0, used as
	 * the base for all position-independent data accesses below.
	 */
	bcl	20, 31, 0f
0:	mflr	%r18

	/*
	 * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
	 * backup_start 8 bytes at a time.
	 *
	 * Use r3 = dest, r4 = src, r5 = size, r6 = count
	 */
	ld	%r3, (backup_start - 0b)(%r18)
	cmpdi	%cr0, %r3, 0
	beq	.Lskip_copy		/* skip if there is no backup region */
	lis	%r5, BACKUP_SRC_SIZE@h
	ori	%r5, %r5, BACKUP_SRC_SIZE@l
	cmpdi	%cr0, %r5, 0
	beq	.Lskip_copy		/* skip if copy size is zero */
	lis	%r4, BACKUP_SRC_START@h
	ori	%r4, %r4, BACKUP_SRC_START@l
	li	%r6, 0
.Lcopy_loop:
	ldx	%r0, %r6, %r4		/* r0 = src[count] */
	stdx	%r0, %r6, %r3		/* dest[count] = r0 */
	addi	%r6, %r6, 8
	cmpld	%cr0, %r6, %r5		/* unsigned compare: count < size? */
	blt	.Lcopy_loop

.Lskip_copy:
	or	%r3,%r3,%r3		/* ok now to high priority, lets boot */
	lis	%r6,0x1			/* CTR = 0x10000 iterations */
	mtctr	%r6			/* delay a bit for slaves to catch up */
	bdnz	.			/* before we overwrite 0-100 again */

	/* load device-tree address */
	ld	%r3, (dt_offset - 0b)(%r18)
	mr	%r16,%r3		/* save dt address in reg16 */
	li	%r4,20
	LWZX_BE	%r6,%r3,%r4		/* fetch __be32 version number at byte 20 */
	cmpwi	%cr0,%r6,2		/* v2 or later? */
	blt	1f			/* pre-v2 FDT has no boot-cpu field to patch */
	li	%r4,28
	STWX_BE	%r17,%r3,%r4		/* Store my cpu as __be32 at byte 28 */
1:
	/* Load opal base and entry values in r8 & r9 respectively */
	ld	%r8,(opal_base - 0b)(%r18)
	ld	%r9,(opal_entry - 0b)(%r18)

	/* load the kernel address */
	ld	%r4,(kernel - 0b)(%r18)

	/* load the run_at_load flag */
	/* possibly patched by kexec */
	ld	%r6,(run_at_load - 0b)(%r18)
	/* and patch it into the kernel */
	stw	%r6,(0x5c)(%r4)		/* word at kernel+0x5c mirrors our 0x5c ABI slot */

	mr	%r3,%r16		/* restore dt address */

	li	%r5,0			/* r5 will be 0 for kernel */

	mfmsr	%r11
	andi.	%r10,%r11,1		/* test MSR_LE */
	bne	.Little_endian		/* currently little-endian: must switch via rfid */

	/* Already big-endian: plain indirect branch into the kernel. */
	mtctr	%r4			/* prepare branch to */
	bctr				/* start kernel */

.Little_endian:
	/*
	 * Running LE but entering the next stage BE: rfid atomically
	 * updates MSR (clearing MSR_LE) and jumps to SRR0.
	 * NOTE(review): assumes the next stage's entry code expects
	 * big-endian execution — confirm against the kernel entry ABI.
	 */
	mtsrr0	%r4			/* prepare branch to */

	clrrdi	%r11,%r11,1		/* clear MSR_LE */
	mtsrr1	%r11
	rfid				/* update MSR and start kernel */

	/*
	 * Parameter cells below are filled in by the kexec loader
	 * (setup_purgatory and friends) before purgatory runs.
	 */
	.balign 8
	.globl kernel
kernel:
	.8byte	0x0			/* entry address of the next kernel */
	.size kernel, . - kernel

	.balign 8
	.globl dt_offset
dt_offset:
	.8byte	0x0			/* address of the flattened device tree */
	.size dt_offset, . - dt_offset

	.balign 8
	.globl backup_start
backup_start:
	.8byte	0x0			/* destination of the low-memory backup copy; 0 = none */
	.size backup_start, . - backup_start

	.balign 8
	.globl opal_base
opal_base:
	.8byte	0x0			/* OPAL firmware base, handed to the next stage in r8 */
	.size opal_base, . - opal_base

	.balign 8
	.globl opal_entry
opal_entry:
	.8byte	0x0			/* OPAL firmware entry, handed to the next stage in r9 */
	.size opal_entry, . - opal_entry

	.data
	.balign 8
.globl purgatory_sha256_digest
purgatory_sha256_digest:
	.skip	32			/* expected SHA-256 digest (32 bytes) of the loaded segments */
	.size purgatory_sha256_digest, . - purgatory_sha256_digest

	.balign 8
.globl purgatory_sha_regions
purgatory_sha_regions:
	.skip	8 * 2 * 16		/* up to 16 {start, len} u64 pairs describing hashed regions */
	.size purgatory_sha_regions, . - purgatory_sha_regions