vmlinux-xip.lds.S (2908B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2020 Vitaly Wool, Konsulko AB
 */

#include <asm/pgtable.h>
#define LOAD_OFFSET KERNEL_LINK_ADDR
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA

#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

OUTPUT_ARCH(riscv)
ENTRY(_start)

jiffies = jiffies_64;

SECTIONS
{
	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_xiprom = .;
	_start = .;
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
		EXIT_TEXT
	}

	.text : {
		_text = .;
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		_etext = .;
	}
	RO_DATA(L1_CACHE_BYTES)
	.srodata : {
		*(.srodata*)
	}
	.init.rodata : {
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
	}
	_exiprom = .;			/* End of XIP ROM area */


/*
 * From this point, stuff is considered writable and will be copied to RAM
 */
	__data_loc = ALIGN(PAGE_SIZE);		/* location in file */
	. = KERNEL_LINK_ADDR + XIP_OFFSET;	/* location in memory */

#undef LOAD_OFFSET
#define LOAD_OFFSET (KERNEL_LINK_ADDR + XIP_OFFSET - (__data_loc & XIP_OFFSET_MASK))

	_sdata = .;			/* Start of data section */
	_data = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;
	__start_ro_after_init = .;
	.data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
		*(.data..ro_after_init)
	}
	__end_ro_after_init = .;

	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	.init.data : {
		INIT_DATA
	}
	.exit.data : {
		EXIT_DATA
	}
	. = ALIGN(8);
	__soc_early_init_table : {
		__soc_early_init_table_start = .;
		KEEP(*(__soc_early_init_table))
		__soc_early_init_table_end = .;
	}
	__soc_builtin_dtb_table : {
		__soc_builtin_dtb_table_start = .;
		KEEP(*(__soc_builtin_dtb_table))
		__soc_builtin_dtb_table_end = .;
	}

	. = ALIGN(8);
	.alternative : {
		__alt_start = .;
		*(.alternative)
		__alt_end = .;
	}
	__init_end = .;

	. = ALIGN(16);
	.xip.traps : {
		__xip_traps_start = .;
		*(.xip.traps)
		__xip_traps_end = .;
	}

	. = ALIGN(PAGE_SIZE);
	.sdata : {
		__global_pointer$ = . + 0x800;
		*(.sdata*)
		*(.sbss*)
	}

	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)

	PERCPU_SECTION(L1_CACHE_BYTES)

	.rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
		*(.rel.dyn*)
	}

	/*
	 * End of copied data. We need a dummy section to get its LMA.
	 * Also located before final ALIGN() as trailing padding is not stored
	 * in the resulting binary file and useless to copy.
	 */
	.data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
	_edata_loc = LOADADDR(.data.endmark);

	. = ALIGN(PAGE_SIZE);
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
}
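
The symbols __data_loc, _sdata and _edata_loc defined above mark the flash (load)
address and the RAM (run) address of the writable part of the image. A minimal
sketch, assuming a bare-metal C environment, of how early boot code could use
them to copy that data out of XIP flash before .data is first touched; the
helper name copy_data_to_ram and the byte-wise loop are illustrative only, not
the kernel's actual startup path (which performs this copy in assembly):

/* Illustrative sketch only -- not the kernel's startup code. */
extern char __data_loc[];	/* LMA: start of writable data in flash      */
extern char _sdata[];		/* VMA: where that data must live in RAM     */
extern char _edata_loc[];	/* LMA of .data.endmark: end of data to copy */

static void copy_data_to_ram(void)
{
	unsigned long n = (unsigned long)(_edata_loc - __data_loc);

	/* Byte-wise copy keeps the sketch free of library dependencies. */
	for (unsigned long i = 0; i < n; i++)
		_sdata[i] = __data_loc[i];
}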