cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vmlinux.lds.S (4335B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2
      3#include <linux/pgtable.h>
      4#include <asm/cache.h>
      5#include <asm/ptrace.h>
      6#include <asm/thread_info.h>
      7
/*
 * Knobs consumed by asm-generic/vmlinux.lds.h below: emit a PT_NOTE
 * program header for the .notes section, and align the read-only
 * exception table to 16 bytes (IA-64 bundle alignment).
 */
      8#define EMITS_PT_NOTE
      9#define RO_EXCEPTION_TABLE_ALIGN	16
     10
     11#include <asm-generic/vmlinux.lds.h>
     12
/*
 * Little-endian 64-bit IA-64 image; execution starts at phys_start,
 * which is defined inside SECTIONS as _start minus LOAD_OFFSET.
 * jiffies is an alias for the low word of jiffies_64.
 */
     13OUTPUT_FORMAT("elf64-ia64-little")
     14OUTPUT_ARCH(ia64)
     15ENTRY(phys_start)
     16jiffies = jiffies_64;
     17
/*
 * ELF program headers (segments).  Output sections are assigned to one
 * of these via the ":text", ":percpu", ":data", ":note" and ":unwind"
 * annotations in SECTIONS; later sections inherit the previous
 * assignment until it is changed.
 */
     18PHDRS {
     19	text   PT_LOAD;
     20	percpu PT_LOAD;
     21	data   PT_LOAD;
     22	note   PT_NOTE;
     23	unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
     24}
     25
     26	SECTIONS {
     27	/*
     28	 * unwind exit sections must be discarded before
     29	 * the rest of the sections get included.
     30	 */
     31	/DISCARD/ : {
     32		*(.IA_64.unwind.exit.text)
     33		*(.IA_64.unwind_info.exit.text)
     34		*(.comment)
     35		*(.note)
     36	}
     37
     38	v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
     39	phys_start = _start - LOAD_OFFSET;
     40
	/*
	 * Empty output section whose only purpose is to select the :text
	 * program header; the following sections inherit it.
	 */
     41	code : {
     42	} :text
     43	. = KERNEL_START;
     44
     45	_text = .;
     46	_stext = .;
     47
	/*
	 * Main kernel text.  AT() places the load (physical) address at the
	 * virtual address minus LOAD_OFFSET; the same pattern repeats for
	 * every allocated section below.  The interrupt vector table
	 * (.text..ivt) is kept first and bracketed by symbols.
	 */
     48	.text : AT(ADDR(.text) - LOAD_OFFSET) {
     49		__start_ivt_text = .;
     50		*(.text..ivt)
     51		__end_ivt_text = .;
     52		TEXT_TEXT
     53		SCHED_TEXT
     54		CPUIDLE_TEXT
     55		LOCK_TEXT
     56		KPROBES_TEXT
     57		IRQENTRY_TEXT
     58		SOFTIRQENTRY_TEXT
     59		*(.gnu.linkonce.t*)
     60	}
     61
     62	.text2 : AT(ADDR(.text2) - LOAD_OFFSET)	{
     63		*(.text2)
     64	}
     65
     66#ifdef CONFIG_SMP
     67	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
     68		*(.text..lock)
     69	}
     70#endif
     71	_etext = .;
     72
     73	/*
     74	 * Read-only data
     75	 */
     76
     77	/* MCA table */
     78	. = ALIGN(16);
     79	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
     80		__start___mca_table = .;
     81		*(__mca_table)
     82		__stop___mca_table = .;
     83	}
     84
	/*
	 * Patch list bracketed by start/end symbols; presumably walked by
	 * the IA-64 runtime patching code at boot — confirm against
	 * arch/ia64 patch.c users of these symbols.
	 */
     85	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
     86		__start___phys_stack_reg_patchlist = .;
     87		*(.data..patch.phys_stack_reg)
     88		__end___phys_stack_reg_patchlist = .;
     89	}
     90
     91	/*
     92	 * Global data
     93	 */
     94	_data = .;
     95
     96	/* Unwind info & table: */
     97	. = ALIGN(8);
     98	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
     99		*(.IA_64.unwind_info*)
    100	}
	/* The unwind table lives in both the :text and :unwind segments. */
    101	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
    102		__start_unwind = .;
    103		*(.IA_64.unwind*)
    104		__end_unwind = .;
    105	} :text :unwind
	/* Empty section: switch subsequent sections back to :text only. */
    106	code_continues2 : {
    107	} :text
    108
    109	RO_DATA(4096)
    110
	/* Official procedure descriptors (function pointers on IA-64). */
    111	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
    112		__start_opd = .;
    113		*(.opd)
    114		__end_opd = .;
    115	}
    116
    117	/*
    118	 * Initialization code and data:
    119	 */
    120	. = ALIGN(PAGE_SIZE);
    121	__init_begin = .;
    122
    123	INIT_TEXT_SECTION(PAGE_SIZE)
    124	INIT_DATA_SECTION(16)
    125
	/* More boot-time patch lists, same start/end-symbol pattern. */
    126	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
    127		__start___vtop_patchlist = .;
    128		*(.data..patch.vtop)
    129		__end___vtop_patchlist = .;
    130	}
    131
    132	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
    133		__start___rse_patchlist = .;
    134		*(.data..patch.rse)
    135		__end___rse_patchlist = .;
    136	}
    137
    138	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
    139		__start___mckinley_e9_bundles = .;
    140		*(.data..patch.mckinley_e9)
    141		__end___mckinley_e9_bundles = .;
    142	}
    143
    144#ifdef	CONFIG_SMP
	/* Reserve one per-cpu page for the boot CPU inside the init area. */
    145	. = ALIGN(PERCPU_PAGE_SIZE);
    146	__cpu0_per_cpu = .;
    147	. = . + PERCPU_PAGE_SIZE;   /* cpu0 per-cpu space */
    148#endif
    149
    150	. = ALIGN(PAGE_SIZE);
    151	__init_end = .;
    152
	/* Page-aligned data, including the user-visible gate page. */
    153	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
    154		PAGE_ALIGNED_DATA(PAGE_SIZE)
    155		. = ALIGN(PAGE_SIZE);
    156		__start_gate_section = .;
    157		*(.data..gate)
    158		__stop_gate_section = .;
    159	}
    160	/*
    161	 * make sure the gate page doesn't expose
    162	 * kernel data
    163	 */
    164	. = ALIGN(PAGE_SIZE);
    165
    166	/* Per-cpu data: */
    167	. = ALIGN(PERCPU_PAGE_SIZE);
    168	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
    169	__phys_per_cpu_start = __per_cpu_load;
    170	/*
    171	 * ensure percpu data fits
    172	 * into percpu page size
    173	 */
    174	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
    175
	/* Empty section: start the :data program header. */
    176	data : {
    177	} :data
    178	.data : AT(ADDR(.data) - LOAD_OFFSET) {
    179		_sdata  =  .;
    180		INIT_TASK_DATA(PAGE_SIZE)
    181		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
    182		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
    183		DATA_DATA
    184		*(.data1)
    185		*(.gnu.linkonce.d*)
    186		CONSTRUCTORS
    187	}
    188
    189	BUG_TABLE
    190
    191	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
    192	.got : AT(ADDR(.got) - LOAD_OFFSET) {
    193		*(.got.plt)
    194		*(.got)
    195	}
	/*
	 * Bias gp 2MB past the GOT so the signed 22-bit gp-relative
	 * addressing mode can reach a 4MB window around it (IA-64
	 * software conventions).
	 */
    196	__gp = ADDR(.got) + 0x200000;
    197
    198	/*
    199	 * We want the small data sections together,
    200	 * so single-instruction offsets can access
    201	 * them all, and initialized data all before
    202	 * uninitialized, so we can shorten the
    203	 * on-disk segment size.
    204	 */
    205	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
    206		*(.sdata)
    207		*(.sdata1)
    208		*(.srdata)
    209	}
    210	_edata  =  .;
    211
    212	BSS_SECTION(0, 0, 0)
    213
    214	_end = .;
    215
	/* Empty section: return trailing output to the :text segment. */
    216	code : {
    217	} :text
    218
    219	STABS_DEBUG
    220	DWARF_DEBUG
    221	ELF_DETAILS
    222
    223	/* Default discards */
    224	DISCARDS
    225}