cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

head.S (5545B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Early kernel startup code for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>

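/*
 * 0x0e0 = 224 segment-table entries, each mapping a 4MB page; this is
 * the number of kernel-segment (PAGE_OFFSET) entries filled in below.
 */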
#define SEGTABLE_ENTRIES #0x0e0

	__INIT
ENTRY(stext)
	/*
	 * VMM will already have set up true vector page, MMU, etc.
	 * To set up initial kernel identity map, we have to pass
	 * the VMM a pointer to some canonical page tables. In
	 * this implementation, we're assuming that we've got
	 * them precompiled. Generate value in R24, as we'll need
	 * it again shortly.
	 */
	r24.L = #LO(swapper_pg_dir)
	r24.H = #HI(swapper_pg_dir)

	/*
	 * Symbol is kernel segment address, but we need
	 * the logical/physical address.
	 */
	r25 = pc;
	r2.h = #0xffc0;
	r2.l = #0x0000;
	r25 = and(r2,r25);	/*  R25 holds PHYS_OFFSET now  */
	r1.h = #HI(PAGE_OFFSET);
	r1.l = #LO(PAGE_OFFSET);
	r24 = sub(r24,r1);	/* swapper_pg_dir - PAGE_OFFSET */
	r24 = add(r24,r25);	/* + PHYS_OFFSET */

	r0 = r24;  /* aka __pa(swapper_pg_dir)  */

	/*
	 * Initialize the page directory so that the virtual and physical
	 * addresses where the kernel was loaded are identical.
	 * Done in 4MB chunks.
	 */
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
		  | __HEXAGON_C_WB_L2 << 6			\
		  | __HVM_PDE_S_4MB)
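/*
 * Each such entry is a 4MB huge-page translation: readable, writable,
 * executable, with write-back L2 cacheability selected by the
 * __HEXAGON_C_WB_L2 bits shifted into the cache-attribute field.
 */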

	/*
	 * Get number of VA=PA entries; only really needed for jump
	 * to hyperspace; gets blown away immediately after
	 */

	{
		r1.l = #LO(_end);
		r2.l = #LO(stext);
		r3 = #1;
	}
	{
		r1.h = #HI(_end);
		r2.h = #HI(stext);
		r3 = asl(r3, #22);
	}
	{
		r1 = sub(r1, r2);
		r3 = add(r3, #-1);
	}  /* r1 =  _end - stext  */
	r1 = add(r1, r3);  /*  + (4M-1) */
	r26 = lsr(r1, #22); /*  / 4M = # of entries */

	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary  */
	r1 = and(r1,r2);
	r2 = lsr(r1, #22)	/* 4MB page number		*/
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
	r0 = add(r0,r2)		/* r0 = address of correct PTE	*/
	r2 = #PTE_BITS
	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry	*/
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */
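	/*
	 * Hardware loop: loop0 repeats the block ending at :endloop0
	 * r26 times.  memw(r0 ++ #4) stores one PTE and post-increments
	 * the pointer, so successive entries receive successive 4MB
	 * translations.
	 */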
	loop0(1f,r26);
1:
	memw(r0 ++ #4) = r1
	{ r1 = add(r1, r2); } :endloop0

	/*  Also need to overwrite the initial 0xc0000000 entries  */
	/*  PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift)  */
	R1.H = #HI(PAGE_OFFSET >> (22 - 2))
	R1.L = #LO(PAGE_OFFSET >> (22 - 2))

	r0 = add(r1, r24);	/* advance to 0xc0000000 entry */
	r1 = r25;
	r2.h = #0xffc0;
	r2.l = #0x0000;		/* round back down to 4MB boundary  */
	r1 = and(r1,r2);	/* for huge page */
	r2 = #PTE_BITS
	r1 = add(r1,r2);
	r2.h = #0x0040
	r2.l = #0x0000		/* 4MB increments */

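	/*
	 * Same pattern as above, but starting at the PAGE_OFFSET entry:
	 * map the kernel's virtual home at 0xc0000000 onto the physical
	 * load address, SEGTABLE_ENTRIES 4MB pages' worth.
	 */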
	loop0(1f,SEGTABLE_ENTRIES);
1:
	memw(r0 ++ #4) = r1;
	{ r1 = add(r1,r2); } :endloop0

	r0 = r24;

	/*
	 * The subroutine wrapper around the virtual instruction touches
	 * no memory, so we should be able to use it even here.
	 * Note that in this version, R1 and R2 get "clobbered"; see
	 * vm_ops.S
	 */
	r1 = #VM_TRANS_TYPE_TABLE
	call	__vmnewmap;

	/*  Jump into virtual address range.  */

	r31.h = #hi(__head_s_vaddr_target)
	r31.l = #lo(__head_s_vaddr_target)
	jumpr r31

	/*  Insert trippy space effects.  */

__head_s_vaddr_target:
	/*
	 * Tear down VA=PA translation now that we are running
	 * in kernel virtual space.
	 */
	r0 = #__HVM_PDE_S_INVALID

	r1.h = #0xffc0;
	r1.l = #0x0000;
	r2 = r25;		/* phys_offset */
	r2 = and(r1,r2);

	r1.l = #lo(swapper_pg_dir)
	r1.h = #hi(swapper_pg_dir)
	r2 = lsr(r2, #22)	/* 4MB page number		*/
	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
	r1 = add(r1,r2);
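	/*
	 * r1 now points at the first VA=PA entry; r26 still holds the
	 * entry count computed earlier, so the same number of entries
	 * gets invalidated.
	 */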
	loop0(1f,r26)

1:
	{
		memw(R1 ++ #4) = R0
	}:endloop0

	r0 = r24
	r1 = #VM_TRANS_TYPE_TABLE
	call __vmnewmap

	/*  Go ahead and install the trap0 return so angel calls work  */
	r0.h = #hi(_K_provisional_vec)
	r0.l = #lo(_K_provisional_vec)
	call __vmsetvec

	/*
	 * OK, at this point we should start to be much more careful,
	 * we're going to enter C code and start touching memory
	 * in all sorts of places.
	 * This means:
	 *      SGP needs to be OK
	 *	Need to lock shared resources
	 *	A bunch of other things that will cause
	 * 	all kinds of painful bugs
	 */

	/*
	 * Stack pointer should be pointed at the init task's
	 * thread stack, which should have been declared in arch/init_task.c.
	 * So uhhhhh...
	 * It's accessible via the init_thread_union, which is a union
	 * of a thread_info struct and a stack; of course, the top
	 * of the stack is not for you.  The end of the stack
	 * is simply init_thread_union + THREAD_SIZE.
	 */

	{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
	{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }

	/*  initialize the register used to point to current_thread_info */
	/*  Fixme:  THREADINFO_REG can't be R2 because of that memset thing. */
	{r29 = add(r29,r0); THREADINFO_REG = r29; }

	/*  Hack:  zero bss; */
	{ r0.L = #LO(__bss_start);  r1 = #0; r2.l = #LO(__bss_stop); }
	{ r0.H = #HI(__bss_start);           r2.h = #HI(__bss_stop); }

	r2 = sub(r2,r0);
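	/*  r0/r1/r2 already hold memset(__bss_start, 0, size)'s arguments  */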
	call memset;

	/*  Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
	r0.l = #LO(__phys_offset);
	r0.h = #HI(__phys_offset);
	memw(r0) = r25;
#endif

	/* Time to make the doughnuts.   */
	call start_kernel

	/*
	 * Should not reach here.
	 */
1:
	jump 1b

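/*
 * Page-aligned, page-sized buffer for a command line supplied from
 * outside the kernel (presumably filled in by the VM monitor or
 * bootloader).
 */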
.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
        .fill _PAGE_SIZE,1,0

.data
.p2align PAGE_SHIFT
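/*  Zero-filled page exported to the rest of the kernel as empty_zero_page.  */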
ENTRY(empty_zero_page)
        .fill _PAGE_SIZE,1,0