cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ivt.S (53034B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *      Xen paravirtualization
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 *      Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 *  For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /          /
 *  entry number ---------/         /                  /          /
 *  size of the entry -------------/                  /          /
 *  vector name -------------------------------------/          /
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on a 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
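
/*
 * Editor's note: a worked example of the entry-offset arithmetic described
 * above (illustrative C sketch, not part of the original source; the helper
 * name ivt_entry_offset is made up).  An IA-64 bundle is 16 bytes, so the
 * 20 long entries are 0x400 bytes each and the short entries 0x100 each:
 *
 *	static unsigned long ivt_entry_offset(unsigned int n)
 *	{
 *		return (n < 20) ? n * 0x400UL : 0x5000UL + (n - 20) * 0x100UL;
 *	}
 *
 * ivt_entry_offset(7) == 0x1c00, matching the sample comment above;
 * ivt_entry_offset(20) == 0x5000 is the first short entry; the last entry,
 * 67, sits at 0x7f00 and the table ends at 0x8000 = 32KB.
 */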


#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/export.h>

#if 0
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

#define FAULT(n)									\
	mov r31=pr;									\
	mov r19=n;;			/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text..ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
	EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as the page table entries above the pte level have
	 * valid mappings for the faulting address.  The TLB entry for the
	 * original miss gets inserted only if the pte entry indicates that the
	 * page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	MOV_FROM_ITIR(r25)
#endif
	;;
	RSM_PSR_DT				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#if CONFIG_PGTABLE_LEVELS == 4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
						// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
	ITC_D(p7, r24, r25)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 * r28 = equivalent of pud_offset(pgd, ifa)
	 * r17 = equivalent of pmd_offset(pud, ifa)
	 * r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 * r29 = *pud
	 * r20 = *pmd
	 * r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#if CONFIG_PGTABLE_LEVELS == 4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#if CONFIG_PGTABLE_LEVELS == 4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	RFI
END(vhpt_miss)
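
/*
 * Editor's note: the hand-scheduled walk above is, in effect, the generic
 * multi-level lookup below.  Illustrative C sketch only: it glosses over
 * how the region bits are folded into the pgd index, pfn_base() is a
 * made-up helper for "page-frame address of the next level", and the pud
 * step applies only when CONFIG_PGTABLE_LEVELS == 4.
 *
 *	pgd = pgd_base + pgd_index(addr);	// region 5 uses swapper_pg_dir
 *	if (*pgd == 0)
 *		goto page_fault;
 *	pud = (u64 *)pfn_base(*pgd) + pud_index(addr);	// 4-level only
 *	pmd = (u64 *)pfn_base(*pud) + pmd_index(addr);
 *	if (*pmd == 0)
 *		goto page_fault;
 *	pte = (u64 *)pfn_base(*pmd) + pte_index(addr);
 *	if (!(*pte & _PAGE_P))
 *		goto page_fault;
 *	// itc the PTE for the original miss, then the VHPT page itself
 */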

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_D(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(dtlb_miss)
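
/*
 * Editor's note: the CONFIG_SMP tail of the two handlers above guards
 * against a global purge (ptc.g) arriving between the PTE read and the
 * "itc" insert.  The idiom, as an illustrative C sketch (itc_insert and
 * ptc_l are stand-ins for the instructions, not real helpers):
 *
 *	u64 pte = *ptep;
 *	itc_insert(pte);			// may now be stale
 *	if (*ptep != pte)			// re-read after the insert
 *		ptc_l(vaddr, PAGE_SHIFT << 2);	// purge what we inserted;
 *						// the access simply refaults
 */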

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_IPSR(p0, r21)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
	THASH(p8, r17, r16, r23)
	;;
	MOV_TO_IHA(p8, r17, r23)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57	// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18	// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r19, r18)	// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	MOV_FROM_IFA(r16)	// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_ISR(r20)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	MOV_FROM_IPSR(p0, r21)
	mov r31=pr
	mov r24=PERCPU_ADDR
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
	THASH(p8, r17, r16, r25)
	;;
	MOV_TO_IHA(p8, r17, r25)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
	tbit.z p12,p0=r16,61			// access to region 6?
	mov r25=PERCPU_PAGE_SHIFT << 2
	mov r26=PERCPU_PAGE_SIZE
	nop.m 0
	nop.b 0
	;;
(p10)	mov r19=IA64_KR(PER_CPU_DATA)
(p11)	and r19=r19,r16				// clear non-ppn fields
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
(p10)	sub r19=r19,r26
	MOV_TO_ITIR(p10, r25, r24)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	;;
	or r19=r19,r17		// insert PTE control bits into r19
	MOV_TO_IPSR(p6, r21, r24)
	;;
	ITC_D(p7, r19, r18)	// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_dtlb_miss)
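
/*
 * Editor's note: unlike the VHPT-based handlers, the two alternate miss
 * handlers above synthesize an identity translation without touching the
 * page tables.  Roughly, as an illustrative C sketch (all names here are
 * simplified stand-ins, not real kernel helpers):
 *
 *	u64 pte = (ifa & phys_addr_mask) | PAGE_KERNEL_BITS;	// ppn = vpn
 *	if (((ifa >> 61) & 7) == 6)
 *		pte |= PTE_MA_UC;	// region 6 is uncacheable I/O space
 *	if (psr_cpl(ipsr) != 0)
 *		goto page_fault;	// user code may not use these regions
 *	itc_insert(pte);
 */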

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
	RSM_PSR_DT				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	MOV_FROM_ITIR(r18)
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#if CONFIG_PGTABLE_LEVELS == 4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)
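
/*
 * Editor's note: the ITIR-based scaling above lets one walker serve any
 * page size: shifting the faulting address right by (ps - PAGE_SHIFT)
 * first makes the usual PAGE_SHIFT/PMD_SHIFT index extraction apply
 * unchanged to huge pages.  Illustrative C sketch:
 *
 *	unsigned int ps = (itir >> 2) & 0x3f;	// log2 of faulting page size
 *	u64 scaled = ifa >> (ps - PAGE_SHIFT);	// no-op when ps == PAGE_SHIFT
 *	unsigned long pte_idx =
 *		(scaled >> PAGE_SHIFT) & ((1UL << (PAGE_SHIFT - 3)) - 1);
 */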

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only compare if page is present
	;;
	ITC_D(p6, r25, r18)			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	RFI
END(dirty_bit)
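
/*
 * Editor's note: on SMP the dirty/accessed-bit updates above are a
 * cmpxchg-style read-modify-write (ar.ccv holds the compare value) so a
 * racing unmap cannot be overwritten with a resurrected PTE.  Illustrative
 * C sketch (itc_insert is a stand-in for the instruction):
 *
 *	u64 old = *ptep;
 *	if (old & _PAGE_P) {
 *		u64 new = old | _PAGE_D | _PAGE_A;
 *		if (cmpxchg(ptep, old, new) == old)
 *			itc_insert(new);	// install only if we won
 *	}
 *	// then re-read *ptep and ptc.l the entry if it changed again
 */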

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	MOV_FROM_IPSR(p0, r17)
	;;
	MOV_FROM_IIP(r18)
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT	 	// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page present
	;;
	ITC_I(p6, r25, r26)			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_I(p0, r18, r16)			// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	RFI
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page is present
	;;
	ITC_D(p6, r25, r26)			// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	RFI
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
	DBG_FAULT(11)
	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
	MOV_FROM_IPSR(p0, r29)			// M2 (12 cyc)
	mov r31=pr				// I0 (2 cyc)

	MOV_FROM_IIM(r17)			// M2 (2 cyc)
	mov.m r27=ar.rsc			// M2 (12 cyc)
	mov r18=__IA64_BREAK_SYSCALL		// A

	mov.m ar.rsc=0				// M2
	mov.m r21=ar.fpsr			// M2 (12 cyc)
	mov r19=b6				// I0 (2 cyc)
	;;
	mov.m r23=ar.bspstore			// M2 (12 cyc)
	mov.m r24=ar.rnat			// M2 (5 cyc)
	mov.i r26=ar.pfs			// I0 (2 cyc)

	invala					// M0|1
	nop.m 0					// M
	mov r20=r1				// A			save r1

	nop.m 0
	movl r30=sys_call_table			// X

	MOV_FROM_IIP(r28)			// M2 (2 cyc)
	cmp.eq p0,p7=r18,r17			// I0 is this a system call?
(p7)	br.cond.spnt non_syscall		// B  no ->
	//
	// From this point on, we are definitely on the syscall-path
	// and we can use (non-banked) scratch registers.
	//
///////////////////////////////////////////////////////////////////////
	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
	mov r2=r16				// A    setup r2 for ia64_syscall_setup
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags

	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	adds r15=-1024,r15			// A    subtract 1024 from syscall number
	mov r3=NR_syscalls - 1
	;;
	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr

	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
	;;

	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
	cmp.eq p8,p9=2,r8			// A    isr.ei==2?
	;;

(p8)	mov r8=0				// A    clear ei to 0
(p7)	movl r30=sys_ni_syscall			// X

(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
(p9)	adds r8=1,r8				// A    increment ei to next slot
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	;;
	mov b6=r30				// I0   setup syscall handler branch reg early
#else
	nop.i 0
	;;
#endif

	mov.m r25=ar.unat			// M2 (5 cyc)
	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
	adds r15=1024,r15			// A    restore original syscall number
	//
	// If any of the above loads miss in L1D, we'll stall here until
	// the data arrives.
	//
///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	MOV_FROM_ITC(p0, p14, r30, r18)		// M    get cycle for accounting
#else
	mov b6=r30				// I0   setup syscall handler branch reg early
#endif
	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?

	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
	mov r18=ar.bsp				// M2 (12 cyc)
(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
	;;
.back_from_break_fixup:
(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	// mov.m r30=ar.itc is called in advance, and r13 is current
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
(pKStk)	br.cond.spnt .skip_accounting		// B	unlikely skip
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M  get last stamp
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M  time at leave
	;;
	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M  cumulated stime
	ld8 r21=[r17]				// M  cumulated utime
	sub r22=r19,r18				// A  stime before leave
	;;
	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M  update stamp
	sub r18=r30,r19				// A  elapsed time in user
	;;
	add r20=r20,r22				// A  sum stime
	add r21=r21,r18				// A  sum utime
	;;
	st8 [r16]=r20				// M  update stime
	st8 [r17]=r21				// M  update utime
	;;
.skip_accounting:
#endif
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
	BSW_1(r2, r14)				// B (6 cyc) regs are saved, switch to bank 1
	;;

	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16)	// M2	now it's safe to re-enable intr.-collection
						// M0   ensure interruption collection is on
	movl r3=ia64_ret_from_syscall		// X
	;;
	mov rp=r3				// I0   set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

	SSM_PSR_I(p15, p15, r16)		// M2   restore psr.i
(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamajig
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
	// For the rare cases where a system-call is done from within the kernel,
	// we fix things up at this point:
.break_fixup:
	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
	;;
	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
	br.cond.sptk .back_from_break_fixup
END(break_fault)
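
/*
 * Editor's note: the fast-path dispatch above reduces to the following C
 * logic (illustrative sketch; r15 carries the syscall number biased by
 * 1024, and is_nat() stands in for the tnat.nz test):
 *
 *	long nr = r15 - 1024;
 *	syscall_fn = sys_ni_syscall;
 *	if ((unsigned long)nr <= NR_syscalls - 1 && !is_nat(r15))
 *		syscall_fn = sys_call_table[nr];	// shladd + ld8 above
 *	// cr.iip/ipsr.ei are advanced past the break before syscall_fn runs
 */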

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	/* interrupt handler has become too big to fit this area. */
	br.sptk.many __interrupt
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 *	allocate stacked registers so it can safely demine any
	 *	potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	-  r1: stack pointer
	 *	-  r2: current task pointer
	 *	-  r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r30: ar.itc for accounting (don't touch)
	 *	- r31: saved pr
	 *	-  b0: original contents (to be saved)
	 * On exit:
	 *	-  p10: TRUE if syscall is invoked with more than 8 out
	 *		registers or r15's Nat is true
	 *	-  r1: kernel's gp
	 *	-  r3: preserved (same as on entry)
	 *	-  r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- r14: preserved (same as on entry)
	 *	- p13: preserved
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 *	-  b6: preserved (same as on entry)
	 */
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
(p9)	mov in1=-1
	;;

(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4				// [I0]
(p11)	mov in3=-1
	;;
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5				// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p13,p0=in6
	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)
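
/*
 * Editor's note: the "frame size can't be more than local+8" test above
 * decodes ar.pfs.pfm: sof (size of frame) is bits 0..6 and sol (size of
 * locals) bits 7..13; a syscall may pass at most 8 output registers.
 * Illustrative C sketch (is_nat is a stand-in for the tnat.nz test):
 *
 *	unsigned int sof = pfs & 0x7f;
 *	unsigned int sol = (pfs >> 7) & 0x7f;
 *	if (sol + 8 < sof || is_nat(r15))	// too many outs, or bad nr
 *		r8 = -EINVAL;			// and p10 steers us out early
 */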

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
	/*
	 * There is no particular reason for this code to be here, other than
	 * that there happens to be space here that would go unused otherwise.
	 * If this fault ever gets "unreserved", simply move the following
	 * code to a more suitable spot...
	 *
	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
	 * enabled and if the macro is entered from user mode.
	 */
GLOBAL_ENTRY(account_sys_enter)
	// mov.m r20=ar.itc is called in advance, and r13 is current
	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at last leave from kernel
	;;
	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
	ld8 r21=[r17]				// cumulated utime
	sub r22=r19,r18				// stime before leaving kernel
	;;
	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp
	sub r18=r20,r19				// elapsed time in user mode
	;;
	add r23=r23,r22				// sum stime
	add r21=r21,r18				// sum utime
	;;
	st8 [r16]=r23				// update stime
	st8 [r17]=r21				// update utime
	;;
	br.ret.sptk.many rp
END(account_sys_enter)
#endif
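
/*
 * Editor's note: the accounting above maintains per-task system/user time
 * from two saved ar.itc snapshots.  Illustrative C sketch (field names
 * mirror the TI_AC_* offsets, not necessarily the real struct members):
 *
 *	ti->ac_stime += ti->ac_leave - ti->ac_stamp; // kernel time last round
 *	ti->ac_utime += now - ti->ac_leave;	     // user time since leave
 *	ti->ac_stamp  = now;			     // new kernel-entry stamp
 */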

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

	.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

//
// --- End of long entries, Beginning of short entries
//

	.org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	MOV_FROM_IFA(r16)
	RSM_PSR_DT
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17
	;;
	mov r31=pr
	srlz.d
	br.sptk.many page_fault
END(page_not_present)

	.org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	MOV_FROM_IFA(r16)
	RSM_PSR_DT
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)

	.org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	MOV_FROM_IFA(r16)
	RSM_PSR_DT
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)

	.org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	MOV_FROM_IFA(r16)
	RSM_PSR_DT
	mov r31=pr
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)

	.org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	MOV_FROM_ISR(r16)
	mov r31=pr
	;;
	cmp4.eq p6,p0=0,r16
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24		// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)

	.org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh		// ensure we can access fph
	;;
	srlz.d
	mov r31=pr
	mov r19=25
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

	.org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)

	MOV_FROM_IPSR(p0, r16)
	MOV_FROM_ISR(r17)
	mov r31=pr				// save PR
	;;
	and r18=0xf,r17				// r18 = cr.isr.code{3:0}
	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
	;;
	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
	dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)	br.cond.spnt 1f		// branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
	;;
	MOV_TO_IPSR(p0, r16, r18)
	mov pr=r31,-1
	;;
	RFI

1:	mov pr=r31,-1
	;;
	FAULT(26)
END(nat_consumption)

	.org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
	MOV_FROM_IIM(r18)
	;;
	MOV_FROM_IIP(r17)
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;

	MOV_FROM_IPSR(p0, r16)
	shr r18=r18,39			// sign extend (39=43-4)
	;;

	add r17=r17,r18			// now add the offset
	;;
	MOV_TO_IIP(r17, r19)
	dep r16=0,r16,41,2		// clear EI
	;;

	MOV_TO_IPSR(p0, r16, r19)
	;;

	RFI
END(speculation_vector)
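
/*
 * Editor's note: the shl-43/shr-39 pair above sign-extends the 21-bit IIM
 * displacement and scales it to bytes (IP-relative displacements count
 * 16-byte bundles).  Equivalent C, as an illustrative sketch:
 *
 *	s64 disp = (s64)(iim << 43) >> 43;	// sign-extend imm21
 *	iip += disp << 4;			// 43-39 = 4: times 16 bytes
 *	ipsr &= ~(3UL << 41);			// clear ei: restart in slot 0
 */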
   1254
   1255	.org ia64_ivt+0x5800
   1256/////////////////////////////////////////////////////////////////////////////////////////
   1257// 0x5800 Entry 28 (size 16 bundles) Reserved
   1258	DBG_FAULT(28)
   1259	FAULT(28)
   1260
   1261	.org ia64_ivt+0x5900
   1262/////////////////////////////////////////////////////////////////////////////////////////
   1263// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
   1264ENTRY(debug_vector)
   1265	DBG_FAULT(29)
   1266	FAULT(29)
   1267END(debug_vector)
   1268
   1269	.org ia64_ivt+0x5a00
   1270/////////////////////////////////////////////////////////////////////////////////////////
   1271// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
   1272ENTRY(unaligned_access)
   1273	DBG_FAULT(30)
   1274	mov r31=pr		// prepare to save predicates
   1275	;;
   1276	br.sptk.many dispatch_unaligned_handler
   1277END(unaligned_access)
   1278
   1279	.org ia64_ivt+0x5b00
   1280/////////////////////////////////////////////////////////////////////////////////////////
   1281// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
   1282ENTRY(unsupported_data_reference)
   1283	DBG_FAULT(31)
   1284	FAULT(31)
   1285END(unsupported_data_reference)
   1286
   1287	.org ia64_ivt+0x5c00
   1288/////////////////////////////////////////////////////////////////////////////////////////
   1289// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
   1290ENTRY(floating_point_fault)
   1291	DBG_FAULT(32)
   1292	FAULT(32)
   1293END(floating_point_fault)
   1294
   1295	.org ia64_ivt+0x5d00
   1296/////////////////////////////////////////////////////////////////////////////////////////
   1297// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
   1298ENTRY(floating_point_trap)
   1299	DBG_FAULT(33)
   1300	FAULT(33)
   1301END(floating_point_trap)
   1302
   1303	.org ia64_ivt+0x5e00
   1304/////////////////////////////////////////////////////////////////////////////////////////
   1305// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
   1306ENTRY(lower_privilege_trap)
   1307	DBG_FAULT(34)
   1308	FAULT(34)
   1309END(lower_privilege_trap)
   1310
   1311	.org ia64_ivt+0x5f00
   1312/////////////////////////////////////////////////////////////////////////////////////////
   1313// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
   1314ENTRY(taken_branch_trap)
   1315	DBG_FAULT(35)
   1316	FAULT(35)
   1317END(taken_branch_trap)
   1318
   1319	.org ia64_ivt+0x6000
   1320/////////////////////////////////////////////////////////////////////////////////////////
   1321// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
   1322ENTRY(single_step_trap)
   1323	DBG_FAULT(36)
   1324	FAULT(36)
   1325END(single_step_trap)
   1326
   1327	.org ia64_ivt+0x6100
   1328/////////////////////////////////////////////////////////////////////////////////////////
   1329// 0x6100 Entry 37 (size 16 bundles) Reserved
   1330	DBG_FAULT(37)
   1331	FAULT(37)
   1332
   1333	.org ia64_ivt+0x6200
   1334/////////////////////////////////////////////////////////////////////////////////////////
   1335// 0x6200 Entry 38 (size 16 bundles) Reserved
   1336	DBG_FAULT(38)
   1337	FAULT(38)
   1338
   1339	.org ia64_ivt+0x6300
   1340/////////////////////////////////////////////////////////////////////////////////////////
   1341// 0x6300 Entry 39 (size 16 bundles) Reserved
   1342	DBG_FAULT(39)
   1343	FAULT(39)
   1344
   1345	.org ia64_ivt+0x6400
   1346/////////////////////////////////////////////////////////////////////////////////////////
   1347// 0x6400 Entry 40 (size 16 bundles) Reserved
   1348	DBG_FAULT(40)
   1349	FAULT(40)
   1350
   1351	.org ia64_ivt+0x6500
   1352/////////////////////////////////////////////////////////////////////////////////////////
   1353// 0x6500 Entry 41 (size 16 bundles) Reserved
   1354	DBG_FAULT(41)
   1355	FAULT(41)
   1356
   1357	.org ia64_ivt+0x6600
   1358/////////////////////////////////////////////////////////////////////////////////////////
   1359// 0x6600 Entry 42 (size 16 bundles) Reserved
   1360	DBG_FAULT(42)
   1361	FAULT(42)
   1362
   1363	.org ia64_ivt+0x6700
   1364/////////////////////////////////////////////////////////////////////////////////////////
   1365// 0x6700 Entry 43 (size 16 bundles) Reserved
   1366	DBG_FAULT(43)
   1367	FAULT(43)
   1368
   1369	.org ia64_ivt+0x6800
   1370/////////////////////////////////////////////////////////////////////////////////////////
   1371// 0x6800 Entry 44 (size 16 bundles) Reserved
   1372	DBG_FAULT(44)
   1373	FAULT(44)
   1374
   1375	.org ia64_ivt+0x6900
   1376/////////////////////////////////////////////////////////////////////////////////////////
   1377// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
   1378ENTRY(ia32_exception)
   1379	DBG_FAULT(45)
   1380	FAULT(45)
   1381END(ia32_exception)
   1382
   1383	.org ia64_ivt+0x6a00
   1384/////////////////////////////////////////////////////////////////////////////////////////
   1385// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
   1386ENTRY(ia32_intercept)
   1387	DBG_FAULT(46)
   1388	FAULT(46)
   1389END(ia32_intercept)
   1390
   1391	.org ia64_ivt+0x6b00
   1392/////////////////////////////////////////////////////////////////////////////////////////
   1393// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
   1394ENTRY(ia32_interrupt)
   1395	DBG_FAULT(47)
   1396	FAULT(47)
   1397END(ia32_interrupt)
   1398
   1399	.org ia64_ivt+0x6c00
   1400/////////////////////////////////////////////////////////////////////////////////////////
   1401// 0x6c00 Entry 48 (size 16 bundles) Reserved
   1402	DBG_FAULT(48)
   1403	FAULT(48)
   1404
   1405	.org ia64_ivt+0x6d00
   1406/////////////////////////////////////////////////////////////////////////////////////////
   1407// 0x6d00 Entry 49 (size 16 bundles) Reserved
   1408	DBG_FAULT(49)
   1409	FAULT(49)
   1410
   1411	.org ia64_ivt+0x6e00
   1412/////////////////////////////////////////////////////////////////////////////////////////
   1413// 0x6e00 Entry 50 (size 16 bundles) Reserved
   1414	DBG_FAULT(50)
   1415	FAULT(50)
   1416
   1417	.org ia64_ivt+0x6f00
   1418/////////////////////////////////////////////////////////////////////////////////////////
   1419// 0x6f00 Entry 51 (size 16 bundles) Reserved
   1420	DBG_FAULT(51)
   1421	FAULT(51)
   1422
   1423	.org ia64_ivt+0x7000
   1424/////////////////////////////////////////////////////////////////////////////////////////
   1425// 0x7000 Entry 52 (size 16 bundles) Reserved
   1426	DBG_FAULT(52)
   1427	FAULT(52)
   1428
   1429	.org ia64_ivt+0x7100
   1430/////////////////////////////////////////////////////////////////////////////////////////
   1431// 0x7100 Entry 53 (size 16 bundles) Reserved
   1432	DBG_FAULT(53)
   1433	FAULT(53)
   1434
   1435	.org ia64_ivt+0x7200
   1436/////////////////////////////////////////////////////////////////////////////////////////
   1437// 0x7200 Entry 54 (size 16 bundles) Reserved
   1438	DBG_FAULT(54)
   1439	FAULT(54)
   1440
   1441	.org ia64_ivt+0x7300
   1442/////////////////////////////////////////////////////////////////////////////////////////
   1443// 0x7300 Entry 55 (size 16 bundles) Reserved
   1444	DBG_FAULT(55)
   1445	FAULT(55)
   1446
   1447	.org ia64_ivt+0x7400
   1448/////////////////////////////////////////////////////////////////////////////////////////
   1449// 0x7400 Entry 56 (size 16 bundles) Reserved
   1450	DBG_FAULT(56)
   1451	FAULT(56)
   1452
   1453	.org ia64_ivt+0x7500
   1454/////////////////////////////////////////////////////////////////////////////////////////
   1455// 0x7500 Entry 57 (size 16 bundles) Reserved
   1456	DBG_FAULT(57)
   1457	FAULT(57)
   1458
   1459	.org ia64_ivt+0x7600
   1460/////////////////////////////////////////////////////////////////////////////////////////
   1461// 0x7600 Entry 58 (size 16 bundles) Reserved
   1462	DBG_FAULT(58)
   1463	FAULT(58)
   1464
   1465	.org ia64_ivt+0x7700
   1466/////////////////////////////////////////////////////////////////////////////////////////
   1467// 0x7700 Entry 59 (size 16 bundles) Reserved
   1468	DBG_FAULT(59)
   1469	FAULT(59)
   1470
   1471	.org ia64_ivt+0x7800
   1472/////////////////////////////////////////////////////////////////////////////////////////
   1473// 0x7800 Entry 60 (size 16 bundles) Reserved
   1474	DBG_FAULT(60)
   1475	FAULT(60)
   1476
   1477	.org ia64_ivt+0x7900
   1478/////////////////////////////////////////////////////////////////////////////////////////
   1479// 0x7900 Entry 61 (size 16 bundles) Reserved
   1480	DBG_FAULT(61)
   1481	FAULT(61)
   1482
   1483	.org ia64_ivt+0x7a00
   1484/////////////////////////////////////////////////////////////////////////////////////////
   1485// 0x7a00 Entry 62 (size 16 bundles) Reserved
   1486	DBG_FAULT(62)
   1487	FAULT(62)
   1488
   1489	.org ia64_ivt+0x7b00
   1490/////////////////////////////////////////////////////////////////////////////////////////
   1491// 0x7b00 Entry 63 (size 16 bundles) Reserved
   1492	DBG_FAULT(63)
   1493	FAULT(63)
   1494
   1495	.org ia64_ivt+0x7c00
   1496/////////////////////////////////////////////////////////////////////////////////////////
   1497// 0x7c00 Entry 64 (size 16 bundles) Reserved
   1498	DBG_FAULT(64)
   1499	FAULT(64)
   1500
   1501	.org ia64_ivt+0x7d00
   1502/////////////////////////////////////////////////////////////////////////////////////////
   1503// 0x7d00 Entry 65 (size 16 bundles) Reserved
   1504	DBG_FAULT(65)
   1505	FAULT(65)
   1506
   1507	.org ia64_ivt+0x7e00
   1508/////////////////////////////////////////////////////////////////////////////////////////
   1509// 0x7e00 Entry 66 (size 16 bundles) Reserved
   1510	DBG_FAULT(66)
   1511	FAULT(66)
   1512
   1513	.org ia64_ivt+0x7f00
   1514/////////////////////////////////////////////////////////////////////////////////////////
   1515// 0x7f00 Entry 67 (size 16 bundles) Reserved
   1516	DBG_FAULT(67)
   1517	FAULT(67)
   1518
   1519	//-----------------------------------------------------------------------------------
    1520	// call ia64_do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
   1521ENTRY(page_fault)
   1522	SSM_PSR_DT_AND_SRLZ_I
   1523	;;
   1524	SAVE_MIN_WITH_COVER
   1525	alloc r15=ar.pfs,0,0,3,0
   1526	MOV_FROM_IFA(out0)
   1527	MOV_FROM_ISR(out1)
   1528	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
   1529	adds r3=8,r2				// set up second base pointer
   1530	SSM_PSR_I(p15, p15, r14)		// restore psr.i
   1531	movl r14=ia64_leave_kernel
   1532	;;
   1533	SAVE_REST
   1534	mov rp=r14
   1535	;;
   1536	adds out2=16,r12			// out2 = pointer to pt_regs
   1537	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
   1538END(page_fault)
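
	/*
	 * For orientation only (a sketch, not used by the assembler): the C
	 * entry point reached through b6 above lives in arch/ia64/mm/fault.c
	 * and is expected to look roughly like
	 *
	 *	void ia64_do_page_fault(unsigned long address, unsigned long isr,
	 *				struct pt_regs *regs);
	 *
	 * i.e. out0 = cr.ifa (faulting address), out1 = cr.isr, and
	 * out2 = sp + 16 (the pt_regs frame built by SAVE_MIN/SAVE_REST).
	 */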
   1539
   1540ENTRY(non_syscall)
   1541	mov ar.rsc=r27			// restore ar.rsc before SAVE_MIN_WITH_COVER
   1542	;;
   1543	SAVE_MIN_WITH_COVER
   1544
   1545	// There is no particular reason for this code to be here, other than that
   1546	// there happens to be space here that would go unused otherwise.  If this
    1547	// fault ever gets "unreserved", simply move the following code to a more
   1548	// suitable spot...
   1549
   1550	alloc r14=ar.pfs,0,0,2,0
   1551	MOV_FROM_IIM(out0)
   1552	add out1=16,sp
   1553	adds r3=8,r2			// set up second base pointer for SAVE_REST
   1554
   1555	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
   1556					// guarantee that interruption collection is on
   1557	SSM_PSR_I(p15, p15, r15)	// restore psr.i
   1558	movl r15=ia64_leave_kernel
   1559	;;
   1560	SAVE_REST
   1561	mov rp=r15
   1562	;;
   1563	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
   1564END(non_syscall)
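
	/*
	 * Orientation sketch (not part of the handler): ia64_bad_break is the
	 * C-side break handler in arch/ia64/kernel/traps.c, with a prototype
	 * roughly of the form
	 *
	 *	void ia64_bad_break(unsigned long break_num, struct pt_regs *regs);
	 *
	 * so out0 carries cr.iim (the break immediate) and out1 points at the
	 * pt_regs frame at sp + 16.
	 */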
   1565
   1566ENTRY(__interrupt)
   1567	DBG_FAULT(12)
   1568	mov r31=pr		// prepare to save predicates
   1569	;;
   1570	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
   1571	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
   1572				// ensure everybody knows psr.ic is back on
   1573	adds r3=8,r2		// set up second base pointer for SAVE_REST
   1574	;;
   1575	SAVE_REST
   1576	;;
   1577	MCA_RECOVER_RANGE(interrupt)
   1578	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
   1579	MOV_FROM_IVR(out0, r8)	// pass cr.ivr as first arg
   1580	add out1=16,sp		// pass pointer to pt_regs as second arg
   1581	;;
   1582	srlz.d			// make sure we see the effect of cr.ivr
   1583	movl r14=ia64_leave_kernel
   1584	;;
   1585	mov rp=r14
   1586	br.call.sptk.many b6=ia64_handle_irq
   1587END(__interrupt)
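
	/*
	 * Sketch of the C side (orientation only): ia64_handle_irq lives in
	 * arch/ia64/kernel/irq_ia64.c and takes roughly
	 *
	 *	void ia64_handle_irq(ia64_vector vector, struct pt_regs *regs);
	 *
	 * matching out0 = cr.ivr (whose read, ordered by the srlz.d above,
	 * acknowledges the interrupt) and out1 = sp + 16.
	 */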
   1588
   1589	/*
   1590	 * There is no particular reason for this code to be here, other than that
   1591	 * there happens to be space here that would go unused otherwise.  If this
    1592	 * fault ever gets "unreserved", simply move the following code to a more
   1593	 * suitable spot...
   1594	 */
   1595
   1596ENTRY(dispatch_unaligned_handler)
   1597	SAVE_MIN_WITH_COVER
   1598	;;
   1599	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
   1600	MOV_FROM_IFA(out0)
   1601	adds out1=16,sp
   1602
   1603	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
   1604						// guarantee that interruption collection is on
   1605	SSM_PSR_I(p15, p15, r3)			// restore psr.i
   1606	adds r3=8,r2				// set up second base pointer
   1607	;;
   1608	SAVE_REST
   1609	movl r14=ia64_leave_kernel
   1610	;;
   1611	mov rp=r14
   1612	br.sptk.many ia64_prepare_handle_unaligned
   1613END(dispatch_unaligned_handler)
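
	/*
	 * For orientation (a sketch, not assembled here): the branch above
	 * goes via ia64_prepare_handle_unaligned, which ends up in the C
	 * handler in arch/ia64/kernel/unaligned.c, roughly
	 *
	 *	void ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs);
	 *
	 * matching out0 = cr.ifa and out1 = sp + 16.
	 */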
   1614
   1615	/*
   1616	 * There is no particular reason for this code to be here, other than that
   1617	 * there happens to be space here that would go unused otherwise.  If this
    1618	 * fault ever gets "unreserved", simply move the following code to a more
   1619	 * suitable spot...
   1620	 */
   1621
   1622ENTRY(dispatch_to_fault_handler)
   1623	/*
   1624	 * Input:
   1625	 *	psr.ic:	off
   1626	 *	r19:	fault vector number (e.g., 24 for General Exception)
   1627	 *	r31:	contains saved predicates (pr)
   1628	 */
   1629	SAVE_MIN_WITH_COVER_R19
   1630	alloc r14=ar.pfs,0,0,5,0
   1631	MOV_FROM_ISR(out1)
   1632	MOV_FROM_IFA(out2)
   1633	MOV_FROM_IIM(out3)
   1634	MOV_FROM_ITIR(out4)
   1635	;;
   1636	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
   1637						// guarantee that interruption collection is on
   1638	mov out0=r15
   1639	;;
   1640	SSM_PSR_I(p15, p15, r3)			// restore psr.i
   1641	adds r3=8,r2				// set up second base pointer for SAVE_REST
   1642	;;
   1643	SAVE_REST
   1644	movl r14=ia64_leave_kernel
   1645	;;
   1646	mov rp=r14
   1647	br.call.sptk.many b6=ia64_fault
   1648END(dispatch_to_fault_handler)
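
	/*
	 * Sketch of the C target (orientation only): ia64_fault in
	 * arch/ia64/kernel/traps.c begins roughly with
	 *
	 *	void ia64_fault(unsigned long vector, unsigned long isr,
	 *			unsigned long ifa, unsigned long iim,
	 *			unsigned long itir, ...);
	 *
	 * which matches out0 = r15 (the vector number preserved from r19 by
	 * SAVE_MIN_WITH_COVER_R19), out1 = cr.isr, out2 = cr.ifa,
	 * out3 = cr.iim, and out4 = cr.itir above.
	 */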
   1649
   1650	/*
   1651	 * Squatting in this space ...
   1652	 *
   1653	 * This special case dispatcher for illegal operation faults allows preserved
   1654	 * registers to be modified through a callback function (asm only) that is handed
   1655	 * back from the fault handler in r8. Up to three arguments can be passed to the
   1656	 * callback function by returning an aggregate with the callback as its first
   1657	 * element, followed by the arguments.
   1658	 */
   1659ENTRY(dispatch_illegal_op_fault)
   1660	.prologue
   1661	.body
   1662	SAVE_MIN_WITH_COVER
   1663	SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
   1664				// guarantee that interruption collection is on
   1665	;;
   1666	SSM_PSR_I(p15, p15, r3)	// restore psr.i
   1667	adds r3=8,r2	// set up second base pointer for SAVE_REST
   1668	;;
   1669	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
   1670	mov out0=ar.ec
   1671	;;
   1672	SAVE_REST
   1673	PT_REGS_UNWIND_INFO(0)
   1674	;;
   1675	br.call.sptk.many rp=ia64_illegal_op_fault
   1676.ret0:	;;
   1677	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
   1678	mov out0=r9
   1679	mov out1=r10
   1680	mov out2=r11
   1681	movl r15=ia64_leave_kernel
   1682	;;
   1683	mov rp=r15
   1684	mov b6=r8
   1685	;;
   1686	cmp.ne p6,p0=0,r8
   1687(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
   1688	br.sptk.many ia64_leave_kernel
   1689END(dispatch_illegal_op_fault)
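
	/*
	 * Sketch of the protocol described above (orientation only): the C
	 * handler in arch/ia64/kernel/traps.c returns an aggregate roughly of
	 * the form
	 *
	 *	struct illegal_op_return {
	 *		unsigned long fkt, arg1, arg2, arg3;
	 *	};
	 *
	 * which the ia64 calling convention places in r8-r11: r8 (fkt) is the
	 * optional asm callback branched to through b6 above, and r9-r11 are
	 * the arguments handed to it in out0-out2.
	 */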