cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

processor.h (17812B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _ASM_IA64_PROCESSOR_H
      3#define _ASM_IA64_PROCESSOR_H
      4
      5/*
      6 * Copyright (C) 1998-2004 Hewlett-Packard Co
      7 *	David Mosberger-Tang <davidm@hpl.hp.com>
      8 *	Stephane Eranian <eranian@hpl.hp.com>
      9 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
     10 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
     11 *
     12 * 11/24/98	S.Eranian	added ia64_set_iva()
     13 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
     14 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
     15 */
     16
     17
     18#include <asm/intrinsics.h>
     19#include <asm/kregs.h>
     20#include <asm/ptrace.h>
     21#include <asm/ustack.h>
     22
     23#define IA64_NUM_PHYS_STACK_REG	96
     24#define IA64_NUM_DBG_REGS	8
     25
     26#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
     27#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
     28
     29/*
     30 * TASK_SIZE is really a misnomer.  It is the maximum user
     31 * space address (plus one).  On IA-64, there are five regions of 2TB
     32 * each (assuming 8KB page size), for a total of 8TB of user virtual
     33 * address space.
     34 */
     35#define TASK_SIZE       	DEFAULT_TASK_SIZE
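/*
 * Worked example (editor's addition, not part of the upstream header): an
 * IA-64 virtual address selects its region via bits 63-61, so
 * DEFAULT_TASK_SIZE == 0xa000000000000000 == 5UL << 61 is the base of
 * region 5.  User space therefore spans regions 0-4, and TASK_SIZE is the
 * first address user code may not touch.
 */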
     36
     37/*
     38 * This decides where the kernel will search for a free chunk of vm
     39 * space during mmap().
     40 */
     41#define TASK_UNMAPPED_BASE	(current->thread.map_base)
     42
     43#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
     44#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
     45#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
     46#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
     47#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
     48#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
     49							   sync at ctx sw */
     50#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
     51#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
     52
     53#define IA64_THREAD_UAC_SHIFT	3
     54#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
     55#define IA64_THREAD_FPEMU_SHIFT	6
     56#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
     57
     58
     59/*
     60 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
     61 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
     62 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
     63 */
     64#define IA64_NSEC_PER_CYC_SHIFT	30
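/*
 * Illustrative arithmetic (editor's addition, not in the upstream header):
 * cpuinfo_ia64.nsec_per_cyc below is (1000000000 << IA64_NSEC_PER_CYC_SHIFT)
 * / itc_freq, so an ITC cycle count is converted to nanoseconds with a
 * multiply and a shift, roughly:
 *
 *	ns = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *
 * e.g. with itc_freq = 1 GHz, nsec_per_cyc = 1 << 30 and 2000 cycles map to
 * 2000 ns.  Ten seconds' worth of nanoseconds shifted left by 30 is about
 * 1.1e19, which still fits in 64 bits, matching the slack described above.
 */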
     65
     66#ifndef __ASSEMBLY__
     67
     68#include <linux/cache.h>
     69#include <linux/compiler.h>
     70#include <linux/threads.h>
     71#include <linux/types.h>
     72#include <linux/bitops.h>
     73
     74#include <asm/fpu.h>
     75#include <asm/page.h>
     76#include <asm/percpu.h>
     77#include <asm/rse.h>
     78#include <asm/unwind.h>
     79#include <linux/atomic.h>
     80#ifdef CONFIG_NUMA
     81#include <asm/nodedata.h>
     82#endif
     83
     84/* the processor status register (psr), like the IA64_PSR_* masks in <asm/kregs.h> but expressed as bitfields for more efficient access: */
     85struct ia64_psr {
     86	__u64 reserved0 : 1;
     87	__u64 be : 1;
     88	__u64 up : 1;
     89	__u64 ac : 1;
     90	__u64 mfl : 1;
     91	__u64 mfh : 1;
     92	__u64 reserved1 : 7;
     93	__u64 ic : 1;
     94	__u64 i : 1;
     95	__u64 pk : 1;
     96	__u64 reserved2 : 1;
     97	__u64 dt : 1;
     98	__u64 dfl : 1;
     99	__u64 dfh : 1;
    100	__u64 sp : 1;
    101	__u64 pp : 1;
    102	__u64 di : 1;
    103	__u64 si : 1;
    104	__u64 db : 1;
    105	__u64 lp : 1;
    106	__u64 tb : 1;
    107	__u64 rt : 1;
    108	__u64 reserved3 : 4;
    109	__u64 cpl : 2;
    110	__u64 is : 1;
    111	__u64 mc : 1;
    112	__u64 it : 1;
    113	__u64 id : 1;
    114	__u64 da : 1;
    115	__u64 dd : 1;
    116	__u64 ss : 1;
    117	__u64 ri : 2;
    118	__u64 ed : 1;
    119	__u64 bn : 1;
    120	__u64 reserved4 : 19;
    121};
    122
    123union ia64_isr {
    124	__u64  val;
    125	struct {
    126		__u64 code : 16;
    127		__u64 vector : 8;
    128		__u64 reserved1 : 8;
    129		__u64 x : 1;
    130		__u64 w : 1;
    131		__u64 r : 1;
    132		__u64 na : 1;
    133		__u64 sp : 1;
    134		__u64 rs : 1;
    135		__u64 ir : 1;
    136		__u64 ni : 1;
    137		__u64 so : 1;
    138		__u64 ei : 2;
    139		__u64 ed : 1;
    140		__u64 reserved2 : 20;
    141	};
    142};
    143
    144union ia64_lid {
    145	__u64 val;
    146	struct {
    147		__u64  rv  : 16;
    148		__u64  eid : 8;
    149		__u64  id  : 8;
    150		__u64  ig  : 32;
    151	};
    152};
    153
    154union ia64_tpr {
    155	__u64 val;
    156	struct {
    157		__u64 ig0 : 4;
    158		__u64 mic : 4;
    159		__u64 rsv : 8;
    160		__u64 mmi : 1;
    161		__u64 ig1 : 47;
    162	};
    163};
    164
    165union ia64_itir {
    166	__u64 val;
    167	struct {
    168		__u64 rv3  :  2; /* 0-1 */
    169		__u64 ps   :  6; /* 2-7 */
    170		__u64 key  : 24; /* 8-31 */
    171		__u64 rv4  : 32; /* 32-63 */
    172	};
    173};
    174
    175union  ia64_rr {
    176	__u64 val;
    177	struct {
    178		__u64  ve	:  1;  /* enable hw walker */
    179		__u64  reserved0:  1;  /* reserved */
    180		__u64  ps	:  6;  /* log page size */
    181		__u64  rid	: 24;  /* region id */
    182		__u64  reserved1: 32;  /* reserved */
    183	};
    184};
    185
    186/*
    187 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
    188 * state comes earlier:
    189 */
    190struct cpuinfo_ia64 {
    191	unsigned int softirq_pending;
    192	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
    193	unsigned long itm_next;		/* interval timer match value to use for next clock tick */
    194	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
    195	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
    196	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
    197	unsigned long itc_freq;		/* frequency of ITC counter */
    198	unsigned long proc_freq;	/* frequency of processor */
    199	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
    200	unsigned long ptce_base;
    201	unsigned int ptce_count[2];
    202	unsigned int ptce_stride[2];
    203	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
    204
    205#ifdef CONFIG_SMP
    206	unsigned long loops_per_jiffy;
    207	int cpu;
    208	unsigned int socket_id;	/* physical processor socket id */
    209	unsigned short core_id;	/* core id */
    210	unsigned short thread_id; /* thread id */
    211	unsigned short num_log;	/* Total number of logical processors on
    212				 * this socket that were successfully booted */
    213	unsigned char cores_per_socket;	/* Cores per processor socket */
    214	unsigned char threads_per_core;	/* Threads per core */
    215#endif
    216
    217	/* CPUID-derived information: */
    218	unsigned long ppn;
    219	unsigned long features;
    220	unsigned char number;
    221	unsigned char revision;
    222	unsigned char model;
    223	unsigned char family;
    224	unsigned char archrev;
    225	char vendor[16];
    226	char *model_name;
    227
    228#ifdef CONFIG_NUMA
    229	struct ia64_node_data *node_data;
    230#endif
    231};
    232
    233DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
    234
    235/*
    236 * The "local" data variable.  It refers to the per-CPU data of the currently executing
    237 * CPU, much like "current" points to the per-task data of the currently executing task.
    238 * Do not use the address of local_cpu_data, since it will be different from
    239 * cpu_data(smp_processor_id())!
    240 */
    241#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
    242#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
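/*
 * Editor's sketch (not part of the upstream header): how the comment above
 * is meant to be honoured from a .c file.  Preemption is disabled so the
 * "local" CPU cannot change underneath us; <linux/preempt.h> is assumed to
 * be available at the point of use, and the function name is illustrative.
 */
static inline unsigned long example_local_itc_freq(void)
{
	unsigned long freq;

	preempt_disable();			/* pin execution to one CPU */
	freq = local_cpu_data->itc_freq;	/* per-CPU read, no address taken */
	preempt_enable();
	return freq;
}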
    243
    244extern void print_cpu_info (struct cpuinfo_ia64 *);
    245
    246#define SET_UNALIGN_CTL(task,value)								\
    247({												\
    248	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
    249				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
    250	0;											\
    251})
    252#define GET_UNALIGN_CTL(task,addr)								\
    253({												\
    254	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
    255		 (int __user *) (addr));							\
    256})
    257
    258#define SET_FPEMU_CTL(task,value)								\
    259({												\
    260	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
    261			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
    262	0;											\
    263})
    264#define GET_FPEMU_CTL(task,addr)								\
    265({												\
    266	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
    267		 (int __user *) (addr));							\
    268})
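/*
 * Editor's note (not in the upstream header): these four macros back the
 * generic prctl() unaligned-access and FP-emulation controls, so from user
 * space the IA64_THREAD_UAC_* and IA64_THREAD_FPEMU_* flags above are
 * toggled roughly like this:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// SIGBUS instead of silent fixup
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);	// stop logging unaligned accesses
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		// SIGFPE on fpswa-assisted faults
 */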
    269
    270struct thread_struct {
    271	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
    272	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
    273	__u8 on_ustack;			/* executing on user-stacks? */
    274	__u8 pad[3];
    275	__u64 ksp;			/* kernel stack pointer */
    276	__u64 map_base;			/* base address for get_unmapped_area() */
    277	__u64 rbs_bot;			/* the base address for the RBS */
    278	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */
    279	unsigned long dbr[IA64_NUM_DBG_REGS];
    280	unsigned long ibr[IA64_NUM_DBG_REGS];
    281	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
    282};
    283
    284#define INIT_THREAD {						\
    285	.flags =	0,					\
    286	.on_ustack =	0,					\
    287	.ksp =		0,					\
    288	.map_base =	DEFAULT_MAP_BASE,			\
    289	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
    290	.last_fph_cpu =  -1,					\
    291	.dbr =		{0, },					\
    292	.ibr =		{0, },					\
    293	.fph =		{{{{0}}}, }				\
    294}
    295
    296#define start_thread(regs,new_ip,new_sp) do {							\
    297	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
    298			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
    299	regs->cr_iip = new_ip;									\
    300	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
    301	regs->ar_rnat = 0;									\
    302	regs->ar_bspstore = current->thread.rbs_bot;						\
    303	regs->ar_fpsr = FPSR_DEFAULT;								\
    304	regs->loadrs = 0;									\
    305	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
    306	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
    307	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) {	\
    308		/*										\
    309		 * Zap scratch regs to avoid leaking bits between processes with different	\
    310		 * uid/privileges.								\
    311		 */										\
    312		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
    313		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
    314	}											\
    315} while (0)
    316
    317/* Forward declarations, a strange C thing... */
    318struct mm_struct;
    319struct task_struct;
    320
    321/*
    322 * Free all resources held by a thread. This is called after the
    323 * parent of DEAD_TASK has collected the exit status of the task via
    324 * wait().
    325 */
    326#define release_thread(dead_task)
    327
    328/* Get wait channel for task P.  */
    329extern unsigned long __get_wchan (struct task_struct *p);
    330
    331/* Return instruction pointer of blocked task TSK.  */
    332#define KSTK_EIP(tsk)					\
    333  ({							\
    334	struct pt_regs *_regs = task_pt_regs(tsk);	\
    335	_regs->cr_iip + ia64_psr(_regs)->ri;		\
    336  })
    337
    338/* Return stack pointer of blocked task TSK.  */
    339#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
    340
    341extern void ia64_getreg_unknown_kr (void);
    342extern void ia64_setreg_unknown_kr (void);
    343
    344#define ia64_get_kr(regnum)					\
    345({								\
    346	unsigned long r = 0;					\
    347								\
    348	switch (regnum) {					\
    349	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
    350	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
    351	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
    352	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
    353	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
    354	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
    355	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
    356	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
    357	    default: ia64_getreg_unknown_kr(); break;		\
    358	}							\
    359	r;							\
    360})
    361
    362#define ia64_set_kr(regnum, r) 					\
    363({								\
    364	switch (regnum) {					\
    365	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
    366	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
    367	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
    368	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
    369	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
    370	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
    371	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
    372	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
    373	    default: ia64_setreg_unknown_kr(); break;		\
    374	}							\
    375})
    376
    377/*
    378 * The following three macros can't be inline functions because we don't have struct
    379 * task_struct at this point.
    380 */
    381
    382/*
    383 * Return TRUE if task T owns the fph partition of the CPU we're running on.
    384 * Must be called from code that has preemption disabled.
    385 */
    386#define ia64_is_local_fpu_owner(t)								\
    387({												\
    388	struct task_struct *__ia64_islfo_task = (t);						\
    389	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
    390	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
    391})
    392
    393/*
    394 * Mark task T as owning the fph partition of the CPU we're running on.
    395 * Must be called from code that has preemption disabled.
    396 */
    397#define ia64_set_local_fpu_owner(t) do {						\
    398	struct task_struct *__ia64_slfo_task = (t);					\
    399	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
    400	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
    401} while (0)
    402
    403/* Mark the fph partition of task T as being invalid on all CPUs.  */
    404#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
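/*
 * Editor's sketch (not part of the upstream header): the lazy fph hand-off
 * these three helpers implement, roughly as a disabled-FP-high fault path in
 * a .c file (with the full struct task_struct visible and preemption already
 * disabled) would use them:
 *
 *	if (!ia64_is_local_fpu_owner(current)) {
 *		ia64_fph_enable();				// clear psr.dfh
 *		if (current->thread.flags & IA64_THREAD_FPH_VALID)
 *			__ia64_load_fpu(current->thread.fph);	// restore saved f32-f127
 *		else
 *			__ia64_init_fpu();			// start from a clean fph
 *		ia64_set_local_fpu_owner(current);
 *	}
 */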
    405
    406extern void __ia64_init_fpu (void);
    407extern void __ia64_save_fpu (struct ia64_fpreg *fph);
    408extern void __ia64_load_fpu (struct ia64_fpreg *fph);
    409extern void ia64_save_debug_regs (unsigned long *save_area);
    410extern void ia64_load_debug_regs (unsigned long *save_area);
    411
    412#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
    413#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
    414
    415/* load fp 0.0 into fph */
    416static inline void
    417ia64_init_fpu (void) {
    418	ia64_fph_enable();
    419	__ia64_init_fpu();
    420	ia64_fph_disable();
    421}
    422
    423/* save f32-f127 at FPH */
    424static inline void
    425ia64_save_fpu (struct ia64_fpreg *fph) {
    426	ia64_fph_enable();
    427	__ia64_save_fpu(fph);
    428	ia64_fph_disable();
    429}
    430
    431/* load f32-f127 from FPH */
    432static inline void
    433ia64_load_fpu (struct ia64_fpreg *fph) {
    434	ia64_fph_enable();
    435	__ia64_load_fpu(fph);
    436	ia64_fph_disable();
    437}
    438
    439static inline __u64
    440ia64_clear_ic (void)
    441{
    442	__u64 psr;
    443	psr = ia64_getreg(_IA64_REG_PSR);
    444	ia64_stop();
    445	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
    446	ia64_srlz_i();
    447	return psr;
    448}
    449
    450/*
    451 * Restore the psr.
    452 */
    453static inline void
    454ia64_set_psr (__u64 psr)
    455{
    456	ia64_stop();
    457	ia64_setreg(_IA64_REG_PSR_L, psr);
    458	ia64_srlz_i();
    459}
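/*
 * Editor's sketch (not in the upstream header): ia64_clear_ic() and
 * ia64_set_psr() are meant to bracket a short critical section, e.g. around
 * the translation-register insert helper defined just below:
 *
 *	psr = ia64_clear_ic();		// save psr, clear psr.i and psr.ic
 *	ia64_itr(0x2, tr_num, vaddr, pte, log_page_size);
 *	ia64_set_psr(psr);		// restore the saved psr (serialised)
 *
 * tr_num, vaddr, pte and log_page_size are placeholders for the caller's
 * translation parameters.
 */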
    460
    461/*
    462 * Insert a translation into an instruction and/or data translation
    463 * register.
    464 */
    465static inline void
    466ia64_itr (__u64 target_mask, __u64 tr_num,
    467	  __u64 vmaddr, __u64 pte,
    468	  __u64 log_page_size)
    469{
    470	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
    471	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
    472	ia64_stop();
    473	if (target_mask & 0x1)
    474		ia64_itri(tr_num, pte);
    475	if (target_mask & 0x2)
    476		ia64_itrd(tr_num, pte);
    477}
    478
    479/*
    480 * Insert a translation into the instruction and/or data translation
    481 * cache.
    482 */
    483static inline void
    484ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
    485	  __u64 log_page_size)
    486{
    487	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
    488	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
    489	ia64_stop();
    490	/* as per EAS2.6, itc must be the last instruction in an instruction group */
    491	if (target_mask & 0x1)
    492		ia64_itci(pte);
    493	if (target_mask & 0x2)
    494		ia64_itcd(pte);
    495}
    496
    497/*
    498 * Purge a range of addresses from instruction and/or data translation
    499 * register(s).
    500 */
    501static inline void
    502ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
    503{
    504	if (target_mask & 0x1)
    505		ia64_ptri(vmaddr, (log_size << 2));
    506	if (target_mask & 0x2)
    507		ia64_ptrd(vmaddr, (log_size << 2));
    508}
    509
    510/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
    511static inline void
    512ia64_set_iva (void *ivt_addr)
    513{
    514	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
    515	ia64_srlz_i();
    516}
    517
    518/* Set the page table address and control bits.  */
    519static inline void
    520ia64_set_pta (__u64 pta)
    521{
    522	/* Note: srlz.i implies srlz.d */
    523	ia64_setreg(_IA64_REG_CR_PTA, pta);
    524	ia64_srlz_i();
    525}
    526
    527static inline void
    528ia64_eoi (void)
    529{
    530	ia64_setreg(_IA64_REG_CR_EOI, 0);
    531	ia64_srlz_d();
    532}
    533
    534#define cpu_relax()	ia64_hint(ia64_hint_pause)
    535
    536static inline int
    537ia64_get_irr(unsigned int vector)
    538{
    539	unsigned int reg = vector / 64;
    540	unsigned int bit = vector % 64;
    541	u64 irr;
    542
    543	switch (reg) {
    544	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
    545	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
    546	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
    547	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
    548	}
    549
    550	return test_bit(bit, &irr);
    551}
    552
    553static inline void
    554ia64_set_lrr0 (unsigned long val)
    555{
    556	ia64_setreg(_IA64_REG_CR_LRR0, val);
    557	ia64_srlz_d();
    558}
    559
    560static inline void
    561ia64_set_lrr1 (unsigned long val)
    562{
    563	ia64_setreg(_IA64_REG_CR_LRR1, val);
    564	ia64_srlz_d();
    565}
    566
    567
    568/*
    569 * Given the address to which a spill occurred, return the unat bit
    570 * number that corresponds to this address.
    571 */
    572static inline __u64
    573ia64_unat_pos (void *spill_addr)
    574{
    575	return ((__u64) spill_addr >> 3) & 0x3f;
    576}
    577
    578/*
    579 * Set the NaT bit of an integer register which was spilled at address
    580 * SPILL_ADDR.  UNAT is the mask to be updated.
    581 */
    582static inline void
    583ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
    584{
    585	__u64 bit = ia64_unat_pos(spill_addr);
    586	__u64 mask = 1UL << bit;
    587
    588	*unat = (*unat & ~mask) | (nat << bit);
    589}
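/*
 * Worked example (editor's addition, not in the upstream header): a spill to
 * address 0xe000000000010548 occupies double-word slot
 * (0x548 >> 3) & 0x3f == 41 of its 512-byte-aligned window, so
 * ia64_set_unat(&unat, spill_addr, 1) sets bit 41 of the UNAT collection
 * word and ia64_set_unat(&unat, spill_addr, 0) clears it again.
 */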
    590
    591static inline __u64
    592ia64_get_ivr (void)
    593{
    594	__u64 r;
    595	ia64_srlz_d();
    596	r = ia64_getreg(_IA64_REG_CR_IVR);
    597	ia64_srlz_d();
    598	return r;
    599}
    600
    601static inline void
    602ia64_set_dbr (__u64 regnum, __u64 value)
    603{
    604	__ia64_set_dbr(regnum, value);
    605#ifdef CONFIG_ITANIUM
    606	ia64_srlz_d();
    607#endif
    608}
    609
    610static inline __u64
    611ia64_get_dbr (__u64 regnum)
    612{
    613	__u64 retval;
    614
    615	retval = __ia64_get_dbr(regnum);
    616#ifdef CONFIG_ITANIUM
    617	ia64_srlz_d();
    618#endif
    619	return retval;
    620}
    621
    622static inline __u64
    623ia64_rotr (__u64 w, __u64 n)
    624{
    625	return (w >> n) | (w << (64 - n));
    626}
    627
    628#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
    629
    630/*
    631 * Take a mapped kernel address and return the equivalent address
    632 * in the region 7 identity mapped virtual area.
    633 */
    634static inline void *
    635ia64_imva (void *addr)
    636{
    637	void *result;
    638	result = (void *) ia64_tpa(addr);
    639	return __va(result);
    640}
    641
    642#define ARCH_HAS_PREFETCH
    643#define ARCH_HAS_PREFETCHW
    644#define ARCH_HAS_SPINLOCK_PREFETCH
    645#define PREFETCH_STRIDE			L1_CACHE_BYTES
    646
    647static inline void
    648prefetch (const void *x)
    649{
    650	 ia64_lfetch(ia64_lfhint_none, x);
    651}
    652
    653static inline void
    654prefetchw (const void *x)
    655{
    656	ia64_lfetch_excl(ia64_lfhint_none, x);
    657}
    658
    659#define spin_lock_prefetch(x)	prefetchw(x)
    660
    661extern unsigned long boot_option_idle_override;
    662
    663enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
    664			 IDLE_NOMWAIT, IDLE_POLL};
    665
    666void default_idle(void);
    667
    668#endif /* !__ASSEMBLY__ */
    669
    670#endif /* _ASM_IA64_PROCESSOR_H */