cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ptrace.c (21909B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Kernel support for the ptrace() and syscall tracing interfaces.
      4 *
      5 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
      6 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
      7 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
      8 * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
      9 */
     10
     11#include <linux/kernel.h>
     12#include <linux/sched.h>
     13#include <linux/mm.h>
     14#include <linux/smp.h>
     15#include <linux/elf.h>
     16#include <linux/errno.h>
     17#include <linux/ptrace.h>
     18#include <linux/user.h>
     19#include <linux/personality.h>
     20#include <linux/regset.h>
     21#include <linux/security.h>
     22#include <linux/seccomp.h>
     23#include <linux/compat.h>
     24#include <linux/signal.h>
     25#include <linux/audit.h>
     26
     27#include <linux/uaccess.h>
     28#include <asm/processor.h>
     29#include <asm/asm-offsets.h>
     30
     31/* PSW bits we allow the debugger to modify */
     32#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)
     33
     34#define CREATE_TRACE_POINTS
     35#include <trace/events/syscalls.h>
     36
     37/*
     38 * These are our native regset flavors.
     39 */
     40enum parisc_regset {
     41	REGSET_GENERAL,
     42	REGSET_FP
     43};
     44
     45/*
     46 * Called by kernel/ptrace.c when detaching.
     47 *
     48 * Make sure single step bits etc are not set.
     49 */
     50void ptrace_disable(struct task_struct *task)
     51{
     52	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
     53	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
     54
     55	/* make sure the trap bits are not set */
     56	pa_psw(task)->r = 0;
     57	pa_psw(task)->t = 0;
     58	pa_psw(task)->h = 0;
     59	pa_psw(task)->l = 0;
     60}
     61
     62/*
     63 * The following functions are called by ptrace_resume() when
     64 * enabling or disabling single/block tracing.
     65 */
     66void user_disable_single_step(struct task_struct *task)
     67{
     68	ptrace_disable(task);
     69}
     70
     71void user_enable_single_step(struct task_struct *task)
     72{
     73	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
     74	set_tsk_thread_flag(task, TIF_SINGLESTEP);
     75
     76	if (pa_psw(task)->n) {
     77		/* Nullified, just crank over the queue. */
     78		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
     79		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
     80		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
     81		pa_psw(task)->n = 0;
     82		pa_psw(task)->x = 0;
     83		pa_psw(task)->y = 0;
     84		pa_psw(task)->z = 0;
     85		pa_psw(task)->b = 0;
     86		ptrace_disable(task);
     87		/* Don't wake up the task, but let the
     88		   parent know something happened. */
     89		force_sig_fault_to_task(SIGTRAP, TRAP_TRACE,
     90					(void __user *) (task_regs(task)->iaoq[0] & ~3),
     91					task);
     92		/* notify_parent(task, SIGCHLD); */
     93		return;
     94	}
     95
     96	/* Enable recovery counter traps.  The recovery counter
     97	 * itself will be set to zero on a task switch.  If the
     98	 * task is suspended on a syscall then the syscall return
     99	 * path will overwrite the recovery counter with a suitable
    100	 * value such that it traps once back in user space.  We
    101	 * disable interrupts in the task's PSW here also, to avoid
    102	 * interrupts while the recovery counter is decrementing.
    103	 */
    104	pa_psw(task)->r = 1;
    105	pa_psw(task)->t = 0;
    106	pa_psw(task)->h = 0;
    107	pa_psw(task)->l = 0;
    108}
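
/*
 * Illustrative sketch, not part of the file above: a userspace tracer that
 * exercises the single-step path implemented by user_enable_single_step().
 * Each PTRACE_SINGLESTEP resume makes the kernel arm the recovery-counter
 * trap described in the comment above, so the child stops with SIGTRAP after
 * one instruction.  "./target" is a placeholder for any binary.
 */
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("./target", "target", (char *)NULL);	/* placeholder */
		_exit(1);
	}

	waitpid(pid, NULL, 0);			/* child stops after execve() */

	for (int i = 0; i < 10; i++) {		/* step ten instructions */
		if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
			break;
		waitpid(pid, NULL, 0);		/* one SIGTRAP per instruction */
	}

	kill(pid, SIGKILL);
	return 0;
}
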
    109
    110void user_enable_block_step(struct task_struct *task)
    111{
    112	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
    113	set_tsk_thread_flag(task, TIF_BLOCKSTEP);
    114
    115	/* Enable taken branch trap. */
    116	pa_psw(task)->r = 0;
    117	pa_psw(task)->t = 1;
    118	pa_psw(task)->h = 0;
    119	pa_psw(task)->l = 0;
    120}
    121
    122long arch_ptrace(struct task_struct *child, long request,
    123		 unsigned long addr, unsigned long data)
    124{
    125	unsigned long __user *datap = (unsigned long __user *)data;
    126	unsigned long tmp;
    127	long ret = -EIO;
    128
    129	switch (request) {
    130
    131	/* Read the word at location addr in the USER area.  For ptraced
    132	   processes, the kernel saves all regs on a syscall. */
    133	case PTRACE_PEEKUSR:
    134		if ((addr & (sizeof(unsigned long)-1)) ||
    135		     addr >= sizeof(struct pt_regs))
    136			break;
    137		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
    138		ret = put_user(tmp, datap);
    139		break;
    140
    141	/* Write the word at location addr in the USER area.  This will need
    142	   to change when the kernel no longer saves all regs on a syscall.
    143	   FIXME.  There is a problem at the moment in that r3-r18 are only
    144	   saved if the process is ptraced on syscall entry, and even then
    145	   those values are overwritten by actual register values on syscall
    146	   exit. */
    147	case PTRACE_POKEUSR:
    148		/* Some register values written here may be ignored in
    149		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
    150		 * r31/r31+4, and not with the values in pt_regs.
    151		 */
    152		if (addr == PT_PSW) {
    153			/* Allow writing to Nullify, Divide-step-correction,
    154			 * and carry/borrow bits.
    155			 * BEWARE, if you set N, and then single step, it won't
    156			 * stop on the nullified instruction.
    157			 */
    158			data &= USER_PSW_BITS;
    159			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
    160			task_regs(child)->gr[0] |= data;
    161			ret = 0;
    162			break;
    163		}
    164
    165		if ((addr & (sizeof(unsigned long)-1)) ||
    166		     addr >= sizeof(struct pt_regs))
    167			break;
    168		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
    169			data |= 3; /* ensure userspace privilege */
    170		}
    171		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
    172				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
    173				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
    174				addr == PT_SAR) {
    175			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
    176			ret = 0;
    177		}
    178		break;
    179
    180	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
    181		return copy_regset_to_user(child,
    182					   task_user_regset_view(current),
    183					   REGSET_GENERAL,
    184					   0, sizeof(struct user_regs_struct),
    185					   datap);
    186
    187	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
    188		return copy_regset_from_user(child,
    189					     task_user_regset_view(current),
    190					     REGSET_GENERAL,
    191					     0, sizeof(struct user_regs_struct),
    192					     datap);
    193
    194	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
    195		return copy_regset_to_user(child,
    196					   task_user_regset_view(current),
    197					   REGSET_FP,
    198					   0, sizeof(struct user_fp_struct),
    199					   datap);
    200
    201	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
    202		return copy_regset_from_user(child,
    203					     task_user_regset_view(current),
    204					     REGSET_FP,
    205					     0, sizeof(struct user_fp_struct),
    206					     datap);
    207
    208	default:
    209		ret = ptrace_request(child, request, addr, data);
    210		break;
    211	}
    212
    213	return ret;
    214}
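
/*
 * Illustrative sketch, not part of the file above: how a native tracer hits
 * the PTRACE_PEEKUSR case of arch_ptrace().  On parisc the "addr" argument
 * is a byte offset into struct pt_regs; using offsetof() on the uapi
 * struct pt_regs from <asm/ptrace.h> is an assumption here, and "pid" must
 * already be a ptrace-stopped child (glibc spells the request
 * PTRACE_PEEKUSER).
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_regs layout (assumed uapi) */

static long peek_gr26(pid_t pid)
{
	unsigned long off = offsetof(struct pt_regs, gr[26]);
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid, (void *)off, NULL);
	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");
	return val;
}
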
    215
    216
    217#ifdef CONFIG_COMPAT
    218
    219/* This function is needed to translate 32 bit pt_regs offsets into
    220 * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
    221 * will request offset 12 if it wants gr3, but the lower 32 bits of
    222 * the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4).
    223 * This code relies on a 32 bit pt_regs being comprised of 32 bit values
    224 * except for the fp registers which (a) are 64 bits, and (b) follow
    225 * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
    226 * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
    227 * being 64 bit in both cases.
    228 */
    229
    230static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
    231{
    232	compat_ulong_t pos;
    233
    234	if (offset < 32*4)	/* gr[0..31] */
    235		pos = offset * 2 + 4;
    236	else if (offset < 32*4+32*8)	/* fr[0] ... fr[31] */
    237		pos = (offset - 32*4) + PT_FR0;
    238	else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
    239		pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
    240	else
    241		pos = sizeof(struct pt_regs);
    242
    243	return pos;
    244}
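
/*
 * Worked example of translate_usr_offset() above, repeated in executable
 * form: a 32-bit tracer addressing gr3 passes 3*4 = 12, which lands on the
 * low word of the 64-bit gr[3] at 12*2 + 4 = 28; fr registers keep their
 * 8-byte stride and are simply rebased onto the 64-bit fr[] area.  The
 * PT_FR0 value below (32 general registers of 8 bytes each) is an assumption
 * drawn from the layout described in the comment above.
 */
#include <stdio.h>

#define PT_FR0_ASSUMED	(32 * 8)	/* fr[] follows gr[0..31] in pt_regs */

int main(void)
{
	unsigned int gr3_32 = 3 * 4;				/* 12  */
	unsigned int gr3_64 = gr3_32 * 2 + 4;			/* 28  */
	unsigned int fr5_32 = 32 * 4 + 5 * 8;			/* 168 */
	unsigned int fr5_64 = (fr5_32 - 32 * 4) + PT_FR0_ASSUMED; /* 296 */

	printf("gr3: 32-bit offset %u -> 64-bit offset %u\n", gr3_32, gr3_64);
	printf("fr5: 32-bit offset %u -> 64-bit offset %u\n", fr5_32, fr5_64);
	return 0;
}
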
    245
    246long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
    247			compat_ulong_t addr, compat_ulong_t data)
    248{
    249	compat_uint_t tmp;
    250	long ret = -EIO;
    251
    252	switch (request) {
    253
    254	case PTRACE_PEEKUSR:
    255		if (addr & (sizeof(compat_uint_t)-1))
    256			break;
    257		addr = translate_usr_offset(addr);
    258		if (addr >= sizeof(struct pt_regs))
    259			break;
    260
    261		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
    262		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
    263		break;
    264
    265	/* Write the word at location addr in the USER area.  This will need
    266	   to change when the kernel no longer saves all regs on a syscall.
    267	   FIXME.  There is a problem at the moment in that r3-r18 are only
    268	   saved if the process is ptraced on syscall entry, and even then
    269	   those values are overwritten by actual register values on syscall
    270	   exit. */
    271	case PTRACE_POKEUSR:
    272		/* Some register values written here may be ignored in
    273		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
    274		 * r31/r31+4, and not with the values in pt_regs.
    275		 */
    276		if (addr == PT_PSW) {
    277			/* Since PT_PSW==0, it is valid for 32 bit processes
    278			 * under 64 bit kernels as well.
    279			 */
    280			ret = arch_ptrace(child, request, addr, data);
    281		} else {
    282			if (addr & (sizeof(compat_uint_t)-1))
    283				break;
    284			addr = translate_usr_offset(addr);
    285			if (addr >= sizeof(struct pt_regs))
    286				break;
    287			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
    288				data |= 3; /* ensure userspace privilege */
    289			}
    290			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
    291				/* Special case, fp regs are 64 bits anyway */
    292				*(__u32 *) ((char *) task_regs(child) + addr) = data;
    293				ret = 0;
    294			}
    295			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
    296					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
    297					addr == PT_SAR+4) {
    298				/* Zero the top 32 bits */
    299				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
    300				*(__u32 *) ((char *) task_regs(child) + addr) = data;
    301				ret = 0;
    302			}
    303		}
    304		break;
    305
    306	default:
    307		ret = compat_ptrace_request(child, request, addr, data);
    308		break;
    309	}
    310
    311	return ret;
    312}
    313#endif
    314
    315long do_syscall_trace_enter(struct pt_regs *regs)
    316{
    317	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
    318		int rc = ptrace_report_syscall_entry(regs);
    319
    320		/*
    321		 * As tracesys_next does not set %r28 to -ENOSYS
    322		 * when %r20 is set to -1, initialize it here.
    323		 */
    324		regs->gr[28] = -ENOSYS;
    325
    326		if (rc) {
    327			/*
    328			 * A nonzero return code from
    329			 * ptrace_report_syscall_entry() tells us
    330			 * to prevent the syscall execution.  Skip
    331			 * the syscall call and the syscall restart handling.
    332			 *
    333			 * Note that the tracer may also just change
    334			 * regs->gr[20] to an invalid syscall number,
    335			 * that is handled by tracesys_next.
    336			 */
    337			regs->gr[20] = -1UL;
    338			return -1;
    339		}
    340	}
    341
    342	/* Do the secure computing check after ptrace. */
    343	if (secure_computing() == -1)
    344		return -1;
    345
    346#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
    347	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
    348		trace_sys_enter(regs, regs->gr[20]);
    349#endif
    350
    351#ifdef CONFIG_64BIT
    352	if (!is_compat_task())
    353		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
    354				    regs->gr[24], regs->gr[23]);
    355	else
    356#endif
    357		audit_syscall_entry(regs->gr[20] & 0xffffffff,
    358			regs->gr[26] & 0xffffffff,
    359			regs->gr[25] & 0xffffffff,
    360			regs->gr[24] & 0xffffffff,
    361			regs->gr[23] & 0xffffffff);
    362
    363	/*
    364	 * Sign extend the syscall number to 64bit since it may have been
    365	 * modified by a compat ptrace call
    366	 */
    367	return (int) ((u32) regs->gr[20]);
    368}
    369
    370void do_syscall_trace_exit(struct pt_regs *regs)
    371{
    372	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
    373		test_thread_flag(TIF_BLOCKSTEP);
    374
    375	audit_syscall_exit(regs);
    376
    377#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
    378	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
    379		trace_sys_exit(regs, regs->gr[20]);
    380#endif
    381
    382	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
    383		ptrace_report_syscall_exit(regs, stepping);
    384}
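
/*
 * Illustrative sketch, not part of the file above: a tracer driving the
 * do_syscall_trace_enter()/do_syscall_trace_exit() hooks with
 * PTRACE_SYSCALL, which stops the child once on syscall entry and once on
 * exit.  "pid" is assumed to be a child already stopped under ptrace, as in
 * the earlier single-step sketch.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void trace_one_syscall(pid_t pid)
{
	/* Resume until the next syscall-entry stop ... */
	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
	waitpid(pid, NULL, 0);

	/* ... then resume again until the matching exit stop. */
	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
}
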
    385
    386
    387/*
    388 * regset functions.
    389 */
    390
    391static int fpr_get(struct task_struct *target,
    392		     const struct user_regset *regset,
    393		     struct membuf to)
    394{
    395	struct pt_regs *regs = task_regs(target);
    396
    397	return membuf_write(&to, regs->fr, ELF_NFPREG * sizeof(__u64));
    398}
    399
    400static int fpr_set(struct task_struct *target,
    401		     const struct user_regset *regset,
    402		     unsigned int pos, unsigned int count,
    403		     const void *kbuf, const void __user *ubuf)
    404{
    405	struct pt_regs *regs = task_regs(target);
    406	const __u64 *k = kbuf;
    407	const __u64 __user *u = ubuf;
    408	__u64 reg;
    409
    410	pos /= sizeof(reg);
    411	count /= sizeof(reg);
    412
    413	if (kbuf)
    414		for (; count > 0 && pos < ELF_NFPREG; --count)
    415			regs->fr[pos++] = *k++;
    416	else
    417		for (; count > 0 && pos < ELF_NFPREG; --count) {
    418			if (__get_user(reg, u++))
    419				return -EFAULT;
    420			regs->fr[pos++] = reg;
    421		}
    422
    423	kbuf = k;
    424	ubuf = u;
    425	pos *= sizeof(reg);
    426	count *= sizeof(reg);
    427	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
    428					 ELF_NFPREG * sizeof(reg), -1);
    429}
    430
    431#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
    432
    433static unsigned long get_reg(struct pt_regs *regs, int num)
    434{
    435	switch (num) {
    436	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
    437	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
    438	case RI(iasq[0]):		return regs->iasq[0];
    439	case RI(iasq[1]):		return regs->iasq[1];
    440	case RI(iaoq[0]):		return regs->iaoq[0];
    441	case RI(iaoq[1]):		return regs->iaoq[1];
    442	case RI(sar):			return regs->sar;
    443	case RI(iir):			return regs->iir;
    444	case RI(isr):			return regs->isr;
    445	case RI(ior):			return regs->ior;
    446	case RI(ipsw):			return regs->ipsw;
    447	case RI(cr27):			return regs->cr27;
    448	case RI(cr0):			return mfctl(0);
    449	case RI(cr24):			return mfctl(24);
    450	case RI(cr25):			return mfctl(25);
    451	case RI(cr26):			return mfctl(26);
    452	case RI(cr28):			return mfctl(28);
    453	case RI(cr29):			return mfctl(29);
    454	case RI(cr30):			return mfctl(30);
    455	case RI(cr31):			return mfctl(31);
    456	case RI(cr8):			return mfctl(8);
    457	case RI(cr9):			return mfctl(9);
    458	case RI(cr12):			return mfctl(12);
    459	case RI(cr13):			return mfctl(13);
    460	case RI(cr10):			return mfctl(10);
    461	case RI(cr15):			return mfctl(15);
    462	default:			return 0;
    463	}
    464}
    465
    466static void set_reg(struct pt_regs *regs, int num, unsigned long val)
    467{
    468	switch (num) {
    469	case RI(gr[0]): /*
    470			 * PSW is in gr[0].
    471			 * Allow writing to Nullify, Divide-step-correction,
    472			 * and carry/borrow bits.
    473			 * BEWARE, if you set N, and then single step, it won't
    474			 * stop on the nullified instruction.
    475			 */
    476			val &= USER_PSW_BITS;
    477			regs->gr[0] &= ~USER_PSW_BITS;
    478			regs->gr[0] |= val;
    479			return;
    480	case RI(gr[1]) ... RI(gr[31]):
    481			regs->gr[num - RI(gr[0])] = val;
    482			return;
    483	case RI(iaoq[0]):
    484	case RI(iaoq[1]):
    485			/* set 2 lowest bits to ensure userspace privilege: */
    486			regs->iaoq[num - RI(iaoq[0])] = val | 3;
    487			return;
    488	case RI(sar):	regs->sar = val;
    489			return;
    490	default:	return;
    491#if 0
    492	/* do not allow to change any of the following registers (yet) */
    493	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
    494	case RI(iasq[0]):		return regs->iasq[0];
    495	case RI(iasq[1]):		return regs->iasq[1];
    496	case RI(iir):			return regs->iir;
    497	case RI(isr):			return regs->isr;
    498	case RI(ior):			return regs->ior;
    499	case RI(ipsw):			return regs->ipsw;
    500	case RI(cr27):			return regs->cr27;
    501        case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
    502        case cr8, cr9, cr12, cr13, cr10, cr15;
    503#endif
    504	}
    505}
    506
    507static int gpr_get(struct task_struct *target,
    508		     const struct user_regset *regset,
    509		     struct membuf to)
    510{
    511	struct pt_regs *regs = task_regs(target);
    512	unsigned int pos;
    513
    514	for (pos = 0; pos < ELF_NGREG; pos++)
    515		membuf_store(&to, get_reg(regs, pos));
    516	return 0;
    517}
    518
    519static int gpr_set(struct task_struct *target,
    520		     const struct user_regset *regset,
    521		     unsigned int pos, unsigned int count,
    522		     const void *kbuf, const void __user *ubuf)
    523{
    524	struct pt_regs *regs = task_regs(target);
    525	const unsigned long *k = kbuf;
    526	const unsigned long __user *u = ubuf;
    527	unsigned long reg;
    528
    529	pos /= sizeof(reg);
    530	count /= sizeof(reg);
    531
    532	if (kbuf)
    533		for (; count > 0 && pos < ELF_NGREG; --count)
    534			set_reg(regs, pos++, *k++);
    535	else
    536		for (; count > 0 && pos < ELF_NGREG; --count) {
    537			if (__get_user(reg, u++))
    538				return -EFAULT;
    539			set_reg(regs, pos++, reg);
    540		}
    541
    542	kbuf = k;
    543	ubuf = u;
    544	pos *= sizeof(reg);
    545	count *= sizeof(reg);
    546	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
    547					 ELF_NGREG * sizeof(reg), -1);
    548}
    549
    550static const struct user_regset native_regsets[] = {
    551	[REGSET_GENERAL] = {
    552		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
    553		.size = sizeof(long), .align = sizeof(long),
    554		.regset_get = gpr_get, .set = gpr_set
    555	},
    556	[REGSET_FP] = {
    557		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
    558		.size = sizeof(__u64), .align = sizeof(__u64),
    559		.regset_get = fpr_get, .set = fpr_set
    560	}
    561};
    562
    563static const struct user_regset_view user_parisc_native_view = {
    564	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
    565	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
    566};
    567
    568#ifdef CONFIG_64BIT
    569static int gpr32_get(struct task_struct *target,
    570		     const struct user_regset *regset,
    571		     struct membuf to)
    572{
    573	struct pt_regs *regs = task_regs(target);
    574	unsigned int pos;
    575
    576	for (pos = 0; pos < ELF_NGREG; pos++)
    577		membuf_store(&to, (compat_ulong_t)get_reg(regs, pos));
    578
    579	return 0;
    580}
    581
    582static int gpr32_set(struct task_struct *target,
    583		     const struct user_regset *regset,
    584		     unsigned int pos, unsigned int count,
    585		     const void *kbuf, const void __user *ubuf)
    586{
    587	struct pt_regs *regs = task_regs(target);
    588	const compat_ulong_t *k = kbuf;
    589	const compat_ulong_t __user *u = ubuf;
    590	compat_ulong_t reg;
    591
    592	pos /= sizeof(reg);
    593	count /= sizeof(reg);
    594
    595	if (kbuf)
    596		for (; count > 0 && pos < ELF_NGREG; --count)
    597			set_reg(regs, pos++, *k++);
    598	else
    599		for (; count > 0 && pos < ELF_NGREG; --count) {
    600			if (__get_user(reg, u++))
    601				return -EFAULT;
    602			set_reg(regs, pos++, reg);
    603		}
    604
    605	kbuf = k;
    606	ubuf = u;
    607	pos *= sizeof(reg);
    608	count *= sizeof(reg);
    609	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
    610					 ELF_NGREG * sizeof(reg), -1);
    611}
    612
    613/*
    614 * These are the regset flavors matching the 32bit native set.
    615 */
    616static const struct user_regset compat_regsets[] = {
    617	[REGSET_GENERAL] = {
    618		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
    619		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
    620		.regset_get = gpr32_get, .set = gpr32_set
    621	},
    622	[REGSET_FP] = {
    623		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
    624		.size = sizeof(__u64), .align = sizeof(__u64),
    625		.regset_get = fpr_get, .set = fpr_set
    626	}
    627};
    628
    629static const struct user_regset_view user_parisc_compat_view = {
    630	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
    631	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
    632};
    633#endif	/* CONFIG_64BIT */
    634
    635const struct user_regset_view *task_user_regset_view(struct task_struct *task)
    636{
    637	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
    638	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
    639#ifdef CONFIG_64BIT
    640	if (is_compat_task())
    641		return &user_parisc_compat_view;
    642#endif
    643	return &user_parisc_native_view;
    644}
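
/*
 * Illustrative sketch, not part of the file above: PTRACE_GETREGSET with
 * NT_PRSTATUS reaches gpr_get() (or gpr32_get() for a compat child) through
 * the regset view chosen by task_user_regset_view().  Whether the parisc
 * <sys/user.h> provides struct user_regs_struct to userspace is assumed;
 * "pid" is a traced, stopped child.
 */
#include <stdio.h>
#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */
#include <sys/user.h>		/* struct user_regs_struct (assumed) */

static int dump_gprs(pid_t pid)
{
	struct user_regs_struct gprs;
	struct iovec iov = {
		.iov_base = &gprs,
		.iov_len  = sizeof(gprs),
	};

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0) {
		perror("PTRACE_GETREGSET");
		return -1;
	}

	printf("fetched %zu bytes of NT_PRSTATUS\n", iov.iov_len);
	return 0;
}
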
    645
    646
    647/* HAVE_REGS_AND_STACK_ACCESS_API feature */
    648
    649struct pt_regs_offset {
    650	const char *name;
    651	int offset;
    652};
    653
    654#define REG_OFFSET_NAME(r)    {.name = #r, .offset = offsetof(struct pt_regs, r)}
    655#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
    656#define REG_OFFSET_END {.name = NULL, .offset = 0}
    657
    658static const struct pt_regs_offset regoffset_table[] = {
    659	REG_OFFSET_INDEX(gr,0),
    660	REG_OFFSET_INDEX(gr,1),
    661	REG_OFFSET_INDEX(gr,2),
    662	REG_OFFSET_INDEX(gr,3),
    663	REG_OFFSET_INDEX(gr,4),
    664	REG_OFFSET_INDEX(gr,5),
    665	REG_OFFSET_INDEX(gr,6),
    666	REG_OFFSET_INDEX(gr,7),
    667	REG_OFFSET_INDEX(gr,8),
    668	REG_OFFSET_INDEX(gr,9),
    669	REG_OFFSET_INDEX(gr,10),
    670	REG_OFFSET_INDEX(gr,11),
    671	REG_OFFSET_INDEX(gr,12),
    672	REG_OFFSET_INDEX(gr,13),
    673	REG_OFFSET_INDEX(gr,14),
    674	REG_OFFSET_INDEX(gr,15),
    675	REG_OFFSET_INDEX(gr,16),
    676	REG_OFFSET_INDEX(gr,17),
    677	REG_OFFSET_INDEX(gr,18),
    678	REG_OFFSET_INDEX(gr,19),
    679	REG_OFFSET_INDEX(gr,20),
    680	REG_OFFSET_INDEX(gr,21),
    681	REG_OFFSET_INDEX(gr,22),
    682	REG_OFFSET_INDEX(gr,23),
    683	REG_OFFSET_INDEX(gr,24),
    684	REG_OFFSET_INDEX(gr,25),
    685	REG_OFFSET_INDEX(gr,26),
    686	REG_OFFSET_INDEX(gr,27),
    687	REG_OFFSET_INDEX(gr,28),
    688	REG_OFFSET_INDEX(gr,29),
    689	REG_OFFSET_INDEX(gr,30),
    690	REG_OFFSET_INDEX(gr,31),
    691	REG_OFFSET_INDEX(sr,0),
    692	REG_OFFSET_INDEX(sr,1),
    693	REG_OFFSET_INDEX(sr,2),
    694	REG_OFFSET_INDEX(sr,3),
    695	REG_OFFSET_INDEX(sr,4),
    696	REG_OFFSET_INDEX(sr,5),
    697	REG_OFFSET_INDEX(sr,6),
    698	REG_OFFSET_INDEX(sr,7),
    699	REG_OFFSET_INDEX(iasq,0),
    700	REG_OFFSET_INDEX(iasq,1),
    701	REG_OFFSET_INDEX(iaoq,0),
    702	REG_OFFSET_INDEX(iaoq,1),
    703	REG_OFFSET_NAME(cr27),
    704	REG_OFFSET_NAME(ksp),
    705	REG_OFFSET_NAME(kpc),
    706	REG_OFFSET_NAME(sar),
    707	REG_OFFSET_NAME(iir),
    708	REG_OFFSET_NAME(isr),
    709	REG_OFFSET_NAME(ior),
    710	REG_OFFSET_NAME(ipsw),
    711	REG_OFFSET_END,
    712};
    713
    714/**
    715 * regs_query_register_offset() - query register offset from its name
    716 * @name:	the name of a register
    717 *
    718 * regs_query_register_offset() returns the offset of a register in struct
    719 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
    720 */
    721int regs_query_register_offset(const char *name)
    722{
    723	const struct pt_regs_offset *roff;
    724	for (roff = regoffset_table; roff->name != NULL; roff++)
    725		if (!strcmp(roff->name, name))
    726			return roff->offset;
    727	return -EINVAL;
    728}
    729
    730/**
    731 * regs_query_register_name() - query register name from its offset
    732 * @offset:	the offset of a register in struct pt_regs.
    733 *
    734 * regs_query_register_name() returns the name of a register from its
    735 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
    736 */
    737const char *regs_query_register_name(unsigned int offset)
    738{
    739	const struct pt_regs_offset *roff;
    740	for (roff = regoffset_table; roff->name != NULL; roff++)
    741		if (roff->offset == offset)
    742			return roff->name;
    743	return NULL;
    744}
    745
    746/**
    747 * regs_within_kernel_stack() - check the address in the stack
    748 * @regs:      pt_regs which contains kernel stack pointer.
    749 * @addr:      address which is checked.
    750 *
    751 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
    752 * If @addr is within the kernel stack, it returns true. If not, returns false.
    753 */
    754int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
    755{
    756	return ((addr & ~(THREAD_SIZE - 1))  ==
    757		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
    758}
    759
    760/**
    761 * regs_get_kernel_stack_nth() - get Nth entry of the stack
    762 * @regs:	pt_regs which contains kernel stack pointer.
    763 * @n:		stack entry number.
    764 *
    765 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
    766 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
    767 * this returns 0.
    768 */
    769unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
    770{
    771	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
    772
    773	addr -= n;
    774
    775	if (!regs_within_kernel_stack(regs, (unsigned long)addr))
    776		return 0;
    777
    778	return *addr;
    779}
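
/*
 * Illustrative in-kernel sketch, not part of the file above: a throwaway
 * kprobes module that uses the HAVE_REGS_AND_STACK_ACCESS_API helpers
 * defined here.  The probe target "kernel_clone" and the module boilerplate
 * are assumptions for demonstration only.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>

static struct kprobe demo_kp = {
	.symbol_name = "kernel_clone",		/* example probe target */
};

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Where does gr26 (first syscall argument) live inside pt_regs? */
	int gr26_off = regs_query_register_offset("gr26");
	/* Top word of the kernel stack for the probed context. */
	unsigned long top = regs_get_kernel_stack_nth(regs, 0);

	pr_info("gr26 at pt_regs offset %d, stack[0]=%#lx\n", gr26_off, top);
	return 0;
}

static int __init demo_init(void)
{
	demo_kp.pre_handler = demo_pre;
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
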