cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ptrace.c (54116B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1))  ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long), NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * specified by @regs. If the @n-th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
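
/*
 * Editor's illustration (not part of the original file): a hypothetical
 * in-kernel consumer, e.g. a kprobe handler, could combine the two
 * helpers above to fetch a value by register name, falling back to a
 * kernel stack slot. The function name and fallback policy are invented
 * for illustration only.
 */
static unsigned long __maybe_unused example_fetch_reg_or_stack(struct pt_regs *regs,
							       const char *name,
							       unsigned int nth)
{
	int offset = regs_query_register_offset(name);	/* e.g. "x0", "sp" */

	if (offset >= 0)
		return regs_get_register(regs, offset);

	/* Unknown register name: read the nth word on the kernel stack. */
	return regs_get_kernel_stack_nth(regs, nth);
}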

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
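
/*
 * Editor's note (illustration, not part of the original file): the
 * si_errno encoding above reports breakpoint slot i as (i << 1) + 1 and
 * watchpoint slot i as -((i << 1) + 1), keeping slot 0 distinguishable
 * from the "no slot found" value 0. A hypothetical decoder, mirroring
 * compat_ptrace_hbp_num_to_idx() further down this file:
 */
static int __maybe_unused example_decode_hbp_si_errno(int si_errno,
						      bool *is_watchpoint)
{
	*is_watchpoint = si_errno < 0;
	return (abs(si_errno) - 1) >> 1;	/* hardware slot index */
}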

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
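
/*
 * Editor's note (illustration, not part of the original file): the info
 * word built above packs the debug architecture version into bits [15:8]
 * and the number of breakpoint or watchpoint slots into bits [7:0]. A
 * hypothetical unpacking helper:
 */
static void __maybe_unused example_unpack_resource_info(u32 info, u8 *debug_arch,
							u8 *num_slots)
{
	*debug_arch = (info >> 8) & 0xff;
	*num_slots = info & 0xff;
}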

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
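
/*
 * Editor's illustration (hypothetical, userspace side; not part of the
 * original file): a debugger reads the layout produced by hw_break_get()
 * with PTRACE_GETREGSET. Assuming <sys/ptrace.h>, <sys/uio.h>, <elf.h>
 * and the uapi <asm/ptrace.h> definitions:
 *
 *	struct user_hwdebug_state state;
 *	struct iovec iov = { .iov_base = &state, .iov_len = sizeof(state) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov) == 0)
 *		printf("%u breakpoint slots\n", state.dbg_info & 0xff);
 */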
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (target == current)
		tls_preserve_current_state();

	return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
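
/*
 * Editor's illustration (hypothetical, userspace side; not part of the
 * original file): a tracer can rewrite the syscall number at a
 * syscall-entry stop through this regset, e.g. setting it to -1 to skip
 * the system call:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */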

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	bool fpsimd_only;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (fpsimd_only) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				      SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}
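
/*
 * Editor's illustration (hypothetical, userspace side; not part of the
 * original file): consuming the dump laid out by sve_get_common(). The
 * buffer starts with a struct user_sve_header; when SVE_PT_REGS_SVE is
 * set, register data follows at the uapi SVE_PT_SVE_*_OFFSET() positions
 * for vq = sve_vq_from_vl(header.vl):
 *
 *	struct user_sve_header *h = buf;
 *
 *	if ((h->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE) {
 *		unsigned int vq = sve_vq_from_vl(h->vl);
 *		void *z0 = buf + SVE_PT_SVE_ZREG_OFFSET(vq, 0);
 *		void *ffr = buf + SVE_PT_SVE_FFR_OFFSET(vq);
 *		...
 *	}
 */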

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		if (type == ARM64_VEC_SME)
			fpsimd_force_sync_to_sve(target);
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case.  For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified.  Always enable SVE even if going into
	 * streaming mode.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}

static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.za_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
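
/*
 * Editor's note (illustration, not part of the original file): when ZA
 * is active, the payload following the header above is the ZA matrix for
 * the current streaming vector length: (vq * 16) rows of (vq * 16)
 * bytes, i.e. ZA_PT_ZA_SIZE(vq) bytes starting at ZA_PT_ZA_OFFSET.
 */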

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target);
		if (!target->thread.sve_state) {
			clear_thread_flag(TIF_SME);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Allocate/reinit ZA storage */
	sme_alloc(target);
	if (!target->thread.za_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SME);
		goto out;
	}

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.za_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}
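
/*
 * Editor's illustration (hypothetical, userspace side; not part of the
 * original file): a checkpoint/restore tool can save and later restore
 * the address authentication keys through this regset:
 *
 *	struct user_pac_address_keys keys;
 *	struct iovec iov = { .iov_base = &keys, .iov_len = sizeof(keys) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_PACA_KEYS, &iov);
 *	...
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_PACA_KEYS, &iov);
 */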

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target, const struct
				user_regset *regset, unsigned int pos,
				unsigned int count, const void *kbuf, const
				void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
   1564	start = pos / regset->size;
   1565
   1566	if (start + num_regs > regset->n)
   1567		return -EIO;
   1568
   1569	newregs = *task_pt_regs(target);
   1570
   1571	for (i = 0; i < num_regs; ++i) {
   1572		unsigned int idx = start + i;
   1573		compat_ulong_t reg;
   1574
   1575		if (kbuf) {
   1576			memcpy(&reg, kbuf, sizeof(reg));
   1577			kbuf += sizeof(reg);
   1578		} else {
   1579			ret = copy_from_user(&reg, ubuf, sizeof(reg));
   1580			if (ret) {
   1581				ret = -EFAULT;
   1582				break;
   1583			}
   1584
   1585			ubuf += sizeof(reg);
   1586		}
   1587
   1588		switch (idx) {
   1589		case 15:
   1590			newregs.pc = reg;
   1591			break;
   1592		case 16:
   1593			reg = compat_psr_to_pstate(reg);
   1594			newregs.pstate = reg;
   1595			break;
   1596		case 17:
   1597			newregs.orig_x0 = reg;
   1598			break;
   1599		default:
   1600			newregs.regs[idx] = reg;
   1601		}
   1602
   1603	}
   1604
   1605	if (valid_user_regs(&newregs.user_regs, target))
   1606		*task_pt_regs(target) = newregs;
   1607	else
   1608		ret = -EINVAL;
   1609
   1610	return ret;
   1611}
   1612
   1613static int compat_vfp_get(struct task_struct *target,
   1614			  const struct user_regset *regset,
   1615			  struct membuf to)
   1616{
   1617	struct user_fpsimd_state *uregs;
   1618	compat_ulong_t fpscr;
   1619
   1620	if (!system_supports_fpsimd())
   1621		return -EINVAL;
   1622
   1623	uregs = &target->thread.uw.fpsimd_state;
   1624
   1625	if (target == current)
   1626		fpsimd_preserve_current_state();
   1627
   1628	/*
   1629	 * The VFP registers are packed into the fpsimd_state, so they all sit
   1630	 * nicely together for us. We just need to create the fpscr separately.
   1631	 */
   1632	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
   1633	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
   1634		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
   1635	return membuf_store(&to, fpscr);
   1636}
   1637
   1638static int compat_vfp_set(struct task_struct *target,
   1639			  const struct user_regset *regset,
   1640			  unsigned int pos, unsigned int count,
   1641			  const void *kbuf, const void __user *ubuf)
   1642{
   1643	struct user_fpsimd_state *uregs;
   1644	compat_ulong_t fpscr;
   1645	int ret, vregs_end_pos;
   1646
   1647	if (!system_supports_fpsimd())
   1648		return -EINVAL;
   1649
   1650	uregs = &target->thread.uw.fpsimd_state;
   1651
   1652	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
   1653	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
   1654				 vregs_end_pos);
   1655
   1656	if (count && !ret) {
   1657		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
   1658					 vregs_end_pos, VFP_STATE_SIZE);
   1659		if (!ret) {
   1660			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
   1661			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
   1662		}
   1663	}
   1664
   1665	fpsimd_flush_task_state(target);
   1666	return ret;
   1667}
   1668
   1669static int compat_tls_get(struct task_struct *target,
   1670			  const struct user_regset *regset,
   1671			  struct membuf to)
   1672{
   1673	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
   1674}
   1675
   1676static int compat_tls_set(struct task_struct *target,
   1677			  const struct user_regset *regset, unsigned int pos,
   1678			  unsigned int count, const void *kbuf,
   1679			  const void __user *ubuf)
   1680{
   1681	int ret;
   1682	compat_ulong_t tls = target->thread.uw.tp_value;
   1683
   1684	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
   1685	if (ret)
   1686		return ret;
   1687
   1688	target->thread.uw.tp_value = tls;
   1689	return ret;
   1690}
   1691
   1692static const struct user_regset aarch32_regsets[] = {
   1693	[REGSET_COMPAT_GPR] = {
   1694		.core_note_type = NT_PRSTATUS,
   1695		.n = COMPAT_ELF_NGREG,
   1696		.size = sizeof(compat_elf_greg_t),
   1697		.align = sizeof(compat_elf_greg_t),
   1698		.regset_get = compat_gpr_get,
   1699		.set = compat_gpr_set
   1700	},
   1701	[REGSET_COMPAT_VFP] = {
   1702		.core_note_type = NT_ARM_VFP,
   1703		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
   1704		.size = sizeof(compat_ulong_t),
   1705		.align = sizeof(compat_ulong_t),
   1706		.active = fpr_active,
   1707		.regset_get = compat_vfp_get,
   1708		.set = compat_vfp_set
   1709	},
   1710};
   1711
   1712static const struct user_regset_view user_aarch32_view = {
   1713	.name = "aarch32", .e_machine = EM_ARM,
   1714	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
   1715};
   1716
   1717static const struct user_regset aarch32_ptrace_regsets[] = {
   1718	[REGSET_GPR] = {
   1719		.core_note_type = NT_PRSTATUS,
   1720		.n = COMPAT_ELF_NGREG,
   1721		.size = sizeof(compat_elf_greg_t),
   1722		.align = sizeof(compat_elf_greg_t),
   1723		.regset_get = compat_gpr_get,
   1724		.set = compat_gpr_set
   1725	},
   1726	[REGSET_FPR] = {
   1727		.core_note_type = NT_ARM_VFP,
   1728		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
   1729		.size = sizeof(compat_ulong_t),
   1730		.align = sizeof(compat_ulong_t),
   1731		.regset_get = compat_vfp_get,
   1732		.set = compat_vfp_set
   1733	},
   1734	[REGSET_TLS] = {
   1735		.core_note_type = NT_ARM_TLS,
   1736		.n = 1,
   1737		.size = sizeof(compat_ulong_t),
   1738		.align = sizeof(compat_ulong_t),
   1739		.regset_get = compat_tls_get,
   1740		.set = compat_tls_set,
   1741	},
   1742#ifdef CONFIG_HAVE_HW_BREAKPOINT
   1743	[REGSET_HW_BREAK] = {
   1744		.core_note_type = NT_ARM_HW_BREAK,
   1745		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
   1746		.size = sizeof(u32),
   1747		.align = sizeof(u32),
   1748		.regset_get = hw_break_get,
   1749		.set = hw_break_set,
   1750	},
   1751	[REGSET_HW_WATCH] = {
   1752		.core_note_type = NT_ARM_HW_WATCH,
   1753		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
   1754		.size = sizeof(u32),
   1755		.align = sizeof(u32),
   1756		.regset_get = hw_break_get,
   1757		.set = hw_break_set,
   1758	},
   1759#endif
   1760	[REGSET_SYSTEM_CALL] = {
   1761		.core_note_type = NT_ARM_SYSTEM_CALL,
   1762		.n = 1,
   1763		.size = sizeof(int),
   1764		.align = sizeof(int),
   1765		.regset_get = system_call_get,
   1766		.set = system_call_set,
   1767	},
   1768};
   1769
   1770static const struct user_regset_view user_aarch32_ptrace_view = {
   1771	.name = "aarch32", .e_machine = EM_ARM,
   1772	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
   1773};
   1774
   1775static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
   1776				   compat_ulong_t __user *ret)
   1777{
   1778	compat_ulong_t tmp;
   1779
   1780	if (off & 3)
   1781		return -EIO;
   1782
   1783	if (off == COMPAT_PT_TEXT_ADDR)
   1784		tmp = tsk->mm->start_code;
   1785	else if (off == COMPAT_PT_DATA_ADDR)
   1786		tmp = tsk->mm->start_data;
   1787	else if (off == COMPAT_PT_TEXT_END_ADDR)
   1788		tmp = tsk->mm->end_code;
   1789	else if (off < sizeof(compat_elf_gregset_t))
   1790		tmp = compat_get_user_reg(tsk, off >> 2);
   1791	else if (off >= COMPAT_USER_SZ)
   1792		return -EIO;
   1793	else
   1794		tmp = 0;
   1795
   1796	return put_user(tmp, ret);
   1797}
   1798
   1799static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
   1800				    compat_ulong_t val)
   1801{
   1802	struct pt_regs newregs = *task_pt_regs(tsk);
   1803	unsigned int idx = off / 4;
   1804
   1805	if (off & 3 || off >= COMPAT_USER_SZ)
   1806		return -EIO;
   1807
   1808	if (off >= sizeof(compat_elf_gregset_t))
   1809		return 0;
   1810
   1811	switch (idx) {
   1812	case 15:
   1813		newregs.pc = val;
   1814		break;
   1815	case 16:
   1816		newregs.pstate = compat_psr_to_pstate(val);
   1817		break;
   1818	case 17:
   1819		newregs.orig_x0 = val;
   1820		break;
   1821	default:
   1822		newregs.regs[idx] = val;
   1823	}
   1824
   1825	if (!valid_user_regs(&newregs.user_regs, tsk))
   1826		return -EINVAL;
   1827
   1828	*task_pt_regs(tsk) = newregs;
   1829	return 0;
   1830}
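
/*
 * Counterpart sketch for PTRACE_POKEUSR (illustrative): writes beyond
 * the gregset are accepted but silently discarded, and a value poked at
 * index 16 is an AArch32 PSR, converted to SPSR_ELx format by
 * compat_psr_to_pstate() before validation.
 *
 *	ptrace(PTRACE_POKEUSR, pid, 15 << 2, new_pc);	// idx 15 -> pc
 */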
   1831
   1832#ifdef CONFIG_HAVE_HW_BREAKPOINT
   1833
   1834/*
   1835 * Convert a virtual register number into an index for a thread_info
   1836 * breakpoint array. Breakpoints are identified using positive numbers
   1837 * whilst watchpoints are negative. The registers are laid out as pairs
   1838 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
   1839 * Register 0 is reserved for describing resource information.
   1840 */
   1841static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
   1842{
   1843	return (abs(num) - 1) >> 1;
   1844}
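
/*
 * Worked examples of the mapping: num 1/2 select the address/control
 * registers of breakpoint slot 0 (idx 0), num 3/4 those of slot 1
 * (idx 1), and num -1/-2 the address/control registers of watchpoint
 * slot 0. The "num & 1" tests below distinguish address (odd) from
 * control (even) and also hold for negative num under two's-complement
 * arithmetic, e.g. -1 & 1 == 1.
 */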
   1845
   1846static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
   1847{
   1848	u8 num_brps, num_wrps, debug_arch, wp_len;
   1849	u32 reg = 0;
   1850
   1851	num_brps	= hw_breakpoint_slots(TYPE_INST);
   1852	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
   1853
   1854	debug_arch	= debug_monitors_arch();
   1855	wp_len		= 8;
   1856	reg		|= debug_arch;
   1857	reg		<<= 8;
   1858	reg		|= wp_len;
   1859	reg		<<= 8;
   1860	reg		|= num_wrps;
   1861	reg		<<= 8;
   1862	reg		|= num_brps;
   1863
   1864	*kdata = reg;
   1865	return 0;
   1866}
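
/*
 * Tracer-side decode of the packed word built above (illustrative):
 *
 *	u8 debug_arch = (reg >> 24) & 0xff;
 *	u8 wp_len     = (reg >> 16) & 0xff;	// always 8 here
 *	u8 num_wrps   = (reg >>  8) & 0xff;
 *	u8 num_brps   = reg & 0xff;
 */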
   1867
   1868static int compat_ptrace_hbp_get(unsigned int note_type,
   1869				 struct task_struct *tsk,
   1870				 compat_long_t num,
   1871				 u32 *kdata)
   1872{
   1873	u64 addr = 0;
   1874	u32 ctrl = 0;
   1875
   1876	int err, idx = compat_ptrace_hbp_num_to_idx(num);
   1877
   1878	if (num & 1) {
   1879		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
   1880		*kdata = (u32)addr;
   1881	} else {
   1882		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
   1883		*kdata = ctrl;
   1884	}
   1885
   1886	return err;
   1887}
   1888
   1889static int compat_ptrace_hbp_set(unsigned int note_type,
   1890				 struct task_struct *tsk,
   1891				 compat_long_t num,
   1892				 u32 *kdata)
   1893{
   1894	u64 addr;
   1895	u32 ctrl;
   1896
   1897	int err, idx = compat_ptrace_hbp_num_to_idx(num);
   1898
   1899	if (num & 1) {
   1900		addr = *kdata;
   1901		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
   1902	} else {
   1903		ctrl = *kdata;
   1904		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
   1905	}
   1906
   1907	return err;
   1908}
   1909
   1910static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
   1911				    compat_ulong_t __user *data)
   1912{
   1913	int ret;
   1914	u32 kdata;
   1915
   1916	/* Watchpoint */
   1917	if (num < 0) {
   1918		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
   1919	/* Resource info */
   1920	} else if (num == 0) {
   1921		ret = compat_ptrace_hbp_get_resource_info(&kdata);
   1922	/* Breakpoint */
   1923	} else {
   1924		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
   1925	}
   1926
   1927	if (!ret)
   1928		ret = put_user(kdata, data);
   1929
   1930	return ret;
   1931}
   1932
   1933static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
   1934				    compat_ulong_t __user *data)
   1935{
   1936	int ret;
   1937	u32 kdata = 0;
   1938
   1939	if (num == 0)
   1940		return 0;
   1941
   1942	ret = get_user(kdata, data);
   1943	if (ret)
   1944		return ret;
   1945
   1946	if (num < 0)
   1947		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
   1948	else
   1949		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
   1950
   1951	return ret;
   1952}
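
/*
 * Illustrative 32-bit tracer usage of the two requests above, with
 * hypothetical wp_addr/wp_ctrl values and no error handling: num 0
 * reads the resource-info word, num -1/-2 program watchpoint slot 0.
 *
 *	u32 info;
 *	ptrace(COMPAT_PTRACE_GETHBPREGS, pid, 0, &info);
 *	ptrace(COMPAT_PTRACE_SETHBPREGS, pid, -1, &wp_addr);
 *	ptrace(COMPAT_PTRACE_SETHBPREGS, pid, -2, &wp_ctrl);
 */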
   1953#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
   1954
   1955long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
   1956			compat_ulong_t caddr, compat_ulong_t cdata)
   1957{
   1958	unsigned long addr = caddr;
   1959	unsigned long data = cdata;
   1960	void __user *datap = compat_ptr(data);
   1961	int ret;
   1962
    1963	switch (request) {
    1964	case PTRACE_PEEKUSR:
    1965		ret = compat_ptrace_read_user(child, addr, datap);
    1966		break;
    1967
    1968	case PTRACE_POKEUSR:
    1969		ret = compat_ptrace_write_user(child, addr, data);
    1970		break;
    1971
    1972	case COMPAT_PTRACE_GETREGS:
    1973		ret = copy_regset_to_user(child,
    1974					  &user_aarch32_view,
    1975					  REGSET_COMPAT_GPR,
    1976					  0, sizeof(compat_elf_gregset_t),
    1977					  datap);
    1978		break;
    1979
    1980	case COMPAT_PTRACE_SETREGS:
    1981		ret = copy_regset_from_user(child,
    1982					    &user_aarch32_view,
    1983					    REGSET_COMPAT_GPR,
    1984					    0, sizeof(compat_elf_gregset_t),
    1985					    datap);
    1986		break;
    1987
    1988	case COMPAT_PTRACE_GET_THREAD_AREA:
    1989		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
    1990			       (compat_ulong_t __user *)datap);
    1991		break;
    1992
    1993	case COMPAT_PTRACE_SET_SYSCALL:
    1994		task_pt_regs(child)->syscallno = data;
    1995		ret = 0;
    1996		break;
    1997
    1998	case COMPAT_PTRACE_GETVFPREGS:
    1999		ret = copy_regset_to_user(child,
    2000					  &user_aarch32_view,
    2001					  REGSET_COMPAT_VFP,
    2002					  0, VFP_STATE_SIZE,
    2003					  datap);
    2004		break;
    2005
    2006	case COMPAT_PTRACE_SETVFPREGS:
    2007		ret = copy_regset_from_user(child,
    2008					    &user_aarch32_view,
    2009					    REGSET_COMPAT_VFP,
    2010					    0, VFP_STATE_SIZE,
    2011					    datap);
    2012		break;
    2013
    2014#ifdef CONFIG_HAVE_HW_BREAKPOINT
    2015	case COMPAT_PTRACE_GETHBPREGS:
    2016		ret = compat_ptrace_gethbpregs(child, addr, datap);
    2017		break;
    2018
    2019	case COMPAT_PTRACE_SETHBPREGS:
    2020		ret = compat_ptrace_sethbpregs(child, addr, datap);
    2021		break;
    2022#endif
    2023
    2024	default:
    2025		ret = compat_ptrace_request(child, request, addr,
    2026					    data);
    2027		break;
    2028	}
   2029
   2030	return ret;
   2031}
   2032#endif /* CONFIG_COMPAT */
   2033
   2034const struct user_regset_view *task_user_regset_view(struct task_struct *task)
   2035{
   2036#ifdef CONFIG_COMPAT
   2037	/*
   2038	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
   2039	 * user_aarch32_view compatible with arm32. Native ptrace requests on
   2040	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
   2041	 * access to the TLS register.
   2042	 */
   2043	if (is_compat_task())
   2044		return &user_aarch32_view;
   2045	else if (is_compat_thread(task_thread_info(task)))
   2046		return &user_aarch32_ptrace_view;
   2047#endif
   2048	return &user_aarch64_view;
   2049}
   2050
   2051long arch_ptrace(struct task_struct *child, long request,
   2052		 unsigned long addr, unsigned long data)
   2053{
   2054	switch (request) {
   2055	case PTRACE_PEEKMTETAGS:
   2056	case PTRACE_POKEMTETAGS:
   2057		return mte_ptrace_copy_tags(child, request, addr, data);
   2058	}
   2059
   2060	return ptrace_request(child, request, addr, data);
   2061}
   2062
   2063enum ptrace_syscall_dir {
   2064	PTRACE_SYSCALL_ENTER = 0,
   2065	PTRACE_SYSCALL_EXIT,
   2066};
   2067
   2068static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
   2069{
   2070	int regno;
   2071	unsigned long saved_reg;
   2072
   2073	/*
   2074	 * We have some ABI weirdness here in the way that we handle syscall
   2075	 * exit stops because we indicate whether or not the stop has been
   2076	 * signalled from syscall entry or syscall exit by clobbering a general
   2077	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
   2078	 * and restoring its old value after the stop. This means that:
   2079	 *
   2080	 * - Any writes by the tracer to this register during the stop are
   2081	 *   ignored/discarded.
   2082	 *
   2083	 * - The actual value of the register is not available during the stop,
   2084	 *   so the tracer cannot save it and restore it later.
   2085	 *
    2086	 * - Syscall stops behave differently from seccomp and pseudo-step traps
    2087	 *   (the latter do not clobber any registers).
   2088	 */
   2089	regno = (is_compat_task() ? 12 : 7);
   2090	saved_reg = regs->regs[regno];
   2091	regs->regs[regno] = dir;
   2092
   2093	if (dir == PTRACE_SYSCALL_ENTER) {
   2094		if (ptrace_report_syscall_entry(regs))
   2095			forget_syscall(regs);
   2096		regs->regs[regno] = saved_reg;
   2097	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
   2098		ptrace_report_syscall_exit(regs, 0);
   2099		regs->regs[regno] = saved_reg;
   2100	} else {
   2101		regs->regs[regno] = saved_reg;
   2102
   2103		/*
   2104		 * Signal a pseudo-step exception since we are stepping but
   2105		 * tracer modifications to the registers may have rewound the
   2106		 * state machine.
   2107		 */
   2108		ptrace_report_syscall_exit(regs, 1);
   2109	}
   2110}
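
/*
 * Tracer-visible consequence of the convention above (illustrative,
 * 64-bit tracee; error handling omitted): at a syscall stop, x7 reads
 * back as the stop direction rather than its real value, and any value
 * the tracer stores there is discarded on resume.
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	if (uregs.regs[7] == 0)		// PTRACE_SYSCALL_ENTER
 *		;			// stopped on entry, not exit
 */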
   2111
   2112int syscall_trace_enter(struct pt_regs *regs)
   2113{
   2114	unsigned long flags = read_thread_flags();
   2115
   2116	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
   2117		report_syscall(regs, PTRACE_SYSCALL_ENTER);
   2118		if (flags & _TIF_SYSCALL_EMU)
   2119			return NO_SYSCALL;
   2120	}
   2121
   2122	/* Do the secure computing after ptrace; failures should be fast. */
   2123	if (secure_computing() == -1)
   2124		return NO_SYSCALL;
   2125
   2126	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
   2127		trace_sys_enter(regs, regs->syscallno);
   2128
   2129	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
   2130			    regs->regs[2], regs->regs[3]);
   2131
   2132	return regs->syscallno;
   2133}
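
/*
 * A minimal sketch of how the return value is consumed, assuming the
 * arm64 syscall entry flow (el0_svc_common() in syscall.c): returning
 * NO_SYSCALL makes the caller skip the handler, which is how
 * PTRACE_SYSEMU and seccomp denials take effect.
 *
 *	scno = syscall_trace_enter(regs);
 *	if (scno == NO_SYSCALL)
 *		goto trace_exit;	// syscall handler never runs
 */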
   2134
   2135void syscall_trace_exit(struct pt_regs *regs)
   2136{
   2137	unsigned long flags = read_thread_flags();
   2138
   2139	audit_syscall_exit(regs);
   2140
   2141	if (flags & _TIF_SYSCALL_TRACEPOINT)
   2142		trace_sys_exit(regs, syscall_get_return_value(current, regs));
   2143
   2144	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
   2145		report_syscall(regs, PTRACE_SYSCALL_EXIT);
   2146
   2147	rseq_syscall(regs);
   2148}
   2149
   2150/*
   2151 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
   2152 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
   2153 * not described in ARM DDI 0487D.a.
   2154 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
   2155 * be allocated an EL0 meaning in future.
   2156 * Userspace cannot use these until they have an architectural meaning.
   2157 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
   2158 * We also reserve IL for the kernel; SS is handled dynamically.
   2159 */
   2160#define SPSR_EL1_AARCH64_RES0_BITS \
   2161	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
   2162	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
   2163#define SPSR_EL1_AARCH32_RES0_BITS \
   2164	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
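
/*
 * Worked example (illustrative): for a 64-bit tracee, a pstate write of
 * PSR_MODE_EL0t with SSBS (bit 12) set survives valid_native_regs()
 * below, since bit 12 is outside SPSR_EL1_AARCH64_RES0_BITS, whereas
 * bit 13 falls in GENMASK_ULL(20, 13) and is silently cleared.
 */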
   2165
   2166static int valid_compat_regs(struct user_pt_regs *regs)
   2167{
   2168	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
   2169
   2170	if (!system_supports_mixed_endian_el0()) {
   2171		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
   2172			regs->pstate |= PSR_AA32_E_BIT;
   2173		else
   2174			regs->pstate &= ~PSR_AA32_E_BIT;
   2175	}
   2176
   2177	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
   2178	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
   2179	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
   2180	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
   2181		return 1;
   2182	}
   2183
   2184	/*
   2185	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
   2186	 * arch/arm.
   2187	 */
   2188	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
   2189			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
   2190			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
   2191			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
   2192			PSR_AA32_T_BIT;
   2193	regs->pstate |= PSR_MODE32_BIT;
   2194
   2195	return 0;
   2196}
   2197
   2198static int valid_native_regs(struct user_pt_regs *regs)
   2199{
   2200	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
   2201
   2202	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
   2203	    (regs->pstate & PSR_D_BIT) == 0 &&
   2204	    (regs->pstate & PSR_A_BIT) == 0 &&
   2205	    (regs->pstate & PSR_I_BIT) == 0 &&
   2206	    (regs->pstate & PSR_F_BIT) == 0) {
   2207		return 1;
   2208	}
   2209
   2210	/* Force PSR to a valid 64-bit EL0t */
   2211	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
   2212
   2213	return 0;
   2214}
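
/*
 * Example (illustrative): a pstate value naming PSR_MODE_EL1h, or one
 * with any of D/A/I/F set, fails the check above; the register is then
 * forced to a safe 64-bit EL0t state with only the NZCV flags preserved,
 * and 0 is returned so the caller can report the write as invalid.
 */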
   2215
   2216/*
   2217 * Are the current registers suitable for user mode? (used to maintain
   2218 * security in signal handlers)
   2219 */
   2220int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
   2221{
   2222	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
   2223	user_regs_reset_single_step(regs, task);
   2224
   2225	if (is_compat_thread(task_thread_info(task)))
   2226		return valid_compat_regs(regs);
   2227	else
   2228		return valid_native_regs(regs);
   2229}