cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ptrace.c (37805B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>

#include <asm/syscall.h>	/* for syscall_get_* */
/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}


void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;
	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed; most probably the debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}

/*
 * Ensure that nothing can wake it up, even SIGKILL.
 *
 * A task is switched to this state while a ptrace operation is in progress,
 * so that the ptrace operation is uninterruptible.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	unsigned long flags;

	/*
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task)) {
			task->jobctl &= ~JOBCTL_TRACED;
			wake_up_state(task, __TASK_TRACED);
		}
		unlock_task_sighand(task, &flags);
	}
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state &&
	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED)))
		ret = -ESRCH;

	return ret;
}

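/*
 * Check for CAP_SYS_PTRACE in the given user namespace.  When the caller
 * passed PTRACE_MODE_NOAUDIT, use the non-auditing capability check so
 * that a denied probe does not generate an audit record.
 */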
static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

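/*
 * Validate the option bits passed via PTRACE_SETOPTIONS or PTRACE_SEIZE.
 * PTRACE_O_SUSPEND_SECCOMP is only accepted from a CAP_SYS_ADMIN caller
 * that is not itself confined by seccomp.
 */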
static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}
	return 0;
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		retval = check_ptrace_options(flags);
		if (retval)
			return retval;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_STOPPED;
		signal_wake_up_state(task, __TASK_STOPPED);
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist_lock
 * held for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

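/*
 * Implements PTRACE_SETOPTIONS: replace the option bits of child->ptrace
 * in a single assignment so that no intermediate state with all options
 * cleared is ever visible.
 */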
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

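/* Copy the siginfo of the signal the tracee stopped for (PTRACE_GETSIGINFO). */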
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

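/* Replace the siginfo the tracee stopped for (PTRACE_SETSIGINFO). */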
static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

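/*
 * Implements PTRACE_PEEKSIGINFO: copy up to arg.nr queued siginfos,
 * starting at position arg.off, from the tracee's private or shared
 * pending queue out to the tracer.
 */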
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = sizeof(*task->rseq),
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif

#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

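/*
 * Common handler for the resuming requests (PTRACE_CONT, PTRACE_SYSCALL,
 * PTRACE_SYSEMU, PTRACE_SINGLESTEP, ...): program the syscall tracing,
 * syscall emulation and stepping state accordingly, then wake the tracee
 * with @data as the signal number to deliver.
 */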
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 */
	spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	child->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(child, __TASK_TRACED);
	spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

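/*
 * Implements PTRACE_GETREGSET/PTRACE_SETREGSET for the regset identified
 * by the NT_* note @type.  The iovec length must be a multiple of the
 * regset entry size and is clamped to the full regset size.
 */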
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

	case PTRACE_SINGLESTEP:
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
		return 0;

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	default:
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

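/*
 * Read one word from the tracee at @addr and store it at the user address
 * @data (PTRACE_PEEKTEXT/PTRACE_PEEKDATA).
 */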
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

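/*
 * Write the word @data into the tracee at @addr
 * (PTRACE_POKETEXT/PTRACE_POKEDATA).
 */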
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */