| field | value | date |
|---|---|---|
| author | Miquel Raynal <miquel.raynal@bootlin.com> | 2022-03-18 20:14:12 +0100 |
| committer | Miquel Raynal <miquel.raynal@bootlin.com> | 2022-03-18 20:14:42 +0100 |
| commit | 4e371d996590f3a7e82a086d499c912c1930e968 (patch) | |
| tree | 0c7b300670901f498238b32e59c307eddf338489 /kernel/fork.c | |
| parent | 8f877b7eab9d60a9deef6beae95656b77d55ab75 (diff) | |
| parent | 151c6b49d679872d6fc0b50e0ad96303091694a2 (diff) | |
Merge tag 'spi-nor/for-5.18' into mtd/next
SPI NOR core changes:
- move vendor specific code out of the core into vendor drivers.
- unify all function and object names in the vendor modules.
- make setup() callback optional to improve readability.
- skip erase logic when the SPI_NOR_NO_ERASE flag is set at flash declaration (see the sketch below).
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
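
Below is a minimal C sketch of the two patterns called out in the last two bullets: an optional setup() hook and an early-out for flashes declared with SPI_NOR_NO_ERASE. The struct layout, field names, helper names, and the flag value are invented for illustration; this is not the actual drivers/mtd/spi-nor/ code.

```c
/*
 * Hypothetical sketch only -- the structs, fields and helpers below are
 * simplified stand-ins, not the real SPI NOR core.
 */
#include <stdio.h>
#include <stddef.h>

#define SPI_NOR_NO_ERASE	0x1	/* flash declares no erase command */

struct spi_nor;

struct flash_info {
	const char *name;
	unsigned int flags;		/* e.g. SPI_NOR_NO_ERASE set at declaration */
};

struct flash_params {
	/* A vendor module may leave this NULL; the core then uses a default. */
	int (*setup)(struct spi_nor *nor);
};

struct spi_nor {
	const struct flash_info *info;
	struct flash_params *params;
};

/* Generic parameter selection used when no vendor setup() is provided. */
static int default_setup(struct spi_nor *nor)
{
	printf("%s: default setup\n", nor->info->name);
	return 0;
}

/* setup() is optional: call the vendor hook only if one was registered. */
static int nor_setup(struct spi_nor *nor)
{
	if (nor->params->setup)
		return nor->params->setup(nor);
	return default_setup(nor);
}

/* Skip the erase path entirely when the flash was declared with NO_ERASE. */
static int nor_erase(struct spi_nor *nor)
{
	if (nor->info->flags & SPI_NOR_NO_ERASE) {
		printf("%s: no erase command, skipping\n", nor->info->name);
		return 0;
	}
	printf("%s: issuing erase\n", nor->info->name);
	return 0;
}

int main(void)
{
	struct flash_info info = { .name = "toy-flash", .flags = SPI_NOR_NO_ERASE };
	struct flash_params params = { .setup = NULL };	/* optional hook left unset */
	struct spi_nor nor = { .info = &info, .params = &params };

	nor_setup(&nor);
	nor_erase(&nor);
	return 0;
}
```

The two branches mirror the commit message: a vendor module only supplies setup() when it needs to override the default selection, and a flash declared with SPI_NOR_NO_ERASE never reaches the erase path at all.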
Diffstat (limited to 'kernel/fork.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/fork.c | 30 |

1 file changed, 20 insertions, 10 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index d75a528f7b21..a024bf6254df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2021,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
 #ifdef CONFIG_PROVE_LOCKING
         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
+        retval = copy_creds(p, clone_flags);
+        if (retval < 0)
+                goto bad_fork_free;
+
         retval = -EAGAIN;
         if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
                 if (p->real_cred->user != INIT_USER &&
                     !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
-                        goto bad_fork_free;
+                        goto bad_fork_cleanup_count;
         }
         current->flags &= ~PF_NPROC_EXCEEDED;
 
-        retval = copy_creds(p, clone_flags);
-        if (retval < 0)
-                goto bad_fork_free;
-
         /*
          * If multiple threads are within copy_process(), then this check
          * triggers too late. This doesn't hurt, the check is only there
@@ -2267,6 +2267,17 @@ static __latent_entropy struct task_struct *copy_process(
                 goto bad_fork_put_pidfd;
 
         /*
+         * Now that the cgroups are pinned, re-clone the parent cgroup and put
+         * the new task on the correct runqueue. All this *before* the task
+         * becomes visible.
+         *
+         * This isn't part of ->can_fork() because while the re-cloning is
+         * cgroup specific, it unconditionally needs to place the task on a
+         * runqueue.
+         */
+        sched_cgroup_fork(p, args);
+
+        /*
          * From this point on we must avoid any synchronous user-space
          * communication until we take the tasklist-lock. In particular, we do
          * not want user-space to be able to predict the process start-time by
@@ -2323,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
                 goto bad_fork_cancel_cgroup;
         }
 
-        /* past the last point of failure */
-        if (pidfile)
-                fd_install(pidfd, pidfile);
-
         init_task_pid_links(p);
         if (likely(p->pid)) {
                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2375,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
         syscall_tracepoint_update(p);
         write_unlock_irq(&tasklist_lock);
 
+        if (pidfile)
+                fd_install(pidfd, pidfile);
+
         proc_fork_connector(p);
-        sched_post_fork(p, args);
+        sched_post_fork(p);
         cgroup_post_fork(p, args);
         perf_event_fork(p);
 
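
The first hunk above moves copy_creds() ahead of the RLIMIT_NPROC ucounts check, so a failing check must now unwind the already-taken credential reference via bad_fork_cleanup_count instead of jumping straight to bad_fork_free. The toy user-space sketch below shows that acquire-then-check, unwind-in-reverse pattern; all names and the limit value are invented for the example and are not kernel API.

```c
/* Toy illustration of goto-based unwinding, not kernel code. */
#include <errno.h>
#include <stdio.h>

struct creds { int users; };

static int copy_creds_toy(struct creds *c)
{
	c->users++;			/* reference taken: must be dropped on any later failure */
	return 0;
}

static int nproc_over_limit(const struct creds *c)
{
	return c->users > 4;		/* stand-in for the RLIMIT_NPROC comparison */
}

static int copy_process_toy(struct creds *c)
{
	int retval;

	retval = copy_creds_toy(c);
	if (retval < 0)
		goto bad_fork_free;	/* nothing acquired yet, plain free path */

	retval = -EAGAIN;
	if (nproc_over_limit(c))
		goto bad_fork_cleanup_count;	/* creds already taken, so drop them */

	return 0;			/* success */

bad_fork_cleanup_count:
	c->users--;			/* undo copy_creds_toy() */
bad_fork_free:
	return retval;
}

int main(void)
{
	struct creds c = { .users = 4 };

	printf("copy_process_toy() = %d\n", copy_process_toy(&c));
	printf("users after unwind = %d\n", c.users);
	return 0;
}
```

Keeping the cleanup labels in reverse acquisition order is what lets a later failure simply jump one label further up the unwind chain, which is exactly why the check's error path changes label once copy_creds() runs first.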
