cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kthread.c (42523B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* Kernel thread helper functions.
      3 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
      4 *   Copyright (C) 2009 Red Hat, Inc.
      5 *
      6 * Creation is done via kthreadd, so that we get a clean environment
      7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
      8 * etc.).
      9 */
     10#include <uapi/linux/sched/types.h>
     11#include <linux/mm.h>
     12#include <linux/mmu_context.h>
     13#include <linux/sched.h>
     14#include <linux/sched/mm.h>
     15#include <linux/sched/task.h>
     16#include <linux/kthread.h>
     17#include <linux/completion.h>
     18#include <linux/err.h>
     19#include <linux/cgroup.h>
     20#include <linux/cpuset.h>
     21#include <linux/unistd.h>
     22#include <linux/file.h>
     23#include <linux/export.h>
     24#include <linux/mutex.h>
     25#include <linux/slab.h>
     26#include <linux/freezer.h>
     27#include <linux/ptrace.h>
     28#include <linux/uaccess.h>
     29#include <linux/numa.h>
     30#include <linux/sched/isolation.h>
     31#include <trace/events/sched.h>
     32
     33
     34static DEFINE_SPINLOCK(kthread_create_lock);
     35static LIST_HEAD(kthread_create_list);
     36struct task_struct *kthreadd_task;
     37
     38struct kthread_create_info
     39{
     40	/* Information passed to kthread() from kthreadd. */
     41	int (*threadfn)(void *data);
     42	void *data;
     43	int node;
     44
     45	/* Result passed back to kthread_create() from kthreadd. */
     46	struct task_struct *result;
     47	struct completion *done;
     48
     49	struct list_head list;
     50};
     51
     52struct kthread {
     53	unsigned long flags;
     54	unsigned int cpu;
     55	int result;
     56	int (*threadfn)(void *);
     57	void *data;
     58	struct completion parked;
     59	struct completion exited;
     60#ifdef CONFIG_BLK_CGROUP
     61	struct cgroup_subsys_state *blkcg_css;
     62#endif
     63	/* To store the full name if task comm is truncated. */
     64	char *full_name;
     65};
     66
     67enum KTHREAD_BITS {
     68	KTHREAD_IS_PER_CPU = 0,
     69	KTHREAD_SHOULD_STOP,
     70	KTHREAD_SHOULD_PARK,
     71};
     72
     73static inline struct kthread *to_kthread(struct task_struct *k)
     74{
     75	WARN_ON(!(k->flags & PF_KTHREAD));
     76	return k->worker_private;
     77}
     78
     79/*
     80 * Variant of to_kthread() that doesn't assume @p is a kthread.
     81 *
     82 * Per construction; when:
     83 *
     84 *   (p->flags & PF_KTHREAD) && p->worker_private
     85 *
     86 * the task is both a kthread and struct kthread is persistent. However
      87 * PF_KTHREAD on its own is not, kernel_thread() can exec() (see umh.c and
     88 * begin_new_exec()).
     89 */
     90static inline struct kthread *__to_kthread(struct task_struct *p)
     91{
     92	void *kthread = p->worker_private;
     93	if (kthread && !(p->flags & PF_KTHREAD))
     94		kthread = NULL;
     95	return kthread;
     96}
     97
     98void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
     99{
    100	struct kthread *kthread = to_kthread(tsk);
    101
    102	if (!kthread || !kthread->full_name) {
    103		__get_task_comm(buf, buf_size, tsk);
    104		return;
    105	}
    106
    107	strscpy_pad(buf, kthread->full_name, buf_size);
    108}
    109
    110bool set_kthread_struct(struct task_struct *p)
    111{
    112	struct kthread *kthread;
    113
    114	if (WARN_ON_ONCE(to_kthread(p)))
    115		return false;
    116
    117	kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
    118	if (!kthread)
    119		return false;
    120
    121	init_completion(&kthread->exited);
    122	init_completion(&kthread->parked);
    123	p->vfork_done = &kthread->exited;
    124
    125	p->worker_private = kthread;
    126	return true;
    127}
    128
    129void free_kthread_struct(struct task_struct *k)
    130{
    131	struct kthread *kthread;
    132
    133	/*
    134	 * Can be NULL if kmalloc() in set_kthread_struct() failed.
    135	 */
    136	kthread = to_kthread(k);
    137	if (!kthread)
    138		return;
    139
    140#ifdef CONFIG_BLK_CGROUP
    141	WARN_ON_ONCE(kthread->blkcg_css);
    142#endif
    143	k->worker_private = NULL;
    144	kfree(kthread->full_name);
    145	kfree(kthread);
    146}
    147
    148/**
    149 * kthread_should_stop - should this kthread return now?
    150 *
    151 * When someone calls kthread_stop() on your kthread, it will be woken
    152 * and this will return true.  You should then return, and your return
    153 * value will be passed through to kthread_stop().
    154 */
    155bool kthread_should_stop(void)
    156{
    157	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
    158}
    159EXPORT_SYMBOL(kthread_should_stop);
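
/*
 * Illustrative sketch (not part of this file): the typical shape of a thread
 * function that honours kthread_should_stop(). The name demo_poll_fn and the
 * 100 ms period are hypothetical; a caller would pair this with
 * kthread_run()/kthread_stop() as sketched further below.
 */
static int demo_poll_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then sleep until the next round */
		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}
	/* this value is handed back to kthread_stop() */
	return 0;
}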
    160
    161bool __kthread_should_park(struct task_struct *k)
    162{
    163	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
    164}
    165EXPORT_SYMBOL_GPL(__kthread_should_park);
    166
    167/**
    168 * kthread_should_park - should this kthread park now?
    169 *
    170 * When someone calls kthread_park() on your kthread, it will be woken
    171 * and this will return true.  You should then do the necessary
    172 * cleanup and call kthread_parkme()
    173 *
    174 * Similar to kthread_should_stop(), but this keeps the thread alive
    175 * and in a park position. kthread_unpark() "restarts" the thread and
    176 * calls the thread function again.
    177 */
    178bool kthread_should_park(void)
    179{
    180	return __kthread_should_park(current);
    181}
    182EXPORT_SYMBOL_GPL(kthread_should_park);
    183
    184/**
    185 * kthread_freezable_should_stop - should this freezable kthread return now?
    186 * @was_frozen: optional out parameter, indicates whether %current was frozen
    187 *
    188 * kthread_should_stop() for freezable kthreads, which will enter
    189 * refrigerator if necessary.  This function is safe from kthread_stop() /
    190 * freezer deadlock and freezable kthreads should use this function instead
    191 * of calling try_to_freeze() directly.
    192 */
    193bool kthread_freezable_should_stop(bool *was_frozen)
    194{
    195	bool frozen = false;
    196
    197	might_sleep();
    198
    199	if (unlikely(freezing(current)))
    200		frozen = __refrigerator(true);
    201
    202	if (was_frozen)
    203		*was_frozen = frozen;
    204
    205	return kthread_should_stop();
    206}
    207EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
    208
    209/**
    210 * kthread_func - return the function specified on kthread creation
    211 * @task: kthread task in question
    212 *
    213 * Returns NULL if the task is not a kthread.
    214 */
    215void *kthread_func(struct task_struct *task)
    216{
    217	struct kthread *kthread = __to_kthread(task);
    218	if (kthread)
    219		return kthread->threadfn;
    220	return NULL;
    221}
    222EXPORT_SYMBOL_GPL(kthread_func);
    223
    224/**
    225 * kthread_data - return data value specified on kthread creation
    226 * @task: kthread task in question
    227 *
    228 * Return the data value specified when kthread @task was created.
    229 * The caller is responsible for ensuring the validity of @task when
    230 * calling this function.
    231 */
    232void *kthread_data(struct task_struct *task)
    233{
    234	return to_kthread(task)->data;
    235}
    236EXPORT_SYMBOL_GPL(kthread_data);
    237
    238/**
    239 * kthread_probe_data - speculative version of kthread_data()
    240 * @task: possible kthread task in question
    241 *
    242 * @task could be a kthread task.  Return the data value specified when it
    243 * was created if accessible.  If @task isn't a kthread task or its data is
    244 * inaccessible for any reason, %NULL is returned.  This function requires
    245 * that @task itself is safe to dereference.
    246 */
    247void *kthread_probe_data(struct task_struct *task)
    248{
    249	struct kthread *kthread = __to_kthread(task);
    250	void *data = NULL;
    251
    252	if (kthread)
    253		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
    254	return data;
    255}
    256
    257static void __kthread_parkme(struct kthread *self)
    258{
    259	for (;;) {
    260		/*
    261		 * TASK_PARKED is a special state; we must serialize against
    262		 * possible pending wakeups to avoid store-store collisions on
    263		 * task->state.
    264		 *
    265		 * Such a collision might possibly result in the task state
     266		 * changing from TASK_PARKED and us failing the
    267		 * wait_task_inactive() in kthread_park().
    268		 */
    269		set_special_state(TASK_PARKED);
    270		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
    271			break;
    272
    273		/*
    274		 * Thread is going to call schedule(), do not preempt it,
    275		 * or the caller of kthread_park() may spend more time in
    276		 * wait_task_inactive().
    277		 */
    278		preempt_disable();
    279		complete(&self->parked);
    280		schedule_preempt_disabled();
    281		preempt_enable();
    282	}
    283	__set_current_state(TASK_RUNNING);
    284}
    285
    286void kthread_parkme(void)
    287{
    288	__kthread_parkme(to_kthread(current));
    289}
    290EXPORT_SYMBOL_GPL(kthread_parkme);
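
/*
 * Illustrative sketch (not part of this file): a thread function that honours
 * both park and stop requests. kthread_parkme() blocks in TASK_PARKED until
 * kthread_unpark() is called. The name demo_percpu_fn is hypothetical.
 */
static int demo_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* do one unit of work, then yield the CPU for a while */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}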
    291
    292/**
     293 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
    294 * @result: The integer value to return to kthread_stop().
    295 *
    296 * While kthread_exit can be called directly, it exists so that
    297 * functions which do some additional work in non-modular code such as
    298 * module_put_and_kthread_exit can be implemented.
    299 *
    300 * Does not return.
    301 */
    302void __noreturn kthread_exit(long result)
    303{
    304	struct kthread *kthread = to_kthread(current);
    305	kthread->result = result;
    306	do_exit(0);
    307}
    308
    309/**
    310 * kthread_complete_and_exit - Exit the current kthread.
    311 * @comp: Completion to complete
    312 * @code: The integer value to return to kthread_stop().
    313 *
     314 * If present, complete @comp and then return @code to kthread_stop().
    315 *
    316 * A kernel thread whose module may be removed after the completion of
     317 * @comp can use this function to exit safely.
    318 *
    319 * Does not return.
    320 */
    321void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
    322{
    323	if (comp)
    324		complete(comp);
    325
    326	kthread_exit(code);
    327}
    328EXPORT_SYMBOL(kthread_complete_and_exit);
    329
    330static int kthread(void *_create)
    331{
    332	static const struct sched_param param = { .sched_priority = 0 };
    333	/* Copy data: it's on kthread's stack */
    334	struct kthread_create_info *create = _create;
    335	int (*threadfn)(void *data) = create->threadfn;
    336	void *data = create->data;
    337	struct completion *done;
    338	struct kthread *self;
    339	int ret;
    340
    341	self = to_kthread(current);
    342
    343	/* Release the structure when caller killed by a fatal signal. */
    344	done = xchg(&create->done, NULL);
    345	if (!done) {
    346		kfree(create);
    347		kthread_exit(-EINTR);
    348	}
    349
    350	self->threadfn = threadfn;
    351	self->data = data;
    352
    353	/*
    354	 * The new thread inherited kthreadd's priority and CPU mask. Reset
    355	 * back to default in case they have been changed.
    356	 */
    357	sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
    358	set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
    359
    360	/* OK, tell user we're spawned, wait for stop or wakeup */
    361	__set_current_state(TASK_UNINTERRUPTIBLE);
    362	create->result = current;
    363	/*
    364	 * Thread is going to call schedule(), do not preempt it,
    365	 * or the creator may spend more time in wait_task_inactive().
    366	 */
    367	preempt_disable();
    368	complete(done);
    369	schedule_preempt_disabled();
    370	preempt_enable();
    371
    372	ret = -EINTR;
    373	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
    374		cgroup_kthread_ready();
    375		__kthread_parkme(self);
    376		ret = threadfn(data);
    377	}
    378	kthread_exit(ret);
    379}
    380
     381/* called from kernel_clone() to get node information for the task about to be created */
    382int tsk_fork_get_node(struct task_struct *tsk)
    383{
    384#ifdef CONFIG_NUMA
    385	if (tsk == kthreadd_task)
    386		return tsk->pref_node_fork;
    387#endif
    388	return NUMA_NO_NODE;
    389}
    390
    391static void create_kthread(struct kthread_create_info *create)
    392{
    393	int pid;
    394
    395#ifdef CONFIG_NUMA
    396	current->pref_node_fork = create->node;
    397#endif
    398	/* We want our own signal handler (we take no signals by default). */
    399	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
    400	if (pid < 0) {
    401		/* Release the structure when caller killed by a fatal signal. */
    402		struct completion *done = xchg(&create->done, NULL);
    403
    404		if (!done) {
    405			kfree(create);
    406			return;
    407		}
    408		create->result = ERR_PTR(pid);
    409		complete(done);
    410	}
    411}
    412
    413static __printf(4, 0)
    414struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
    415						    void *data, int node,
    416						    const char namefmt[],
    417						    va_list args)
    418{
    419	DECLARE_COMPLETION_ONSTACK(done);
    420	struct task_struct *task;
    421	struct kthread_create_info *create = kmalloc(sizeof(*create),
    422						     GFP_KERNEL);
    423
    424	if (!create)
    425		return ERR_PTR(-ENOMEM);
    426	create->threadfn = threadfn;
    427	create->data = data;
    428	create->node = node;
    429	create->done = &done;
    430
    431	spin_lock(&kthread_create_lock);
    432	list_add_tail(&create->list, &kthread_create_list);
    433	spin_unlock(&kthread_create_lock);
    434
    435	wake_up_process(kthreadd_task);
    436	/*
     437	 * Wait for completion in killable state, because we might be chosen
     438	 * by the OOM killer while kthreadd is trying to allocate memory for
     439	 * the new kernel thread.
    440	 */
    441	if (unlikely(wait_for_completion_killable(&done))) {
    442		/*
     443		 * If we were killed by a fatal signal before kthreadd (or the new
    444		 * kernel thread) calls complete(), leave the cleanup of this
    445		 * structure to that thread.
    446		 */
    447		if (xchg(&create->done, NULL))
    448			return ERR_PTR(-EINTR);
    449		/*
    450		 * kthreadd (or new kernel thread) will call complete()
    451		 * shortly.
    452		 */
    453		wait_for_completion(&done);
    454	}
    455	task = create->result;
    456	if (!IS_ERR(task)) {
    457		char name[TASK_COMM_LEN];
    458		va_list aq;
    459		int len;
    460
    461		/*
    462		 * task is already visible to other tasks, so updating
    463		 * COMM must be protected.
    464		 */
    465		va_copy(aq, args);
    466		len = vsnprintf(name, sizeof(name), namefmt, aq);
    467		va_end(aq);
    468		if (len >= TASK_COMM_LEN) {
    469			struct kthread *kthread = to_kthread(task);
    470
    471			/* leave it truncated when out of memory. */
    472			kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
    473		}
    474		set_task_comm(task, name);
    475	}
    476	kfree(create);
    477	return task;
    478}
    479
    480/**
    481 * kthread_create_on_node - create a kthread.
    482 * @threadfn: the function to run until signal_pending(current).
    483 * @data: data ptr for @threadfn.
    484 * @node: task and thread structures for the thread are allocated on this node
    485 * @namefmt: printf-style name for the thread.
    486 *
    487 * Description: This helper function creates and names a kernel
    488 * thread.  The thread will be stopped: use wake_up_process() to start
    489 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
    490 * is affine to all CPUs.
    491 *
    492 * If thread is going to be bound on a particular cpu, give its node
    493 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
    494 * When woken, the thread will run @threadfn() with @data as its
    495 * argument. @threadfn() can either return directly if it is a
    496 * standalone thread for which no one will call kthread_stop(), or
    497 * return when 'kthread_should_stop()' is true (which means
    498 * kthread_stop() has been called).  The return value should be zero
    499 * or a negative error number; it will be passed to kthread_stop().
    500 *
    501 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
    502 */
    503struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
    504					   void *data, int node,
    505					   const char namefmt[],
    506					   ...)
    507{
    508	struct task_struct *task;
    509	va_list args;
    510
    511	va_start(args, namefmt);
    512	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
    513	va_end(args);
    514
    515	return task;
    516}
    517EXPORT_SYMBOL(kthread_create_on_node);
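
/*
 * Illustrative sketch (not part of this file): creating a thread with
 * kthread_create_on_node() and starting it explicitly. demo_poll_fn (above)
 * and demo_start_thread are hypothetical; NUMA_NO_NODE is used when there is
 * no node preference. kthread_run() wraps exactly this create-then-wake flow.
 */
static struct task_struct *demo_start_thread(void *cookie)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(demo_poll_fn, cookie, NUMA_NO_NODE,
				     "demo-poll/%d", 0);
	if (IS_ERR(tsk))
		return tsk;		/* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

	wake_up_process(tsk);		/* the new thread starts out stopped */
	return tsk;
}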
    518
    519static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
    520{
    521	unsigned long flags;
    522
    523	if (!wait_task_inactive(p, state)) {
    524		WARN_ON(1);
    525		return;
    526	}
    527
    528	/* It's safe because the task is inactive. */
    529	raw_spin_lock_irqsave(&p->pi_lock, flags);
    530	do_set_cpus_allowed(p, mask);
    531	p->flags |= PF_NO_SETAFFINITY;
    532	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
    533}
    534
    535static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
    536{
    537	__kthread_bind_mask(p, cpumask_of(cpu), state);
    538}
    539
    540void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
    541{
    542	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
    543}
    544
    545/**
    546 * kthread_bind - bind a just-created kthread to a cpu.
    547 * @p: thread created by kthread_create().
     548 * @cpu: cpu (might not be online, must be possible) for @p to run on.
    549 *
    550 * Description: This function is equivalent to set_cpus_allowed(),
    551 * except that @cpu doesn't need to be online, and the thread must be
    552 * stopped (i.e., just returned from kthread_create()).
    553 */
    554void kthread_bind(struct task_struct *p, unsigned int cpu)
    555{
    556	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
    557}
    558EXPORT_SYMBOL(kthread_bind);
    559
    560/**
    561 * kthread_create_on_cpu - Create a cpu bound kthread
    562 * @threadfn: the function to run until signal_pending(current).
    563 * @data: data ptr for @threadfn.
     564 * @cpu: The cpu to which the thread should be bound.
    565 * @namefmt: printf-style name for the thread. Format is restricted
    566 *	     to "name.*%u". Code fills in cpu number.
    567 *
     568 * Description: This helper function creates and names a kernel thread bound to @cpu.
    569 */
    570struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
    571					  void *data, unsigned int cpu,
    572					  const char *namefmt)
    573{
    574	struct task_struct *p;
    575
    576	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
    577				   cpu);
    578	if (IS_ERR(p))
    579		return p;
    580	kthread_bind(p, cpu);
     581	/* CPU hotplug needs to bind it once again when unparking the thread. */
    582	to_kthread(p)->cpu = cpu;
    583	return p;
    584}
    585EXPORT_SYMBOL(kthread_create_on_cpu);
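
/*
 * Illustrative sketch (not part of this file): creating a CPU-bound thread.
 * The namefmt must contain a single %u, which this helper fills with the CPU
 * number. demo_percpu_fn (above) and demo_start_on_cpu are hypothetical.
 */
static struct task_struct *demo_start_on_cpu(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(demo_percpu_fn, NULL, cpu, "demo/%u");
	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* created threads start out stopped */
	return tsk;
}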
    586
    587void kthread_set_per_cpu(struct task_struct *k, int cpu)
    588{
    589	struct kthread *kthread = to_kthread(k);
    590	if (!kthread)
    591		return;
    592
    593	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
    594
    595	if (cpu < 0) {
    596		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
    597		return;
    598	}
    599
    600	kthread->cpu = cpu;
    601	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
    602}
    603
    604bool kthread_is_per_cpu(struct task_struct *p)
    605{
    606	struct kthread *kthread = __to_kthread(p);
    607	if (!kthread)
    608		return false;
    609
    610	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
    611}
    612
    613/**
    614 * kthread_unpark - unpark a thread created by kthread_create().
    615 * @k:		thread created by kthread_create().
    616 *
    617 * Sets kthread_should_park() for @k to return false, wakes it, and
     618 * waits for it to return. If the thread is marked percpu then it is
     619 * bound to the cpu again.
    620 */
    621void kthread_unpark(struct task_struct *k)
    622{
    623	struct kthread *kthread = to_kthread(k);
    624
    625	/*
    626	 * Newly created kthread was parked when the CPU was offline.
    627	 * The binding was lost and we need to set it again.
    628	 */
    629	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
    630		__kthread_bind(k, kthread->cpu, TASK_PARKED);
    631
    632	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
    633	/*
    634	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
    635	 */
    636	wake_up_state(k, TASK_PARKED);
    637}
    638EXPORT_SYMBOL_GPL(kthread_unpark);
    639
    640/**
    641 * kthread_park - park a thread created by kthread_create().
    642 * @k: thread created by kthread_create().
    643 *
    644 * Sets kthread_should_park() for @k to return true, wakes it, and
    645 * waits for it to return. This can also be called after kthread_create()
    646 * instead of calling wake_up_process(): the thread will park without
    647 * calling threadfn().
    648 *
    649 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
    650 * If called by the kthread itself just the park bit is set.
    651 */
    652int kthread_park(struct task_struct *k)
    653{
    654	struct kthread *kthread = to_kthread(k);
    655
    656	if (WARN_ON(k->flags & PF_EXITING))
    657		return -ENOSYS;
    658
    659	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
    660		return -EBUSY;
    661
    662	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
    663	if (k != current) {
    664		wake_up_process(k);
    665		/*
    666		 * Wait for __kthread_parkme() to complete(), this means we
    667		 * _will_ have TASK_PARKED and are about to call schedule().
    668		 */
    669		wait_for_completion(&kthread->parked);
    670		/*
    671		 * Now wait for that schedule() to complete and the task to
    672		 * get scheduled out.
    673		 */
    674		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
    675	}
    676
    677	return 0;
    678}
    679EXPORT_SYMBOL_GPL(kthread_park);
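
/*
 * Illustrative sketch (not part of this file): parking a thread across a
 * quiesce window and unparking it afterwards. demo_cpu_task and
 * demo_quiesce_thread are hypothetical; the thread function must call
 * kthread_parkme() as in demo_percpu_fn() above.
 */
static struct task_struct *demo_cpu_task;

static int demo_quiesce_thread(void)
{
	int ret;

	ret = kthread_park(demo_cpu_task);	/* waits for TASK_PARKED */
	if (ret)
		return ret;

	/* ... reconfigure whatever the thread touches ... */

	kthread_unpark(demo_cpu_task);		/* resumes the thread function */
	return 0;
}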
    680
    681/**
    682 * kthread_stop - stop a thread created by kthread_create().
    683 * @k: thread created by kthread_create().
    684 *
    685 * Sets kthread_should_stop() for @k to return true, wakes it, and
    686 * waits for it to exit. This can also be called after kthread_create()
    687 * instead of calling wake_up_process(): the thread will exit without
    688 * calling threadfn().
    689 *
    690 * If threadfn() may call kthread_exit() itself, the caller must ensure
    691 * task_struct can't go away.
    692 *
    693 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
    694 * was never called.
    695 */
    696int kthread_stop(struct task_struct *k)
    697{
    698	struct kthread *kthread;
    699	int ret;
    700
    701	trace_sched_kthread_stop(k);
    702
    703	get_task_struct(k);
    704	kthread = to_kthread(k);
    705	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
    706	kthread_unpark(k);
    707	wake_up_process(k);
    708	wait_for_completion(&kthread->exited);
    709	ret = kthread->result;
    710	put_task_struct(k);
    711
    712	trace_sched_kthread_stop_ret(ret);
    713	return ret;
    714}
    715EXPORT_SYMBOL(kthread_stop);
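
/*
 * Illustrative sketch (not part of this file): the usual create/stop pairing
 * from a module's init/exit paths. kthread_run() creates and immediately
 * wakes the thread; kthread_stop() waits for demo_poll_fn() above to return.
 * demo_task, demo_init and demo_exit are hypothetical.
 */
static struct task_struct *demo_task;

static int demo_init(void)
{
	demo_task = kthread_run(demo_poll_fn, NULL, "demo-poll");
	return PTR_ERR_OR_ZERO(demo_task);
}

static void demo_exit(void)
{
	if (!IS_ERR_OR_NULL(demo_task))
		kthread_stop(demo_task);	/* returns demo_poll_fn()'s value */
}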
    716
    717int kthreadd(void *unused)
    718{
    719	struct task_struct *tsk = current;
    720
    721	/* Setup a clean context for our children to inherit. */
    722	set_task_comm(tsk, "kthreadd");
    723	ignore_signals(tsk);
    724	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
    725	set_mems_allowed(node_states[N_MEMORY]);
    726
    727	current->flags |= PF_NOFREEZE;
    728	cgroup_init_kthreadd();
    729
    730	for (;;) {
    731		set_current_state(TASK_INTERRUPTIBLE);
    732		if (list_empty(&kthread_create_list))
    733			schedule();
    734		__set_current_state(TASK_RUNNING);
    735
    736		spin_lock(&kthread_create_lock);
    737		while (!list_empty(&kthread_create_list)) {
    738			struct kthread_create_info *create;
    739
    740			create = list_entry(kthread_create_list.next,
    741					    struct kthread_create_info, list);
    742			list_del_init(&create->list);
    743			spin_unlock(&kthread_create_lock);
    744
    745			create_kthread(create);
    746
    747			spin_lock(&kthread_create_lock);
    748		}
    749		spin_unlock(&kthread_create_lock);
    750	}
    751
    752	return 0;
    753}
    754
    755void __kthread_init_worker(struct kthread_worker *worker,
    756				const char *name,
    757				struct lock_class_key *key)
    758{
    759	memset(worker, 0, sizeof(struct kthread_worker));
    760	raw_spin_lock_init(&worker->lock);
    761	lockdep_set_class_and_name(&worker->lock, key, name);
    762	INIT_LIST_HEAD(&worker->work_list);
    763	INIT_LIST_HEAD(&worker->delayed_work_list);
    764}
    765EXPORT_SYMBOL_GPL(__kthread_init_worker);
    766
    767/**
    768 * kthread_worker_fn - kthread function to process kthread_worker
    769 * @worker_ptr: pointer to initialized kthread_worker
    770 *
    771 * This function implements the main cycle of kthread worker. It processes
    772 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
    773 * is empty.
    774 *
     775 * The works must not hold any locks or leave preemption or interrupts
     776 * disabled when they finish. A safe point for freezing is defined after one
     777 * work finishes and before a new one is started.
     778 *
     779 * Also the works must not be handled by more than one worker at the same
     780 * time; see also kthread_queue_work().
    781 */
    782int kthread_worker_fn(void *worker_ptr)
    783{
    784	struct kthread_worker *worker = worker_ptr;
    785	struct kthread_work *work;
    786
    787	/*
    788	 * FIXME: Update the check and remove the assignment when all kthread
    789	 * worker users are created using kthread_create_worker*() functions.
    790	 */
    791	WARN_ON(worker->task && worker->task != current);
    792	worker->task = current;
    793
    794	if (worker->flags & KTW_FREEZABLE)
    795		set_freezable();
    796
    797repeat:
    798	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
    799
    800	if (kthread_should_stop()) {
    801		__set_current_state(TASK_RUNNING);
    802		raw_spin_lock_irq(&worker->lock);
    803		worker->task = NULL;
    804		raw_spin_unlock_irq(&worker->lock);
    805		return 0;
    806	}
    807
    808	work = NULL;
    809	raw_spin_lock_irq(&worker->lock);
    810	if (!list_empty(&worker->work_list)) {
    811		work = list_first_entry(&worker->work_list,
    812					struct kthread_work, node);
    813		list_del_init(&work->node);
    814	}
    815	worker->current_work = work;
    816	raw_spin_unlock_irq(&worker->lock);
    817
    818	if (work) {
    819		kthread_work_func_t func = work->func;
    820		__set_current_state(TASK_RUNNING);
    821		trace_sched_kthread_work_execute_start(work);
    822		work->func(work);
    823		/*
    824		 * Avoid dereferencing work after this point.  The trace
    825		 * event only cares about the address.
    826		 */
    827		trace_sched_kthread_work_execute_end(work, func);
    828	} else if (!freezing(current))
    829		schedule();
    830
    831	try_to_freeze();
    832	cond_resched();
    833	goto repeat;
    834}
    835EXPORT_SYMBOL_GPL(kthread_worker_fn);
    836
    837static __printf(3, 0) struct kthread_worker *
    838__kthread_create_worker(int cpu, unsigned int flags,
    839			const char namefmt[], va_list args)
    840{
    841	struct kthread_worker *worker;
    842	struct task_struct *task;
    843	int node = NUMA_NO_NODE;
    844
    845	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
    846	if (!worker)
    847		return ERR_PTR(-ENOMEM);
    848
    849	kthread_init_worker(worker);
    850
    851	if (cpu >= 0)
    852		node = cpu_to_node(cpu);
    853
    854	task = __kthread_create_on_node(kthread_worker_fn, worker,
    855						node, namefmt, args);
    856	if (IS_ERR(task))
    857		goto fail_task;
    858
    859	if (cpu >= 0)
    860		kthread_bind(task, cpu);
    861
    862	worker->flags = flags;
    863	worker->task = task;
    864	wake_up_process(task);
    865	return worker;
    866
    867fail_task:
    868	kfree(worker);
    869	return ERR_CAST(task);
    870}
    871
    872/**
    873 * kthread_create_worker - create a kthread worker
    874 * @flags: flags modifying the default behavior of the worker
    875 * @namefmt: printf-style name for the kthread worker (task).
    876 *
    877 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
    878 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
    879 * when the caller was killed by a fatal signal.
    880 */
    881struct kthread_worker *
    882kthread_create_worker(unsigned int flags, const char namefmt[], ...)
    883{
    884	struct kthread_worker *worker;
    885	va_list args;
    886
    887	va_start(args, namefmt);
    888	worker = __kthread_create_worker(-1, flags, namefmt, args);
    889	va_end(args);
    890
    891	return worker;
    892}
    893EXPORT_SYMBOL(kthread_create_worker);
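
/*
 * Illustrative sketch (not part of this file): minimal kthread_worker usage,
 * i.e. create a worker, queue a work item, wait for it, tear the worker down.
 * demo_work_fn and demo_use_worker are hypothetical.
 */
static void demo_work_fn(struct kthread_work *work)
{
	/* runs in the worker thread, one work item at a time */
}

static int demo_use_worker(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "demo-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, demo_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);	/* wait for demo_work_fn() to finish */

	kthread_destroy_worker(worker);	/* flushes again and stops the thread */
	return 0;
}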
    894
    895/**
    896 * kthread_create_worker_on_cpu - create a kthread worker and bind it
    897 *	to a given CPU and the associated NUMA node.
    898 * @cpu: CPU number
    899 * @flags: flags modifying the default behavior of the worker
    900 * @namefmt: printf-style name for the kthread worker (task).
    901 *
    902 * Use a valid CPU number if you want to bind the kthread worker
    903 * to the given CPU and the associated NUMA node.
    904 *
    905 * A good practice is to add the cpu number also into the worker name.
     906 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
    907 *
    908 * CPU hotplug:
    909 * The kthread worker API is simple and generic. It just provides a way
    910 * to create, use, and destroy workers.
    911 *
    912 * It is up to the API user how to handle CPU hotplug. They have to decide
    913 * how to handle pending work items, prevent queuing new ones, and
    914 * restore the functionality when the CPU goes off and on. There are a
    915 * few catches:
    916 *
     917 *    - CPU affinity gets lost when the worker is scheduled on an offline CPU.
     918 *
     919 *    - The worker might not exist if the CPU was offline when the user
     920 *      created the workers.
    921 *
    922 * Good practice is to implement two CPU hotplug callbacks and to
    923 * destroy/create the worker when the CPU goes down/up.
    924 *
    925 * Return:
    926 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
    927 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
    928 * when the caller was killed by a fatal signal.
    929 */
    930struct kthread_worker *
    931kthread_create_worker_on_cpu(int cpu, unsigned int flags,
    932			     const char namefmt[], ...)
    933{
    934	struct kthread_worker *worker;
    935	va_list args;
    936
    937	va_start(args, namefmt);
    938	worker = __kthread_create_worker(cpu, flags, namefmt, args);
    939	va_end(args);
    940
    941	return worker;
    942}
    943EXPORT_SYMBOL(kthread_create_worker_on_cpu);
    944
    945/*
    946 * Returns true when the work could not be queued at the moment.
    947 * It happens when it is already pending in a worker list
    948 * or when it is being cancelled.
    949 */
    950static inline bool queuing_blocked(struct kthread_worker *worker,
    951				   struct kthread_work *work)
    952{
    953	lockdep_assert_held(&worker->lock);
    954
    955	return !list_empty(&work->node) || work->canceling;
    956}
    957
    958static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
    959					     struct kthread_work *work)
    960{
    961	lockdep_assert_held(&worker->lock);
    962	WARN_ON_ONCE(!list_empty(&work->node));
    963	/* Do not use a work with >1 worker, see kthread_queue_work() */
    964	WARN_ON_ONCE(work->worker && work->worker != worker);
    965}
    966
    967/* insert @work before @pos in @worker */
    968static void kthread_insert_work(struct kthread_worker *worker,
    969				struct kthread_work *work,
    970				struct list_head *pos)
    971{
    972	kthread_insert_work_sanity_check(worker, work);
    973
    974	trace_sched_kthread_work_queue_work(worker, work);
    975
    976	list_add_tail(&work->node, pos);
    977	work->worker = worker;
    978	if (!worker->current_work && likely(worker->task))
    979		wake_up_process(worker->task);
    980}
    981
    982/**
    983 * kthread_queue_work - queue a kthread_work
    984 * @worker: target kthread_worker
    985 * @work: kthread_work to queue
    986 *
     987 * Queue @work on @worker for async execution.  @worker
     988 * must have been created with kthread_create_worker().  Returns %true
    989 * if @work was successfully queued, %false if it was already pending.
    990 *
    991 * Reinitialize the work if it needs to be used by another worker.
    992 * For example, when the worker was stopped and started again.
    993 */
    994bool kthread_queue_work(struct kthread_worker *worker,
    995			struct kthread_work *work)
    996{
    997	bool ret = false;
    998	unsigned long flags;
    999
   1000	raw_spin_lock_irqsave(&worker->lock, flags);
   1001	if (!queuing_blocked(worker, work)) {
   1002		kthread_insert_work(worker, work, &worker->work_list);
   1003		ret = true;
   1004	}
   1005	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1006	return ret;
   1007}
   1008EXPORT_SYMBOL_GPL(kthread_queue_work);
   1009
   1010/**
   1011 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
   1012 *	delayed work when the timer expires.
   1013 * @t: pointer to the expired timer
   1014 *
   1015 * The format of the function is defined by struct timer_list.
    1016 * It is called from an irq-safe timer with interrupts already disabled.
   1017 */
   1018void kthread_delayed_work_timer_fn(struct timer_list *t)
   1019{
   1020	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
   1021	struct kthread_work *work = &dwork->work;
   1022	struct kthread_worker *worker = work->worker;
   1023	unsigned long flags;
   1024
   1025	/*
   1026	 * This might happen when a pending work is reinitialized.
    1027	 * It means that the work is being used in a wrong way.
   1028	 */
   1029	if (WARN_ON_ONCE(!worker))
   1030		return;
   1031
   1032	raw_spin_lock_irqsave(&worker->lock, flags);
   1033	/* Work must not be used with >1 worker, see kthread_queue_work(). */
   1034	WARN_ON_ONCE(work->worker != worker);
   1035
   1036	/* Move the work from worker->delayed_work_list. */
   1037	WARN_ON_ONCE(list_empty(&work->node));
   1038	list_del_init(&work->node);
   1039	if (!work->canceling)
   1040		kthread_insert_work(worker, work, &worker->work_list);
   1041
   1042	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1043}
   1044EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
   1045
   1046static void __kthread_queue_delayed_work(struct kthread_worker *worker,
   1047					 struct kthread_delayed_work *dwork,
   1048					 unsigned long delay)
   1049{
   1050	struct timer_list *timer = &dwork->timer;
   1051	struct kthread_work *work = &dwork->work;
   1052
   1053	WARN_ON_FUNCTION_MISMATCH(timer->function,
   1054				  kthread_delayed_work_timer_fn);
   1055
   1056	/*
   1057	 * If @delay is 0, queue @dwork->work immediately.  This is for
   1058	 * both optimization and correctness.  The earliest @timer can
   1059	 * expire is on the closest next tick and delayed_work users depend
   1060	 * on that there's no such delay when @delay is 0.
   1061	 */
   1062	if (!delay) {
   1063		kthread_insert_work(worker, work, &worker->work_list);
   1064		return;
   1065	}
   1066
   1067	/* Be paranoid and try to detect possible races already now. */
   1068	kthread_insert_work_sanity_check(worker, work);
   1069
   1070	list_add(&work->node, &worker->delayed_work_list);
   1071	work->worker = worker;
   1072	timer->expires = jiffies + delay;
   1073	add_timer(timer);
   1074}
   1075
   1076/**
   1077 * kthread_queue_delayed_work - queue the associated kthread work
   1078 *	after a delay.
   1079 * @worker: target kthread_worker
   1080 * @dwork: kthread_delayed_work to queue
   1081 * @delay: number of jiffies to wait before queuing
   1082 *
   1083 * If the work has not been pending it starts a timer that will queue
   1084 * the work after the given @delay. If @delay is zero, it queues the
   1085 * work immediately.
   1086 *
    1087 * Return: %false if @work was already pending, i.e. either its timer was
    1088 * running or the work was already queued. It returns %true
    1089 * otherwise.
   1090 */
   1091bool kthread_queue_delayed_work(struct kthread_worker *worker,
   1092				struct kthread_delayed_work *dwork,
   1093				unsigned long delay)
   1094{
   1095	struct kthread_work *work = &dwork->work;
   1096	unsigned long flags;
   1097	bool ret = false;
   1098
   1099	raw_spin_lock_irqsave(&worker->lock, flags);
   1100
   1101	if (!queuing_blocked(worker, work)) {
   1102		__kthread_queue_delayed_work(worker, dwork, delay);
   1103		ret = true;
   1104	}
   1105
   1106	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1107	return ret;
   1108}
   1109EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
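
/*
 * Illustrative sketch (not part of this file): arming delayed work on a
 * kthread_worker. demo_dwork, demo_arm_delayed and demo_work_fn (above) are
 * hypothetical; demo_dwork must stay alive until cancelled or flushed.
 */
static struct kthread_delayed_work demo_dwork;

static void demo_arm_delayed(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&demo_dwork, demo_work_fn);

	/* runs demo_work_fn() on the worker roughly one second from now */
	kthread_queue_delayed_work(worker, &demo_dwork,
				   msecs_to_jiffies(1000));
}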
   1110
   1111struct kthread_flush_work {
   1112	struct kthread_work	work;
   1113	struct completion	done;
   1114};
   1115
   1116static void kthread_flush_work_fn(struct kthread_work *work)
   1117{
   1118	struct kthread_flush_work *fwork =
   1119		container_of(work, struct kthread_flush_work, work);
   1120	complete(&fwork->done);
   1121}
   1122
   1123/**
   1124 * kthread_flush_work - flush a kthread_work
   1125 * @work: work to flush
   1126 *
   1127 * If @work is queued or executing, wait for it to finish execution.
   1128 */
   1129void kthread_flush_work(struct kthread_work *work)
   1130{
   1131	struct kthread_flush_work fwork = {
   1132		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
   1133		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
   1134	};
   1135	struct kthread_worker *worker;
   1136	bool noop = false;
   1137
   1138	worker = work->worker;
   1139	if (!worker)
   1140		return;
   1141
   1142	raw_spin_lock_irq(&worker->lock);
   1143	/* Work must not be used with >1 worker, see kthread_queue_work(). */
   1144	WARN_ON_ONCE(work->worker != worker);
   1145
   1146	if (!list_empty(&work->node))
   1147		kthread_insert_work(worker, &fwork.work, work->node.next);
   1148	else if (worker->current_work == work)
   1149		kthread_insert_work(worker, &fwork.work,
   1150				    worker->work_list.next);
   1151	else
   1152		noop = true;
   1153
   1154	raw_spin_unlock_irq(&worker->lock);
   1155
   1156	if (!noop)
   1157		wait_for_completion(&fwork.done);
   1158}
   1159EXPORT_SYMBOL_GPL(kthread_flush_work);
   1160
   1161/*
    1162 * Make sure that the timer is neither set nor running and can no
    1163 * longer manipulate the work list_head.
    1164 *
    1165 * The function is called under worker->lock. The lock is temporarily
    1166 * released but the timer can't be set again in the meantime.
   1167 */
   1168static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
   1169					      unsigned long *flags)
   1170{
   1171	struct kthread_delayed_work *dwork =
   1172		container_of(work, struct kthread_delayed_work, work);
   1173	struct kthread_worker *worker = work->worker;
   1174
   1175	/*
   1176	 * del_timer_sync() must be called to make sure that the timer
    1177	 * callback is not running. The lock must be temporarily released
   1178	 * to avoid a deadlock with the callback. In the meantime,
   1179	 * any queuing is blocked by setting the canceling counter.
   1180	 */
   1181	work->canceling++;
   1182	raw_spin_unlock_irqrestore(&worker->lock, *flags);
   1183	del_timer_sync(&dwork->timer);
   1184	raw_spin_lock_irqsave(&worker->lock, *flags);
   1185	work->canceling--;
   1186}
   1187
   1188/*
   1189 * This function removes the work from the worker queue.
   1190 *
   1191 * It is called under worker->lock. The caller must make sure that
   1192 * the timer used by delayed work is not running, e.g. by calling
   1193 * kthread_cancel_delayed_work_timer().
   1194 *
   1195 * The work might still be in use when this function finishes. See the
    1196 * current_work being processed by the worker.
   1197 *
   1198 * Return: %true if @work was pending and successfully canceled,
   1199 *	%false if @work was not pending
   1200 */
   1201static bool __kthread_cancel_work(struct kthread_work *work)
   1202{
   1203	/*
   1204	 * Try to remove the work from a worker list. It might either
   1205	 * be from worker->work_list or from worker->delayed_work_list.
   1206	 */
   1207	if (!list_empty(&work->node)) {
   1208		list_del_init(&work->node);
   1209		return true;
   1210	}
   1211
   1212	return false;
   1213}
   1214
   1215/**
   1216 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
   1217 * @worker: kthread worker to use
   1218 * @dwork: kthread delayed work to queue
   1219 * @delay: number of jiffies to wait before queuing
   1220 *
   1221 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
   1222 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
   1223 * @work is guaranteed to be queued immediately.
   1224 *
   1225 * Return: %false if @dwork was idle and queued, %true otherwise.
   1226 *
   1227 * A special case is when the work is being canceled in parallel.
   1228 * It might be caused either by the real kthread_cancel_delayed_work_sync()
   1229 * or yet another kthread_mod_delayed_work() call. We let the other command
   1230 * win and return %true here. The return value can be used for reference
   1231 * counting and the number of queued works stays the same. Anyway, the caller
   1232 * is supposed to synchronize these operations a reasonable way.
   1233 *
   1234 * This function is safe to call from any context including IRQ handler.
   1235 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
   1236 * for details.
   1237 */
   1238bool kthread_mod_delayed_work(struct kthread_worker *worker,
   1239			      struct kthread_delayed_work *dwork,
   1240			      unsigned long delay)
   1241{
   1242	struct kthread_work *work = &dwork->work;
   1243	unsigned long flags;
   1244	int ret;
   1245
   1246	raw_spin_lock_irqsave(&worker->lock, flags);
   1247
   1248	/* Do not bother with canceling when never queued. */
   1249	if (!work->worker) {
   1250		ret = false;
   1251		goto fast_queue;
   1252	}
   1253
   1254	/* Work must not be used with >1 worker, see kthread_queue_work() */
   1255	WARN_ON_ONCE(work->worker != worker);
   1256
   1257	/*
    1258	 * Temporarily cancel the work but do not fight with another command
   1259	 * that is canceling the work as well.
   1260	 *
   1261	 * It is a bit tricky because of possible races with another
   1262	 * mod_delayed_work() and cancel_delayed_work() callers.
   1263	 *
   1264	 * The timer must be canceled first because worker->lock is released
   1265	 * when doing so. But the work can be removed from the queue (list)
   1266	 * only when it can be queued again so that the return value can
   1267	 * be used for reference counting.
   1268	 */
   1269	kthread_cancel_delayed_work_timer(work, &flags);
   1270	if (work->canceling) {
   1271		/* The number of works in the queue does not change. */
   1272		ret = true;
   1273		goto out;
   1274	}
   1275	ret = __kthread_cancel_work(work);
   1276
   1277fast_queue:
   1278	__kthread_queue_delayed_work(worker, dwork, delay);
   1279out:
   1280	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1281	return ret;
   1282}
   1283EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
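
/*
 * Illustrative sketch (not part of this file): pushing the timeout of the
 * delayed work above further out. Returns %true if demo_dwork was already
 * pending. demo_rearm_delayed is hypothetical.
 */
static bool demo_rearm_delayed(struct kthread_worker *worker)
{
	return kthread_mod_delayed_work(worker, &demo_dwork,
					msecs_to_jiffies(2000));
}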
   1284
   1285static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
   1286{
   1287	struct kthread_worker *worker = work->worker;
   1288	unsigned long flags;
   1289	int ret = false;
   1290
   1291	if (!worker)
   1292		goto out;
   1293
   1294	raw_spin_lock_irqsave(&worker->lock, flags);
   1295	/* Work must not be used with >1 worker, see kthread_queue_work(). */
   1296	WARN_ON_ONCE(work->worker != worker);
   1297
   1298	if (is_dwork)
   1299		kthread_cancel_delayed_work_timer(work, &flags);
   1300
   1301	ret = __kthread_cancel_work(work);
   1302
   1303	if (worker->current_work != work)
   1304		goto out_fast;
   1305
   1306	/*
   1307	 * The work is in progress and we need to wait with the lock released.
   1308	 * In the meantime, block any queuing by setting the canceling counter.
   1309	 */
   1310	work->canceling++;
   1311	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1312	kthread_flush_work(work);
   1313	raw_spin_lock_irqsave(&worker->lock, flags);
   1314	work->canceling--;
   1315
   1316out_fast:
   1317	raw_spin_unlock_irqrestore(&worker->lock, flags);
   1318out:
   1319	return ret;
   1320}
   1321
   1322/**
   1323 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
   1324 * @work: the kthread work to cancel
   1325 *
   1326 * Cancel @work and wait for its execution to finish.  This function
   1327 * can be used even if the work re-queues itself. On return from this
   1328 * function, @work is guaranteed to be not pending or executing on any CPU.
   1329 *
   1330 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
   1331 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
   1332 *
   1333 * The caller must ensure that the worker on which @work was last
   1334 * queued can't be destroyed before this function returns.
   1335 *
   1336 * Return: %true if @work was pending, %false otherwise.
   1337 */
   1338bool kthread_cancel_work_sync(struct kthread_work *work)
   1339{
   1340	return __kthread_cancel_work_sync(work, false);
   1341}
   1342EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
   1343
   1344/**
   1345 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
   1346 *	wait for it to finish.
   1347 * @dwork: the kthread delayed work to cancel
   1348 *
   1349 * This is kthread_cancel_work_sync() for delayed works.
   1350 *
   1351 * Return: %true if @dwork was pending, %false otherwise.
   1352 */
   1353bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
   1354{
   1355	return __kthread_cancel_work_sync(&dwork->work, true);
   1356}
   1357EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
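
/*
 * Illustrative sketch (not part of this file): tearing the delayed work above
 * down safely before demo_dwork or its worker goes away. demo_disarm_delayed
 * is hypothetical.
 */
static void demo_disarm_delayed(void)
{
	/* removes a pending timer and waits for a running demo_work_fn() */
	kthread_cancel_delayed_work_sync(&demo_dwork);
}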
   1358
   1359/**
   1360 * kthread_flush_worker - flush all current works on a kthread_worker
   1361 * @worker: worker to flush
   1362 *
   1363 * Wait until all currently executing or pending works on @worker are
   1364 * finished.
   1365 */
   1366void kthread_flush_worker(struct kthread_worker *worker)
   1367{
   1368	struct kthread_flush_work fwork = {
   1369		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
   1370		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
   1371	};
   1372
   1373	kthread_queue_work(worker, &fwork.work);
   1374	wait_for_completion(&fwork.done);
   1375}
   1376EXPORT_SYMBOL_GPL(kthread_flush_worker);
   1377
   1378/**
   1379 * kthread_destroy_worker - destroy a kthread worker
   1380 * @worker: worker to be destroyed
   1381 *
   1382 * Flush and destroy @worker.  The simple flush is enough because the kthread
   1383 * worker API is used only in trivial scenarios.  There are no multi-step state
   1384 * machines needed.
   1385 */
   1386void kthread_destroy_worker(struct kthread_worker *worker)
   1387{
   1388	struct task_struct *task;
   1389
   1390	task = worker->task;
   1391	if (WARN_ON(!task))
   1392		return;
   1393
   1394	kthread_flush_worker(worker);
   1395	kthread_stop(task);
   1396	WARN_ON(!list_empty(&worker->work_list));
   1397	kfree(worker);
   1398}
   1399EXPORT_SYMBOL(kthread_destroy_worker);
   1400
   1401/**
   1402 * kthread_use_mm - make the calling kthread operate on an address space
   1403 * @mm: address space to operate on
   1404 */
   1405void kthread_use_mm(struct mm_struct *mm)
   1406{
   1407	struct mm_struct *active_mm;
   1408	struct task_struct *tsk = current;
   1409
   1410	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
   1411	WARN_ON_ONCE(tsk->mm);
   1412
   1413	task_lock(tsk);
   1414	/* Hold off tlb flush IPIs while switching mm's */
   1415	local_irq_disable();
   1416	active_mm = tsk->active_mm;
   1417	if (active_mm != mm) {
   1418		mmgrab(mm);
   1419		tsk->active_mm = mm;
   1420	}
   1421	tsk->mm = mm;
   1422	membarrier_update_current_mm(mm);
   1423	switch_mm_irqs_off(active_mm, mm, tsk);
   1424	local_irq_enable();
   1425	task_unlock(tsk);
   1426#ifdef finish_arch_post_lock_switch
   1427	finish_arch_post_lock_switch();
   1428#endif
   1429
   1430	/*
   1431	 * When a kthread starts operating on an address space, the loop
   1432	 * in membarrier_{private,global}_expedited() may not observe
    1433	 * the update to tsk->mm and may not issue an IPI. Membarrier requires a
   1434	 * memory barrier after storing to tsk->mm, before accessing
   1435	 * user-space memory. A full memory barrier for membarrier
   1436	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
   1437	 * mmdrop(), or explicitly with smp_mb().
   1438	 */
   1439	if (active_mm != mm)
   1440		mmdrop(active_mm);
   1441	else
   1442		smp_mb();
   1443}
   1444EXPORT_SYMBOL_GPL(kthread_use_mm);
   1445
   1446/**
   1447 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
   1448 * @mm: address space to operate on
   1449 */
   1450void kthread_unuse_mm(struct mm_struct *mm)
   1451{
   1452	struct task_struct *tsk = current;
   1453
   1454	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
   1455	WARN_ON_ONCE(!tsk->mm);
   1456
   1457	task_lock(tsk);
   1458	/*
   1459	 * When a kthread stops operating on an address space, the loop
   1460	 * in membarrier_{private,global}_expedited() may not observe
    1461	 * the update to tsk->mm and may not issue an IPI. Membarrier requires a
   1462	 * memory barrier after accessing user-space memory, before
   1463	 * clearing tsk->mm.
   1464	 */
   1465	smp_mb__after_spinlock();
   1466	sync_mm_rss(mm);
   1467	local_irq_disable();
   1468	tsk->mm = NULL;
   1469	membarrier_update_current_mm(NULL);
   1470	/* active_mm is still 'mm' */
   1471	enter_lazy_tlb(mm, tsk);
   1472	local_irq_enable();
   1473	task_unlock(tsk);
   1474}
   1475EXPORT_SYMBOL_GPL(kthread_unuse_mm);
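
/*
 * Illustrative sketch (not part of this file): the bracketing pattern a
 * kthread uses to temporarily operate on a user address space, e.g. to do a
 * copy on behalf of a user task. demo_copy_to_user_mm is hypothetical; the
 * caller must be a kthread without an mm and must hold a reference on @mm.
 */
static int demo_copy_to_user_mm(struct mm_struct *mm, void __user *uaddr,
				const void *buf, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);		/* adopt the user address space */
	if (copy_to_user(uaddr, buf, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);		/* drop back to the lazy mm */

	return ret;
}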
   1476
   1477#ifdef CONFIG_BLK_CGROUP
   1478/**
   1479 * kthread_associate_blkcg - associate blkcg to current kthread
   1480 * @css: the cgroup info
   1481 *
   1482 * Current thread must be a kthread. The thread is running jobs on behalf of
    1483 * other threads. In some cases, we expect the jobs to carry the cgroup info of
    1484 * the original threads instead of that of the current thread. This function
    1485 * stores the original thread's cgroup info in the current kthread's context for
    1486 * later retrieval.
   1487 */
   1488void kthread_associate_blkcg(struct cgroup_subsys_state *css)
   1489{
   1490	struct kthread *kthread;
   1491
   1492	if (!(current->flags & PF_KTHREAD))
   1493		return;
   1494	kthread = to_kthread(current);
   1495	if (!kthread)
   1496		return;
   1497
   1498	if (kthread->blkcg_css) {
   1499		css_put(kthread->blkcg_css);
   1500		kthread->blkcg_css = NULL;
   1501	}
   1502	if (css) {
   1503		css_get(css);
   1504		kthread->blkcg_css = css;
   1505	}
   1506}
   1507EXPORT_SYMBOL(kthread_associate_blkcg);
   1508
   1509/**
   1510 * kthread_blkcg - get associated blkcg css of current kthread
   1511 *
   1512 * Current thread must be a kthread.
   1513 */
   1514struct cgroup_subsys_state *kthread_blkcg(void)
   1515{
   1516	struct kthread *kthread;
   1517
   1518	if (current->flags & PF_KTHREAD) {
   1519		kthread = to_kthread(current);
   1520		if (kthread)
   1521			return kthread->blkcg_css;
   1522	}
   1523	return NULL;
   1524}
   1525#endif