author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-06-06 10:45:37 -0700
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-06-06 10:45:37 -0700
commit    8dd06ef34b6e2f41b29fbf5fc1663780f2524285 (patch)
tree      d59152f37ea7b36c3cf1778e20b85239f89c4e77 /kernel/workqueue.c
parent    642aa86eaf8f1e6fe894f20fd7f12f0db52ee03c (diff)
parent    d0ac7079d5fcaaaf3eb99c197a08ac1b399754f4 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 5.8 merge window.
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cfc923558e04..4e01c448b4b4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 	rcu_read_lock();
 retry:
-	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
 	/* pwq which will be used unless @work is executing elsewhere */
-	if (!(wq->flags & WQ_UNBOUND))
-		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-	else
+	if (wq->flags & WQ_UNBOUND) {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+	} else {
+		if (req_cpu == WORK_CPU_UNBOUND)
+			cpu = raw_smp_processor_id();
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	}
 
 	/*
 	 * If @work was previously on a different pool, it might still be
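Taken out of diff form, the new selection logic in __queue_work() reads as below. This is a minimal sketch for illustration only: pick_pwq() is a hypothetical wrapper name, while WQ_UNBOUND, WORK_CPU_UNBOUND, wq_select_unbound_cpu(), unbound_pwq_by_node(), per_cpu_ptr() and raw_smp_processor_id() are the kernel symbols already visible in the hunk above. The point of the restructuring is that wq_select_unbound_cpu(), which honors the unbound cpumask and may return a CPU other than the local one, is now consulted only for WQ_UNBOUND workqueues; for per-CPU (bound) workqueues, WORK_CPU_UNBOUND simply resolves to the local CPU.

	/*
	 * Sketch only: pick_pwq() is a hypothetical helper, not a
	 * function in kernel/workqueue.c.
	 */
	static struct pool_workqueue *pick_pwq(struct workqueue_struct *wq,
					       int req_cpu)
	{
		int cpu = req_cpu;

		if (wq->flags & WQ_UNBOUND) {
			/* unbound work may run anywhere the cpumask allows */
			if (req_cpu == WORK_CPU_UNBOUND)
				cpu = wq_select_unbound_cpu(raw_smp_processor_id());
			return unbound_pwq_by_node(wq, cpu_to_node(cpu));
		}

		/* bound work: WORK_CPU_UNBOUND now means "the local CPU" */
		if (req_cpu == WORK_CPU_UNBOUND)
			cpu = raw_smp_processor_id();
		return per_cpu_ptr(wq->cpu_pwqs, cpu);
	}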
@@ -2266,7 +2268,7 @@ __acquires(&pool->lock)
 	 * While we must be careful to not use "work" after this, the trace
 	 * point will only record its address.
 	 */
-	trace_workqueue_execute_end(work);
+	trace_workqueue_execute_end(work, worker->current_func);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&pwq->wq->lockdep_map);
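This hunk extends the workqueue_execute_end tracepoint to record the callback alongside the work struct's address. Because @work must not be dereferenced once the callback has returned (it may already have been freed or requeued), the function pointer is captured from worker->current_func rather than read back out of @work. A trace event taking the extra argument could be declared roughly as follows; this is a sketch of the TRACE_EVENT pattern, not a verbatim copy of include/trace/events/workqueue.h:

	TRACE_EVENT(workqueue_execute_end,

		TP_PROTO(struct work_struct *work, work_func_t function),

		TP_ARGS(work, function),

		TP_STRUCT__entry(
			__field(void *, work)
			__field(void *, function)
		),

		TP_fast_assign(
			/* address only, never dereferenced */
			__entry->work     = work;
			/* resolved to a symbol via %ps at print time */
			__entry->function = function;
		),

		TP_printk("work struct %p: function %ps",
			  __entry->work, __entry->function)
	);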
@@ -2280,7 +2282,7 @@ __acquires(&pool->lock)
 	}
 
 	/*
-	 * The following prevents a kworker from hogging CPU on !PREEMPT
+	 * The following prevents a kworker from hogging CPU on !PREEMPTION
 	 * kernels, where a requeueing work item waiting for something to
 	 * happen could deadlock with stop_machine as such work item could
 	 * indefinitely requeue itself while all other CPUs are trapped in
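In context, this comment sits directly above a cond_resched() call in process_one_work(). The hazard it describes is easy to see in miniature: on a kernel built without preemption, a work item that keeps requeueing itself gives the kworker no natural scheduling point, so a concurrent stop_machine() waiting for every CPU can stall indefinitely. A hypothetical self-requeueing work item of that shape:

	/*
	 * Hypothetical example only: poll_until_ready() and hw_ready()
	 * are made-up names illustrating the pattern the comment above
	 * warns about.
	 */
	static void poll_until_ready(struct work_struct *work)
	{
		if (!hw_ready()) {
			/*
			 * Requeue and retry. Without the explicit
			 * cond_resched() in process_one_work(), a
			 * !PREEMPTION kworker running this back-to-back
			 * would never yield the CPU.
			 */
			queue_work(system_wq, work);
		}
	}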