3.8-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 1 Mar 2013 17:15:38 +0000 (09:15 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 1 Mar 2013 17:15:38 +0000 (09:15 -0800)
added patches:
workqueue-consider-work-function-when-searching-for-busy-work-items.patch

queue-3.8/series
queue-3.8/workqueue-consider-work-function-when-searching-for-busy-work-items.patch [new file with mode: 0644]

diff --git a/queue-3.8/series b/queue-3.8/series
index b18d86bee1a0d2fb5c64d33c311253f12d437039..cb76e1cf57775b6ccd3743a33a5ba100ca2f07c7 100644
--- a/queue-3.8/series
+++ b/queue-3.8/series
@@ -57,3 +57,4 @@ svcrpc-fix-rpc-server-shutdown-races.patch
 hid-add-support-for-sony-rf-receiver-with-usb-product-id-0x0374.patch
 hid-clean-up-quirk-for-sony-rf-receivers.patch
 fuse-don-t-warn-when-nlink-is-zero.patch
+workqueue-consider-work-function-when-searching-for-busy-work-items.patch
diff --git a/queue-3.8/workqueue-consider-work-function-when-searching-for-busy-work-items.patch b/queue-3.8/workqueue-consider-work-function-when-searching-for-busy-work-items.patch
new file mode 100644
index 0000000..deac34e
--- /dev/null
+++ b/queue-3.8/workqueue-consider-work-function-when-searching-for-busy-work-items.patch
@@ -0,0 +1,136 @@
+From a2c1c57be8d9fd5b716113c8991d3d702eeacf77 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 18 Dec 2012 10:35:02 -0800
+Subject: workqueue: consider work function when searching for busy work items
+
+From: Tejun Heo <tj@kernel.org>
+
+commit a2c1c57be8d9fd5b716113c8991d3d702eeacf77 upstream.
+
+To avoid executing the same work item concurrently, workqueue hashes
+currently busy workers according to their current work items and looks
+up the table when it wants to execute a new work item.  If there
+already is a worker which is executing the new work item, the new item
+is queued to the found worker so that it gets executed only after the
+current execution finishes.
+
+Unfortunately, a work item may be freed while being executed and thus
+recycled for different purposes.  If it gets recycled for a different
+work item and queued while the previous execution is still in
+progress, workqueue may make the new work item wait for the old one
+although the two aren't really related in any way.
+
+In extreme cases, this false dependency may lead to deadlock although
+it's extremely unlikely given that there aren't too many self-freeing
+work item users and they usually don't wait for other work items.
+
+To alleviate the problem, record the current work function in each
+busy worker and match it together with the work item address in
+find_worker_executing_work().  While this isn't complete, it ensures
+that unrelated work items don't interact with each other and in the
+very unlikely case where a twisted wq user triggers it, it's always
+onto itself, making the culprit easy to spot.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Andrey Isakov <andy51@gmx.ru>
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=51701
+Cc: stable@vger.kernel.org
+
+---
+ kernel/workqueue.c |   36 +++++++++++++++++++++++++++++-------
+ 1 file changed, 29 insertions(+), 7 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -138,6 +138,7 @@ struct worker {
+       };
+       struct work_struct      *current_work;  /* L: work being processed */
++      work_func_t             current_func;   /* L: current_work's fn */
+       struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+       struct list_head        scheduled;      /* L: scheduled works */
+       struct task_struct      *task;          /* I: worker task */
+@@ -910,7 +911,8 @@ static struct worker *__find_worker_exec
+       struct hlist_node *tmp;
+       hlist_for_each_entry(worker, tmp, bwh, hentry)
+-              if (worker->current_work == work)
++              if (worker->current_work == work &&
++                  worker->current_func == work->func)
+                       return worker;
+       return NULL;
+ }
+@@ -920,9 +922,27 @@ static struct worker *__find_worker_exec
+  * @gcwq: gcwq of interest
+  * @work: work to find worker for
+  *
+- * Find a worker which is executing @work on @gcwq.  This function is
+- * identical to __find_worker_executing_work() except that this
+- * function calculates @bwh itself.
++ * Find a worker which is executing @work on @gcwq by searching
++ * @gcwq->busy_hash which is keyed by the address of @work.  For a worker
++ * to match, its current execution should match the address of @work and
++ * its work function.  This is to avoid unwanted dependency between
++ * unrelated work executions through a work item being recycled while still
++ * being executed.
++ *
++ * This is a bit tricky.  A work item may be freed once its execution
++ * starts and nothing prevents the freed area from being recycled for
++ * another work item.  If the same work item address ends up being reused
++ * before the original execution finishes, workqueue will identify the
++ * recycled work item as currently executing and make it wait until the
++ * current execution finishes, introducing an unwanted dependency.
++ *
++ * This function checks the work item address, work function and workqueue
++ * to avoid false positives.  Note that this isn't complete as one may
++ * construct a work function which can introduce dependency onto itself
++ * through a recycled work item.  Well, if somebody wants to shoot oneself
++ * in the foot that badly, there's only so much we can do, and if such
++ * deadlock actually occurs, it should be easy to locate the culprit work
++ * function.
+  *
+  * CONTEXT:
+  * spin_lock_irq(gcwq->lock).
+@@ -2168,7 +2188,6 @@ __acquires(&gcwq->lock)
+       struct global_cwq *gcwq = pool->gcwq;
+       struct hlist_head *bwh = busy_worker_head(gcwq, work);
+       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+-      work_func_t f = work->func;
+       int work_color;
+       struct worker *collision;
+ #ifdef CONFIG_LOCKDEP
+@@ -2208,6 +2227,7 @@ __acquires(&gcwq->lock)
+       debug_work_deactivate(work);
+       hlist_add_head(&worker->hentry, bwh);
+       worker->current_work = work;
++      worker->current_func = work->func;
+       worker->current_cwq = cwq;
+       work_color = get_work_color(work);
+@@ -2240,7 +2260,7 @@ __acquires(&gcwq->lock)
+       lock_map_acquire_read(&cwq->wq->lockdep_map);
+       lock_map_acquire(&lockdep_map);
+       trace_workqueue_execute_start(work);
+-      f(work);
++      worker->current_func(work);
+       /*
+        * While we must be careful to not use "work" after this, the trace
+        * point will only record its address.
+@@ -2252,7 +2272,8 @@ __acquires(&gcwq->lock)
+       if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+               pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+                      "     last function: %pf\n",
+-                     current->comm, preempt_count(), task_pid_nr(current), f);
++                     current->comm, preempt_count(), task_pid_nr(current),
++                     worker->current_func);
+               debug_show_held_locks(current);
+               dump_stack();
+       }
+@@ -2266,6 +2287,7 @@ __acquires(&gcwq->lock)
+       /* we're done with it, release */
+       hlist_del_init(&worker->hentry);
+       worker->current_work = NULL;
++      worker->current_func = NULL;
+       worker->current_cwq = NULL;
+       cwq_dec_nr_in_flight(cwq, work_color);
+ }
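
A note on the approach (not part of the upstream patch): the sketch below is a minimal, self-contained userspace C program illustrating why matching busy workers on the work item address alone breaks down once items can be freed and recycled, and how also matching the recorded work function avoids the false dependency described in the commit message. All names here (struct work, struct worker, find_busy, fn_a, fn_b) are invented for illustration and are only loose analogues of the kernel's actual types.

#include <stdio.h>
#include <stddef.h>

typedef void (*work_func_t)(void *);

struct work {
	work_func_t func;
};

struct worker {
	struct work *current_work;	/* address of the item being executed */
	work_func_t  current_func;	/* function recorded when execution began */
};

/* Loose analogue of __find_worker_executing_work(): match both the
 * item address and the work function, as the patch above does. */
static struct worker *find_busy(struct worker *workers, size_t n,
				struct work *work)
{
	for (size_t i = 0; i < n; i++)
		if (workers[i].current_work == work &&
		    workers[i].current_func == work->func)
			return &workers[i];
	return NULL;
}

static void fn_a(void *arg) { (void)arg; }
static void fn_b(void *arg) { (void)arg; }

int main(void)
{
	struct work item = { .func = fn_a };

	/* A worker began executing &item while it still pointed at fn_a. */
	struct worker busy[1] = {
		{ .current_work = &item, .current_func = fn_a },
	};

	/* The item is freed and recycled for an unrelated work function
	 * while the original execution is still in flight. */
	item.func = fn_b;

	/* Address-only matching would report a collision here and queue
	 * the unrelated item behind the old execution; matching the
	 * function as well correctly finds no busy worker. */
	printf("collision: %s\n", find_busy(busy, 1, &item) ? "yes" : "no");
	return 0;
}

Built with any C99 compiler this prints "collision: no"; dropping the current_func comparison from find_busy() makes it print "collision: yes", which is exactly the false dependency the patch removes.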