--- /dev/null
+From lists@kaiser.cx Tue Feb 3 14:46:37 2015
+From: Martin Kaiser <lists@kaiser.cx>
+Date: Fri, 30 Jan 2015 15:01:29 +0100
+Subject: gpio: squelch a compiler warning
+To: gregkh@linux-foundation.org
+Cc: linux-kernel@vger.kernel.org
+Message-ID: <20150130140129.GA19503@viti.kaiser.cx>
+
+From: Martin Kaiser <lists@kaiser.cx>
+
+drivers/gpio/gpiolib-of.c: In function 'of_gpiochip_find_and_xlate':
+drivers/gpio/gpiolib-of.c:51:21: warning: assignment makes integer from
+pointer without a cast [enabled by default]
+ gg_data->out_gpio = ERR_PTR(ret);
+ ^
+This was introduced by commit d1c3449160df60fac4abb56f0ba0a3784305e43e.
+
+The upstream kernel changed the type of out_gpio from int to
+struct gpio_desc * as part of a larger refactoring that wasn't
+backported; in this tree, out_gpio is still an int, so store the error
+code directly instead of wrapping it in ERR_PTR().
+
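+As background (not part of the patch), the ERR_PTR()/PTR_ERR() helpers
+from <linux/err.h> encode an errno value in a pointer, which is why the
+upstream code can return an error through a struct gpio_desc * field
+while this tree's int field needs the raw error code.  A minimal
+sketch:
+
+	#include <linux/err.h>
+
+	struct gpio_desc *desc;
+	int err;
+
+	desc = ERR_PTR(-ENODEV);	/* encode errno as a pointer value */
+	if (IS_ERR(desc))
+		err = PTR_ERR(desc);	/* decode it back to an errno int */
+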
+Signed-off-by: Martin Kaiser <martin@kaiser.cx>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib-of.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(st
+ * Return true to stop looking and return the translation
+ * error via out_gpio
+ */
+- gg_data->out_gpio = ERR_PTR(ret);
++ gg_data->out_gpio = ret;
+ return true;
+ }
+
--- /dev/null
+From 29187a9eeaf362d8422e62e17a22a6e115277a49 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 16 Jan 2015 14:21:16 -0500
+Subject: workqueue: fix subtle pool management issue which can stall whole worker_pool
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 29187a9eeaf362d8422e62e17a22a6e115277a49 upstream.
+
+A worker_pool's forward progress is guaranteed by the fact that the
+last idle worker assumes the manager role to create more workers and
+summon the rescuers if creating workers doesn't succeed in a timely
+manner before proceeding to execute work items.
+
+This manager role is implemented in manage_workers(), whose return
+value indicates whether the worker may proceed to work item execution.
+This is necessary because multiple workers may contend for the manager
+role and, if there already is a manager, the others should proceed to
+work item execution.
+
+Unfortunately, the function also indicates that the worker may proceed
+to work item execution if need_to_create_worker() is false at the head
+of the function. need_to_create_worker() tests the following
+conditions.
+
+ pending work items && !nr_running && !nr_idle
+
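+Expressed as code, the check is roughly the following (a condensed
+sketch; in the tree it is split across need_more_worker() and
+may_start_working()):
+
+	static bool need_to_create_worker(struct worker_pool *pool)
+	{
+		return !list_empty(&pool->worklist) &&		/* pending work items */
+		       !atomic_read(&pool->nr_running) &&	/* no worker running */
+		       !pool->nr_idle;				/* no idle worker left */
+	}
+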
+The first and third conditions are protected by pool->lock and thus
+won't change while holding pool->lock; however, nr_running can change
+asynchronously as other workers block and resume.  While it's likely
+to be zero, as someone woke this worker up in the first place, some
+other workers could have become runnable in between, making it
+non-zero.
+
+If this happens, manage_workers() could return false even with zero
+nr_idle, making the worker, the last idle one, proceed to execute work
+items.  If all workers of the pool then end up blocking on a resource
+which can only be released by a work item which is pending on that
+pool, the whole pool can deadlock, as there's no one to create more
+workers or summon the rescuers.
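+An illustrative sequence of the failure (hypothetical timeline, not
+from the report):
+
+	1. Worker W, the last idle worker, is woken for a pending work item.
+	2. Another worker briefly becomes runnable, so nr_running != 0 and
+	   manage_workers() returns false without creating a new worker.
+	3. W executes a work item and blocks on resource R.
+	4. R can only be released by a work item still pending on the pool,
+	   but nr_idle == 0 and nobody holds the manager role, so no new
+	   worker or rescuer ever arrives: the pool is deadlocked.
+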
+
+This patch fixes the problem by removing the early exit condition from
+maybe_create_worker() and making manage_workers() return false iff
+there's already another manager, which ensures that the last worker
+doesn't start executing work items.
+
+We could leave the early exit condition alone and just ignore the
+return value, but the only reason it was put there is that
+manage_workers() used to perform both creation and destruction of
+workers, and thus the function could be invoked while the pool was
+trying to reduce the number of workers.  Now that manage_workers() is
+called only when more workers are needed, the only case in which this
+early exit condition triggers is a rare race, rendering it pointless.
+
+Tested with a simulated workload and modified workqueue code which
+trigger the pool deadlock reliably without this patch.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Eric Sandeen <sandeen@sandeen.net>
+Link: http://lkml.kernel.org/g/54B019F4.8030009@sandeen.net
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c | 39 +++++++++++++--------------------------
+ 1 file changed, 13 insertions(+), 26 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1934,17 +1934,13 @@ static void pool_mayday_timeout(unsigned
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations. Called only from
+ * manager.
+- *
+- * RETURNS:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+ */
+-static bool maybe_create_worker(struct worker_pool *pool)
++static void maybe_create_worker(struct worker_pool *pool)
+ __releases(&pool->lock)
+ __acquires(&pool->lock)
+ {
+ if (!need_to_create_worker(pool))
+- return false;
++ return;
+ restart:
+ spin_unlock_irq(&pool->lock);
+
+@@ -1961,7 +1957,7 @@ restart:
+ start_worker(worker);
+ if (WARN_ON_ONCE(need_to_create_worker(pool)))
+ goto restart;
+- return true;
++ return;
+ }
+
+ if (!need_to_create_worker(pool))
+@@ -1978,7 +1974,7 @@ restart:
+ spin_lock_irq(&pool->lock);
+ if (need_to_create_worker(pool))
+ goto restart;
+- return true;
++ return;
+ }
+
+ /**
+@@ -1991,15 +1987,9 @@ restart:
+ * LOCKING:
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Called only from manager.
+- *
+- * RETURNS:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+ */
+-static bool maybe_destroy_workers(struct worker_pool *pool)
++static void maybe_destroy_workers(struct worker_pool *pool)
+ {
+- bool ret = false;
+-
+ while (too_many_workers(pool)) {
+ struct worker *worker;
+ unsigned long expires;
+@@ -2013,10 +2003,7 @@ static bool maybe_destroy_workers(struct
+ }
+
+ destroy_worker(worker);
+- ret = true;
+ }
+-
+- return ret;
+ }
+
+ /**
+@@ -2036,13 +2023,14 @@ static bool maybe_destroy_workers(struct
+ * multiple times. Does GFP_KERNEL allocations.
+ *
+ * RETURNS:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
+- * multiple times. Does GFP_KERNEL allocations.
++ * %false if the pool doesn't need management and the caller can safely
++ * start processing works, %true if management function was performed and
++ * the conditions that the caller verified before calling the function may
++ * no longer be true.
+ */
+ static bool manage_workers(struct worker *worker)
+ {
+ struct worker_pool *pool = worker->pool;
+- bool ret = false;
+
+ /*
+ * Managership is governed by two mutexes - manager_arb and
+@@ -2066,7 +2054,7 @@ static bool manage_workers(struct worker
+ * manager_mutex.
+ */
+ if (!mutex_trylock(&pool->manager_arb))
+- return ret;
++ return false;
+
+ /*
+ * With manager arbitration won, manager_mutex would be free in
+@@ -2076,7 +2064,6 @@ static bool manage_workers(struct worker
+ spin_unlock_irq(&pool->lock);
+ mutex_lock(&pool->manager_mutex);
+ spin_lock_irq(&pool->lock);
+- ret = true;
+ }
+
+ pool->flags &= ~POOL_MANAGE_WORKERS;
+@@ -2085,12 +2072,12 @@ static bool manage_workers(struct worker
+ * Destroy and then create so that may_start_working() is true
+ * on return.
+ */
+- ret |= maybe_destroy_workers(pool);
+- ret |= maybe_create_worker(pool);
++ maybe_destroy_workers(pool);
++ maybe_create_worker(pool);
+
+ mutex_unlock(&pool->manager_mutex);
+ mutex_unlock(&pool->manager_arb);
+- return ret;
++ return true;
+ }
+
+ /**