--- /dev/null
+From 4fa3e78be7e985ca814ce2aa0c09cbee404efcf7 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas <bhelgaas@google.com>
+Date: Tue, 29 Jan 2013 16:44:27 -0700
+Subject: Driver core: treat unregistered bus_types as having no devices
+
+From: Bjorn Helgaas <bhelgaas@google.com>
+
+commit 4fa3e78be7e985ca814ce2aa0c09cbee404efcf7 upstream.
+
+A bus_type has a list of devices (klist_devices), but the list and the
+subsys_private structure that contains it are not initialized until the
+bus_type is registered with bus_register().
+
+The panic/reboot path has fixups that look up devices in pci_bus_type. If
+we panic before registering pci_bus_type, the bus_type exists but the list
+does not, so mach_reboot_fixups() trips over a null pointer and panics
+again:
+
+ mach_reboot_fixups
+ pci_get_device
+ ..
+ bus_find_device(&pci_bus_type, ...)
+ bus->p is NULL
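+
+As a hedged illustration from the caller's side (the fixup function and
+device ID 0x1234 below are made up for the example, not taken from the
+kernel):
+
+  #include <linux/pci.h>
+
+  static void early_reboot_fixup(void)
+  {
+          struct pci_dev *dev;
+
+          /*
+           * Before pci_bus_type is registered, bus_find_device() now
+           * returns NULL instead of dereferencing bus->p, so this
+           * lookup simply finds nothing and the reboot path carries on.
+           */
+          dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);
+          if (!dev)
+                  return;         /* too early for PCI fixups */
+
+          /* apply the board-specific fixup here, then drop the reference */
+          pci_dev_put(dev);
+  }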
+
+Joonsoo reported a problem when panicking before PCI was initialized.
+I think this patch should be sufficient to replace the patch he posted
+here: https://lkml.org/lkml/2012/12/28/75 ("[PATCH] x86, reboot: skip
+reboot_fixups in early boot phase")
+
+Reported-by: Joonsoo Kim <js1304@gmail.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/bus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bu
+ struct device *dev;
+ int error = 0;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -323,7 +323,7 @@ struct device *bus_find_device(struct bu
+ struct klist_iter i;
+ struct device *dev;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return NULL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
--- /dev/null
+From 21a92735f660eaecf69a6f2e777f18463760ec32 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.co.il>
+Date: Mon, 8 Oct 2012 16:29:24 -0700
+Subject: mm: mmu_notifier: have mmu_notifiers use a global SRCU so they may safely schedule
+
+From: Sagi Grimberg <sagig@mellanox.co.il>
+
+commit 21a92735f660eaecf69a6f2e777f18463760ec32 upstream.
+
+With an RCU based mmu_notifier implementation, any callout to
+mmu_notifier_invalidate_range_{start,end}() or
+mmu_notifier_invalidate_page() would not be allowed to call schedule()
+as that could potentially allow a modification to the mmu_notifier
+structure while it is currently being used.
+
+Since SRCU allocates 4 machine words per instance per CPU, we could
+exhaust memory if we used one SRCU per mm, so all mms share a single
+global SRCU. Note that during heavy mmu_notifier activity the exit and
+unregister paths might block for longer periods, but that is tolerable
+for current mmu_notifier clients.
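+
+A minimal sketch of the read-side pattern this change introduces (the
+names mirror the diff below; invalidate_page is shown as one example
+callout):
+
+  #include <linux/srcu.h>
+
+  struct srcu_struct srcu;             /* global SRCU for all MMs */
+
+  void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+                                      unsigned long address)
+  {
+          struct mmu_notifier *mn;
+          struct hlist_node *n;
+          int id;
+
+          id = srcu_read_lock(&srcu);  /* read section may now sleep */
+          hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+                  if (mn->ops->invalidate_page)
+                          mn->ops->invalidate_page(mn, mm, address);
+          }
+          srcu_read_unlock(&srcu, id);
+  }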
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.co.il>
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Haggai Eran <haggaie@mellanox.com>
+Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mmu_notifier.h | 1
+ mm/mmu_notifier.c | 73 ++++++++++++++++++++++++++++---------------
+ 2 files changed, 49 insertions(+), 25 deletions(-)
+
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/module.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+
++/* global SRCU for all MMs */
++struct srcu_struct srcu;
++
+ /*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,8 +29,8 @@
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+@@ -34,12 +38,13 @@ void __mmu_notifier_release(struct mm_st
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+ /*
+ * RCU here will block mmu_notifier_unregister until
+ * ->release returns.
+ */
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+ /*
+ * if ->release runs before mmu_notifier_unregister it
+@@ -50,7 +55,7 @@ void __mmu_notifier_release(struct mm_st
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+@@ -68,7 +73,7 @@ void __mmu_notifier_release(struct mm_st
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * synchronize_rcu here prevents mmu_notifier_release to
++ * synchronize_srcu here prevents mmu_notifier_release to
+ * return to exit_mmap (which would proceed freeing all pages
+ * in the mm) until the ->release method returns, if it was
+ * invoked by mmu_notifier_unregister.
+@@ -76,7 +81,7 @@ void __mmu_notifier_release(struct mm_st
+ * The mmu_notifier_mm can't go away from under us because one
+ * mm_count is hold by exit_mmap.
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+ }
+
+ /*
+@@ -89,14 +94,14 @@ int __mmu_notifier_clear_flush_young(str
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->clear_flush_young)
+ young |= mn->ops->clear_flush_young(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -106,9 +111,9 @@ int __mmu_notifier_test_young(struct mm_
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->test_young) {
+ young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +121,7 @@ int __mmu_notifier_test_young(struct mm_
+ break;
+ }
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -126,8 +131,9 @@ void __mmu_notifier_change_pte(struct mm
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +144,7 @@ void __mmu_notifier_change_pte(struct mm
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +152,14 @@ void __mmu_notifier_invalidate_page(stru
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +167,14 @@ void __mmu_notifier_invalidate_range_sta
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_start)
+ mn->ops->invalidate_range_start(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +182,14 @@ void __mmu_notifier_invalidate_range_end
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_end)
+ mn->ops->invalidate_range_end(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +201,12 @@ static int do_mmu_notifier_register(stru
+
+ BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
++ /*
++ * Verify that mmu_notifier_init() already run and the global srcu is
++ * initialized.
++ */
++ BUG_ON(!srcu.per_cpu_ref);
++
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +289,8 @@ void __mmu_notifier_mm_destroy(struct mm
+ /*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -290,8 +305,9 @@ void mmu_notifier_unregister(struct mmu_
+ * RCU here will force exit_mmap to wait ->release to finish
+ * before freeing the pages.
+ */
+- rcu_read_lock();
++ int id;
+
++ id = srcu_read_lock(&srcu);
+ /*
+ * exit_mmap will block in mmu_notifier_release to
+ * guarantee ->release is called before freeing the
+@@ -299,7 +315,7 @@ void mmu_notifier_unregister(struct mmu_
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ hlist_del_rcu(&mn->hlist);
+@@ -310,10 +326,17 @@ void mmu_notifier_unregister(struct mmu_
+ * Wait any running method to finish, of course including
+ * ->release if it was run by mmu_notifier_relase instead of us.
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+ mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++ return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
--- /dev/null
+From 70400303ce0c4ced3139499c676d5c79636b0c72 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Mon, 8 Oct 2012 16:31:52 -0700
+Subject: mm: mmu_notifier: make the mmu_notifier srcu static
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 70400303ce0c4ced3139499c676d5c79636b0c72 upstream.
+
+The variable must be static, especially given the variable name.
+
+s/RCU/SRCU/ over a few comments.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
+Cc: Sagi Grimberg <sagig@mellanox.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Haggai Eran <haggaie@mellanox.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmu_notifier.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -20,7 +20,7 @@
+ #include <linux/slab.h>
+
+ /* global SRCU for all MMs */
+-struct srcu_struct srcu;
++static struct srcu_struct srcu;
+
+ /*
+ * This function can't run concurrently against mmu_notifier_register
+@@ -41,7 +41,7 @@ void __mmu_notifier_release(struct mm_st
+ int id;
+
+ /*
+- * RCU here will block mmu_notifier_unregister until
++ * SRCU here will block mmu_notifier_unregister until
+ * ->release returns.
+ */
+ id = srcu_read_lock(&srcu);
+@@ -302,7 +302,7 @@ void mmu_notifier_unregister(struct mmu_
+
+ if (!hlist_unhashed(&mn->hlist)) {
+ /*
+- * RCU here will force exit_mmap to wait ->release to finish
++ * SRCU here will force exit_mmap to wait ->release to finish
+ * before freeing the pages.
+ */
+ int id;
--- /dev/null
+From 751efd8610d3d7d67b7bdf7f62646edea7365dd7 Mon Sep 17 00:00:00 2001
+From: Robin Holt <holt@sgi.com>
+Date: Fri, 22 Feb 2013 16:35:34 -0800
+Subject: mmu_notifier_unregister NULL Pointer deref and multiple ->release() callouts
+
+From: Robin Holt <holt@sgi.com>
+
+commit 751efd8610d3d7d67b7bdf7f62646edea7365dd7 upstream.
+
+There is a race condition between mmu_notifier_unregister() and
+__mmu_notifier_release().
+
+Assume two tasks, one calling mmu_notifier_unregister() as a result of a
+filp_close() ->flush() callout (task A), and the other calling
+mmu_notifier_release() from an mmput() (task B).
+
+          A                                B
+t1                                         srcu_read_lock()
+t2        if (!hlist_unhashed())
+t3                                         srcu_read_unlock()
+t4        srcu_read_lock()
+t5                                         hlist_del_init_rcu()
+t6                                         synchronize_srcu()
+t7        srcu_read_unlock()
+t8        hlist_del_rcu()  <--- NULL pointer deref.
+
+Additionally, the list traversal in __mmu_notifier_release() is not
+protected by the mmu_notifier_mm->hlist_lock, which can result in
+callouts to the ->release() notifier from both mmu_notifier_unregister()
+and __mmu_notifier_release().
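+
+Condensed, the fix makes the unlink the decision point: whichever path
+removes the notifier from the list while holding mmu_notifier_mm->lock
+is the only one that makes the ->release() callout.  A sketch of the
+resulting unregister side (BUG_ON checks and the final mmdrop() omitted;
+the full diff follows):
+
+  void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+  {
+          spin_lock(&mm->mmu_notifier_mm->lock);
+          if (!hlist_unhashed(&mn->hlist)) {
+                  int id = srcu_read_lock(&srcu);
+
+                  hlist_del_rcu(&mn->hlist);      /* this path won the race */
+                  spin_unlock(&mm->mmu_notifier_mm->lock);
+
+                  if (mn->ops->release)
+                          mn->ops->release(mn, mm);
+                  srcu_read_unlock(&srcu, id);
+          } else
+                  spin_unlock(&mm->mmu_notifier_mm->lock);  /* release path won */
+
+          /* wait for any callout __mmu_notifier_release() may still be making */
+          synchronize_srcu(&srcu);
+  }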
+
+-stable suggestions:
+
+The stable trees prior to 3.7.y need commits 21a92735f660 and
+70400303ce0c cherry-picked in that order prior to cherry-picking this
+commit. The 3.7.y tree already has those two commits.
+
+Signed-off-by: Robin Holt <holt@sgi.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
+Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
+Cc: Avi Kivity <avi@redhat.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Sagi Grimberg <sagig@mellanox.co.il>
+Cc: Haggai Eran <haggaie@mellanox.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmu_notifier.c | 82 +++++++++++++++++++++++++++---------------------------
+ 1 file changed, 42 insertions(+), 40 deletions(-)
+
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -37,49 +37,51 @@ static struct srcu_struct srcu;
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
+- struct hlist_node *n;
+ int id;
+
+ /*
+- * SRCU here will block mmu_notifier_unregister until
+- * ->release returns.
++ * srcu_read_lock() here will block synchronize_srcu() in
++ * mmu_notifier_unregister() until all registered
++ * ->release() callouts this function makes have
++ * returned.
+ */
+ id = srcu_read_lock(&srcu);
+- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- srcu_read_unlock(&srcu, id);
+-
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
++
+ /*
+- * We arrived before mmu_notifier_unregister so
+- * mmu_notifier_unregister will do nothing other than
+- * to wait ->release to finish and
+- * mmu_notifier_unregister to return.
++ * Unlink. This will prevent mmu_notifier_unregister()
++ * from also making the ->release() callout.
+ */
+ hlist_del_init_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
++ /*
++ * Clear sptes. (see 'release' description in mmu_notifier.h)
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * synchronize_srcu here prevents mmu_notifier_release to
+- * return to exit_mmap (which would proceed freeing all pages
+- * in the mm) until the ->release method returns, if it was
+- * invoked by mmu_notifier_unregister.
+- *
+- * The mmu_notifier_mm can't go away from under us because one
+- * mm_count is hold by exit_mmap.
++ * All callouts to ->release() which we have done are complete.
++ * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++ */
++ srcu_read_unlock(&srcu, id);
++
++ /*
++ * mmu_notifier_unregister() may have unlinked a notifier and may
++ * still be calling out to it. Additionally, other notifiers
++ * may have been active via vmtruncate() et. al. Block here
++ * to ensure that all notifier callouts for this mm have been
++ * completed and the sptes are really cleaned up before returning
++ * to exit_mmap().
+ */
+ synchronize_srcu(&srcu);
+ }
+@@ -300,31 +302,31 @@ void mmu_notifier_unregister(struct mmu_
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- /*
+- * SRCU here will force exit_mmap to wait ->release to finish
+- * before freeing the pages.
+- */
+ int id;
+
+- id = srcu_read_lock(&srcu);
+ /*
+- * exit_mmap will block in mmu_notifier_release to
+- * guarantee ->release is called before freeing the
+- * pages.
++ * Ensure we synchronize up with __mmu_notifier_release().
+ */
++ id = srcu_read_lock(&srcu);
++
++ hlist_del_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- srcu_read_unlock(&srcu, id);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+- hlist_del_rcu(&mn->hlist);
++ /*
++ * Allow __mmu_notifier_release() to complete.
++ */
++ srcu_read_unlock(&srcu, id);
++ } else
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+- }
+
+ /*
+- * Wait any running method to finish, of course including
+- * ->release if it was run by mmu_notifier_relase instead of us.
++ * Wait for any running method to finish, including ->release() if it
++ * was run by __mmu_notifier_release() instead of us.
+ */
+ synchronize_srcu(&srcu);
+
alsa-ali5451-remove-irq-enabling-in-pointer-callback.patch
alsa-rme32.c-irq-enabling-after-spin_lock_irq.patch
tty-set_termios-set_termiox-should-not-return-eintr.patch
+xen-netback-check-correct-frag-when-looking-for-head-frag.patch
+xen-send-spinlock-ipi-to-all-waiters.patch
+driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
+mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
+mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
+mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
--- /dev/null
+From Ian.Campbell@citrix.com Mon Feb 25 09:28:27 2013
+From: Ian Campbell <ian.campbell@citrix.com>
+Date: Tue, 19 Feb 2013 09:05:46 +0000
+Subject: xen/netback: check correct frag when looking for head frag
+To: "gregkh@linuxfoundation.org" <gregkh@linuxfoundation.org>
+Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>, "mattjd@gmail.com" <mattjd@gmail.com>, "davem@davemloft.net" <davem@davemloft.net>, "JBeulich@suse.com" <JBeulich@suse.com>, "konrad.wilk@oracle.com" <konrad.wilk@oracle.com>
+Message-ID: <1361264746.1051.65.camel@zakaz.uk.xensource.com>
+
+From: Ian Campbell <ian.campbell@citrix.com>
+
+When I backported 7d5145d8eb2b "xen/netback: don't leak pages on failure in
+xen_netbk_tx_check_gop" to 3.0 (where it became f0457844e605) I somehow picked
+up an extraneous hunk breaking this.
+
+Reported-by: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/netback.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -990,7 +990,7 @@ static int xen_netbk_tx_check_gop(struct
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+- start = ((unsigned long)shinfo->frags[i].page == pending_idx);
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
--- /dev/null
+From 76eaca031f0af2bb303e405986f637811956a422 Mon Sep 17 00:00:00 2001
+From: Stefan Bader <stefan.bader@canonical.com>
+Date: Fri, 15 Feb 2013 09:48:52 +0100
+Subject: xen: Send spinlock IPI to all waiters
+
+From: Stefan Bader <stefan.bader@canonical.com>
+
+commit 76eaca031f0af2bb303e405986f637811956a422 upstream.
+
+There is a loophole between Xen's current implementation of
+pv-spinlocks and the scheduler. This was triggerable through
+a testcase until v3.6 changed the TLB flushing code. The
+problem is potentially still there, just not observable in the
+same way.
+
+What could happen was (is):
+
+1. CPU n tries to schedule task x away and goes into a slow
+ wait for the runq lock of CPU n-# (must be one with a lower
+ number).
+2. CPU n-#, while processing softirqs, tries to balance domains
+ and goes into a slow wait for its own runq lock (for updating
+ some records). Since this is a spin_lock_irqsave in softirq
+ context, interrupts will be re-enabled for the duration of
+ the poll_irq hypercall used by Xen.
+3. Before the runq lock of CPU n-# is unlocked, CPU n-# receives
+ an interrupt (e.g. endio) and when processing the interrupt,
+ tries to wake up task x. But that is in schedule and still
+ on_cpu, so try_to_wake_up goes into a tight loop.
+4. The runq lock of CPU n-# gets unlocked, but the message only
+ gets sent to the first waiter, which is CPU n-# and that is
+ busily stuck.
+5. CPU n-# never returns from the nested interruption to take and
+ release the lock because the scheduler uses a busy wait.
+ And CPU n never finishes the task migration because the unlock
+ notification only went to CPU n-#.
+
+To avoid this, and since the unlocking code has no real sense of
+which waiter is best suited to grab the lock, just send the IPI to
+all of them. This causes the waiters to return from the hypercall
+(those not interrupted, at least) and do active spinlocking.
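+
+A condensed sketch of the resulting unlock slow path (the
+for_each_online_cpu loop is assumed from context; lock_spinners,
+ADD_STATS() and xen_send_IPI_one() appear in the hunk below):
+
+  static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+  {
+          int cpu;
+
+          for_each_online_cpu(cpu) {
+                  if (per_cpu(lock_spinners, cpu) == xl) {
+                          ADD_STATS(released_slow_kicked, 1);
+                          xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+                          /* no break: every waiter gets the kick */
+                  }
+          }
+  }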
+
+BugLink: http://bugs.launchpad.net/bugs/1011792
+
+Acked-by: Jan Beulich <JBeulich@suse.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/spinlock.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -313,7 +313,6 @@ static noinline void xen_spin_unlock_slo
+ if (per_cpu(lock_spinners, cpu) == xl) {
+ ADD_STATS(released_slow_kicked, 1);
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+- break;
+ }
+ }
+ }