From 508c63c083d41afd59e2ad36e03773fc9d8e55ba Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 25 Feb 2013 09:52:04 -0800
Subject: [PATCH] 3.4-stable patches

added patches:
	b43-increase-number-of-rx-dma-slots.patch
	driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
	mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
	mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
	mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
	rtlwifi-rtl8192cu-add-new-usb-id.patch
	rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch
	xen-close-evtchn-port-if-binding-to-irq-fails.patch
	xen-send-spinlock-ipi-to-all-waiters.patch
---
 .../b43-increase-number-of-rx-dma-slots.patch |  37 +++
 ...tered-bus_types-as-having-no-devices.patch |  57 ++++
 ...bal-srcu-so-they-may-safely-schedule.patch | 288 ++++++++++++++++++
 ...er-make-the-mmu_notifier-srcu-static.patch |  55 ++++
 ...-deref-and-multiple-release-callouts.patch | 180 +++++++++++
 .../rtlwifi-rtl8192cu-add-new-usb-id.patch    |  30 ++
 ...up_packet-and-data-buffer-separately.patch | 113 +++++++
 queue-3.4/series                              |   9 +
 ...-evtchn-port-if-binding-to-irq-fails.patch |  43 +++
 ...xen-send-spinlock-ipi-to-all-waiters.patch |  63 ++++
 10 files changed, 875 insertions(+)
 create mode 100644 queue-3.4/b43-increase-number-of-rx-dma-slots.patch
 create mode 100644 queue-3.4/driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
 create mode 100644 queue-3.4/mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
 create mode 100644 queue-3.4/mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
 create mode 100644 queue-3.4/mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
 create mode 100644 queue-3.4/rtlwifi-rtl8192cu-add-new-usb-id.patch
 create mode 100644 queue-3.4/rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch
 create mode 100644 queue-3.4/xen-close-evtchn-port-if-binding-to-irq-fails.patch
 create mode 100644 queue-3.4/xen-send-spinlock-ipi-to-all-waiters.patch

diff --git a/queue-3.4/b43-increase-number-of-rx-dma-slots.patch b/queue-3.4/b43-increase-number-of-rx-dma-slots.patch
new file mode 100644
index 00000000000..8300fc88462
--- /dev/null
+++ b/queue-3.4/b43-increase-number-of-rx-dma-slots.patch
@@ -0,0 +1,37 @@
+From ccae0e50c16a7f7adb029c169147400d1ce9f703 Mon Sep 17 00:00:00 2001
+From: Larry Finger
+Date: Sun, 17 Feb 2013 17:01:20 +0000
+Subject: b43: Increase number of RX DMA slots
+
+From: Larry Finger
+
+commit ccae0e50c16a7f7adb029c169147400d1ce9f703 upstream.
+
+Bastian Bittorf reported that some of the silent freezes on a Linksys WRT54G
+were due to overflow of the RX DMA ring buffer, which was created with 64
+slots. That finding reminded me that I was seeing similar crashes on a netbook,
+which also has a relatively slow processor. After increasing the number of
+slots to 128, runs on the netbook that previously failed now worked; however,
+I found that 109 slots had been used in one test. For that reason, the number
+of slots is being increased to 256.
+
+Signed-off-by: Larry Finger
+Cc: Bastian Bittorf
+Signed-off-by: David S. Miller
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/wireless/b43/dma.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
+ 
+ /* DMA engine tuning knobs */
+ #define B43_TXRING_SLOTS		256
+-#define B43_RXRING_SLOTS		64
++#define B43_RXRING_SLOTS		256
+ #define B43_DMA0_RX_FW598_BUFSIZE	(B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+ #define B43_DMA0_RX_FW351_BUFSIZE	(B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
+ 
diff --git a/queue-3.4/driver-core-treat-unregistered-bus_types-as-having-no-devices.patch b/queue-3.4/driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
new file mode 100644
index 00000000000..e224523bb47
--- /dev/null
+++ b/queue-3.4/driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
@@ -0,0 +1,57 @@
+From 4fa3e78be7e985ca814ce2aa0c09cbee404efcf7 Mon Sep 17 00:00:00 2001
+From: Bjorn Helgaas
+Date: Tue, 29 Jan 2013 16:44:27 -0700
+Subject: Driver core: treat unregistered bus_types as having no devices
+
+From: Bjorn Helgaas
+
+commit 4fa3e78be7e985ca814ce2aa0c09cbee404efcf7 upstream.
+
+A bus_type has a list of devices (klist_devices), but the list and the
+subsys_private structure that contains it are not initialized until the
+bus_type is registered with bus_register().
+
+The panic/reboot path has fixups that look up devices in pci_bus_type. If
+we panic before registering pci_bus_type, the bus_type exists but the list
+does not, so mach_reboot_fixups() trips over a null pointer and panics
+again:
+
+    mach_reboot_fixups
+    pci_get_device
+    ..
+    bus_find_device(&pci_bus_type, ...)
+    bus->p is NULL
+
+Joonsoo reported a problem when panicking before PCI was initialized.
+I think this patch should be sufficient to replace the patch he posted
+here: https://lkml.org/lkml/2012/12/28/75 ("[PATCH] x86, reboot: skip
+reboot_fixups in early boot phase")
+
+Reported-by: Joonsoo Kim
+Signed-off-by: Bjorn Helgaas
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/base/bus.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -294,7 +294,7 @@ int bus_for_each_dev(struct bus_type *bu
+ 	struct device *dev;
+ 	int error = 0;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return -EINVAL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -328,7 +328,7 @@ struct device *bus_find_device(struct bu
+ 	struct klist_iter i;
+ 	struct device *dev;
+ 
+-	if (!bus)
++	if (!bus || !bus->p)
+ 		return NULL;
+ 
+ 	klist_iter_init_node(&bus->p->klist_devices, &i,
diff --git a/queue-3.4/mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch b/queue-3.4/mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
new file mode 100644
index 00000000000..21feb9f80d5
--- /dev/null
+++ b/queue-3.4/mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
@@ -0,0 +1,288 @@
+From 21a92735f660eaecf69a6f2e777f18463760ec32 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg
+Date: Mon, 8 Oct 2012 16:29:24 -0700
+Subject: mm: mmu_notifier: have mmu_notifiers use a global SRCU so they may safely schedule
+
+From: Sagi Grimberg
+
+commit 21a92735f660eaecf69a6f2e777f18463760ec32 upstream.
+
+With an RCU based mmu_notifier implementation, any callout to
+mmu_notifier_invalidate_range_{start,end}() or
+mmu_notifier_invalidate_page() would not be allowed to call schedule()
+as that could potentially allow a modification to the mmu_notifier
+structure while it is currently being used.
+
+Since srcu allocates 4 machine words per instance per cpu, we may end up
+with memory exhaustion if we use srcu per mm. So all mms share a global
+srcu. Note that during large mmu_notifier activity exit & unregister
+paths might hang for longer periods, but it is tolerable for current
+mmu_notifier clients.
+
+Signed-off-by: Sagi Grimberg
+Signed-off-by: Andrea Arcangeli
+Cc: Peter Zijlstra
+Cc: Haggai Eran
+Cc: "Paul E. McKenney"
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/mmu_notifier.h |    1 
+ mm/mmu_notifier.c            |   73 ++++++++++++++++++++++++++++---------------
+ 2 files changed, 49 insertions(+), 25 deletions(-)
+
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+ 
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ 
++/* global SRCU for all MMs */
++struct srcu_struct srcu;
++
+ /*
+  * This function can't run concurrently against mmu_notifier_register
+  * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,8 +29,8 @@
+  * in parallel despite there being no task using this mm any more,
+  * through the vmas outside of the exit_mmap context, such as with
+  * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+  * can't go away from under us as exit_mmap holds an mm_count pin
+  * itself.
+  */
+@@ -34,12 +38,13 @@ void __mmu_notifier_release(struct mm_st
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+ 	/*
+ 	 * RCU here will block mmu_notifier_unregister until
+ 	 * ->release returns.
+ 	 */
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+ 		/*
+ 		 * if ->release runs before mmu_notifier_unregister it
+ 		 * must be handled as it's the only way for the driver
+ 		 * to flush all existing sptes and stop the driver
+ 		 * from establishing any more sptes before all the
+ 		 * pages in the mm are freed.
+ 		 */
+ 		if (mn->ops->release)
+ 			mn->ops->release(mn, mm);
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+@@ -68,7 +73,7 @@ void __mmu_notifier_release(struct mm_st
+ 	spin_unlock(&mm->mmu_notifier_mm->lock);
+ 
+ 	/*
+-	 * synchronize_rcu here prevents mmu_notifier_release to
++	 * synchronize_srcu here prevents mmu_notifier_release to
+ 	 * return to exit_mmap (which would proceed freeing all pages
+ 	 * in the mm) until the ->release method returns, if it was
+ 	 * invoked by mmu_notifier_unregister.
+ 	 *
+ 	 * The mmu_notifier_mm can't go away from under us because one
+ 	 * mm_count is hold by exit_mmap.
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ }
+ 
+ /*
+@@ -89,14 +94,14 @@ int __mmu_notifier_clear_flush_young(str
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->clear_flush_young)
+ 			young |= mn->ops->clear_flush_young(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -106,9 +111,9 @@ int __mmu_notifier_test_young(struct mm_
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
+-	int young = 0;
++	int young = 0, id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->test_young) {
+ 			young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +121,7 @@
+ 			break;
+ 		}
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ 
+ 	return young;
+ }
+@@ -126,8 +131,9 @@ void __mmu_notifier_change_pte(struct mm
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->change_pte)
+ 			mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +144,7 @@ void __mmu_notifier_change_pte(struct mm
+ 		else if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +152,14 @@ void __mmu_notifier_invalidate_page(stru
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_page)
+ 			mn->ops->invalidate_page(mn, mm, address);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +167,14 @@ void __mmu_notifier_invalidate_range_sta
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_start)
+ 			mn->ops->invalidate_range_start(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +182,14 @@ void __mmu_notifier_invalidate_range_end
+ {
+ 	struct mmu_notifier *mn;
+ 	struct hlist_node *n;
++	int id;
+ 
+-	rcu_read_lock();
++	id = srcu_read_lock(&srcu);
+ 	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ 		if (mn->ops->invalidate_range_end)
+ 			mn->ops->invalidate_range_end(mn, mm, start, end);
+ 	}
+-	rcu_read_unlock();
++	srcu_read_unlock(&srcu, id);
+ }
+ 
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +201,12 @@ static int do_mmu_notifier_register(stru
+ 
+ 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+ 
++	/*
++	 * Verify that mmu_notifier_init() already run and the global srcu is
++	 * initialized.
++	 */
++	BUG_ON(!srcu.per_cpu_ref);
++
+ 	ret = -ENOMEM;
+ 	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ 	if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +289,8 @@ void __mmu_notifier_mm_destroy(struct mm
+ /*
+  * This releases the mm_count pin automatically and frees the mm
+  * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+  * calling mmu_notifier_unregister. ->release or any other notifier
+  * method may be invoked concurrently with mmu_notifier_unregister,
+  * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -290,8 +305,9 @@ void mmu_notifier_unregister(struct mmu_
+ 		 * RCU here will force exit_mmap to wait ->release to finish
+ 		 * before freeing the pages.
+ 		 */
+-		rcu_read_lock();
++		int id;
+ 
++		id = srcu_read_lock(&srcu);
+ 		/*
+ 		 * exit_mmap will block in mmu_notifier_release to
+ 		 * guarantee ->release is called before freeing the
+@@ -299,7 +315,7 @@
+ 		 */
+ 		if (mn->ops->release)
+ 			mn->ops->release(mn, mm);
+-		rcu_read_unlock();
++		srcu_read_unlock(&srcu, id);
+ 
+ 		spin_lock(&mm->mmu_notifier_mm->lock);
+ 		hlist_del_rcu(&mn->hlist);
+@@ -310,10 +326,17 @@ void mmu_notifier_unregister(struct mmu_
+ 	 * Wait any running method to finish, of course including
+ 	 * ->release if it was run by mmu_notifier_relase instead of us.
+ 	 */
+-	synchronize_rcu();
++	synchronize_srcu(&srcu);
+ 
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
+ 	mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++	return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
diff --git a/queue-3.4/mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch b/queue-3.4/mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
new file mode 100644
index 00000000000..587298dddf6
--- /dev/null
+++ b/queue-3.4/mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
@@ -0,0 +1,55 @@
+From 70400303ce0c4ced3139499c676d5c79636b0c72 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli
+Date: Mon, 8 Oct 2012 16:31:52 -0700
+Subject: mm: mmu_notifier: make the mmu_notifier srcu static
+
+From: Andrea Arcangeli
+
+commit 70400303ce0c4ced3139499c676d5c79636b0c72 upstream.
+
+The variable must be static especially given the variable name.
+
+s/RCU/SRCU/ over a few comments.
+
+Signed-off-by: Andrea Arcangeli
+Cc: Xiao Guangrong
+Cc: Sagi Grimberg
+Cc: Peter Zijlstra
+Cc: Haggai Eran
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/mmu_notifier.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -20,7 +20,7 @@
+ #include <linux/slab.h>
+ 
+ /* global SRCU for all MMs */
+-struct srcu_struct srcu;
++static struct srcu_struct srcu;
+ 
+ /*
+  * This function can't run concurrently against mmu_notifier_register
+@@ -41,7 +41,7 @@ void __mmu_notifier_release(struct mm_st
+ 	int id;
+ 
+ 	/*
+-	 * RCU here will block mmu_notifier_unregister until
++	 * SRCU here will block mmu_notifier_unregister until
+ 	 * ->release returns.
+ 	 */
+ 	id = srcu_read_lock(&srcu);
+@@ -302,7 +302,7 @@ void mmu_notifier_unregister(struct mmu_
+ 
+ 	if (!hlist_unhashed(&mn->hlist)) {
+ 		/*
+-		 * RCU here will force exit_mmap to wait ->release to finish
++		 * SRCU here will force exit_mmap to wait ->release to finish
+ 		 * before freeing the pages.
+ 		 */
+ 		int id;
diff --git a/queue-3.4/mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch b/queue-3.4/mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
new file mode 100644
index 00000000000..88be9d3ce9b
--- /dev/null
+++ b/queue-3.4/mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
@@ -0,0 +1,180 @@
+From 751efd8610d3d7d67b7bdf7f62646edea7365dd7 Mon Sep 17 00:00:00 2001
+From: Robin Holt
+Date: Fri, 22 Feb 2013 16:35:34 -0800
+Subject: mmu_notifier_unregister NULL Pointer deref and multiple ->release() callouts
+
+From: Robin Holt
+
+commit 751efd8610d3d7d67b7bdf7f62646edea7365dd7 upstream.
+
+There is a race condition between mmu_notifier_unregister() and
+__mmu_notifier_release().
+
+Assume two tasks, one calling mmu_notifier_unregister() as a result of a
+filp_close() ->flush() callout (task A), and the other calling
+mmu_notifier_release() from an mmput() (task B).
+
+		A				B
+t1						srcu_read_lock()
+t2		if (!hlist_unhashed())
+t3						srcu_read_unlock()
+t4		srcu_read_lock()
+t5						hlist_del_init_rcu()
+t6						synchronize_srcu()
+t7		srcu_read_unlock()
+t8		hlist_del_rcu()  <--- NULL pointer deref.
+
+Additionally, the list traversal in __mmu_notifier_release() is not
+protected by the mmu_notifier_mm->hlist_lock which can result in
+callouts to the ->release() notifier from both mmu_notifier_unregister()
+and __mmu_notifier_release().
+
+-stable suggestions:
+
+The stable trees prior to 3.7.y need commits 21a92735f660 and
+70400303ce0c cherry-picked in that order prior to cherry-picking this
+commit. The 3.7.y tree already has those two commits.
+
+Signed-off-by: Robin Holt
+Cc: Andrea Arcangeli
+Cc: Wanpeng Li
+Cc: Xiao Guangrong
+Cc: Avi Kivity
+Cc: Hugh Dickins
+Cc: Marcelo Tosatti
+Cc: Sagi Grimberg
+Cc: Haggai Eran
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/mmu_notifier.c |   82 +++++++++++++++++++++++++++---------------------------
+ 1 file changed, 42 insertions(+), 40 deletions(-)
+
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -37,49 +37,51 @@ static struct srcu_struct srcu
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ 	struct mmu_notifier *mn;
+-	struct hlist_node *n;
+ 	int id;
+ 
+ 	/*
+-	 * SRCU here will block mmu_notifier_unregister until
+-	 * ->release returns.
++	 * srcu_read_lock() here will block synchronize_srcu() in
++	 * mmu_notifier_unregister() until all registered
++	 * ->release() callouts this function makes have
++	 * returned.
+ 	 */
+ 	id = srcu_read_lock(&srcu);
+-	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+-		/*
+-		 * if ->release runs before mmu_notifier_unregister it
+-		 * must be handled as it's the only way for the driver
+-		 * to flush all existing sptes and stop the driver
+-		 * from establishing any more sptes before all the
+-		 * pages in the mm are freed.
+-		 */
+-		if (mn->ops->release)
+-			mn->ops->release(mn, mm);
+-	srcu_read_unlock(&srcu, id);
+-
+ 	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ 		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ 				 struct mmu_notifier,
+ 				 hlist);
++
+ 		/*
+-		 * We arrived before mmu_notifier_unregister so
+-		 * mmu_notifier_unregister will do nothing other than
+-		 * to wait ->release to finish and
+-		 * mmu_notifier_unregister to return.
++		 * Unlink. This will prevent mmu_notifier_unregister()
++		 * from also making the ->release() callout.
+ 		 */
+ 		hlist_del_init_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
++		/*
++		 * Clear sptes. (see 'release' description in mmu_notifier.h)
++		 */
++		if (mn->ops->release)
++			mn->ops->release(mn, mm);
++
++		spin_lock(&mm->mmu_notifier_mm->lock);
+ 	}
+ 	spin_unlock(&mm->mmu_notifier_mm->lock);
+ 
+ 	/*
+-	 * synchronize_srcu here prevents mmu_notifier_release to
+-	 * return to exit_mmap (which would proceed freeing all pages
+-	 * in the mm) until the ->release method returns, if it was
+-	 * invoked by mmu_notifier_unregister.
+-	 *
+-	 * The mmu_notifier_mm can't go away from under us because one
+-	 * mm_count is hold by exit_mmap.
++	 * All callouts to ->release() which we have done are complete.
++	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++	 */
++	srcu_read_unlock(&srcu, id);
++
++	/*
++	 * mmu_notifier_unregister() may have unlinked a notifier and may
++	 * still be calling out to it. Additionally, other notifiers
++	 * may have been active via vmtruncate() et. al. Block here
++	 * to ensure that all notifier callouts for this mm have been
++	 * completed and the sptes are really cleaned up before returning
++	 * to exit_mmap().
+ 	 */
+ 	synchronize_srcu(&srcu);
+ }
+@@ -300,31 +302,31 @@ void mmu_notifier_unregister(struct mmu_
+ {
+ 	BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ 
++	spin_lock(&mm->mmu_notifier_mm->lock);
+ 	if (!hlist_unhashed(&mn->hlist)) {
+-		/*
+-		 * SRCU here will force exit_mmap to wait ->release to finish
+-		 * before freeing the pages.
+-		 */
+ 		int id;
+ 
+-		id = srcu_read_lock(&srcu);
+ 		/*
+-		 * exit_mmap will block in mmu_notifier_release to
+-		 * guarantee ->release is called before freeing the
+-		 * pages.
++		 * Ensure we synchronize up with __mmu_notifier_release().
+ 		 */
++		id = srcu_read_lock(&srcu);
++
++		hlist_del_rcu(&mn->hlist);
++		spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ 		if (mn->ops->release)
+ 			mn->ops->release(mn, mm);
+-		srcu_read_unlock(&srcu, id);
+ 
+-		spin_lock(&mm->mmu_notifier_mm->lock);
+-		hlist_del_rcu(&mn->hlist);
++		/*
++		 * Allow __mmu_notifier_release() to complete.
++		 */
++		srcu_read_unlock(&srcu, id);
++	} else
+ 		spin_unlock(&mm->mmu_notifier_mm->lock);
+-	}
+ 
+ 	/*
+-	 * Wait any running method to finish, of course including
+-	 * ->release if it was run by mmu_notifier_relase instead of us.
++	 * Wait for any running method to finish, including ->release() if it
++	 * was run by __mmu_notifier_release() instead of us.
+ 	 */
+ 	synchronize_srcu(&srcu);
+ 
diff --git a/queue-3.4/rtlwifi-rtl8192cu-add-new-usb-id.patch b/queue-3.4/rtlwifi-rtl8192cu-add-new-usb-id.patch
new file mode 100644
index 00000000000..20377052606
--- /dev/null
+++ b/queue-3.4/rtlwifi-rtl8192cu-add-new-usb-id.patch
@@ -0,0 +1,30 @@
+From 8708aac79e4572ba673d7a21e94ddca9f3abb7fc Mon Sep 17 00:00:00 2001
+From: Larry Finger
+Date: Fri, 8 Feb 2013 12:28:18 -0600
+Subject: rtlwifi: rtl8192cu: Add new USB ID
+
+From: Larry Finger
+
+commit 8708aac79e4572ba673d7a21e94ddca9f3abb7fc upstream.
+
+A new model of the RTL8188CUS has appeared.
+
+Reported-and-tested-by: Thomas Rosenkrantz
+Signed-off-by: Larry Finger
+Signed-off-by: John W. Linville
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -285,6 +285,7 @@ static struct usb_device_id rtl8192c_usb
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ 	/* RTL8188CUS-VL */
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
++	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ 	/* 8188 Combo for BC4 */
+ 	{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+ 
diff --git a/queue-3.4/rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch b/queue-3.4/rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch
new file mode 100644
index 00000000000..72b63c913a6
--- /dev/null
+++ b/queue-3.4/rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch
@@ -0,0 +1,113 @@
+From bc6b89237acb3dee6af6e64e51a18255fef89cc2 Mon Sep 17 00:00:00 2001
+From: Jussi Kivilinna
+Date: Mon, 18 Feb 2013 10:29:30 +0200
+Subject: rtlwifi: usb: allocate URB control message setup_packet and data buffer separately
+
+From: Jussi Kivilinna
+
+commit bc6b89237acb3dee6af6e64e51a18255fef89cc2 upstream.
+
+rtlwifi allocates both setup_packet and data buffer of control message urb,
+using shared kmalloc in _usbctrl_vendorreq_async_write. Structure used for
+allocating is:
+ struct {
+	u8 data[254];
+	struct usb_ctrlrequest dr;
+ };
+
+Because 'struct usb_ctrlrequest' is __packed, setup packet is unaligned and
+DMA mapping of both 'data' and 'dr' confuses ARM/sunxi, leading to memory
+corruptions and freezes.
+
+Patch changes setup packet to be allocated separately.
+
+[v2]:
+ - Use WARN_ON_ONCE instead of WARN_ON
+
+Signed-off-by: Jussi Kivilinna
+Signed-off-by: John W. Linville
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/wireless/rtlwifi/usb.c |   44 +++++++++++++++++++++++--------------
+ 1 file changed, 28 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -42,8 +42,12 @@
+ 
+ static void usbctrl_async_callback(struct urb *urb)
+ {
+-	if (urb)
+-		kfree(urb->context);
++	if (urb) {
++		/* free dr */
++		kfree(urb->setup_packet);
++		/* free databuf */
++		kfree(urb->transfer_buffer);
++	}
+ }
+ 
+ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+@@ -55,39 +59,47 @@ static int _usbctrl_vendorreq_async_writ
+ 	u8 reqtype;
+ 	struct usb_ctrlrequest *dr;
+ 	struct urb *urb;
+-	struct rtl819x_async_write_data {
+-		u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+-		struct usb_ctrlrequest dr;
+-	} *buf;
++	const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
++	u8 *databuf;
++
++	if (WARN_ON_ONCE(len > databuf_maxlen))
++		len = databuf_maxlen;
+ 
+ 	pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ 	reqtype = REALTEK_USB_VENQT_WRITE;
+ 
+-	buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+-	if (!buf)
++	dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
++	if (!dr)
+ 		return -ENOMEM;
+ 
++	databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
++	if (!databuf) {
++		kfree(dr);
++		return -ENOMEM;
++	}
++
+ 	urb = usb_alloc_urb(0, GFP_ATOMIC);
+ 	if (!urb) {
+-		kfree(buf);
++		kfree(databuf);
++		kfree(dr);
+ 		return -ENOMEM;
+ 	}
+ 
+-	dr = &buf->dr;
+-
+ 	dr->bRequestType = reqtype;
+ 	dr->bRequest = request;
+ 	dr->wValue = cpu_to_le16(value);
+ 	dr->wIndex = cpu_to_le16(index);
+ 	dr->wLength = cpu_to_le16(len);
+ 	/* data are already in little-endian order */
+-	memcpy(buf, pdata, len);
++	memcpy(databuf, pdata, len);
+ 	usb_fill_control_urb(urb, udev, pipe,
+-			     (unsigned char *)dr, buf, len,
+-			     usbctrl_async_callback, buf);
++			     (unsigned char *)dr, databuf, len,
++			     usbctrl_async_callback, NULL);
+ 	rc = usb_submit_urb(urb, GFP_ATOMIC);
+-	if (rc < 0)
+-		kfree(buf);
++	if (rc < 0) {
++		kfree(databuf);
++		kfree(dr);
++	}
+ 	usb_free_urb(urb);
+ 	return rc;
+ }
diff --git a/queue-3.4/series b/queue-3.4/series
index 4fa9b7ee2d4..236b421fceb 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -12,3 +12,12 @@ alsa-rme32.c-irq-enabling-after-spin_lock_irq.patch
 tty-prevent-deadlock-in-n_gsm-driver.patch
 tty-set_termios-set_termiox-should-not-return-eintr.patch
 usb-serial-fix-null-pointer-dereferences-on-disconnect.patch
+b43-increase-number-of-rx-dma-slots.patch
+rtlwifi-rtl8192cu-add-new-usb-id.patch
+rtlwifi-usb-allocate-urb-control-message-setup_packet-and-data-buffer-separately.patch
+xen-send-spinlock-ipi-to-all-waiters.patch
+xen-close-evtchn-port-if-binding-to-irq-fails.patch
+driver-core-treat-unregistered-bus_types-as-having-no-devices.patch
+mm-mmu_notifier-have-mmu_notifiers-use-a-global-srcu-so-they-may-safely-schedule.patch
+mm-mmu_notifier-make-the-mmu_notifier-srcu-static.patch
+mmu_notifier_unregister-null-pointer-deref-and-multiple-release-callouts.patch
diff --git a/queue-3.4/xen-close-evtchn-port-if-binding-to-irq-fails.patch b/queue-3.4/xen-close-evtchn-port-if-binding-to-irq-fails.patch
new file mode 100644
index 00000000000..c74e73fc732
--- /dev/null
+++ b/queue-3.4/xen-close-evtchn-port-if-binding-to-irq-fails.patch
@@ -0,0 +1,43 @@
+From e7e44e444876478d50630f57b0c31d29f6725020 Mon Sep 17 00:00:00 2001
+From: Wei Liu
+Date: Mon, 18 Feb 2013 14:57:58 +0000
+Subject: xen: close evtchn port if binding to irq fails
+
+From: Wei Liu
+
+commit e7e44e444876478d50630f57b0c31d29f6725020 upstream.
+
+Signed-off-by: Wei Liu
+Signed-off-by: Konrad Rzeszutek Wilk
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/xen/evtchn.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct pe
+ 			  u->name, (void *)(unsigned long)port);
+ 	if (rc >= 0)
+ 		rc = evtchn_make_refcounted(port);
++	else {
++		/* bind failed, should close the port now */
++		struct evtchn_close close;
++		close.port = port;
++		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++			BUG();
++		set_port_user(port, NULL);
++	}
+ 
+ 	return rc;
+ }
+@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(stru
+ {
+ 	int irq = irq_from_evtchn(port);
+ 
++	BUG_ON(irq < 0);
++
+ 	unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+ 
+ 	set_port_user(port, NULL);
diff --git a/queue-3.4/xen-send-spinlock-ipi-to-all-waiters.patch b/queue-3.4/xen-send-spinlock-ipi-to-all-waiters.patch
new file mode 100644
index 00000000000..d4c724b745c
--- /dev/null
+++ b/queue-3.4/xen-send-spinlock-ipi-to-all-waiters.patch
@@ -0,0 +1,63 @@
+From 76eaca031f0af2bb303e405986f637811956a422 Mon Sep 17 00:00:00 2001
+From: Stefan Bader
+Date: Fri, 15 Feb 2013 09:48:52 +0100
+Subject: xen: Send spinlock IPI to all waiters
+
+From: Stefan Bader
+
+commit 76eaca031f0af2bb303e405986f637811956a422 upstream.
+
+There is a loophole between Xen's current implementation of
+pv-spinlocks and the scheduler. This was triggerable through
+a testcase until v3.6 changed the TLB flushing code. The
+problem potentially is still there just not observable in the
+same way.
+
+What could happen was (is):
+
+1. CPU n tries to schedule task x away and goes into a slow
+   wait for the runq lock of CPU n-# (must be one with a lower
+   number).
+2. CPU n-#, while processing softirqs, tries to balance domains
+   and goes into a slow wait for its own runq lock (for updating
+   some records). Since this is a spin_lock_irqsave in softirq
+   context, interrupts will be re-enabled for the duration of
+   the poll_irq hypercall used by Xen.
+3. Before the runq lock of CPU n-# is unlocked, CPU n-1 receives
+   an interrupt (e.g. endio) and when processing the interrupt,
+   tries to wake up task x. But that is in schedule and still
+   on_cpu, so try_to_wake_up goes into a tight loop.
+4. The runq lock of CPU n-# gets unlocked, but the message only
+   gets sent to the first waiter, which is CPU n-# and that is
+   busily stuck.
+5. CPU n-# never returns from the nested interruption to take and
+   release the lock because the scheduler uses a busy wait.
+   And CPU n never finishes the task migration because the unlock
+   notification only went to CPU n-#.
+
+To avoid this and since the unlocking code has no real sense of
+which waiter is best suited to grab the lock, just send the IPI
+to all of them. This causes the waiters to return from the hyper-
+call (those not interrupted at least) and do active spinlocking.
+
+BugLink: http://bugs.launchpad.net/bugs/1011792
+
+Acked-by: Jan Beulich
+Signed-off-by: Stefan Bader
+Signed-off-by: Konrad Rzeszutek Wilk
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/xen/spinlock.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slo
+ 		if (per_cpu(lock_spinners, cpu) == xl) {
+ 			ADD_STATS(released_slow_kicked, 1);
+ 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+-			break;
+ 		}
+ 	}
+ }
-- 
2.47.3
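
Taken together, the three mmu_notifier patches in this queue converge on one SRCU discipline: a single global srcu_struct (so per-mm memory cost stays flat), unlink-before-callout under the list spinlock (so each ->release() runs exactly once), and synchronize_srcu() on every teardown path. The sketch below is an illustrative reduction of that pattern, not code from any patch above; the demo_* names are invented, and only the standard <linux/srcu.h> and <linux/rculist.h> APIs used by the patches themselves are assumed.

	#include <linux/module.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>
	#include <linux/srcu.h>

	/* One global SRCU domain for every list, as in the 3.4 backport. */
	static struct srcu_struct demo_srcu;

	static DEFINE_SPINLOCK(demo_lock);	/* protects demo_notifiers */
	static HLIST_HEAD(demo_notifiers);

	struct demo_notifier {
		struct hlist_node hlist;
		void (*release)(struct demo_notifier *dn);
	};

	/*
	 * Teardown side (compare __mmu_notifier_release() after the race
	 * fix): unlink each entry under the lock, drop the lock for the
	 * callout so it may sleep, and hold the SRCU read side so that
	 * synchronize_srcu() in a concurrent unregister path must wait.
	 */
	static void demo_release_all(void)
	{
		struct demo_notifier *dn;
		int id;

		id = srcu_read_lock(&demo_srcu);
		spin_lock(&demo_lock);
		while (!hlist_empty(&demo_notifiers)) {
			dn = hlist_entry(demo_notifiers.first,
					 struct demo_notifier, hlist);
			/* Unlink first: a racing unregister skips the callout. */
			hlist_del_init_rcu(&dn->hlist);
			spin_unlock(&demo_lock);

			if (dn->release)
				dn->release(dn);

			spin_lock(&demo_lock);
		}
		spin_unlock(&demo_lock);
		srcu_read_unlock(&demo_srcu, id);

		/* Wait out callouts still running from the other path. */
		synchronize_srcu(&demo_srcu);
	}

	static int __init demo_srcu_init(void)
	{
		/* Mirrors mmu_notifier_init(): the domain must exist before use. */
		return init_srcu_struct(&demo_srcu);
	}
	module_init(demo_srcu_init);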