From: Greg Kroah-Hartman Date: Wed, 11 Jul 2012 22:53:28 +0000 (-0700) Subject: 3.4-stable patches X-Git-Tag: v3.0.37~11 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=6f0461f87f3a2db3ac4a32c5c150c3fd6e1149e8;p=thirdparty%2Fkernel%2Fstable-queue.git 3.4-stable patches added patches: ath9k-fix-panic-caused-by-returning-a-descriptor-we-have-queued-for-reuse.patch macvtap-zerocopy-validate-vectors-before-building-skb.patch mm-pmd_read_atomic-fix-32bit-pae-pmd-walk-vs-pmd_populate-smp-race-condition.patch net-wireless-ipw2x00-add-supported-cipher-suites-to-wiphy-initialization.patch raid5-delayed-stripe-fix.patch rtl8187-brightness_set-can-not-sleep.patch tg3-apply-short-dma-frag-workaround-to-5906.patch thp-avoid-atomic64_read-in-pmd_read_atomic-for-32bit-pae.patch --- diff --git a/queue-3.4/ath9k-fix-panic-caused-by-returning-a-descriptor-we-have-queued-for-reuse.patch b/queue-3.4/ath9k-fix-panic-caused-by-returning-a-descriptor-we-have-queued-for-reuse.patch new file mode 100644 index 00000000000..04d10a35c22 --- /dev/null +++ b/queue-3.4/ath9k-fix-panic-caused-by-returning-a-descriptor-we-have-queued-for-reuse.patch @@ -0,0 +1,67 @@ +From 6bb51c70cabaadddc54a6454844eceba91a56083 Mon Sep 17 00:00:00 2001 +From: Tom Hughes +Date: Wed, 27 Jun 2012 18:21:15 +0100 +Subject: ath9k: fix panic caused by returning a descriptor we have queued for reuse + +From: Tom Hughes + +commit 6bb51c70cabaadddc54a6454844eceba91a56083 upstream. + +Commit 3a2923e83c introduced a bug when a corrupt descriptor +is encountered - although the following descriptor is discarded +and returned to the queue for reuse the associated frame is +also returned for processing. This leads to a panic: + +BUG: unable to handle kernel NULL pointer dereference at 000000000000003a +IP: [] ath_rx_tasklet+0x165/0x1b00 [ath9k] +Call Trace: + +[] ? map_single+0x60/0x60 +[] ? ath9k_ioread32+0x34/0x90 [ath9k] +[] athk9k_tasklet+0xdc/0x160 [ath9k] +[] tasklet_action+0x63/0xd0 +[] __do_softirq+0xc0/0x1e0 +[] ? native_sched_clock+0x13/0x80 +[] call_softirq+0x1c/0x30 +[] do_softirq+0x75/0xb0 +[] irq_exit+0xb5/0xc0 +[] do_IRQ+0x63/0xe0 +[] common_interrupt+0x6a/0x6a + +[] ? intel_idle+0xea/0x150 +[] ? intel_idle+0xcb/0x150 +[] cpuidle_enter+0x19/0x20 +[] cpuidle_idle_call+0xa9/0x240 +[] cpu_idle+0xaf/0x120 +[] rest_init+0x72/0x74 +[] start_kernel+0x3b7/0x3c4 +[] ? repair_env_string+0x5e/0x5e +[] x86_64_start_reservations+0x131/0x135 +[] x86_64_start_kernel+0x100/0x10f + +Making sure bf is cleared to NULL in this case restores the +old behaviour. + +Signed-off-by: Tom Hughes +Signed-off-by: John W. 
Linville +Cc: Josh Boyer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/wireless/ath/ath9k/recv.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireless/ath/ath9k/recv.c ++++ b/drivers/net/wireless/ath/ath9k/recv.c +@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct + __skb_unlink(skb, &rx_edma->rx_fifo); + list_add_tail(&bf->list, &sc->rx.rxbuf); + ath_rx_edma_buf_link(sc, qtype); +- } else { +- bf = NULL; + } ++ ++ bf = NULL; + } + + *dest = bf; diff --git a/queue-3.4/macvtap-zerocopy-validate-vectors-before-building-skb.patch b/queue-3.4/macvtap-zerocopy-validate-vectors-before-building-skb.patch new file mode 100644 index 00000000000..df4c2e3a6d9 --- /dev/null +++ b/queue-3.4/macvtap-zerocopy-validate-vectors-before-building-skb.patch @@ -0,0 +1,82 @@ +From b92946e2919134ebe2a4083e4302236295ea2a73 Mon Sep 17 00:00:00 2001 +From: Jason Wang +Date: Wed, 2 May 2012 11:42:15 +0800 +Subject: macvtap: zerocopy: validate vectors before building skb + +From: Jason Wang + +commit b92946e2919134ebe2a4083e4302236295ea2a73 upstream. + +There're several reasons that the vectors need to be validated: + +- Return error when caller provides vectors whose num is greater than UIO_MAXIOV. +- Linearize part of skb when userspace provides vectors grater than MAX_SKB_FRAGS. +- Return error when userspace provides vectors whose total length may exceed +- MAX_SKB_FRAGS * PAGE_SIZE. + +Signed-off-by: Jason Wang +Signed-off-by: Michael S. Tsirkin +Cc: Josh Boyer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/macvtap.c | 25 +++++++++++++++++++++---- + 1 file changed, 21 insertions(+), 4 deletions(-) + +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -528,9 +528,10 @@ static int zerocopy_sg_from_iovec(struct + } + base = (unsigned long)from->iov_base + offset1; + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; ++ if (i + size > MAX_SKB_FRAGS) ++ return -EMSGSIZE; + num_pages = get_user_pages_fast(base, size, 0, &page[i]); +- if ((num_pages != size) || +- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags)) ++ if (num_pages != size) + /* put_page is in skb free */ + return -EFAULT; + skb->data_len += len; +@@ -647,7 +648,7 @@ static ssize_t macvtap_get_user(struct m + int err; + struct virtio_net_hdr vnet_hdr = { 0 }; + int vnet_hdr_len = 0; +- int copylen; ++ int copylen = 0; + bool zerocopy = false; + + if (q->flags & IFF_VNET_HDR) { +@@ -676,15 +677,31 @@ static ssize_t macvtap_get_user(struct m + if (unlikely(len < ETH_HLEN)) + goto err; + ++ err = -EMSGSIZE; ++ if (unlikely(count > UIO_MAXIOV)) ++ goto err; ++ + if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) + zerocopy = true; + + if (zerocopy) { ++ /* Userspace may produce vectors with count greater than ++ * MAX_SKB_FRAGS, so we need to linearize parts of the skb ++ * to let the rest of data to be fit in the frags. ++ */ ++ if (count > MAX_SKB_FRAGS) { ++ copylen = iov_length(iv, count - MAX_SKB_FRAGS); ++ if (copylen < vnet_hdr_len) ++ copylen = 0; ++ else ++ copylen -= vnet_hdr_len; ++ } + /* There are 256 bytes to be copied in skb, so there is enough + * room for skb expand head in case it is used. + * The rest buffer is mapped from userspace. 
+ */ +- copylen = vnet_hdr.hdr_len; ++ if (copylen < vnet_hdr.hdr_len) ++ copylen = vnet_hdr.hdr_len; + if (!copylen) + copylen = GOODCOPY_LEN; + } else diff --git a/queue-3.4/mm-pmd_read_atomic-fix-32bit-pae-pmd-walk-vs-pmd_populate-smp-race-condition.patch b/queue-3.4/mm-pmd_read_atomic-fix-32bit-pae-pmd-walk-vs-pmd_populate-smp-race-condition.patch new file mode 100644 index 00000000000..8ba6b839f74 --- /dev/null +++ b/queue-3.4/mm-pmd_read_atomic-fix-32bit-pae-pmd-walk-vs-pmd_populate-smp-race-condition.patch @@ -0,0 +1,213 @@ +From 26c191788f18129af0eb32a358cdaea0c7479626 Mon Sep 17 00:00:00 2001 +From: Andrea Arcangeli +Date: Tue, 29 May 2012 15:06:49 -0700 +Subject: mm: pmd_read_atomic: fix 32bit PAE pmd walk vs pmd_populate SMP race condition + +From: Andrea Arcangeli + +commit 26c191788f18129af0eb32a358cdaea0c7479626 upstream. + +When holding the mmap_sem for reading, pmd_offset_map_lock should only +run on a pmd_t that has been read atomically from the pmdp pointer, +otherwise we may read only half of it leading to this crash. + +PID: 11679 TASK: f06e8000 CPU: 3 COMMAND: "do_race_2_panic" + #0 [f06a9dd8] crash_kexec at c049b5ec + #1 [f06a9e2c] oops_end at c083d1c2 + #2 [f06a9e40] no_context at c0433ded + #3 [f06a9e64] bad_area_nosemaphore at c043401a + #4 [f06a9e6c] __do_page_fault at c0434493 + #5 [f06a9eec] do_page_fault at c083eb45 + #6 [f06a9f04] error_code (via page_fault) at c083c5d5 + EAX: 01fb470c EBX: fff35000 ECX: 00000003 EDX: 00000100 EBP: + 00000000 + DS: 007b ESI: 9e201000 ES: 007b EDI: 01fb4700 GS: 00e0 + CS: 0060 EIP: c083bc14 ERR: ffffffff EFLAGS: 00010246 + #7 [f06a9f38] _spin_lock at c083bc14 + #8 [f06a9f44] sys_mincore at c0507b7d + #9 [f06a9fb0] system_call at c083becd + start len + EAX: ffffffda EBX: 9e200000 ECX: 00001000 EDX: 6228537f + DS: 007b ESI: 00000000 ES: 007b EDI: 003d0f00 + SS: 007b ESP: 62285354 EBP: 62285388 GS: 0033 + CS: 0073 EIP: 00291416 ERR: 000000da EFLAGS: 00000286 + +This should be a longstanding bug affecting x86 32bit PAE without THP. +Only archs with 64bit large pmd_t and 32bit unsigned long should be +affected. + +With THP enabled the barrier() in pmd_none_or_trans_huge_or_clear_bad() +would partly hide the bug when the pmd transition from none to stable, +by forcing a re-read of the *pmd in pmd_offset_map_lock, but when THP is +enabled a new set of problem arises by the fact could then transition +freely in any of the none, pmd_trans_huge or pmd_trans_stable states. +So making the barrier in pmd_none_or_trans_huge_or_clear_bad() +unconditional isn't good idea and it would be a flakey solution. + +This should be fully fixed by introducing a pmd_read_atomic that reads +the pmd in order with THP disabled, or by reading the pmd atomically +with cmpxchg8b with THP enabled. + +Luckily this new race condition only triggers in the places that must +already be covered by pmd_none_or_trans_huge_or_clear_bad() so the fix +is localized there but this bug is not related to THP. + +NOTE: this can trigger on x86 32bit systems with PAE enabled with more +than 4G of ram, otherwise the high part of the pmd will never risk to be +truncated because it would be zero at all times, in turn so hiding the +SMP race. + +This bug was discovered and fully debugged by Ulrich, quote: + +---- +[..] +pmd_none_or_trans_huge_or_clear_bad() loads the content of edx and +eax. 
+ + 496 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t + *pmd) + 497 { + 498 /* depend on compiler for an atomic pmd read */ + 499 pmd_t pmdval = *pmd; + + // edi = pmd pointer +0xc0507a74 : mov 0x8(%esp),%edi +... + // edx = PTE page table high address +0xc0507a84 : mov 0x4(%edi),%edx +... + // eax = PTE page table low address +0xc0507a8e : mov (%edi),%eax + +[..] + +Please note that the PMD is not read atomically. These are two "mov" +instructions where the high order bits of the PMD entry are fetched +first. Hence, the above machine code is prone to the following race. + +- The PMD entry {high|low} is 0x0000000000000000. + The "mov" at 0xc0507a84 loads 0x00000000 into edx. + +- A page fault (on another CPU) sneaks in between the two "mov" + instructions and instantiates the PMD. + +- The PMD entry {high|low} is now 0x00000003fda38067. + The "mov" at 0xc0507a8e loads 0xfda38067 into eax. +---- + +Reported-by: Ulrich Obergfell +Signed-off-by: Andrea Arcangeli +Cc: Mel Gorman +Cc: Hugh Dickins +Cc: Larry Woodman +Cc: Petr Matousek +Cc: Rik van Riel +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/pgtable-3level.h | 50 ++++++++++++++++++++++++++++++++++ + include/asm-generic/pgtable.h | 22 +++++++++++++- + 2 files changed, 70 insertions(+), 2 deletions(-) + +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t + ptep->pte_low = pte.pte_low; + } + ++#define pmd_read_atomic pmd_read_atomic ++/* ++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with ++ * a "*pmdp" dereference done by gcc. Problem is, in certain places ++ * where pte_offset_map_lock is called, concurrent page faults are ++ * allowed, if the mmap_sem is hold for reading. An example is mincore ++ * vs page faults vs MADV_DONTNEED. On the page fault side ++ * pmd_populate rightfully does a set_64bit, but if we're reading the ++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen ++ * because gcc will not read the 64bit of the pmd atomically. To fix ++ * this all places running pmd_offset_map_lock() while holding the ++ * mmap_sem in read mode, shall read the pmdp pointer using this ++ * function to know if the pmd is null nor not, and in turn to know if ++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd ++ * operations. ++ * ++ * Without THP if the mmap_sem is hold for reading, the ++ * pmd can only transition from null to not null while pmd_read_atomic runs. ++ * So there's no need of literally reading it atomically. ++ * ++ * With THP if the mmap_sem is hold for reading, the pmd can become ++ * THP or null or point to a pte (and in turn become "stable") at any ++ * time under pmd_read_atomic, so it's mandatory to read it atomically ++ * with cmpxchg8b. ++ */ ++#ifndef CONFIG_TRANSPARENT_HUGEPAGE ++static inline pmd_t pmd_read_atomic(pmd_t *pmdp) ++{ ++ pmdval_t ret; ++ u32 *tmp = (u32 *)pmdp; ++ ++ ret = (pmdval_t) (*tmp); ++ if (ret) { ++ /* ++ * If the low part is null, we must not read the high part ++ * or we can end up with a partial pmd. 
++ */ ++ smp_rmb(); ++ ret |= ((pmdval_t)*(tmp + 1)) << 32; ++ } ++ ++ return (pmd_t) { ret }; ++} ++#else /* CONFIG_TRANSPARENT_HUGEPAGE */ ++static inline pmd_t pmd_read_atomic(pmd_t *pmdp) ++{ ++ return (pmd_t) { atomic64_read((atomic64_t *)pmdp) }; ++} ++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++ + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + { + set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -446,6 +446,18 @@ static inline int pmd_write(pmd_t pmd) + #endif /* __HAVE_ARCH_PMD_WRITE */ + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + ++#ifndef pmd_read_atomic ++static inline pmd_t pmd_read_atomic(pmd_t *pmdp) ++{ ++ /* ++ * Depend on compiler for an atomic pmd read. NOTE: this is ++ * only going to work, if the pmdval_t isn't larger than ++ * an unsigned long. ++ */ ++ return *pmdp; ++} ++#endif ++ + /* + * This function is meant to be used by sites walking pagetables with + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and +@@ -459,11 +471,17 @@ static inline int pmd_write(pmd_t pmd) + * undefined so behaving like if the pmd was none is safe (because it + * can return none anyway). The compiler level barrier() is critically + * important to compute the two checks atomically on the same pmdval. ++ * ++ * For 32bit kernels with a 64bit large pmd_t this automatically takes ++ * care of reading the pmd atomically to avoid SMP race conditions ++ * against pmd_populate() when the mmap_sem is hold for reading by the ++ * caller (a special atomic read not done by "gcc" as in the generic ++ * version above, is also needed when THP is disabled because the page ++ * fault can populate the pmd from under us). + */ + static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) + { +- /* depend on compiler for an atomic pmd read */ +- pmd_t pmdval = *pmd; ++ pmd_t pmdval = pmd_read_atomic(pmd); + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. diff --git a/queue-3.4/net-wireless-ipw2x00-add-supported-cipher-suites-to-wiphy-initialization.patch b/queue-3.4/net-wireless-ipw2x00-add-supported-cipher-suites-to-wiphy-initialization.patch new file mode 100644 index 00000000000..4a39e176222 --- /dev/null +++ b/queue-3.4/net-wireless-ipw2x00-add-supported-cipher-suites-to-wiphy-initialization.patch @@ -0,0 +1,94 @@ +From a141e6a0097118bb35024485f1faffc0d9042f5c Mon Sep 17 00:00:00 2001 +From: Stanislav Yakovlev +Date: Tue, 10 Apr 2012 21:44:47 -0400 +Subject: net/wireless: ipw2x00: add supported cipher suites to wiphy initialization + +From: Stanislav Yakovlev + +commit a141e6a0097118bb35024485f1faffc0d9042f5c upstream. + +Driver doesn't report its supported cipher suites through cfg80211 +interface. It still uses wext interface and probably will not work +through nl80211, but will at least correctly advertise supported +features. + +Bug was reported by Omar Siam. +https://bugzilla.kernel.org/show_bug.cgi?id=43049 + +Signed-off-by: Stanislav Yakovlev +Signed-off-by: John W. 
Linville +Cc: Josh Boyer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/wireless/ipw2x00/ipw.h | 23 +++++++++++++++++++++++ + drivers/net/wireless/ipw2x00/ipw2100.c | 4 ++++ + drivers/net/wireless/ipw2x00/ipw2200.c | 4 ++++ + 3 files changed, 31 insertions(+) + +--- /dev/null ++++ b/drivers/net/wireless/ipw2x00/ipw.h +@@ -0,0 +1,23 @@ ++/* ++ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver ++ * ++ * Copyright 2012 Stanislav Yakovlev ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#ifndef __IPW_H__ ++#define __IPW_H__ ++ ++#include ++ ++static const u32 ipw_cipher_suites[] = { ++ WLAN_CIPHER_SUITE_WEP40, ++ WLAN_CIPHER_SUITE_WEP104, ++ WLAN_CIPHER_SUITE_TKIP, ++ WLAN_CIPHER_SUITE_CCMP, ++}; ++ ++#endif +--- a/drivers/net/wireless/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/ipw2x00/ipw2100.c +@@ -166,6 +166,7 @@ that only one external action is invoked + #include + + #include "ipw2100.h" ++#include "ipw.h" + + #define IPW2100_VERSION "git-1.2.2" + +@@ -1946,6 +1947,9 @@ static int ipw2100_wdev_init(struct net_ + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; + } + ++ wdev->wiphy->cipher_suites = ipw_cipher_suites; ++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); ++ + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); + if (wiphy_register(wdev->wiphy)) { + ipw2100_down(priv); +--- a/drivers/net/wireless/ipw2x00/ipw2200.c ++++ b/drivers/net/wireless/ipw2x00/ipw2200.c +@@ -34,6 +34,7 @@ + #include + #include + #include "ipw2200.h" ++#include "ipw.h" + + + #ifndef KBUILD_EXTMOD +@@ -11544,6 +11545,9 @@ static int ipw_wdev_init(struct net_devi + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; + } + ++ wdev->wiphy->cipher_suites = ipw_cipher_suites; ++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); ++ + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); + + /* With that information in place, we can now register the wiphy... */ diff --git a/queue-3.4/raid5-delayed-stripe-fix.patch b/queue-3.4/raid5-delayed-stripe-fix.patch new file mode 100644 index 00000000000..ee135d561e6 --- /dev/null +++ b/queue-3.4/raid5-delayed-stripe-fix.patch @@ -0,0 +1,41 @@ +From fab363b5ff502d1b39ddcfec04271f5858d9f26e Mon Sep 17 00:00:00 2001 +From: Shaohua Li +Date: Tue, 3 Jul 2012 15:57:19 +1000 +Subject: raid5: delayed stripe fix + +From: Shaohua Li + +commit fab363b5ff502d1b39ddcfec04271f5858d9f26e upstream. + +There isn't locking setting STRIPE_DELAYED and STRIPE_PREREAD_ACTIVE bits, but +the two bits have relationship. A delayed stripe can be moved to hold list only +when preread active stripe count is below IO_THRESHOLD. If a stripe has both +the bits set, such stripe will be in delayed list and preread count not 0, +which will make such stripe never leave delayed list. 
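The stuck-stripe scenario above is easier to see in isolation. The fragment below is a hedged, userspace-only model of the list choice made in __release_stripe(): the flag names are borrowed from the driver, but the helper functions and main() are invented for illustration and are not the raid5 code itself, which runs on struct stripe_head under conf->device_lock.

/*
 * Simplified model of which list a to-be-handled stripe is placed on.
 * Only the two flags discussed in the changelog are modelled.
 */
#include <stdio.h>

#define STRIPE_DELAYED        (1u << 0)
#define STRIPE_PREREAD_ACTIVE (1u << 1)

/* Old check: a stripe with both bits set lands on the delayed list while
 * still holding a preread-active reference, so the count that gates the
 * delayed list may never fall below IO_THRESHOLD and the stripe stalls. */
static const char *pick_list_old(unsigned int state)
{
	if (state & STRIPE_DELAYED)
		return "delayed_list";
	return "handle_list";
}

/* Patched check: such a stripe goes to the handle list instead (the real
 * patch also clears STRIPE_DELAYED in that branch), so it keeps moving. */
static const char *pick_list_new(unsigned int state)
{
	if ((state & STRIPE_DELAYED) && !(state & STRIPE_PREREAD_ACTIVE))
		return "delayed_list";
	return "handle_list";
}

int main(void)
{
	unsigned int both = STRIPE_DELAYED | STRIPE_PREREAD_ACTIVE;

	printf("old logic: %s\n", pick_list_old(both));	/* delayed_list */
	printf("new logic: %s\n", pick_list_new(both));	/* handle_list */
	return 0;
}

With both bits set, the old check parks the stripe on the delayed list indefinitely; the new check sends it to the handle list so processing continues.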
+ +Signed-off-by: Shaohua Li +Signed-off-by: NeilBrown +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/raid5.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -196,12 +196,14 @@ static void __release_stripe(struct r5co + BUG_ON(!list_empty(&sh->lru)); + BUG_ON(atomic_read(&conf->active_stripes)==0); + if (test_bit(STRIPE_HANDLE, &sh->state)) { +- if (test_bit(STRIPE_DELAYED, &sh->state)) ++ if (test_bit(STRIPE_DELAYED, &sh->state) && ++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + list_add_tail(&sh->lru, &conf->delayed_list); + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) + list_add_tail(&sh->lru, &conf->bitmap_list); + else { ++ clear_bit(STRIPE_DELAYED, &sh->state); + clear_bit(STRIPE_BIT_DELAY, &sh->state); + list_add_tail(&sh->lru, &conf->handle_list); + } diff --git a/queue-3.4/rtl8187-brightness_set-can-not-sleep.patch b/queue-3.4/rtl8187-brightness_set-can-not-sleep.patch new file mode 100644 index 00000000000..0d38eecff2e --- /dev/null +++ b/queue-3.4/rtl8187-brightness_set-can-not-sleep.patch @@ -0,0 +1,55 @@ +From 0fde0a8cfd0ede7f310d6a681c8e5a7cb3e32406 Mon Sep 17 00:00:00 2001 +From: Stanislaw Gruszka +Date: Wed, 16 May 2012 11:06:21 +0200 +Subject: rtl8187: ->brightness_set can not sleep + +From: Stanislaw Gruszka + +commit 0fde0a8cfd0ede7f310d6a681c8e5a7cb3e32406 upstream. + +Fix: + +BUG: sleeping function called from invalid context at kernel/workqueue.c:2547 +in_atomic(): 1, irqs_disabled(): 0, pid: 629, name: wpa_supplicant +2 locks held by wpa_supplicant/629: + #0: (rtnl_mutex){+.+.+.}, at: [] rtnl_lock+0x14/0x20 + #1: (&trigger->leddev_list_lock){.+.?..}, at: [] led_trigger_event+0x21/0x80 +Pid: 629, comm: wpa_supplicant Not tainted 3.3.0-0.rc3.git5.1.fc17.i686 +Call Trace: + [] __might_sleep+0x126/0x1d0 + [] wait_on_work+0x2c/0x1d0 + [] __cancel_work_timer+0x6a/0x120 + [] cancel_delayed_work_sync+0x10/0x20 + [] rtl8187_led_brightness_set+0x82/0xf0 [rtl8187] + [] led_trigger_event+0x5c/0x80 + [] ieee80211_led_radio+0x1d/0x40 [mac80211] + [] ieee80211_stop_device+0x13/0x230 [mac80211] + +Removing _sync is ok, because if led_on work is currently running +it will be finished before led_off work start to perform, since +they are always queued on the same mac80211 local->workqueue. + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=795176 + +Signed-off-by: Stanislaw Gruszka +Acked-by: Larry Finger +Acked-by: Hin-Tak Leung +Signed-off-by: John W. 
Linville +Cc: Josh Boyer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/wireless/rtl818x/rtl8187/leds.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c ++++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c +@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(s + radio_on = true; + } else if (radio_on) { + radio_on = false; +- cancel_delayed_work_sync(&priv->led_on); ++ cancel_delayed_work(&priv->led_on); + ieee80211_queue_delayed_work(hw, &priv->led_off, 0); + } + } else if (radio_on) { diff --git a/queue-3.4/series b/queue-3.4/series index 16488161d5d..8185da9d4a0 100644 --- a/queue-3.4/series +++ b/queue-3.4/series @@ -163,3 +163,11 @@ iwlwifi-remove-log_event-debugfs-file-debugging-is-disabled.patch tracing-change-cpu-ring-buffer-state-from-tracing_cpumask.patch mwifiex-fix-wrong-return-values-in-add_virtual_intf-error-cases.patch gspca-core-fix-buffers-staying-in-queued-state-after-a-stream_off.patch +raid5-delayed-stripe-fix.patch +tg3-apply-short-dma-frag-workaround-to-5906.patch +ath9k-fix-panic-caused-by-returning-a-descriptor-we-have-queued-for-reuse.patch +mm-pmd_read_atomic-fix-32bit-pae-pmd-walk-vs-pmd_populate-smp-race-condition.patch +thp-avoid-atomic64_read-in-pmd_read_atomic-for-32bit-pae.patch +rtl8187-brightness_set-can-not-sleep.patch +macvtap-zerocopy-validate-vectors-before-building-skb.patch +net-wireless-ipw2x00-add-supported-cipher-suites-to-wiphy-initialization.patch diff --git a/queue-3.4/tg3-apply-short-dma-frag-workaround-to-5906.patch b/queue-3.4/tg3-apply-short-dma-frag-workaround-to-5906.patch new file mode 100644 index 00000000000..09536501ce4 --- /dev/null +++ b/queue-3.4/tg3-apply-short-dma-frag-workaround-to-5906.patch @@ -0,0 +1,34 @@ +From b7abee6ef888117f92db370620ebf116a38e3f4d Mon Sep 17 00:00:00 2001 +From: Matt Carlson +Date: Thu, 7 Jun 2012 12:56:54 +0000 +Subject: tg3: Apply short DMA frag workaround to 5906 + +From: Matt Carlson + +commit b7abee6ef888117f92db370620ebf116a38e3f4d upstream. + +5906 devices also need the short DMA fragment workaround. This patch +makes the necessary change. + +Signed-off-by: Matt Carlson +Tested-by: Christian Kujau +Signed-off-by: David S. Miller +Cc: Josh Boyer +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/net/ethernet/broadcom/tg3.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -14248,7 +14248,8 @@ static int __devinit tg3_get_invariants( + } + } + +- if (tg3_flag(tp, 5755_PLUS)) ++ if (tg3_flag(tp, 5755_PLUS) || ++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) diff --git a/queue-3.4/thp-avoid-atomic64_read-in-pmd_read_atomic-for-32bit-pae.patch b/queue-3.4/thp-avoid-atomic64_read-in-pmd_read_atomic-for-32bit-pae.patch new file mode 100644 index 00000000000..81ebc4e90f6 --- /dev/null +++ b/queue-3.4/thp-avoid-atomic64_read-in-pmd_read_atomic-for-32bit-pae.patch @@ -0,0 +1,120 @@ +From e4eed03fd06578571c01d4f1478c874bb432c815 Mon Sep 17 00:00:00 2001 +From: Andrea Arcangeli +Date: Wed, 20 Jun 2012 12:52:57 -0700 +Subject: thp: avoid atomic64_read in pmd_read_atomic for 32bit PAE + +From: Andrea Arcangeli + +commit e4eed03fd06578571c01d4f1478c874bb432c815 upstream. + +In the x86 32bit PAE CONFIG_TRANSPARENT_HUGEPAGE=y case while holding the +mmap_sem for reading, cmpxchg8b cannot be used to read pmd contents under +Xen. 
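As a rough illustration of the ordering the changelog goes on to describe, here is a userspace sketch of a low-half-first read of a 64-bit entry on a little-endian machine. The uint32_t/uint64_t types, the rmb() macro and main() are stand-ins invented for the example; they are not the kernel's pmdval_t, smp_rmb() or pmd_read_atomic() themselves.

#include <stdint.h>

/* Stand-in for the kernel's smp_rmb() (GCC/Clang builtin fence). */
#define rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

/*
 * Read a 64-bit entry as two 32-bit halves, low half first.  If the low
 * half is zero the entry is reported as none and the high half is never
 * examined; if the low half is non-zero the high half is read only after
 * the barrier, mirroring the guarantee described below.
 */
static uint64_t read_entry_low_first(const volatile uint32_t *p)
{
	uint64_t ret = p[0];			/* low 32 bits */

	if (ret) {
		rmb();				/* order the two loads */
		ret |= (uint64_t)p[1] << 32;	/* high 32 bits */
	}
	return ret;
}

int main(void)
{
	/* low/high halves of the value quoted in the earlier crash analysis */
	volatile uint32_t entry[2] = { 0xfda38067u, 0x00000003u };

	return read_entry_low_first(entry) == 0x00000003fda38067ull ? 0 : 1;
}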
+ +So instead of dealing only with "consistent" pmdvals in +pmd_none_or_trans_huge_or_clear_bad() (which would be conceptually +simpler) we let pmd_none_or_trans_huge_or_clear_bad() deal with pmdvals +where the low 32bit and high 32bit could be inconsistent (to avoid having +to use cmpxchg8b). + +The only guarantee we get from pmd_read_atomic is that if the low part of +the pmd was found null, the high part will be null too (so the pmd will be +considered unstable). And if the low part of the pmd is found "stable" +later, then it means the whole pmd was read atomically (because after a +pmd is stable, neither MADV_DONTNEED nor page faults can alter it anymore, +and we read the high part after the low part). + +In the 32bit PAE x86 case, it is enough to read the low part of the pmdval +atomically to declare the pmd as "stable" and that's true for THP and no +THP, furthermore in the THP case we also have a barrier() that will +prevent any inconsistent pmdvals to be cached by a later re-read of the +*pmd. + +Signed-off-by: Andrea Arcangeli +Cc: Jonathan Nieder +Cc: Ulrich Obergfell +Cc: Mel Gorman +Cc: Hugh Dickins +Cc: Larry Woodman +Cc: Petr Matousek +Cc: Rik van Riel +Cc: Jan Beulich +Cc: KOSAKI Motohiro +Tested-by: Andrew Jones +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/pgtable-3level.h | 30 +++++++++++++++++------------- + include/asm-generic/pgtable.h | 10 ++++++++++ + 2 files changed, 27 insertions(+), 13 deletions(-) + +--- a/arch/x86/include/asm/pgtable-3level.h ++++ b/arch/x86/include/asm/pgtable-3level.h +@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd + * operations. + * +- * Without THP if the mmap_sem is hold for reading, the +- * pmd can only transition from null to not null while pmd_read_atomic runs. +- * So there's no need of literally reading it atomically. ++ * Without THP if the mmap_sem is hold for reading, the pmd can only ++ * transition from null to not null while pmd_read_atomic runs. So ++ * we can always return atomic pmd values with this function. + * + * With THP if the mmap_sem is hold for reading, the pmd can become +- * THP or null or point to a pte (and in turn become "stable") at any +- * time under pmd_read_atomic, so it's mandatory to read it atomically +- * with cmpxchg8b. ++ * trans_huge or none or point to a pte (and in turn become "stable") ++ * at any time under pmd_read_atomic. We could read it really ++ * atomically here with a atomic64_read for the THP enabled case (and ++ * it would be a whole lot simpler), but to avoid using cmpxchg8b we ++ * only return an atomic pmdval if the low part of the pmdval is later ++ * found stable (i.e. pointing to a pte). And we're returning a none ++ * pmdval if the low part of the pmd is none. In some cases the high ++ * and low part of the pmdval returned may not be consistent if THP is ++ * enabled (the low part may point to previously mapped hugepage, ++ * while the high part may point to a more recently mapped hugepage), ++ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part ++ * of the pmd to be read atomically to decide if the pmd is unstable ++ * or not, with the only exception of when the low part of the pmd is ++ * zero in which case we return a none pmd. 
+ */ +-#ifndef CONFIG_TRANSPARENT_HUGEPAGE + static inline pmd_t pmd_read_atomic(pmd_t *pmdp) + { + pmdval_t ret; +@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_ + + return (pmd_t) { ret }; + } +-#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +-static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +-{ +- return (pmd_t) { atomic64_read((atomic64_t *)pmdp) }; +-} +-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) + { +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -485,6 +485,16 @@ static inline int pmd_none_or_trans_huge + /* + * The barrier will stabilize the pmdval in a register or on + * the stack so that it will stop changing under the code. ++ * ++ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, ++ * pmd_read_atomic is allowed to return a not atomic pmdval ++ * (for example pointing to an hugepage that has never been ++ * mapped in the pmd). The below checks will only care about ++ * the low part of the pmd with 32bit PAE x86 anyway, with the ++ * exception of pmd_none(). So the important thing is that if ++ * the low part of the pmd is found null, the high part will ++ * be also null or the pmd_none() check below would be ++ * confused. + */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + barrier();