git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.8-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 14 Apr 2013 14:37:31 +0000 (07:37 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 14 Apr 2013 14:37:31 +0000 (07:37 -0700)
added patches:
gpio-fix-wrong-checking-condition-for-gpio-range.patch
kobject-fix-kset_find_obj-race-with-concurrent-last-kobject_put.patch
vfs-revert-spurious-fix-to-spinning-prevention-in-prune_icache_sb.patch
x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch

queue-3.8/gpio-fix-wrong-checking-condition-for-gpio-range.patch [new file with mode: 0644]
queue-3.8/kobject-fix-kset_find_obj-race-with-concurrent-last-kobject_put.patch [new file with mode: 0644]
queue-3.8/series
queue-3.8/vfs-revert-spurious-fix-to-spinning-prevention-in-prune_icache_sb.patch [new file with mode: 0644]
queue-3.8/x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch [new file with mode: 0644]

diff --git a/queue-3.8/gpio-fix-wrong-checking-condition-for-gpio-range.patch b/queue-3.8/gpio-fix-wrong-checking-condition-for-gpio-range.patch
new file mode 100644 (file)
index 0000000..8140305
--- /dev/null
@@ -0,0 +1,47 @@
+From ad4e1a7caf937ad395ced585ca85a7d14395dc80 Mon Sep 17 00:00:00 2001
+From: Haojian Zhuang <haojian.zhuang@linaro.org>
+Date: Sun, 17 Feb 2013 19:42:48 +0800
+Subject: gpio: fix wrong checking condition for gpio range
+
+From: Haojian Zhuang <haojian.zhuang@linaro.org>
+
+commit ad4e1a7caf937ad395ced585ca85a7d14395dc80 upstream.
+
+Since index starts at 0, the post-increment test in "while (index++)"
+evaluates to false on the first pass, so the loop body runs only once
+and no further ranges are checked. That is not the loop behaviour
+intended here.
+
+Replace it with an endless loop, so that the "gpio-ranges" property
+keeps being parsed until it runs out of entries.
+
+Signed-off-by: Haojian Zhuang <haojian.zhuang@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpiolib-of.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -228,7 +228,7 @@ static void of_gpiochip_add_pin_range(st
+       if (!np)
+               return;
+-      do {
++      for (;; index++) {
+               ret = of_parse_phandle_with_args(np, "gpio-ranges",
+                               "#gpio-range-cells", index, &pinspec);
+               if (ret)
+@@ -257,8 +257,7 @@ static void of_gpiochip_add_pin_range(st
+               if (ret)
+                       break;
+-
+-      } while (index++);
++      }
+ }
+ #else
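
The pitfall fixed above is easy to reproduce in standalone C. In this
minimal sketch (not part of the patch), a hypothetical parse_range()
stands in for of_parse_phandle_with_args():

#include <stdio.h>

/* Hypothetical parser: succeeds for the first three entries of a
 * "gpio-ranges"-like property, then reports that it ran out. */
static int parse_range(int index)
{
        return (index < 3) ? 0 : -1;
}

int main(void)
{
        int index = 0, parsed = 0;

        /* Buggy form: "while (index++)" tests the value *before* the
         * increment, which is 0 on the first pass, so the loop body
         * runs exactly once no matter how many ranges exist. */
        do {
                if (parse_range(index))
                        break;
                parsed++;
        } while (index++);
        printf("buggy loop parsed %d range(s)\n", parsed);  /* prints 1 */

        /* Fixed form: endless loop, exit only when parsing fails. */
        for (index = 0, parsed = 0; ; index++) {
                if (parse_range(index))
                        break;
                parsed++;
        }
        printf("fixed loop parsed %d range(s)\n", parsed);  /* prints 3 */
        return 0;
}
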
diff --git a/queue-3.8/kobject-fix-kset_find_obj-race-with-concurrent-last-kobject_put.patch b/queue-3.8/kobject-fix-kset_find_obj-race-with-concurrent-last-kobject_put.patch
new file mode 100644 (file)
index 0000000..6db7a81
--- /dev/null
@@ -0,0 +1,91 @@
+From a49b7e82cab0f9b41f483359be83f44fbb6b4979 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sat, 13 Apr 2013 15:15:30 -0700
+Subject: kobject: fix kset_find_obj() race with concurrent last kobject_put()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit a49b7e82cab0f9b41f483359be83f44fbb6b4979 upstream.
+
+Anatol Pomozov identified a race condition that hits module unloading
+and re-loading.  To quote Anatol:
+
+ "This is a race codition that exists between kset_find_obj() and
+  kobject_put().  kset_find_obj() might return kobject that has refcount
+  equal to 0 if this kobject is freeing by kobject_put() in other
+  thread.
+
+  Here is the timeline for the crash when kset_find_obj() searches for
+  an object that nobody holds while another thread does kobject_put()
+  on the same kobject:
+
+    THREAD A (calls kset_find_obj())     THREAD B (calls kobject_put())
+    spin_lock()
+                                         atomic_dec_return(kobj->kref), counter gets zero here
+                                         ... starts kobject cleanup ....
+                                         spin_lock() // WAIT thread A in kobj_kset_leave()
+    iterate over kset->list
+    atomic_inc(kobj->kref) (counter becomes 1)
+    spin_unlock()
+                                         spin_lock() // taken
+                                         // it does not know that thread A increased the counter, so it
+                                         removes obj from the list
+                                         spin_unlock()
+                                         vfree(module) // frees the module object together with its embedded kobj
+
+    // kobj points to freed memory area!!
+    kobject_put(kobj) // OOPS!!!!
+
+  The race above happens because module.c tries to use kset_find_obj()
+  when somebody unloads a module.  The module.c code was introduced in
+  commit 6494a93d55fa"
+
+Anatol supplied a patch specific to module.c that worked around the
+problem by simply not using kset_find_obj() at all, but rather than
+apply a local band-aid, this just fixes kset_find_obj() to be
+thread-safe using the proper model of refusing to get a new reference
+if the refcount has already dropped to zero.
+
+See examples of this proper refcount handling not only in the kref
+documentation, but in various other equivalent uses of this pattern by
+grepping for atomic_inc_not_zero().
+
+[ Side note: the module race does indicate that module loading and
+  unloading is not properly serialized wrt sysfs information using the
+  module mutex.  That may require further thought, but this is the
+  correct fix at the kobject layer regardless. ]
+
+Reported-analyzed-and-tested-by: Anatol Pomozov <anatol.pomozov@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/kobject.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobje
+       return kobj;
+ }
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++      if (!kref_get_unless_zero(&kobj->kref))
++              kobj = NULL;
++      return kobj;
++}
++
+ /*
+  * kobject_cleanup - free kobject resources.
+  * @kobj: object to cleanup
+@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kse
+       list_for_each_entry(k, &kset->list, entry) {
+               if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+-                      ret = kobject_get(k);
++                      ret = kobject_get_unless_zero(k);
+                       break;
+               }
+       }
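
For reference, the "get unless zero" pattern that kobject_get_unless_zero()
relies on can be sketched with C11 atomics (illustrative only; in the
kernel, kref_get_unless_zero() wraps atomic_add_unless()):

#include <stdatomic.h>
#include <stdbool.h>

struct object {
        atomic_int refcount;
};

/* Take a reference only if the count is still non-zero, i.e. only if
 * no other thread has already begun tearing the object down. */
static bool get_unless_zero(struct object *obj)
{
        int old = atomic_load(&obj->refcount);

        while (old != 0) {
                /* On failure the CAS reloads 'old' with the current
                 * value, so the loop re-checks it against zero. */
                if (atomic_compare_exchange_weak(&obj->refcount,
                                                 &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;   /* object is dying; caller must not touch it */
}

With this in place, kset_find_obj() skips entries whose last reference
is already gone instead of resurrecting them, closing the window shown
in the timeline above.
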
diff --git a/queue-3.8/series b/queue-3.8/series
index 8b7b2b8f170b6eb63d0a48ae9122327b33a63ac4..23d608517390461045d3e73c9103004fb9eea7a8 100644 (file)
--- a/queue-3.8/series
@@ -14,3 +14,7 @@ gfs2-return-error-if-malloc-failed-in-gfs2_rs_alloc.patch
 scsi-libsas-fix-handling-vacant-phy-in-sas_set_ex_phy.patch
 cifs-allow-passwords-which-begin-with-a-delimitor.patch
 target-fix-incorrect-fallthrough-of-alua-standby-offline-transition-cdbs.patch
+vfs-revert-spurious-fix-to-spinning-prevention-in-prune_icache_sb.patch
+kobject-fix-kset_find_obj-race-with-concurrent-last-kobject_put.patch
+gpio-fix-wrong-checking-condition-for-gpio-range.patch
+x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch
diff --git a/queue-3.8/vfs-revert-spurious-fix-to-spinning-prevention-in-prune_icache_sb.patch b/queue-3.8/vfs-revert-spurious-fix-to-spinning-prevention-in-prune_icache_sb.patch
new file mode 100644 (file)
index 0000000..0ee7c46
--- /dev/null
@@ -0,0 +1,39 @@
+From 5b55d708335a9e3e4f61f2dadf7511502205ccd1 Mon Sep 17 00:00:00 2001
+From: Suleiman Souhlal <suleiman@google.com>
+Date: Sat, 13 Apr 2013 16:03:06 -0700
+Subject: vfs: Revert spurious fix to spinning prevention in prune_icache_sb
+
+From: Suleiman Souhlal <suleiman@google.com>
+
+commit 5b55d708335a9e3e4f61f2dadf7511502205ccd1 upstream.
+
+Revert commit 62a3ddef6181 ("vfs: fix spinning prevention in prune_icache_sb").
+
+This commit doesn't look right: since we are looking at the tail of the
+list (sb->s_inode_lru.prev) if we want to skip an inode, we should put
+it back at the head of the list instead of the tail, otherwise we will
+keep spinning on it.
+
+Discovered when investigating why prune_icache_sb came top in perf
+reports of a swapping load.
+
+Signed-off-by: Suleiman Souhlal <suleiman@google.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/inode.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block
+                * inode to the back of the list so we don't spin on it.
+                */
+               if (!spin_trylock(&inode->i_lock)) {
+-                      list_move_tail(&inode->i_lru, &sb->s_inode_lru);
++                      list_move(&inode->i_lru, &sb->s_inode_lru);
+                       continue;
+               }
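
Why the reverted line spins can be seen in a small userspace model of
the LRU walk (illustrative only; scan() stands in for prune_icache_sb()
and 'locked' marks an inode whose spin_trylock() always fails):

#include <stdio.h>
#include <stdbool.h>

struct node { struct node *prev, *next; bool locked; };

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add(struct node *n, struct node *at)  /* after 'at' */
{
        n->next = at->next;
        n->prev = at;
        at->next->prev = n;
        at->next = n;
}

/* Walk from the tail for 'budget' steps, skipping locked nodes either
 * to the tail (the behaviour being reverted) or to the head (the fix). */
static int scan(struct node *head, int budget, bool skip_to_tail)
{
        int freed = 0;

        while (budget-- > 0 && head->prev != head) {
                struct node *n = head->prev;    /* tail entry */

                if (n->locked) {
                        list_del(n);
                        /* to the tail: the very next iteration sees
                         * the same node again and makes no progress */
                        list_add(n, skip_to_tail ? head->prev : head);
                        continue;
                }
                list_del(n);                    /* "evict" the inode */
                freed++;
        }
        return freed;
}

int main(void)
{
        struct node head = { &head, &head, false };
        struct node a = { .locked = false };
        struct node b = { .locked = true };

        list_add(&a, &head);    /* list: head, a                  */
        list_add(&b, &a);       /* list: head, a, b (tail locked) */
        printf("freed with move-to-tail: %d\n", scan(&head, 8, true));

        head.prev = head.next = &head;          /* rebuild and retry */
        list_add(&a, &head);
        list_add(&b, &a);
        printf("freed with move-to-head: %d\n", scan(&head, 8, false));
        return 0;
}

The first scan frees nothing: the locked tail inode keeps being put
back exactly where it was found. The second frees the unlocked inode
behind it, which is the behaviour the revert restores.
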
diff --git a/queue-3.8/x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch b/queue-3.8/x86-32-fix-possible-incomplete-tlb-invalidate-with-pae-pagetables.patch
new file mode 100644 (file)
index 0000000..b04d34f
--- /dev/null
@@ -0,0 +1,111 @@
+From 1de14c3c5cbc9bb17e9dcc648cda51c0c85d54b9 Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave@sr71.net>
+Date: Fri, 12 Apr 2013 16:23:54 -0700
+Subject: x86-32: Fix possible incomplete TLB invalidate with PAE pagetables
+
+From: Dave Hansen <dave@sr71.net>
+
+commit 1de14c3c5cbc9bb17e9dcc648cda51c0c85d54b9 upstream.
+
+This patch attempts to fix:
+
+       https://bugzilla.kernel.org/show_bug.cgi?id=56461
+
+The symptom is a crash and messages like this:
+
+       chrome: Corrupted page table at address 34a03000
+       *pdpt = 0000000000000000 *pde = 0000000000000000
+       Bad pagetable: 000f [#1] PREEMPT SMP
+
+Ingo guesses this got introduced by commit 611ae8e3f520 ("x86/tlb:
+enable tlb flush range support for x86") since that code started to free
+unused pagetables.
+
+On x86-32 PAE kernels, that new code has the potential to free an entire
+PMD page and will clear one of the four page-directory-pointer-table
+(aka pgd_t) entries.
+
+The hardware aggressively "caches" these top-level entries and invlpg
+does not actually affect the CPU's copy.  If we clear one we *HAVE* to
+do a full TLB flush, otherwise we might continue using a freed pmd page.
+(note, we do this properly on the population side in pud_populate()).
+
+This patch tracks whenever we clear one of these entries in the 'struct
+mmu_gather', and ensures that we follow up with a full tlb flush.
+
+BTW, I disassembled and checked that:
+
+       if (tlb->fullmm == 0)
+and
+       if (!tlb->fullmm && !tlb->need_flush_all)
+
+generate essentially the same code, so there should be zero impact there
+to the !PAE case.
+
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Artem S Tashkinov <t.artem@mailcity.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/tlb.h |    2 +-
+ arch/x86/mm/pgtable.c      |    7 +++++++
+ include/asm-generic/tlb.h  |    7 ++++++-
+ mm/memory.c                |    1 +
+ 4 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/tlb.h
++++ b/arch/x86/include/asm/tlb.h
+@@ -7,7 +7,7 @@
+ #define tlb_flush(tlb)                                                        \
+ {                                                                     \
+-      if (tlb->fullmm == 0)                                           \
++      if (!tlb->fullmm && !tlb->need_flush_all)                       \
+               flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
+       else                                                            \
+               flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);   \
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *
+ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+ {
+       paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
++      /*
++       * NOTE! For PAE, any changes to the top page-directory-pointer-table
++       * entries need a full cr3 reload to flush.
++       */
++#ifdef CONFIG_X86_PAE
++      tlb->need_flush_all = 1;
++#endif
+       tlb_remove_page(tlb, virt_to_page(pmd));
+ }
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -99,7 +99,12 @@ struct mmu_gather {
+       unsigned int            need_flush : 1, /* Did free PTEs */
+                               fast_mode  : 1; /* No batching   */
+-      unsigned int            fullmm;
++      /* we are in the middle of an operation to clear
++       * a full mm and can make some optimizations */
++      unsigned int            fullmm : 1,
++      /* we have performed an operation which
++       * requires a complete flush of the tlb */
++                              need_flush_all : 1;
+       struct mmu_gather_batch *active;
+       struct mmu_gather_batch local;
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -212,6 +212,7 @@ void tlb_gather_mmu(struct mmu_gather *t
+       tlb->mm = mm;
+       tlb->fullmm     = fullmm;
++      tlb->need_flush_all = 0;
+       tlb->start      = -1UL;
+       tlb->end        = 0;
+       tlb->need_flush = 0;
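
The shape of the fix, a one-bit flag raised when a pmd page is freed
and consumed at flush time, condenses to a short standalone sketch
(hypothetical names; build with -DCONFIG_X86_PAE to model a PAE kernel):

#include <stdio.h>

/* Condensed model of the two mmu_gather bits the patch touches. */
struct gather {
        unsigned int fullmm : 1;         /* tearing down the whole mm */
        unsigned int need_flush_all : 1; /* cleared a top-level entry */
};

/* Freeing a pmd page clears one of the four PAE pgd entries; invlpg
 * cannot flush the CPU's cached copy, so demand a full cr3 reload. */
static void pmd_freed(struct gather *tlb)
{
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        (void)tlb;
}

/* Mirror of the patched tlb_flush() decision. */
static void flush(struct gather *tlb)
{
        if (!tlb->fullmm && !tlb->need_flush_all)
                printf("ranged flush (invlpg over the range)\n");
        else
                printf("full flush (cr3 reload)\n");
}

int main(void)
{
        struct gather tlb = { .fullmm = 0, .need_flush_all = 0 };

        flush(&tlb);     /* ranged flush */
        pmd_freed(&tlb); /* a pmd page was freed */
        flush(&tlb);     /* full flush, when built with CONFIG_X86_PAE */
        return 0;
}
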