git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
author Greg Kroah-Hartman <gregkh@suse.de>
Fri, 13 Jan 2012 20:35:35 +0000 (12:35 -0800)
committer Greg Kroah-Hartman <gregkh@suse.de>
Fri, 13 Jan 2012 20:35:35 +0000 (12:35 -0800)
added patches:
mac80211-fix-rx-key-null-pointer-dereference-in-promiscuous-mode.patch
memcg-add-mem_cgroup_replace_page_cache-to-fix-lru-issue.patch
rtl8192se-fix-bug-caused-by-failure-to-check-skb-allocation.patch
x86-fix-mmap-random-address-range.patch

queue-3.0/mac80211-fix-rx-key-null-pointer-dereference-in-promiscuous-mode.patch [new file with mode: 0644]
queue-3.0/memcg-add-mem_cgroup_replace_page_cache-to-fix-lru-issue.patch [new file with mode: 0644]
queue-3.0/rtl8192se-fix-bug-caused-by-failure-to-check-skb-allocation.patch [new file with mode: 0644]
queue-3.0/series
queue-3.0/x86-fix-mmap-random-address-range.patch [new file with mode: 0644]

diff --git a/queue-3.0/mac80211-fix-rx-key-null-pointer-dereference-in-promiscuous-mode.patch b/queue-3.0/mac80211-fix-rx-key-null-pointer-dereference-in-promiscuous-mode.patch
new file mode 100644 (file)
index 0000000..12caeda
--- /dev/null
@@ -0,0 +1,49 @@
+From 1140afa862842ac3e56678693050760edc4ecde9 Mon Sep 17 00:00:00 2001
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+Date: Wed, 11 Jan 2012 09:26:54 +0100
+Subject: mac80211: fix rx->key NULL pointer dereference in promiscuous mode
+
+From: Stanislaw Gruszka <sgruszka@redhat.com>
+
+commit 1140afa862842ac3e56678693050760edc4ecde9 upstream.
+
+Since:
+
+commit 816c04fe7ef01dd9649f5ccfe796474db8708be5
+Author: Christian Lamparter <chunkeey@googlemail.com>
+Date:   Sat Apr 30 15:24:30 2011 +0200
+
+    mac80211: consolidate MIC failure report handling
+
+it is possible that we dereference rx->key == NULL when the driver sets
+RX_FLAG_MMIC_STRIPPED and not RX_FLAG_IV_STRIPPED and we are in
+promiscuous mode. This happens with rt73usb and rt61pci at least.
+
+Before the commit we always check rx->key against NULL, so I assume
+fix should be done in mac80211 (also mic_fail path has similar check).
+
+References:
+https://bugzilla.redhat.com/show_bug.cgi?id=769766
+http://rt2x00.serialmonkey.com/pipermail/users_rt2x00.serialmonkey.com/2012-January/004395.html
+
+Reported-by: Stuart D Gathman <stuart@gathman.org>
+Reported-by: Kai Wohlfahrt <kai.scorpio@gmail.com>
+Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/wpa.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -109,7 +109,7 @@ ieee80211_rx_h_michael_mic_verify(struct
+               if (status->flag & RX_FLAG_MMIC_ERROR)
+                       goto mic_fail;
+-              if (!(status->flag & RX_FLAG_IV_STRIPPED))
++              if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+                       goto update_iv;
+               return RX_CONTINUE;
diff --git a/queue-3.0/memcg-add-mem_cgroup_replace_page_cache-to-fix-lru-issue.patch b/queue-3.0/memcg-add-mem_cgroup_replace_page_cache-to-fix-lru-issue.patch
new file mode 100644 (file)
index 0000000..7d532ea
--- /dev/null
@@ -0,0 +1,172 @@
+From ab936cbcd02072a34b60d268f94440fd5cf1970b Mon Sep 17 00:00:00 2001
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Date: Thu, 12 Jan 2012 17:17:44 -0800
+Subject: memcg: add mem_cgroup_replace_page_cache() to fix LRU issue
+
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+
+commit ab936cbcd02072a34b60d268f94440fd5cf1970b upstream.
+
+Commit ef6a3c6311 ("mm: add replace_page_cache_page() function") added a
+function replace_page_cache_page().  This function replaces a page in the
+radix-tree with a new page.  When doing this, memory cgroup needs to fix
+up the accounting information.  memcg needs to check the PCG_USED bit etc.
+
+In some(many?) cases, 'newpage' is on LRU before calling
+replace_page_cache().  So, memcg's LRU accounting information should be
+fixed, too.
+
+This patch adds mem_cgroup_replace_page_cache() and removes the old hooks.
+ In that function, old pages will be unaccounted without touching
+res_counter and new page will be accounted to the memcg (of old page).
+When overwriting pc->mem_cgroup of newpage, take zone->lru_lock and avoid
+races with LRU handling.
+
+Background:
+  replace_page_cache_page() is called by FUSE code in its splice() handling.
+  Here, 'newpage' is replacing oldpage but this newpage is not a newly allocated
+  page and may be on LRU. LRU mis-accounting will be critical for memory cgroup
+  because rmdir() checks the whole LRU is empty and there is no account leak.
+  If a page is on the other LRU than it should be, rmdir() will fail.
+
+This bug was added in March 2011, but no bug report yet.  I guess there
+are not many people who use memcg and FUSE at the same time with upstream
+kernels.
+
+The result of this bug is that admin cannot destroy a memcg because of
+account leak.  So, no panic, no deadlock.  And, even if an active cgroup
+exists, umount can succeed.  So no problem at shutdown.
+
+Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+Cc: Miklos Szeredi <mszeredi@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/memcontrol.h |    6 ++++++
+ mm/filemap.c               |   18 ++----------------
+ mm/memcontrol.c            |   44 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 52 insertions(+), 16 deletions(-)
+
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
+ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+                                       struct task_struct *p);
++extern void mem_cgroup_replace_page_cache(struct page *oldpage,
++                                      struct page *newpage);
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ extern int do_swap_account;
+@@ -370,6 +372,10 @@ static inline
+ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+ {
+ }
++static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
++                              struct page *newpage)
++{
++}
+ #endif /* CONFIG_CGROUP_MEM_CONT */
+ #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -396,24 +396,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_ran
+ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ {
+       int error;
+-      struct mem_cgroup *memcg = NULL;
+       VM_BUG_ON(!PageLocked(old));
+       VM_BUG_ON(!PageLocked(new));
+       VM_BUG_ON(new->mapping);
+-      /*
+-       * This is not page migration, but prepare_migration and
+-       * end_migration does enough work for charge replacement.
+-       *
+-       * In the longer term we probably want a specialized function
+-       * for moving the charge from old to new in a more efficient
+-       * manner.
+-       */
+-      error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+-      if (error)
+-              return error;
+-
+       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       if (!error) {
+               struct address_space *mapping = old->mapping;
+@@ -435,13 +422,12 @@ int replace_page_cache_page(struct page
+               if (PageSwapBacked(new))
+                       __inc_zone_page_state(new, NR_SHMEM);
+               spin_unlock_irq(&mapping->tree_lock);
++              /* mem_cgroup codes must not be called under tree_lock */
++              mem_cgroup_replace_page_cache(old, new);
+               radix_tree_preload_end();
+               if (freepage)
+                       freepage(old);
+               page_cache_release(old);
+-              mem_cgroup_end_migration(memcg, old, new, true);
+-      } else {
+-              mem_cgroup_end_migration(memcg, old, new, false);
+       }
+       return error;
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3422,6 +3422,50 @@ int mem_cgroup_shmem_charge_fallback(str
+       return ret;
+ }
++/*
++ * At replace page cache, newpage is not under any memcg but it's on
++ * LRU. So, this function doesn't touch res_counter but handles LRU
++ * in correct way. Both pages are locked so we cannot race with uncharge.
++ */
++void mem_cgroup_replace_page_cache(struct page *oldpage,
++                                struct page *newpage)
++{
++      struct mem_cgroup *memcg;
++      struct page_cgroup *pc;
++      struct zone *zone;
++      enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
++      unsigned long flags;
++
++      if (mem_cgroup_disabled())
++              return;
++
++      pc = lookup_page_cgroup(oldpage);
++      /* fix accounting on old pages */
++      lock_page_cgroup(pc);
++      memcg = pc->mem_cgroup;
++      mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
++      ClearPageCgroupUsed(pc);
++      unlock_page_cgroup(pc);
++
++      if (PageSwapBacked(oldpage))
++              type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
++
++      zone = page_zone(newpage);
++      pc = lookup_page_cgroup(newpage);
++      /*
++       * Even if newpage->mapping was NULL before starting replacement,
++       * the newpage may be on LRU(or pagevec for LRU) already. We lock
++       * LRU while we overwrite pc->mem_cgroup.
++       */
++      spin_lock_irqsave(&zone->lru_lock, flags);
++      if (PageLRU(newpage))
++              del_page_from_lru_list(zone, newpage, page_lru(newpage));
++      __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
++      if (PageLRU(newpage))
++              add_page_to_lru_list(zone, newpage, page_lru(newpage));
++      spin_unlock_irqrestore(&zone->lru_lock, flags);
++}
++
+ #ifdef CONFIG_DEBUG_VM
+ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+ {
diff --git a/queue-3.0/rtl8192se-fix-bug-caused-by-failure-to-check-skb-allocation.patch b/queue-3.0/rtl8192se-fix-bug-caused-by-failure-to-check-skb-allocation.patch
new file mode 100644 (file)
index 0000000..ae059bb
--- /dev/null
@@ -0,0 +1,41 @@
+From d90db4b12bc1b9b8a787ef28550fdb767ee25a49 Mon Sep 17 00:00:00 2001
+From: Larry Finger <Larry.Finger@lwfinger.net>
+Date: Wed, 4 Jan 2012 20:50:47 -0600
+Subject: rtl8192se: Fix BUG caused by failure to check skb allocation
+
+From: Larry Finger <Larry.Finger@lwfinger.net>
+
+commit d90db4b12bc1b9b8a787ef28550fdb767ee25a49 upstream.
+
+When downloading firmware into the device, the driver fails to check the
+return when allocating an skb. When the allocation fails, a BUG can be
+generated, as seen in https://bugzilla.redhat.com/show_bug.cgi?id=771656.
+
+Signed-off-by: Larry Finger <Larry.Finger@lwfinger.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/rtlwifi/rtl8192se/fw.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcod
+               /* Allocate skb buffer to contain firmware */
+               /* info and tx descriptor info. */
+               skb = dev_alloc_skb(frag_length);
++              if (!skb)
++                      return false;
+               skb_reserve(skb, extra_descoffset);
+               seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
+                                       extra_descoffset));
+@@ -575,6 +577,8 @@ static bool _rtl92s_firmware_set_h2c_cmd
+       len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
+       skb = dev_alloc_skb(len);
++      if (!skb)
++              return false;
+       cb_desc = (struct rtl_tcb_desc *)(skb->cb);
+       cb_desc->queue_index = TXCMD_QUEUE;
+       cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
index 7ec16ec3c10dec864ca0a5eff8f2bdae660cefd1..471db1b54ca7396e530589a45799c43fd642703d 100644 (file)
@@ -28,3 +28,7 @@ xen-xenbus-reject-replies-with-payload-xenstore_payload_max.patch
 ima-free-duplicate-measurement-memory.patch
 ima-fix-invalid-memory-reference.patch
 pnp-work-around-dell-1536-1546-bios-mmconfig-bug-that-breaks-usb.patch
+rtl8192se-fix-bug-caused-by-failure-to-check-skb-allocation.patch
+mac80211-fix-rx-key-null-pointer-dereference-in-promiscuous-mode.patch
+memcg-add-mem_cgroup_replace_page_cache-to-fix-lru-issue.patch
+x86-fix-mmap-random-address-range.patch
diff --git a/queue-3.0/x86-fix-mmap-random-address-range.patch b/queue-3.0/x86-fix-mmap-random-address-range.patch
new file mode 100644 (file)
index 0000000..1dc6429
--- /dev/null
@@ -0,0 +1,45 @@
+From 9af0c7a6fa860698d080481f24a342ba74b68982 Mon Sep 17 00:00:00 2001
+From: Ludwig Nussel <ludwig.nussel@suse.de>
+Date: Tue, 15 Nov 2011 14:46:46 -0800
+Subject: x86: Fix mmap random address range
+
+From: Ludwig Nussel <ludwig.nussel@suse.de>
+
+commit 9af0c7a6fa860698d080481f24a342ba74b68982 upstream.
+
+On x86_32 casting the unsigned int result of get_random_int() to
+long may result in a negative value.  On x86_32 the range of
+mmap_rnd() therefore was -255 to 255.  The 32bit mode on x86_64
+used 0 to 255 as intended.
+
+The bug was introduced by 675a081 ("x86: unify mmap_{32|64}.c")
+in January 2008.
+
+Signed-off-by: Ludwig Nussel <ludwig.nussel@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: harvey.harrison@gmail.com
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Harvey Harrison <harvey.harrison@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Link: http://lkml.kernel.org/r/201111152246.pAFMklOB028527@wpaz5.hot.corp.google.com
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/mmap.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
+       */
+       if (current->flags & PF_RANDOMIZE) {
+               if (mmap_is_ia32())
+-                      rnd = (long)get_random_int() % (1<<8);
++                      rnd = get_random_int() % (1<<8);
+               else
+-                      rnd = (long)(get_random_int() % (1<<28));
++                      rnd = get_random_int() % (1<<28);
+       }
+       return rnd << PAGE_SHIFT;
+ }