git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Open next round of queue-2.6.19
author Chris Wright <chrisw@sous-sol.org>
Mon, 11 Dec 2006 22:22:55 +0000 (14:22 -0800)
committer Chris Wright <chrisw@sous-sol.org>
Mon, 11 Dec 2006 22:22:55 +0000 (14:22 -0800)
queue-2.6.19/dm-crypt-select-crypto_cbc.patch [new file with mode: 0644]
queue-2.6.19/ieee80211softmac-fix-mutex_lock-at-exit-of-ieee80211_softmac_get_genie.patch [new file with mode: 0644]
queue-2.6.19/read_zero_pagealigned-locking-fix.patch [new file with mode: 0644]
queue-2.6.19/series [new file with mode: 0644]
queue-2.6.19/sha512-fix-sha384-block-size.patch [new file with mode: 0644]
queue-2.6.19/x86-64-mark-rdtsc-as-sync-only-for-netburst-not-for-core2.patch [new file with mode: 0644]

diff --git a/queue-2.6.19/dm-crypt-select-crypto_cbc.patch b/queue-2.6.19/dm-crypt-select-crypto_cbc.patch
new file mode 100644 (file)
index 0000000..f432f37
--- /dev/null
@@ -0,0 +1,27 @@
+From stable-bounces@linux.kernel.org  Sat Dec  9 14:56:35 2006
+Date: Sun, 10 Dec 2006 09:50:36 +1100
+Message-ID: <20061209225035.GA12802@gondor.apana.org.au>
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: Rene Herman <rene.herman@gmail.com>
+Cc: torvalds@osdl.org, "David S. Miller" <davem@davemloft.net>, stable@kernel.org
+Subject: dm-crypt: Select CRYPTO_CBC
+
+As CBC is the default chaining method for dm-crypt, we should select
+it from dm-crypt to ease the transition.  Spotted by Rene Herman.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/Kconfig |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- linux-2.6.19.1.orig/drivers/md/Kconfig
++++ linux-2.6.19.1/drivers/md/Kconfig
+@@ -215,6 +215,7 @@ config DM_CRYPT
+       tristate "Crypt target support"
+       depends on BLK_DEV_DM && EXPERIMENTAL
+       select CRYPTO
++      select CRYPTO_CBC
+       ---help---
+         This device-mapper target allows you to create a device that
+         transparently encrypts the data on it. You'll need to activate
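
Background on the select: since 2.6.19 the CBC chaining template lives in its own CRYPTO_CBC module, and dm-crypt only asks for it by name at table-load time, so a missing module surfaces as a runtime allocation failure rather than a build error. A minimal sketch of such a lookup with the 2.6.19 blkcipher API (the helper name and the fixed "cbc(aes)" string are illustrative, not taken from dm-crypt):

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Request a CBC-chained AES transform from the crypto layer.  With
     * CONFIG_CRYPTO_CBC=n the "cbc" template is unknown, the allocation
     * fails, and an "aes-cbc-plain" mapping cannot be set up. */
    static struct crypto_blkcipher *alloc_cbc_aes(void)
    {
            struct crypto_blkcipher *tfm;

            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return NULL;
            return tfm;
    }
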
diff --git a/queue-2.6.19/ieee80211softmac-fix-mutex_lock-at-exit-of-ieee80211_softmac_get_genie.patch b/queue-2.6.19/ieee80211softmac-fix-mutex_lock-at-exit-of-ieee80211_softmac_get_genie.patch
new file mode 100644 (file)
index 0000000..4fab8db
--- /dev/null
@@ -0,0 +1,32 @@
+From stable-bounces@linux.kernel.org  Sun Dec 10 09:45:56 2006
+From: Michael Buesch <mb@bu3sch.de>
+To: stable@kernel.org
+Date: Sun, 10 Dec 2006 18:39:28 +0100
+Message-Id: <200612101839.28687.mb@bu3sch.de>
+Cc: Andrew Morton <akpm@osdl.org>, Johannes Berg <johannes@sipsolutions.net>, "John W. Linville" <linville@tuxdriver.com>, dsd@gentoo.org
+Subject: ieee80211softmac: Fix mutex_lock at exit of ieee80211_softmac_get_genie
+
+From: Ulrich Kunitz <kune@deine-taler.de>
+
+ieee80211softmac_wx_get_genie locks the associnfo mutex at function
+exit instead of unlocking it. This patch fixes it. The patch is
+against Linus' tree (commit af1713e0).
+
+Signed-off-by: Ulrich Kunitz <kune@deine-taler.de>
+Signed-off-by: Michael Buesch <mb@bu3sch.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ net/ieee80211/softmac/ieee80211softmac_wx.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.19.1.orig/net/ieee80211/softmac/ieee80211softmac_wx.c
++++ linux-2.6.19.1/net/ieee80211/softmac/ieee80211softmac_wx.c
+@@ -463,7 +463,7 @@ ieee80211softmac_wx_get_genie(struct net
+                       err = -E2BIG;
+       }
+       spin_unlock_irqrestore(&mac->lock, flags);
+-      mutex_lock(&mac->associnfo.mutex);
++      mutex_unlock(&mac->associnfo.mutex);
+       return err;
+ }
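
The one-line change is easier to follow against the intended lock pairing in this handler: the associnfo mutex is taken once at entry and must be released on every exit path, while the spinlock only protects the copy of the stored IE. A simplified sketch of that shape (illustrative, not the full ieee80211softmac_wx_get_genie body; the field names come from the hunk above):

    static int genie_lock_shape(struct ieee80211softmac_device *mac)
    {
            unsigned long flags;
            int err = 0;

            mutex_lock(&mac->associnfo.mutex);      /* taken once at entry */
            spin_lock_irqsave(&mac->lock, flags);
            /* ... copy the stored generic IE, set err = -E2BIG on overflow ... */
            spin_unlock_irqrestore(&mac->lock, flags);
            mutex_unlock(&mac->associnfo.mutex);    /* the bug re-locked here */
            return err;
    }
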
diff --git a/queue-2.6.19/read_zero_pagealigned-locking-fix.patch b/queue-2.6.19/read_zero_pagealigned-locking-fix.patch
new file mode 100644 (file)
index 0000000..c09a43f
--- /dev/null
@@ -0,0 +1,149 @@
+From stable-bounces@linux.kernel.org  Sun Dec 10 02:24:42 2006
+Message-Id: <200612101018.kBAAIiFj021055@shell0.pdx.osdl.net>
+From: akpm@osdl.org
+To: torvalds@osdl.org
+Date: Sun, 10 Dec 2006 02:18:43 -0800
+Cc: akpm@osdl.org, hugh@veritas.com, Ramiro.Voicu@cern.ch, stable@kernel.org
+Subject: read_zero_pagealigned() locking fix
+
+From: Hugh Dickins <hugh@veritas.com>
+
+Ramiro Voicu hits the BUG_ON(!pte_none(*pte)) in zeromap_pte_range: kernel
+bugzilla 7645.  Right: read_zero_pagealigned uses down_read of mmap_sem,
+but another thread's racing read of /dev/zero, or a normal fault, can
+easily set that pte again, in between zap_page_range and zeromap_page_range
+getting there.  It's been wrong ever since 2.4.3.
+
+The simple fix is to use down_write instead, but that would serialize reads
+of /dev/zero more than at present: perhaps some app would be badly
+affected.  So instead let zeromap_page_range return the error instead of
+BUG_ON, and read_zero_pagealigned break to the slower clear_user loop in
+that case - there's no need to optimize for it.
+
+Use -EEXIST for when a pte is found: BUG_ON in mmap_zero (the other user of
+zeromap_page_range), though it really isn't interesting there.  And since
+mmap_zero wants -EAGAIN for out-of-memory, the zeromaps better return that
+than -ENOMEM.
+
+Signed-off-by: Hugh Dickins <hugh@veritas.com>
+Cc: Ramiro Voicu <Ramiro.Voicu@cern.ch>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@osdl.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+
+ drivers/char/mem.c |   12 ++++++++----
+ mm/memory.c        |   32 +++++++++++++++++++++-----------
+ 2 files changed, 29 insertions(+), 15 deletions(-)
+
+--- linux-2.6.19.1.orig/drivers/char/mem.c
++++ linux-2.6.19.1/drivers/char/mem.c
+@@ -646,7 +646,8 @@ static inline size_t read_zero_pagealign
+                       count = size;
+               zap_page_range(vma, addr, count, NULL);
+-              zeromap_page_range(vma, addr, count, PAGE_COPY);
++              if (zeromap_page_range(vma, addr, count, PAGE_COPY))
++                      break;
+               size -= count;
+               buf += count;
+@@ -713,11 +714,14 @@ out:
+ static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+ {
++      int err;
++
+       if (vma->vm_flags & VM_SHARED)
+               return shmem_zero_setup(vma);
+-      if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
+-              return -EAGAIN;
+-      return 0;
++      err = zeromap_page_range(vma, vma->vm_start,
++                      vma->vm_end - vma->vm_start, vma->vm_page_prot);
++      BUG_ON(err == -EEXIST);
++      return err;
+ }
+ #else /* CONFIG_MMU */
+ static ssize_t read_zero(struct file * file, char * buf, 
+--- linux-2.6.19.1.orig/mm/memory.c
++++ linux-2.6.19.1/mm/memory.c
+@@ -1110,23 +1110,29 @@ static int zeromap_pte_range(struct mm_s
+ {
+       pte_t *pte;
+       spinlock_t *ptl;
++      int err = 0;
+       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+-              return -ENOMEM;
++              return -EAGAIN;
+       arch_enter_lazy_mmu_mode();
+       do {
+               struct page *page = ZERO_PAGE(addr);
+               pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
++
++              if (unlikely(!pte_none(*pte))) {
++                      err = -EEXIST;
++                      pte++;
++                      break;
++              }
+               page_cache_get(page);
+               page_add_file_rmap(page);
+               inc_mm_counter(mm, file_rss);
+-              BUG_ON(!pte_none(*pte));
+               set_pte_at(mm, addr, pte, zero_pte);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+       arch_leave_lazy_mmu_mode();
+       pte_unmap_unlock(pte - 1, ptl);
+-      return 0;
++      return err;
+ }
+ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1134,16 +1140,18 @@ static inline int zeromap_pmd_range(stru
+ {
+       pmd_t *pmd;
+       unsigned long next;
++      int err;
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+-              return -ENOMEM;
++              return -EAGAIN;
+       do {
+               next = pmd_addr_end(addr, end);
+-              if (zeromap_pte_range(mm, pmd, addr, next, prot))
+-                      return -ENOMEM;
++              err = zeromap_pte_range(mm, pmd, addr, next, prot);
++              if (err)
++                      break;
+       } while (pmd++, addr = next, addr != end);
+-      return 0;
++      return err;
+ }
+ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -1151,16 +1159,18 @@ static inline int zeromap_pud_range(stru
+ {
+       pud_t *pud;
+       unsigned long next;
++      int err;
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+-              return -ENOMEM;
++              return -EAGAIN;
+       do {
+               next = pud_addr_end(addr, end);
+-              if (zeromap_pmd_range(mm, pud, addr, next, prot))
+-                      return -ENOMEM;
++              err = zeromap_pmd_range(mm, pud, addr, next, prot);
++              if (err)
++                      break;
+       } while (pud++, addr = next, addr != end);
+-      return 0;
++      return err;
+ }
+ int zeromap_page_range(struct vm_area_struct *vma,
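
The new return values give the two callers distinct recoveries: -EEXIST means the range raced with a fault that already installed a pte, and read_zero_pagealigned() drops back to the slower clear_user() loop; -EAGAIN means a page-table allocation failed, which mmap_zero() keeps passing straight back to userspace. A condensed sketch of the caller side (the helper name is made up; the real handling is in the drivers/char/mem.c hunk above):

    #include <linux/mm.h>

    static ssize_t zero_one_chunk(struct vm_area_struct *vma,
                                  unsigned long addr, size_t count)
    {
            int err;

            zap_page_range(vma, addr, count, NULL);
            err = zeromap_page_range(vma, addr, count, PAGE_COPY);
            if (err)
                    return err;     /* -EEXIST or -EAGAIN: take the slow path */
            return count;
    }
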
diff --git a/queue-2.6.19/series b/queue-2.6.19/series
new file mode 100644 (file)
index 0000000..6144ce0
--- /dev/null
@@ -0,0 +1,5 @@
+dm-crypt-select-crypto_cbc.patch
+sha512-fix-sha384-block-size.patch
+read_zero_pagealigned-locking-fix.patch
+ieee80211softmac-fix-mutex_lock-at-exit-of-ieee80211_softmac_get_genie.patch
+x86-64-mark-rdtsc-as-sync-only-for-netburst-not-for-core2.patch
diff --git a/queue-2.6.19/sha512-fix-sha384-block-size.patch b/queue-2.6.19/sha512-fix-sha384-block-size.patch
new file mode 100644 (file)
index 0000000..947c6f5
--- /dev/null
@@ -0,0 +1,30 @@
+From stable-bounces@linux.kernel.org  Sat Dec  9 16:37:52 2006
+Date: Sun, 10 Dec 2006 11:32:06 +1100
+Message-ID: <20061210003206.GA14068@gondor.apana.org.au>
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: stable@kernel.org
+Subject: sha512: Fix sha384 block size
+
+The SHA384 block size should be 128 bytes, not 96 bytes.  This was
+spotted by Andrew Donofrio.
+
+The incorrect block size breaks HMAC, which uses the block size both
+during setup and in the final calculation.
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ crypto/sha512.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- linux-2.6.19.1.orig/crypto/sha512.c
++++ linux-2.6.19.1/crypto/sha512.c
+@@ -24,7 +24,7 @@
+ #define SHA384_DIGEST_SIZE 48
+ #define SHA512_DIGEST_SIZE 64
+-#define SHA384_HMAC_BLOCK_SIZE  96
++#define SHA384_HMAC_BLOCK_SIZE 128
+ #define SHA512_HMAC_BLOCK_SIZE 128
+ struct sha512_ctx {
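
The reason a wrong block size breaks HMAC: RFC 2104 zero-pads the key to the underlying hash's block size and XORs it with the ipad/opad constants, and SHA-384 reuses SHA-512's 1024-bit (128-byte) block. A rough sketch of that padding step (illustrative only, not crypto/hmac.c; the macro and function names are made up):

    #include <linux/string.h>
    #include <linux/types.h>

    #define SHA384_BLOCK_BYTES 128  /* same as SHA-512: both hash 1024-bit blocks */

    /* With the old 96-byte value the pads come up 32 bytes short and every
     * HMAC-SHA384 digest disagrees with other implementations. */
    static void hmac_pad(u8 *pad, const u8 *key, unsigned int keylen)
    {
            unsigned int i;

            memset(pad, 0, SHA384_BLOCK_BYTES);
            memcpy(pad, key, keylen);       /* assumes keylen <= block size */
            for (i = 0; i < SHA384_BLOCK_BYTES; i++)
                    pad[i] ^= 0x36;         /* 0x36 = ipad; opad uses 0x5c */
    }
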
diff --git a/queue-2.6.19/x86-64-mark-rdtsc-as-sync-only-for-netburst-not-for-core2.patch b/queue-2.6.19/x86-64-mark-rdtsc-as-sync-only-for-netburst-not-for-core2.patch
new file mode 100644 (file)
index 0000000..38f0e08
--- /dev/null
@@ -0,0 +1,43 @@
+From stable-bounces@linux.kernel.org  Mon Dec 11 13:26:59 2006
+From: Arjan van de Ven <arjan@linux.intel.com>
+To: stable@kernel.org
+Date: Mon, 11 Dec 2006 21:45:01 +0100
+Message-Id: <1165869901.27217.439.camel@laptopd505.fenrus.org>
+Subject: x86-64: Mark rdtsc as sync only for netburst, not for core2
+
+On Core2 CPUs the rdtsc instruction is not serializing (as the
+architecture reference has stated ever since rdtsc was introduced), and
+because these cores speculate deeply it is possible to observe time
+going backwards between cores. The kernel already handles this with
+the SYNC_RDTSC flag, so the fix is simple: only assume that the
+instruction is serializing on family 15 (NetBurst)...
+
+The price is a slightly slower gettimeofday (by a dozen or two
+cycles), but that is a small cost for a TSC that only ever moves
+forward.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+Commit:     f3d73707a1e84f0687a05144b70b660441e999c7
+Author:     Arjan van de Ven <arjan@linux.intel.com>
+AuthorDate: Thu Dec 7 02:14:12 2006 +0100
+
+ arch/x86_64/kernel/setup.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- linux-2.6.19.1.orig/arch/x86_64/kernel/setup.c
++++ linux-2.6.19.1/arch/x86_64/kernel/setup.c
+@@ -854,7 +854,10 @@ static void __cpuinit init_intel(struct 
+               set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+       if (c->x86 == 6)
+               set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
+-      set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++      if (c->x86 == 15)
++              set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++      else
++              clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+       c->x86_max_cores = intel_num_cpu_cores(c);
+       srat_detect_node();
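
With X86_FEATURE_SYNC_RDTSC now cleared on these parts, code that still needs ordered timestamps has to serialize around the read itself; the usual idiom is a cpuid immediately before rdtsc. A minimal sketch (illustrative, not code from the kernel tree):

    /* cpuid is architecturally serializing, so it drains speculative work
     * before rdtsc executes and the timestamp cannot be taken "early". */
    static inline unsigned long long rdtsc_serialized(void)
    {
            unsigned int lo, hi, eax = 0, ecx = 0;

            asm volatile("cpuid"
                         : "+a" (eax), "+c" (ecx)
                         : : "ebx", "edx", "memory");
            asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
            return ((unsigned long long)hi << 32) | lo;
    }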