add more patches for 2.6.25 queue
author    Chris Wright <chrisw@sous-sol.org>
          Fri, 6 Jun 2008 23:58:56 +0000 (16:58 -0700)
committer Chris Wright <chrisw@sous-sol.org>
          Fri, 6 Jun 2008 23:58:56 +0000 (16:58 -0700)
12 files changed:
queue-2.6.25/atl1-fix-4g-memory-corruption-bug.patch [new file with mode: 0644]
queue-2.6.25/brk-make-sys_brk-honor-compat_brk-when-computing-lower-bound.patch [new file with mode: 0644]
queue-2.6.25/capabilities-remain-source-compatible-with-32-bit-raw-legacy-capability-support.patch [new file with mode: 0644]
queue-2.6.25/cpufreq-fix-null-object-access-on-transmeta-cpu.patch [new file with mode: 0644]
queue-2.6.25/ecryptfs-remove-unnecessary-page-decrypt-call.patch [new file with mode: 0644]
queue-2.6.25/md-do-not-compute-parity-unless-it-is-on-a-failed-drive.patch [new file with mode: 0644]
queue-2.6.25/md-fix-prexor-vs-sync_request-race.patch [new file with mode: 0644]
queue-2.6.25/md-fix-uninitialized-use-of-mddev-recovery_wait.patch [new file with mode: 0644]
queue-2.6.25/pagemap-fix-bug-in-add_to_pagemap-require-aligned-length-reads-of-proc-pid-pagemap.patch [new file with mode: 0644]
queue-2.6.25/proc-calculate-the-correct-proc-pid-link-count.patch [new file with mode: 0644]
queue-2.6.25/series
queue-2.6.25/smack-fuse-mount-hang-fix.patch [new file with mode: 0644]

diff --git a/queue-2.6.25/atl1-fix-4g-memory-corruption-bug.patch b/queue-2.6.25/atl1-fix-4g-memory-corruption-bug.patch
new file mode 100644
index 0000000..53e032e
--- /dev/null
+++ b/queue-2.6.25/atl1-fix-4g-memory-corruption-bug.patch
@@ -0,0 +1,37 @@
+From 574ea5a1be6579bed4f1429aba19c5749324658e Mon Sep 17 00:00:00 2001
+From: Jay Cliburn <jacliburn@bellsouth.net>
+Date: Thu, 5 Jun 2008 18:44:59 -0500
+Message-ID: <20080606124723.3511d0a7@osprey.hogchain.net>
+Subject: atl1: fix 4G memory corruption bug
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+upstream commit: aefdbf1a3b832a580a50cf3d1dcbb717be7cbdbe
+
+When using 4+ GB RAM and SWIOTLB is active, the driver corrupts
+memory by writing an skb after the relevant DMA page has been
+unmapped.  Although this doesn't happen when *not* using bounce
+buffers, clearing the pointer to the DMA page after unmapping
+it fixes the problem.
+
+http://marc.info/?t=120861317000005&r=2&w=2
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+[jacliburn@bellsouth.net: backport to 2.6.25.4]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/net/atl1/atl1_main.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/atl1/atl1_main.c
++++ b/drivers/net/atl1/atl1_main.c
+@@ -1334,6 +1334,7 @@ rrd_ok:
+               /* Good Receive */
+               pci_unmap_page(adapter->pdev, buffer_info->dma,
+                              buffer_info->length, PCI_DMA_FROMDEVICE);
++              buffer_info->dma = 0;
+               skb = buffer_info->skb;
+               length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
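A minimal user-space sketch (not part of the patch) of the pattern the one-line fix applies: once a mapping is torn down, forget the stored handle so no later receive path can touch it. The struct and helper names are illustrative stand-ins, with malloc/free standing in for the PCI mapping calls:

    #include <stdio.h>
    #include <stdlib.h>

    struct buffer_info {
            void *dma;              /* stands in for the driver's DMA handle */
            unsigned int length;
    };

    static void rx_complete(struct buffer_info *bi)
    {
            free(bi->dma);          /* stands in for pci_unmap_page() */
            bi->dma = NULL;         /* the fix: clear the handle after unmapping */
    }

    int main(void)
    {
            struct buffer_info bi = { .dma = malloc(64), .length = 64 };

            rx_complete(&bi);
            printf("handle after unmap: %p\n", bi.dma);   /* prints (nil) */
            return 0;
    }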
diff --git a/queue-2.6.25/brk-make-sys_brk-honor-compat_brk-when-computing-lower-bound.patch b/queue-2.6.25/brk-make-sys_brk-honor-compat_brk-when-computing-lower-bound.patch
new file mode 100644
index 0000000..d22a754
--- /dev/null
+++ b/queue-2.6.25/brk-make-sys_brk-honor-compat_brk-when-computing-lower-bound.patch
@@ -0,0 +1,55 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:14:58 2008
+Date: Fri, 6 Jun 2008 18:43:23 GMT
+Message-Id: <200806061843.m56IhN6T015189@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: brk: make sys_brk() honor COMPAT_BRK when computing lower bound
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+upstream commit: a5b4592cf77b973c29e7c9695873a26052b58951
+
+Fix a regression introduced by
+
+commit 4cc6028d4040f95cdb590a87db478b42b8be0508
+Author: Jiri Kosina <jkosina@suse.cz>
+Date:   Wed Feb 6 22:39:44 2008 +0100
+
+    brk: check the lower bound properly
+
+The check in sys_brk() on minimum value the brk might have must take
+CONFIG_COMPAT_BRK setting into account.  When this option is turned on
+(i.e.  we support ancient legacy binaries, e.g.  libc5-linked stuff), the
+lower bound on brk value is mm->end_code, otherwise the brk start is
+allowed to be arbitrarily shifted.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ mm/mmap.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -242,10 +242,16 @@ asmlinkage unsigned long sys_brk(unsigne
+       unsigned long rlim, retval;
+       unsigned long newbrk, oldbrk;
+       struct mm_struct *mm = current->mm;
++      unsigned long min_brk;
+       down_write(&mm->mmap_sem);
+-      if (brk < mm->start_brk)
++#ifdef CONFIG_COMPAT_BRK
++      min_brk = mm->end_code;
++#else
++      min_brk = mm->start_brk;
++#endif
++      if (brk < min_brk)
+               goto out;
+       /*
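A small user-space probe (assumed, not part of the patch) illustrating the behaviour this check enforces: sys_brk() refuses a request below the minimum and simply reports the current break back, so asking for an obviously illegal break should leave the value unchanged. Which minimum applies (mm->end_code vs. mm->start_brk) depends on CONFIG_COMPAT_BRK as described above:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            unsigned long before = syscall(SYS_brk, 0);      /* query current break */
            unsigned long after  = syscall(SYS_brk, 4096UL); /* far below any legal minimum */

            /* a refused request returns the break unchanged */
            printf("break before: %#lx  after illegal request: %#lx\n", before, after);
            return after == before ? 0 : 1;
    }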
diff --git a/queue-2.6.25/capabilities-remain-source-compatible-with-32-bit-raw-legacy-capability-support.patch b/queue-2.6.25/capabilities-remain-source-compatible-with-32-bit-raw-legacy-capability-support.patch
new file mode 100644
index 0000000..85a0e5c
--- /dev/null
+++ b/queue-2.6.25/capabilities-remain-source-compatible-with-32-bit-raw-legacy-capability-support.patch
@@ -0,0 +1,306 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:17:34 2008
+Date: Fri, 6 Jun 2008 18:44:08 GMT
+Message-Id: <200806061844.m56Ii8Dw015470@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: capabilities: remain source compatible with 32-bit raw legacy capability support.
+
+From: Andrew G. Morgan <morgan@kernel.org>
+
+upstream commit: ca05a99a54db1db5bca72eccb5866d2a86f8517f
+
+Source code out there hard-codes a notion of what the
+_LINUX_CAPABILITY_VERSION #define means in terms of the semantics of the
+raw capability system calls capget() and capset().  It's unfortunate, but
+true.
+
+Since the confusing header file has been in a released kernel, there is
+software that is erroneously using 64-bit capabilities with the semantics
+of 32-bit compatibilities.  These recently compiled programs may suffer
+corruption of their memory when sys_getcap() overwrites more memory than
+they are coded to expect, and the raising of added capabilities when using
+sys_capset().
+
+As such, this patch does a number of things to clean up the situation
+for all. It
+
+  1. forces the _LINUX_CAPABILITY_VERSION define to always retain its
+     legacy value.
+
+  2. adopts a new #define strategy for the kernel's internal
+     implementation of the preferred magic.
+
+  3. deprecates v2 capability magic in favor of a new (v3) magic
+     number. The functionality of v3 is entirely equivalent to v2,
+     the only difference being that the v2 magic causes the kernel
+     to log a "deprecated" warning so the admin can find applications
+     that may be using v2 inappropriately.
+
+[User space code continues to be encouraged to use the libcap API which
+protects the application from details like this.  libcap-2.10 is the first
+to support v3 capabilities.]
+
+Fixes issue reported in https://bugzilla.redhat.com/show_bug.cgi?id=447518.
+Thanks to Bojan Smojver for the report.
+
+[akpm@linux-foundation.org: s/depreciate/deprecate/g]
+[akpm@linux-foundation.org: be robust about put_user size]
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Andrew G. Morgan <morgan@kernel.org>
+Cc: Serge E. Hallyn <serue@us.ibm.com>
+Cc: Bojan Smojver <bojan@rexursive.com>
+Cc: stable@kernel.org
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/proc/array.c            |    2 +-
+ include/linux/capability.h |   29 ++++++++---
+ kernel/capability.c        |  111 +++++++++++++++++++++++++++++----------------
+ 3 files changed, 95 insertions(+), 47 deletions(-)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -287,7 +287,7 @@ static void render_cap_t(struct seq_file
+       seq_printf(m, "%s", header);
+       CAP_FOR_EACH_U32(__capi) {
+               seq_printf(m, "%08x",
+-                         a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
++                         a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+       }
+       seq_printf(m, "\n");
+ }
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -31,11 +31,11 @@ struct task_struct;
+ #define _LINUX_CAPABILITY_VERSION_1  0x19980330
+ #define _LINUX_CAPABILITY_U32S_1     1
+-#define _LINUX_CAPABILITY_VERSION_2  0x20071026
++#define _LINUX_CAPABILITY_VERSION_2  0x20071026  /* deprecated - use v3 */
+ #define _LINUX_CAPABILITY_U32S_2     2
+-#define _LINUX_CAPABILITY_VERSION    _LINUX_CAPABILITY_VERSION_2
+-#define _LINUX_CAPABILITY_U32S       _LINUX_CAPABILITY_U32S_2
++#define _LINUX_CAPABILITY_VERSION_3  0x20080522
++#define _LINUX_CAPABILITY_U32S_3     2
+ typedef struct __user_cap_header_struct {
+       __u32 version;
+@@ -77,10 +77,23 @@ struct vfs_cap_data {
+       } data[VFS_CAP_U32];
+ };
+-#ifdef __KERNEL__
++#ifndef __KERNEL__
++
++/*
++ * Backwardly compatible definition for source code - trapped in a
++ * 32-bit world. If you find you need this, please consider using
++ * libcap to untrap yourself...
++ */
++#define _LINUX_CAPABILITY_VERSION  _LINUX_CAPABILITY_VERSION_1
++#define _LINUX_CAPABILITY_U32S     _LINUX_CAPABILITY_U32S_1
++
++#else
++
++#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
++#define _KERNEL_CAPABILITY_U32S    _LINUX_CAPABILITY_U32S_3
+ typedef struct kernel_cap_struct {
+-      __u32 cap[_LINUX_CAPABILITY_U32S];
++      __u32 cap[_KERNEL_CAPABILITY_U32S];
+ } kernel_cap_t;
+ #define _USER_CAP_HEADER_SIZE  (sizeof(struct __user_cap_header_struct))
+@@ -350,7 +363,7 @@ typedef struct kernel_cap_struct {
+  */
+ #define CAP_FOR_EACH_U32(__capi)  \
+-      for (__capi = 0; __capi < _LINUX_CAPABILITY_U32S; ++__capi)
++      for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+ # define CAP_FS_MASK_B0     (CAP_TO_MASK(CAP_CHOWN)           \
+                           | CAP_TO_MASK(CAP_DAC_OVERRIDE)     \
+@@ -360,7 +373,7 @@ typedef struct kernel_cap_struct {
+ # define CAP_FS_MASK_B1     (CAP_TO_MASK(CAP_MAC_OVERRIDE))
+-#if _LINUX_CAPABILITY_U32S != 2
++#if _KERNEL_CAPABILITY_U32S != 2
+ # error Fix up hand-coded capability macro initializers
+ #else /* HAND-CODED capability initializers */
+@@ -371,7 +384,7 @@ typedef struct kernel_cap_struct {
+ # define CAP_NFSD_SET     {{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
+                            CAP_FS_MASK_B1 } }
+-#endif /* _LINUX_CAPABILITY_U32S != 2 */
++#endif /* _KERNEL_CAPABILITY_U32S != 2 */
+ #define CAP_INIT_INH_SET    CAP_EMPTY_SET
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -53,6 +53,69 @@ static void warn_legacy_capability_use(v
+ }
+ /*
++ * Version 2 capabilities worked fine, but the linux/capability.h file
++ * that accompanied their introduction encouraged their use without
++ * the necessary user-space source code changes. As such, we have
++ * created a version 3 with equivalent functionality to version 2, but
++ * with a header change to protect legacy source code from using
++ * version 2 when it wanted to use version 1. If your system has code
++ * that trips the following warning, it is using version 2 specific
++ * capabilities and may be doing so insecurely.
++ *
++ * The remedy is to either upgrade your version of libcap (to 2.10+,
++ * if the application is linked against it), or recompile your
++ * application with modern kernel headers and this warning will go
++ * away.
++ */
++
++static void warn_deprecated_v2(void)
++{
++      static int warned;
++
++      if (!warned) {
++              char name[sizeof(current->comm)];
++
++              printk(KERN_INFO "warning: `%s' uses deprecated v2"
++                     " capabilities in a way that may be insecure.\n",
++                     get_task_comm(name, current));
++              warned = 1;
++      }
++}
++
++/*
++ * Version check. Return the number of u32s in each capability flag
++ * array, or a negative value on error.
++ */
++static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
++{
++      __u32 version;
++
++      if (get_user(version, &header->version))
++              return -EFAULT;
++
++      switch (version) {
++      case _LINUX_CAPABILITY_VERSION_1:
++              warn_legacy_capability_use();
++              *tocopy = _LINUX_CAPABILITY_U32S_1;
++              break;
++      case _LINUX_CAPABILITY_VERSION_2:
++              warn_deprecated_v2();
++              /*
++               * fall through - v3 is otherwise equivalent to v2.
++               */
++      case _LINUX_CAPABILITY_VERSION_3:
++              *tocopy = _LINUX_CAPABILITY_U32S_3;
++              break;
++      default:
++              if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
++                      return -EFAULT;
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/*
+  * For sys_getproccap() and sys_setproccap(), any of the three
+  * capability set pointers may be NULL -- indicating that that set is
+  * uninteresting and/or not to be changed.
+@@ -71,27 +134,13 @@ asmlinkage long sys_capget(cap_user_head
+ {
+       int ret = 0;
+       pid_t pid;
+-      __u32 version;
+       struct task_struct *target;
+       unsigned tocopy;
+       kernel_cap_t pE, pI, pP;
+-      if (get_user(version, &header->version))
+-              return -EFAULT;
+-
+-      switch (version) {
+-      case _LINUX_CAPABILITY_VERSION_1:
+-              warn_legacy_capability_use();
+-              tocopy = _LINUX_CAPABILITY_U32S_1;
+-              break;
+-      case _LINUX_CAPABILITY_VERSION_2:
+-              tocopy = _LINUX_CAPABILITY_U32S_2;
+-              break;
+-      default:
+-              if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+-                      return -EFAULT;
+-              return -EINVAL;
+-      }
++      ret = cap_validate_magic(header, &tocopy);
++      if (ret != 0)
++              return ret;
+       if (get_user(pid, &header->pid))
+               return -EFAULT;
+@@ -118,7 +167,7 @@ out:
+       spin_unlock(&task_capability_lock);
+       if (!ret) {
+-              struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
++              struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
+               unsigned i;
+               for (i = 0; i < tocopy; i++) {
+@@ -128,7 +177,7 @@ out:
+               }
+               /*
+-               * Note, in the case, tocopy < _LINUX_CAPABILITY_U32S,
++               * Note, in the case, tocopy < _KERNEL_CAPABILITY_U32S,
+                * we silently drop the upper capabilities here. This
+                * has the effect of making older libcap
+                * implementations implicitly drop upper capability
+@@ -240,30 +289,16 @@ static inline int cap_set_all(kernel_cap
+  */
+ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+ {
+-      struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
++      struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
+       unsigned i, tocopy;
+       kernel_cap_t inheritable, permitted, effective;
+-      __u32 version;
+       struct task_struct *target;
+       int ret;
+       pid_t pid;
+-      if (get_user(version, &header->version))
+-              return -EFAULT;
+-
+-      switch (version) {
+-      case _LINUX_CAPABILITY_VERSION_1:
+-              warn_legacy_capability_use();
+-              tocopy = _LINUX_CAPABILITY_U32S_1;
+-              break;
+-      case _LINUX_CAPABILITY_VERSION_2:
+-              tocopy = _LINUX_CAPABILITY_U32S_2;
+-              break;
+-      default:
+-              if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+-                      return -EFAULT;
+-              return -EINVAL;
+-      }
++      ret = cap_validate_magic(header, &tocopy);
++      if (ret != 0)
++              return ret;
+       if (get_user(pid, &header->pid))
+               return -EFAULT;
+@@ -281,7 +316,7 @@ asmlinkage long sys_capset(cap_user_head
+               permitted.cap[i] = kdata[i].permitted;
+               inheritable.cap[i] = kdata[i].inheritable;
+       }
+-      while (i < _LINUX_CAPABILITY_U32S) {
++      while (i < _KERNEL_CAPABILITY_U32S) {
+               effective.cap[i] = 0;
+               permitted.cap[i] = 0;
+               inheritable.cap[i] = 0;
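The negotiation added above is observable from plain user space: handing capget() an unknown version makes the kernel write its preferred magic back into the header and fail with EINVAL. A hedged sketch using syscall() directly, with local stand-ins for the header/data structs so it builds without libcap; on a kernel carrying this patch it should print 0x20080522:

    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    struct cap_hdr  { unsigned int version; int pid; };   /* mirrors __user_cap_header_struct */
    struct cap_data { unsigned int effective, permitted, inheritable; };

    int main(void)
    {
            struct cap_hdr hdr = { .version = 0, .pid = 0 };   /* deliberately bogus magic */
            struct cap_data data[2];

            if (syscall(SYS_capget, &hdr, data) == -1 && errno == EINVAL)
                    printf("kernel's preferred capability magic: %#x\n", hdr.version);
            else
                    printf("unexpected capget() result\n");
            return 0;
    }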
diff --git a/queue-2.6.25/cpufreq-fix-null-object-access-on-transmeta-cpu.patch b/queue-2.6.25/cpufreq-fix-null-object-access-on-transmeta-cpu.patch
new file mode 100644
index 0000000..77b68fc
--- /dev/null
+++ b/queue-2.6.25/cpufreq-fix-null-object-access-on-transmeta-cpu.patch
@@ -0,0 +1,56 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 16:46:17 2008
+Date: Fri, 6 Jun 2008 18:41:31 GMT
+Message-Id: <200806061841.m56IfVAe014792@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: cpufreq: fix null object access on Transmeta CPU
+
+From: CHIKAMA masaki <masaki.chikama@gmail.com>
+
+upstream commit: 879000f94442860e72c934f9e568989bc7fb8ec4
+
+If a CPU-specific cpufreq driver (e.g. longrun) implements a "setpolicy"
+function, no governor object is set in the cpufreq_policy object by the
+"__cpufreq_set_policy" function in drivers/cpufreq/cpufreq.c.
+
+This causes a NULL object access in the "store_scaling_setspeed" and
+"show_scaling_setspeed" functions in drivers/cpufreq/cpufreq.c when reading
+or writing through the /sys interface (e.g. cat
+/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed).
+
+Addresses:
+       http://bugzilla.kernel.org/show_bug.cgi?id=10654
+       https://bugzilla.redhat.com/show_bug.cgi?id=443354
+
+Signed-off-by: CHIKAMA Masaki <masaki.chikama@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Acked-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/cpufreq/cpufreq.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -607,7 +607,7 @@ static ssize_t store_scaling_setspeed(st
+       unsigned int freq = 0;
+       unsigned int ret;
+-      if (!policy->governor->store_setspeed)
++      if (!policy->governor || !policy->governor->store_setspeed)
+               return -EINVAL;
+       ret = sscanf(buf, "%u", &freq);
+@@ -621,7 +621,7 @@ static ssize_t store_scaling_setspeed(st
+ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+ {
+-      if (!policy->governor->show_setspeed)
++      if (!policy->governor || !policy->governor->show_setspeed)
+               return sprintf(buf, "<unsupported>\n");
+       return policy->governor->show_setspeed(policy, buf);
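The reproducer is just reading the sysfs attribute named above; a minimal C equivalent of the `cat` command (cpu0 assumed), which with the fix applied returns a value or "<unsupported>" instead of oopsing:

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed", "r");

            if (!f) {
                    perror("scaling_setspeed");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%s", buf);   /* "<unsupported>" unless the userspace governor is active */
            fclose(f);
            return 0;
    }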
diff --git a/queue-2.6.25/ecryptfs-remove-unnecessary-page-decrypt-call.patch b/queue-2.6.25/ecryptfs-remove-unnecessary-page-decrypt-call.patch
new file mode 100644
index 0000000..e211511
--- /dev/null
+++ b/queue-2.6.25/ecryptfs-remove-unnecessary-page-decrypt-call.patch
@@ -0,0 +1,89 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:15:35 2008
+Date: Fri, 6 Jun 2008 18:43:31 GMT
+Message-Id: <200806061843.m56IhVog015228@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: eCryptfs: remove unnecessary page decrypt call
+
+From: Michael Halcrow <mhalcrow@us.ibm.com>
+
+upstream commit: d3e49afbb66109613c3474f2273f5830ac2dcb09
+
+The page decrypt calls in ecryptfs_write() are both pointless and buggy.
+Pointless because ecryptfs_get_locked_page() has already brought the page
+up to date, and buggy because prior mmap writes will just be blown away by
+the decrypt call.
+
+This patch also removes the declaration of a now-nonexistent function
+ecryptfs_write_zeros().
+
+Thanks to Eric Sandeen and David Kleikamp for helping to track this
+down.
+
+Eric said:
+
+   fsx w/ mmap dies quickly ( < 100 ops) without this, and survives
+   nicely (to millions of ops+) with it in place.
+
+Signed-off-by: Michael Halcrow <mhalcrow@us.ibm.com>
+Cc: Eric Sandeen <sandeen@redhat.com>
+Cc: Dave Kleikamp <shaggy@austin.ibm.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[chrisw: backport to 2.6.25.5]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/ecryptfs/ecryptfs_kernel.h |    2 --
+ fs/ecryptfs/read_write.c      |   22 ----------------------
+ 2 files changed, 24 deletions(-)
+
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -626,8 +626,6 @@ int ecryptfs_get_tfm_and_mutex_for_ciphe
+ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
+                                     struct ecryptfs_auth_tok **auth_tok,
+                                     char *sig);
+-int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start,
+-                       int num_zeros);
+ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
+                        loff_t offset, size_t size);
+ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
+--- a/fs/ecryptfs/read_write.c
++++ b/fs/ecryptfs/read_write.c
+@@ -157,20 +157,6 @@ int ecryptfs_write(struct file *ecryptfs
+                              ecryptfs_page_idx, rc);
+                       goto out;
+               }
+-              if (start_offset_in_page) {
+-                      /* Read in the page from the lower
+-                       * into the eCryptfs inode page cache,
+-                       * decrypting */
+-                      rc = ecryptfs_decrypt_page(ecryptfs_page);
+-                      if (rc) {
+-                              printk(KERN_ERR "%s: Error decrypting "
+-                                     "page; rc = [%d]\n",
+-                                     __FUNCTION__, rc);
+-                              ClearPageUptodate(ecryptfs_page);
+-                              page_cache_release(ecryptfs_page);
+-                              goto out;
+-                      }
+-              }
+               ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
+               /*
+@@ -349,14 +335,6 @@ int ecryptfs_read(char *data, loff_t off
+                              ecryptfs_page_idx, rc);
+                       goto out;
+               }
+-              rc = ecryptfs_decrypt_page(ecryptfs_page);
+-              if (rc) {
+-                      printk(KERN_ERR "%s: Error decrypting "
+-                             "page; rc = [%d]\n", __FUNCTION__, rc);
+-                      ClearPageUptodate(ecryptfs_page);
+-                      page_cache_release(ecryptfs_page);
+-                      goto out;
+-              }
+               ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
+               memcpy((data + data_offset),
+                      ((char *)ecryptfs_page_virt + start_offset_in_page),
diff --git a/queue-2.6.25/md-do-not-compute-parity-unless-it-is-on-a-failed-drive.patch b/queue-2.6.25/md-do-not-compute-parity-unless-it-is-on-a-failed-drive.patch
new file mode 100644
index 0000000..ab14b3f
--- /dev/null
+++ b/queue-2.6.25/md-do-not-compute-parity-unless-it-is-on-a-failed-drive.patch
@@ -0,0 +1,52 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:16:05 2008
+Date: Fri, 6 Jun 2008 18:43:45 GMT
+Message-Id: <200806061843.m56Ihjqh015293@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: md: do not compute parity unless it is on a failed drive
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+upstream commit: c337869d95011495fa181536786e74aa2d7ff031
+
+If a block is computed (rather than read) then a check/repair operation
+may be led to believe that the data on disk is correct, when in fact it
+isn't.  So only compute blocks for failed devices.
+
+This issue has been around since at least 2.6.12, but has become harder to
+hit in recent kernels since most reads bypass the cache.
+
+echo repair > /sys/block/mdN/md/sync_action will set the parity blocks to the
+correct state.
+
+Cc: <stable@kernel.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Neil Brown <neilb@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/raid5.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1984,6 +1984,7 @@ static int __handle_issuing_new_read_req
+                * have quiesced.
+                */
+               if ((s->uptodate == disks - 1) &&
++                  (s->failed && disk_idx == s->failed_num) &&
+                   !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+                       set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+                       set_bit(R5_Wantcompute, &dev->flags);
+@@ -2069,7 +2070,9 @@ static void handle_issuing_new_read_requ
+                       /* we would like to get this block, possibly
+                        * by computing it, but we might not be able to
+                        */
+-                      if (s->uptodate == disks-1) {
++                      if ((s->uptodate == disks - 1) &&
++                          (s->failed && (i == r6s->failed_num[0] ||
++                                         i == r6s->failed_num[1]))) {
+                               pr_debug("Computing stripe %llu block %d\n",
+                                      (unsigned long long)sh->sector, i);
+                               compute_block_1(sh, i, 0);
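A C equivalent of the `echo repair > /sys/block/mdN/md/sync_action` step the changelog recommends for arrays that may already carry wrong parity; md0 below is a placeholder for the affected array, and the program must run as root:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/block/md0/md/sync_action";  /* substitute the real mdN */
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fputs("repair\n", f);   /* same effect as: echo repair > .../sync_action */
            fclose(f);
            return 0;
    }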
diff --git a/queue-2.6.25/md-fix-prexor-vs-sync_request-race.patch b/queue-2.6.25/md-fix-prexor-vs-sync_request-race.patch
new file mode 100644
index 0000000..e9088b8
--- /dev/null
+++ b/queue-2.6.25/md-fix-prexor-vs-sync_request-race.patch
@@ -0,0 +1,74 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:17:05 2008
+Date: Fri, 6 Jun 2008 18:43:58 GMT
+Message-Id: <200806061843.m56IhwXl015387@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: md: fix prexor vs sync_request race
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+upstream commit: e0a115e5aa554b93150a8dc1c3fe15467708abb2
+
+During the initial array synchronization process there is a window between
+when a prexor operation is scheduled to a specific stripe and when it
+completes for a sync_request to be scheduled to the same stripe.  When
+this happens the prexor completes and the stripe is unconditionally marked
+"insync", effectively canceling the sync_request for the stripe.  Prior to
+2.6.23 this was not a problem because the prexor operation was done under
+sh->lock.  The effect in older kernels being that the prexor would still
+erroneously mark the stripe "insync", but sync_request would be held off
+and re-mark the stripe as "!in_sync".
+
+Change the write completion logic to not mark the stripe "in_sync" if a
+prexor was performed.  The effect of the change is to sometimes not set
+STRIPE_INSYNC.  The worst this can do is cause the resync to stall waiting
+for STRIPE_INSYNC to be set.  If this were happening, then STRIPE_SYNCING
+would be set and handle_issuing_new_read_requests would cause all
+available blocks to eventually be read, at which point prexor would never
+be used on that stripe any more and STRIPE_INSYNC would eventually be set.
+
+echo repair > /sys/block/mdN/md/sync_action will correct arrays that may
+have lost this race.
+
+Cc: <stable@kernel.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Neil Brown <neilb@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[chrisw: backport to 2.6.25.5]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/raid5.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2621,6 +2621,7 @@ static void handle_stripe5(struct stripe
+       struct stripe_head_state s;
+       struct r5dev *dev;
+       unsigned long pending = 0;
++      int prexor;
+       memset(&s, 0, sizeof(s));
+       pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
+@@ -2740,9 +2741,11 @@ static void handle_stripe5(struct stripe
+       /* leave prexor set until postxor is done, allows us to distinguish
+        * a rmw from a rcw during biodrain
+        */
++      prexor = 0;
+       if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
+               test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
++              prexor = 1;
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
+               clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+@@ -2776,6 +2779,8 @@ static void handle_stripe5(struct stripe
+                               if (!test_and_set_bit(
+                                   STRIPE_OP_IO, &sh->ops.pending))
+                                       sh->ops.count++;
++                              if (prexor)
++                                      continue;
+                               if (!test_bit(R5_Insync, &dev->flags) ||
+                                   (i == sh->pd_idx && s.failed == 0))
+                                       set_bit(STRIPE_INSYNC, &sh->state);
diff --git a/queue-2.6.25/md-fix-uninitialized-use-of-mddev-recovery_wait.patch b/queue-2.6.25/md-fix-uninitialized-use-of-mddev-recovery_wait.patch
new file mode 100644
index 0000000..405bb2a
--- /dev/null
+++ b/queue-2.6.25/md-fix-uninitialized-use-of-mddev-recovery_wait.patch
@@ -0,0 +1,44 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:16:36 2008
+Date: Fri, 6 Jun 2008 18:43:53 GMT
+Message-Id: <200806061843.m56IhrZm015344@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: md: fix uninitialized use of mddev->recovery_wait
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+upstream commit: a6d8113a986c66aeb379a26b6e0062488b3e59e1
+
+If an array was created with --assume-clean we will oops when trying to
+set ->resync_max.
+
+Fix this by initializing ->recovery_wait in mddev_find.
+
+Cc: <stable@kernel.org>
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Neil Brown <neilb@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/md.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -274,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
+       atomic_set(&new->active, 1);
+       spin_lock_init(&new->write_lock);
+       init_waitqueue_head(&new->sb_wait);
++      init_waitqueue_head(&new->recovery_wait);
+       new->reshape_position = MaxSector;
+       new->resync_max = MaxSector;
+@@ -5559,7 +5560,6 @@ void md_do_sync(mddev_t *mddev)
+               window/2,(unsigned long long) max_sectors/2);
+       atomic_set(&mddev->recovery_active, 0);
+-      init_waitqueue_head(&mddev->recovery_wait);
+       last_check = 0;
+       if (j>2) {
diff --git a/queue-2.6.25/pagemap-fix-bug-in-add_to_pagemap-require-aligned-length-reads-of-proc-pid-pagemap.patch b/queue-2.6.25/pagemap-fix-bug-in-add_to_pagemap-require-aligned-length-reads-of-proc-pid-pagemap.patch
new file mode 100644
index 0000000..da0a7e4
--- /dev/null
+++ b/queue-2.6.25/pagemap-fix-bug-in-add_to_pagemap-require-aligned-length-reads-of-proc-pid-pagemap.patch
@@ -0,0 +1,97 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:14:31 2008
+Date: Fri, 6 Jun 2008 18:41:41 GMT
+Message-Id: <200806061841.m56Iffth014836@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: pagemap: fix bug in add_to_pagemap, require aligned-length reads of /proc/pid/pagemap
+
+From: Thomas Tuttle <ttuttle@google.com>
+
+upstream commit: aae8679b0ebcaa92f99c1c3cb0cd651594a43915
+
+Fix a bug in add_to_pagemap.  Previously, since pm->out was a char *,
+put_user was only copying 1 byte of every PFN, resulting in the top 7
+bytes of each PFN not being copied.  By requiring that reads be a multiple
+of 8 bytes, I can make pm->out and pm->end u64*s instead of char*s, which
+makes put_user work properly, and also simplifies the logic in
+add_to_pagemap a bit.
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Thomas Tuttle <ttuttle@google.com>
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/proc/task_mmu.c |   28 +++++++++-------------------
+ 1 file changed, 9 insertions(+), 19 deletions(-)
+
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -524,7 +524,7 @@ const struct file_operations proc_clear_
+ };
+ struct pagemapread {
+-      char __user *out, *end;
++      u64 __user *out, *end;
+ };
+ #define PM_ENTRY_BYTES      sizeof(u64)
+@@ -547,21 +547,11 @@ struct pagemapread {
+ static int add_to_pagemap(unsigned long addr, u64 pfn,
+                         struct pagemapread *pm)
+ {
+-      /*
+-       * Make sure there's room in the buffer for an
+-       * entire entry.  Otherwise, only copy part of
+-       * the pfn.
+-       */
+-      if (pm->out + PM_ENTRY_BYTES >= pm->end) {
+-              if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
+-                      return -EFAULT;
+-              pm->out = pm->end;
+-              return PM_END_OF_BUFFER;
+-      }
+-
+       if (put_user(pfn, pm->out))
+               return -EFAULT;
+-      pm->out += PM_ENTRY_BYTES;
++      pm->out++;
++      if (pm->out >= pm->end)
++              return PM_END_OF_BUFFER;
+       return 0;
+ }
+@@ -662,7 +652,7 @@ static ssize_t pagemap_read(struct file 
+       ret = -EINVAL;
+       /* file position must be aligned */
+-      if (*ppos % PM_ENTRY_BYTES)
++      if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
+               goto out_task;
+       ret = 0;
+@@ -692,8 +682,8 @@ static ssize_t pagemap_read(struct file 
+               goto out_pages;
+       }
+-      pm.out = buf;
+-      pm.end = buf + count;
++      pm.out = (u64 *)buf;
++      pm.end = (u64 *)(buf + count);
+       if (!ptrace_may_attach(task)) {
+               ret = -EIO;
+@@ -718,9 +708,9 @@ static ssize_t pagemap_read(struct file 
+               if (ret == PM_END_OF_BUFFER)
+                       ret = 0;
+               /* don't need mmap_sem for these, but this looks cleaner */
+-              *ppos += pm.out - buf;
++              *ppos += (char *)pm.out - buf;
+               if (!ret)
+-                      ret = pm.out - buf;
++                      ret = (char *)pm.out - buf;
+       }
+ out_pages:
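With this change, reads of /proc/pid/pagemap must use an offset and length that are multiples of the 8-byte entry size. A self-contained reader sketch that respects that and dumps the raw u64 entry for one of its own pages (interpretation of the bits inside the entry is not covered here):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <fcntl.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *probe = malloc(page);
            uint64_t entry;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0 || !probe) {
                    perror("pagemap");
                    return 1;
            }
            probe[0] = 1;   /* fault the page in so the entry is populated */

            /* both the offset and the read length are multiples of sizeof(uint64_t) == 8 */
            off_t off = ((uintptr_t)probe / page) * sizeof(entry);
            if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
                    perror("pread");
                    return 1;
            }
            printf("pagemap entry for %p: %#llx\n", (void *)probe, (unsigned long long)entry);
            close(fd);
            return 0;
    }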
diff --git a/queue-2.6.25/proc-calculate-the-correct-proc-pid-link-count.patch b/queue-2.6.25/proc-calculate-the-correct-proc-pid-link-count.patch
new file mode 100644
index 0000000..548899d
--- /dev/null
+++ b/queue-2.6.25/proc-calculate-the-correct-proc-pid-link-count.patch
@@ -0,0 +1,96 @@
+From stable-bounces@linux.kernel.org  Fri Jun  6 12:13:36 2008
+Date: Fri, 6 Jun 2008 18:40:28 GMT
+Message-Id: <200806061840.m56IeS8i014618@hera.kernel.org>
+From: jejb@kernel.org
+To: jejb@kernel.org, stable@kernel.org
+Subject: proc: calculate the correct /proc/<pid> link count
+
+From: Vegard Nossum <vegard.nossum@gmail.com>
+
+upstream commit: aed5417593ad125283f35513573282139a8664b5
+
+This patch:
+
+  commit e9720acd728a46cb40daa52c99a979f7c4ff195c
+  Author: Pavel Emelyanov <xemul@openvz.org>
+  Date:   Fri Mar 7 11:08:40 2008 -0800
+
+    [NET]: Make /proc/net a symlink on /proc/self/net (v3)
+
+introduced a /proc/self/net directory without bumping the corresponding
+link count for /proc/self.
+
+This patch replaces the static link count initializations with a call that
+counts the number of directory entries in the given pid_entry table
+whenever it is instantiated, and thus relieves the burden of manually
+keeping the two in sync.
+
+[akpm@linux-foundation.org: cleanup]
+Acked-by: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Pavel Emelyanov <xemul@openvz.org>
+Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/proc/base.c |   33 +++++++++++++++++++++++++--------
+ 1 file changed, 25 insertions(+), 8 deletions(-)
+
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -126,6 +126,25 @@ struct pid_entry {
+               NULL, &proc_single_file_operations,     \
+               { .proc_show = &proc_##OTYPE } )
++/*
++ * Count the number of hardlinks for the pid_entry table, excluding the .
++ * and .. links.
++ */
++static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
++      unsigned int n)
++{
++      unsigned int i;
++      unsigned int count;
++
++      count = 0;
++      for (i = 0; i < n; ++i) {
++              if (S_ISDIR(entries[i].mode))
++                      ++count;
++      }
++
++      return count;
++}
++
+ int maps_protect;
+ EXPORT_SYMBOL(maps_protect);
+@@ -2483,10 +2502,9 @@ static struct dentry *proc_pid_instantia
+       inode->i_op = &proc_tgid_base_inode_operations;
+       inode->i_fop = &proc_tgid_base_operations;
+       inode->i_flags|=S_IMMUTABLE;
+-      inode->i_nlink = 5;
+-#ifdef CONFIG_SECURITY
+-      inode->i_nlink += 1;
+-#endif
++
++      inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
++              ARRAY_SIZE(tgid_base_stuff));
+       dentry->d_op = &pid_dentry_operations;
+@@ -2713,10 +2731,9 @@ static struct dentry *proc_task_instanti
+       inode->i_op = &proc_tid_base_inode_operations;
+       inode->i_fop = &proc_tid_base_operations;
+       inode->i_flags|=S_IMMUTABLE;
+-      inode->i_nlink = 4;
+-#ifdef CONFIG_SECURITY
+-      inode->i_nlink += 1;
+-#endif
++
++      inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
++              ARRAY_SIZE(tid_base_stuff));
+       dentry->d_op = &pid_dentry_operations;
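A quick user-space sanity check (assumed, not part of the patch) of the rule introduced here, i.e. that /proc/<pid>'s link count is 2 plus the number of subdirectories it exposes; /proc/self is used for convenience, and conditionally-visible entries can make the two numbers drift slightly:

    #include <stdio.h>
    #include <string.h>
    #include <dirent.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st, cst;
            struct dirent *de;
            char path[512];
            unsigned long dirs = 0;
            DIR *d = opendir("/proc/self");

            if (!d || stat("/proc/self", &st) != 0) {
                    perror("/proc/self");
                    return 1;
            }
            while ((de = readdir(d)) != NULL) {
                    if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
                            continue;
                    snprintf(path, sizeof(path), "/proc/self/%s", de->d_name);
                    if (lstat(path, &cst) == 0 && S_ISDIR(cst.st_mode))
                            dirs++;
            }
            closedir(d);
            printf("st_nlink = %lu, 2 + subdirectories = %lu\n",
                   (unsigned long)st.st_nlink, 2 + dirs);
            return 0;
    }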
diff --git a/queue-2.6.25/series b/queue-2.6.25/series
index 3e9052cbee4289e7ca92b3e0ef0dab47127f63b5..b4694408b0e117c819d3da5a96a1ad100b3b24b9 100644
--- a/queue-2.6.25/series
+++ b/queue-2.6.25/series
@@ -1,3 +1,4 @@
+asn1-additional-sanity-checking-during-ber-decoding.patch
 block-do_mounts-accept-root-non-existant-partition.patch
 powerpc-bolt-in-slb-entry-for-kernel-stack-on-secondary-cpus.patch
 usb-remove-picdem-fs-usb-demo-device-from-ldusb.patch
@@ -37,3 +38,14 @@ x86-fpu-fix-config_preempt-y-corruption-of-application-s-fpu-stack.patch
 netfilter-nf_conntrack_expect-fix-error-path-unwind-in-nf_conntrack_expect_init.patch
 netfilter-xt_connlimit-fix-accouning-when-receive-rst-packet-in-established-state.patch
 netfilter-nf_conntrack_ipv6-fix-inconsistent-lock-state-in-nf_ct_frag6_gather.patch
+atl1-fix-4g-memory-corruption-bug.patch
+smack-fuse-mount-hang-fix.patch
+proc-calculate-the-correct-proc-pid-link-count.patch
+pagemap-fix-bug-in-add_to_pagemap-require-aligned-length-reads-of-proc-pid-pagemap.patch
+brk-make-sys_brk-honor-compat_brk-when-computing-lower-bound.patch
+ecryptfs-remove-unnecessary-page-decrypt-call.patch
+md-do-not-compute-parity-unless-it-is-on-a-failed-drive.patch
+md-fix-uninitialized-use-of-mddev-recovery_wait.patch
+md-fix-prexor-vs-sync_request-race.patch
+capabilities-remain-source-compatible-with-32-bit-raw-legacy-capability-support.patch
+cpufreq-fix-null-object-access-on-transmeta-cpu.patch
diff --git a/queue-2.6.25/smack-fuse-mount-hang-fix.patch b/queue-2.6.25/smack-fuse-mount-hang-fix.patch
new file mode 100644
index 0000000..d324e05
--- /dev/null
+++ b/queue-2.6.25/smack-fuse-mount-hang-fix.patch
@@ -0,0 +1,43 @@
+From e97dcb0eadbb821eccd549d4987b653cf61e2374 Mon Sep 17 00:00:00 2001
+Message-Id: <20080606111853.d796047a.akpm@linux-foundation.org>
+From: Casey Schaufler <casey@schaufler-ca.com>
+Date: Mon, 2 Jun 2008 10:04:32 -0700
+Subject: Smack: fuse mount hang fix
+
+The d_instantiate hook for Smack can hang on the root inode of a
+filesystem if the file system code has not really done all the set-up.
+Fuse is known to encounter this problem.
+
+This change detects an attempt to instantiate a root inode and addresses
+it early in the processing, before any attempt is made to do something
+that might hang.
+
+Signed-off-by: Casey Schaufler <casey@schaufler-ca.com>
+Tested-by: Luiz Fernando N. Capitulino <lcapitulino@mandriva.com.br>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ security/smack/smack_lsm.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1865,6 +1865,18 @@ static void smack_d_instantiate(struct d
+       final = sbsp->smk_default;
+       /*
++       * If this is the root inode the superblock
++       * may be in the process of initialization.
++       * If that is the case use the root value out
++       * of the superblock.
++       */
++      if (opt_dentry->d_parent == opt_dentry) {
++              isp->smk_inode = sbsp->smk_root;
++              isp->smk_flags |= SMK_INODE_INSTANT;
++              goto unlockandout;
++      }
++
++      /*
+        * This is pretty hackish.
+        * Casey says that we shouldn't have to do
+        * file system specific code, but it does help