git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
more .29 patches
author		Greg Kroah-Hartman <gregkh@suse.de>
		Tue, 12 May 2009 21:16:32 +0000 (14:16 -0700)
committer	Greg Kroah-Hartman <gregkh@suse.de>
		Tue, 12 May 2009 21:16:32 +0000 (14:16 -0700)
21 files changed:
queue-2.6.29/cifs-fix-buffer-size-for-tcon-nativefilesystem-field.patch [new file with mode: 0644]
queue-2.6.29/cifs-fix-buffer-size-in-cifs_convertucspath.patch [new file with mode: 0644]
queue-2.6.29/cifs-fix-incorrect-destination-buffer-size-in-cifs_strncpy_to_host.patch [new file with mode: 0644]
queue-2.6.29/cifs-fix-unicode-string-area-word-alignment-in-session-setup.patch [new file with mode: 0644]
queue-2.6.29/cifs-increase-size-of-tmp_buf-in-cifs_readdir-to-avoid-potential-overflows.patch [new file with mode: 0644]
queue-2.6.29/fs-fix-page_mkwrite-error-cases-in-core-code-and-btrfs.patch [new file with mode: 0644]
queue-2.6.29/gfs2-fix-page_mkwrite-return-code.patch [new file with mode: 0644]
queue-2.6.29/lsm-relocate-the-ipv4-security_inet_conn_request-hooks.patch [new file with mode: 0644]
queue-2.6.29/mac80211-minstrel-fix-memory-corruption.patch [new file with mode: 0644]
queue-2.6.29/mac80211-pid-fix-memory-corruption.patch [new file with mode: 0644]
queue-2.6.29/mm-close-page_mkwrite-races.patch [new file with mode: 0644]
queue-2.6.29/mm-page_mkwrite-change-prototype-to-match-fault.patch [new file with mode: 0644]
queue-2.6.29/netlabel-add-cipso-set-del-attr-request_sock-functions.patch [new file with mode: 0644]
queue-2.6.29/netlabel-add-new-netlabel-kapi-interfaces-for-request_sock-security-attributes.patch [new file with mode: 0644]
queue-2.6.29/nfs-close-page_mkwrite-races.patch [new file with mode: 0644]
queue-2.6.29/nfs-fix-the-return-value-in-nfs_page_mkwrite.patch [new file with mode: 0644]
queue-2.6.29/selinux-add-new-netlabel-glue-code-to-handle-labeling-of-connection-requests.patch [new file with mode: 0644]
queue-2.6.29/selinux-remove-dead-code-labeled-networking-code.patch [new file with mode: 0644]
queue-2.6.29/selinux-set-the-proper-netlabel-security-attributes-for-connection-requests.patch [new file with mode: 0644]
queue-2.6.29/series
queue-2.6.29/smack-set-the-proper-netlabel-security-attributes-for-connection-requests.patch [new file with mode: 0644]

diff --git a/queue-2.6.29/cifs-fix-buffer-size-for-tcon-nativefilesystem-field.patch b/queue-2.6.29/cifs-fix-buffer-size-for-tcon-nativefilesystem-field.patch
new file mode 100644 (file)
index 0000000..b7e9b1a
--- /dev/null
@@ -0,0 +1,53 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:04:56 2009
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Sat, 09 May 2009 11:19:05 +0530
+Subject: cifs: Fix buffer size for tcon->nativeFileSystem field
+To: stable@kernel.org
+Cc: Steve French <smfrench@gmail.com>, Jeff Layton <jlayton@redhat.com>
+Message-ID: <4A051951.5060801@suse.de>
+
+
+From: Jeff Layton <jlayton@redhat.com>
+
+Commit f083def68f84b04fe3f97312498911afce79609e refreshed.
+
+cifs: fix buffer size for tcon->nativeFileSystem field
+
+The buffer for this was resized recently to fix a bug. It's still
+possible however that a malicious server could overflow this field
+by sending characters in it that are >2 bytes in the local charset.
+Double the size of the buffer to account for this possibility.
+
+Also get rid of some really strange and seemingly pointless NULL
+termination. It's NULL terminating the string in the source buffer,
+but by the time that happens, we've already copied the string.
+
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Cc: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/connect.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3667,16 +3667,12 @@ CIFSTCon(unsigned int xid, struct cifsSe
+                           BCC(smb_buffer_response)) {
+                               kfree(tcon->nativeFileSystem);
+                               tcon->nativeFileSystem =
+-                                  kzalloc(2*(length + 1), GFP_KERNEL);
++                                  kzalloc((4 * length) + 2, GFP_KERNEL);
+                               if (tcon->nativeFileSystem)
+                                       cifs_strfromUCS_le(
+                                               tcon->nativeFileSystem,
+                                               (__le16 *) bcc_ptr,
+                                               length, nls_codepage);
+-                              bcc_ptr += 2 * length;
+-                              bcc_ptr[0] = 0; /* null terminate the string */
+-                              bcc_ptr[1] = 0;
+-                              bcc_ptr += 2;
+                       }
+                       /* else do not bother copying these information fields*/
+               } else {
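A note on the sizing above: each of the `length` UCS-2 code units received from the server can expand to as many as four bytes once converted to the local charset, and two more bytes cover a double-NUL terminator. A minimal C sketch of that worst-case arithmetic (the helper name is illustrative, not a kernel function):

#include <stddef.h>

/* Worst-case output size when converting `units` UCS-2 code units to a
 * local multibyte charset: up to 4 bytes per unit, plus 2 NUL bytes.
 * The hunk above open-codes this as (4 * length) + 2. */
static size_t ucs2_worst_case_bytes(size_t units)
{
	return (4 * units) + 2;
}
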
diff --git a/queue-2.6.29/cifs-fix-buffer-size-in-cifs_convertucspath.patch b/queue-2.6.29/cifs-fix-buffer-size-in-cifs_convertucspath.patch
new file mode 100644 (file)
index 0000000..a6604b7
--- /dev/null
@@ -0,0 +1,46 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:06:17 2009
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Sat, 09 May 2009 11:33:12 +0530
+Subject: cifs: Fix buffer size in cifs_convertUCSpath
+To: stable@kernel.org
+Cc: Steve French <smfrench@gmail.com>, Jeff Layton <jlayton@redhat.com>
+Message-ID: <4A051CA0.6010401@suse.de>
+
+
+From: Suresh Jayaraman <sjayaraman@suse.de>
+
+Relevant commits: 7fabf0c9479fef9fdb9528a5fbdb1cb744a744a4 and
+f58841666bc22e827ca0dcef7b71c7bc2758ce82. The upstream commits add
+cifs_from_ucs2, which includes the functionality of cifs_convertUCSpath,
+and do some cleanup.
+
+Reported-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Acked-by: Steve French <sfrench@us.ibm.com>
+Acked-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/misc.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -691,14 +691,15 @@ cifs_convertUCSpath(char *target, const 
+                                               NLS_MAX_CHARSET_SIZE);
+                               if (len > 0) {
+                                       j += len;
+-                                      continue;
++                                      goto overrun_chk;
+                               } else {
+                                       target[j] = '?';
+                               }
+               }
+               j++;
+               /* make sure we do not overrun callers allocated temp buffer */
+-              if (j >= (2 * NAME_MAX))
++overrun_chk:
++              if (j >= UNICODE_NAME_MAX)
+                       break;
+       }
+ cUCS_out:
diff --git a/queue-2.6.29/cifs-fix-incorrect-destination-buffer-size-in-cifs_strncpy_to_host.patch b/queue-2.6.29/cifs-fix-incorrect-destination-buffer-size-in-cifs_strncpy_to_host.patch
new file mode 100644 (file)
index 0000000..938f7dd
--- /dev/null
@@ -0,0 +1,70 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:05:52 2009
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Sat, 09 May 2009 11:26:44 +0530
+Subject: cifs: Fix incorrect destination buffer size in cifs_strncpy_to_host
+To: stable@kernel.org
+Cc: Steve French <smfrench@gmail.com>, Jeff Layton <jlayton@redhat.com>
+Message-ID: <4A051B1C.4030606@suse.de>
+
+
+From: Suresh Jayaraman <sjayaraman@suse.de>
+
+
+Relevant commits: 968460ebd8006d55661dec0fb86712b40d71c413 and
+066ce6899484d9026acd6ba3a8dbbedb33d7ae1b. Minimal hunks to fix the buffer
+size and to fix an existing problem, pointed out by Guenter Kukuk, where the
+length of src is used for NULL termination of dst.
+
+cifs: Rename cifs_strncpy_to_host and fix buffer size
+
+There is a possibility for the path_name and node_name buffers to
+overflow if they contain characters that are >2 bytes in the local
+charset. Resize the buffer allocation to avoid this possibility.
+
+Also, as pointed out by Jeff Layton, it would be appropriate to
+rename the function to cifs_strlcpy_to_host to reflect the fact
+that the copied string is always NULL terminated.
+
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Acked-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifssmb.c |   17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -91,23 +91,22 @@ static int
+ cifs_strncpy_to_host(char **dst, const char *src, const int maxlen,
+                const bool is_unicode, const struct nls_table *nls_codepage)
+ {
+-      int plen;
++      int src_len, dst_len;
+       if (is_unicode) {
+-              plen = UniStrnlen((wchar_t *)src, maxlen);
+-              *dst = kmalloc(plen + 2, GFP_KERNEL);
++              src_len = UniStrnlen((wchar_t *)src, maxlen);
++              *dst = kmalloc((4 * src_len) + 2, GFP_KERNEL);
+               if (!*dst)
+                       goto cifs_strncpy_to_host_ErrExit;
+-              cifs_strfromUCS_le(*dst, (__le16 *)src, plen, nls_codepage);
++              dst_len = cifs_strfromUCS_le(*dst, (__le16 *)src, src_len, nls_codepage);
++              (*dst)[dst_len + 1] = 0;
+       } else {
+-              plen = strnlen(src, maxlen);
+-              *dst = kmalloc(plen + 2, GFP_KERNEL);
++              src_len = strnlen(src, maxlen);
++              *dst = kmalloc(src_len + 1, GFP_KERNEL);
+               if (!*dst)
+                       goto cifs_strncpy_to_host_ErrExit;
+-              strncpy(*dst, src, plen);
++              strlcpy(*dst, src, src_len + 1);
+       }
+-      (*dst)[plen] = 0;
+-      (*dst)[plen+1] = 0; /* harmless for ASCII case, needed for Unicode */
+       return 0;
+ cifs_strncpy_to_host_ErrExit:
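The ASCII branch above also switches from strncpy() to strlcpy() so the destination is always NUL-terminated within the space actually allocated, rather than being terminated using the source length. A userspace sketch of that behaviour (illustrative only, not the CIFS helper):

#include <string.h>

/* strncpy() can leave dst unterminated when src fills the buffer; an
 * strlcpy()-style copy always writes a terminating NUL within the
 * destination size it is given. */
static void copy_terminated(char *dst, size_t dst_size, const char *src)
{
	size_t n = strnlen(src, dst_size - 1);

	memcpy(dst, src, n);
	dst[n] = '\0';
}
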
diff --git a/queue-2.6.29/cifs-fix-unicode-string-area-word-alignment-in-session-setup.patch b/queue-2.6.29/cifs-fix-unicode-string-area-word-alignment-in-session-setup.patch
new file mode 100644 (file)
index 0000000..b917a0f
--- /dev/null
@@ -0,0 +1,127 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:06:45 2009
+From: Jeff Layton <jlayton@redhat.com>
+Date: Sat, 09 May 2009 11:34:21 +0530
+Subject: cifs: Fix unicode string area word alignment in session setup
+To: stable@kernel.org
+Cc: Steve French <smfrench@gmail.com>, Jeff Layton <jlayton@redhat.com>
+Message-ID: <4A051CE5.6040203@suse.de>
+
+
+From: Jeff Layton <jlayton@redhat.com>
+
+commit 27b87fe52baba0a55e9723030e76fce94fabcea4 refreshed.
+
+cifs: fix unicode string area word alignment in session setup
+
+The handling of unicode string area alignment is wrong.
+decode_unicode_ssetup improperly assumes that it will always be preceded
+by a pad byte. This isn't the case if the string area is already
+word-aligned.
+
+This problem, combined with the bad buffer sizing for the serverDomain
+string, can cause memory corruption. The bad alignment can make it so
+that the alignment of the characters is off. This can make them
+translate to characters that are greater than 2 bytes each. If this
+happens we can overflow the allocation.
+
+Fix this by fixing the alignment in CIFS_SessSetup instead so we can
+verify it against the head of the response. Also, clean up the
+workaround for improperly terminated strings by checking for
+odd-length unicode buffers and then forcibly terminating them.
+
+Finally, resize the buffer for serverDomain. Now that we've fixed
+the alignment, it's probably fine, but a malicious server could
+overflow it.
+
+A better solution for handling these strings is still needed, but
+this should be a suitable bandaid.
+
+Signed-off-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Cc: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/sess.c |   44 +++++++++++++++++++++++---------------------
+ 1 file changed, 23 insertions(+), 21 deletions(-)
+
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -285,27 +285,26 @@ static int decode_unicode_ssetup(char **
+       int words_left, len;
+       char *data = *pbcc_area;
+-
+-
+       cFYI(1, ("bleft %d", bleft));
+-
+-      /* SMB header is unaligned, so cifs servers word align start of
+-         Unicode strings */
+-      data++;
+-      bleft--; /* Windows servers do not always double null terminate
+-                  their final Unicode string - in which case we
+-                  now will not attempt to decode the byte of junk
+-                  which follows it */
++      /*
++       * Windows servers do not always double null terminate their final
++       * Unicode string. Check to see if there are an uneven number of bytes
++       * left. If so, then add an extra NULL pad byte to the end of the
++       * response.
++       *
++       * See section 2.7.2 in "Implementing CIFS" for details
++       */
++      if (bleft % 2) {
++              data[bleft] = 0;
++              ++bleft;
++      }
+       words_left = bleft / 2;
+       /* save off server operating system */
+       len = UniStrnlen((wchar_t *) data, words_left);
+-/* We look for obvious messed up bcc or strings in response so we do not go off
+-   the end since (at least) WIN2K and Windows XP have a major bug in not null
+-   terminating last Unicode string in response  */
+       if (len >= words_left)
+               return rc;
+@@ -343,13 +342,10 @@ static int decode_unicode_ssetup(char **
+               return rc;
+       kfree(ses->serverDomain);
+-      ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
+-      if (ses->serverDomain != NULL) {
++      ses->serverDomain = kzalloc((4 * len) + 2, GFP_KERNEL);
++      if (ses->serverDomain != NULL)
+               cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
+                                  nls_cp);
+-              ses->serverDomain[2*len] = 0;
+-              ses->serverDomain[(2*len) + 1] = 0;
+-      }
+       data += 2 * (len + 1);
+       words_left -= len + 1;
+@@ -702,12 +698,18 @@ CIFS_SessSetup(unsigned int xid, struct 
+       }
+       /* BB check if Unicode and decode strings */
+-      if (smb_buf->Flags2 & SMBFLG2_UNICODE)
++      if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
++              /* unicode string area must be word-aligned */
++              if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
++                      ++bcc_ptr;
++                      --bytes_remaining;
++              }
+               rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
+-                                                 ses, nls_cp);
+-      else
++                                         ses, nls_cp);
++      } else {
+               rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining,
+                                        ses, nls_cp);
++      }
+ ssetup_exit:
+       if (spnego_key) {
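The core of the fix is a single alignment test: the Unicode string area must start at an even offset from the beginning of the SMB buffer, so one pad byte is skipped only when the byte-count pointer sits at an odd offset. A minimal sketch of that test (names are illustrative):

/* Non-zero when one pad byte is needed to word-align the string area
 * relative to the start of the SMB buffer. */
static int needs_pad_byte(const void *smb_buf, const void *bcc_ptr)
{
	return (((unsigned long)bcc_ptr - (unsigned long)smb_buf) % 2) != 0;
}
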
diff --git a/queue-2.6.29/cifs-increase-size-of-tmp_buf-in-cifs_readdir-to-avoid-potential-overflows.patch b/queue-2.6.29/cifs-increase-size-of-tmp_buf-in-cifs_readdir-to-avoid-potential-overflows.patch
new file mode 100644 (file)
index 0000000..76535a7
--- /dev/null
@@ -0,0 +1,58 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:05:28 2009
+From: Suresh Jayaraman <sjayaraman@suse.de>
+Date: Sat, 09 May 2009 11:22:47 +0530
+Subject: cifs: Increase size of tmp_buf in cifs_readdir to avoid potential overflows
+To: stable@kernel.org
+Cc: Steve French <smfrench@gmail.com>, Jeff Layton <jlayton@redhat.com>
+Message-ID: <4A051A2F.50308@suse.de>
+
+
+From: Suresh Jayaraman <sjayaraman@suse.de>
+
+Commit 7b0c8fcff47a885743125dd843db64af41af5a61 refreshed to use
+a #define from commit f58841666bc22e827ca0dcef7b71c7bc2758ce82.
+
+cifs: Increase size of tmp_buf in cifs_readdir to avoid potential overflows
+
+Increase the size of tmp_buf to the possible maximum to avoid potential
+overflows. Also move the UNICODE_NAME_MAX definition so that it can be
+used elsewhere.
+
+Pointed-out-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
+Acked-by: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Steve French <sfrench@us.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/cifs/cifs_unicode.h |    7 +++++++
+ fs/cifs/readdir.c      |    2 +-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -64,6 +64,13 @@ int cifs_strtoUCS(__le16 *, const char *
+ #endif
+ /*
++ * To be safe - for UCS to UTF-8 with strings loaded with the rare long
++ * characters alloc more to account for such multibyte target UTF-8
++ * characters.
++ */
++#define UNICODE_NAME_MAX ((4 * NAME_MAX) + 2)
++
++/*
+  * UniStrcat:  Concatenate the second string to the first
+  *
+  * Returns:
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -1072,7 +1072,7 @@ int cifs_readdir(struct file *file, void
+               with the rare long characters alloc more to account for
+               such multibyte target UTF-8 characters. cifs_unicode.c,
+               which actually does the conversion, has the same limit */
+-              tmp_buf = kmalloc((2 * NAME_MAX) + 4, GFP_KERNEL);
++              tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
+               for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
+                       if (current_entry == NULL) {
+                               /* evaluate whether this case is an error */
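With NAME_MAX at 255 on Linux, UNICODE_NAME_MAX works out to (4 * 255) + 2 = 1022 bytes, roughly double the old (2 * NAME_MAX) + 4 = 514-byte allocation. A small sketch of the two sizes (constants shown for comparison only):

#include <limits.h>	/* NAME_MAX is 255 on Linux */

enum {
	OLD_TMP_BUF_SIZE = (2 * NAME_MAX) + 4,	/* 514 bytes */
	NEW_TMP_BUF_SIZE = (4 * NAME_MAX) + 2,	/* 1022 bytes (UNICODE_NAME_MAX) */
};
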
diff --git a/queue-2.6.29/fs-fix-page_mkwrite-error-cases-in-core-code-and-btrfs.patch b/queue-2.6.29/fs-fix-page_mkwrite-error-cases-in-core-code-and-btrfs.patch
new file mode 100644 (file)
index 0000000..3eb12a5
--- /dev/null
@@ -0,0 +1,97 @@
+From 56a76f8275c379ed73c8a43cfa1dfa2f5e9cfa19 Mon Sep 17 00:00:00 2001
+From: Nick Piggin <npiggin@suse.de>
+Date: Tue, 31 Mar 2009 15:23:23 -0700
+Subject: fs: fix page_mkwrite error cases in core code and btrfs
+
+From: Nick Piggin <npiggin@suse.de>
+
+commit 56a76f8275c379ed73c8a43cfa1dfa2f5e9cfa19 upstream.
+
+page_mkwrite is called with neither the page lock nor the ptl held.  This
+means a page can be concurrently truncated or invalidated out from
+underneath it.  Callers are supposed to prevent truncate races themselves,
+however previously the only thing they can do in case they hit one is to
+raise a SIGBUS.  A sigbus is wrong for the case that the page has been
+invalidated or truncated within i_size (eg.  hole punched).  Callers may
+also have to perform memory allocations in this path, where again, SIGBUS
+would be wrong.
+
+The previous patch ("mm: page_mkwrite change prototype to match fault")
+made it possible to properly specify errors.  Convert the generic buffer.c
+code and btrfs to return sane error values (in the case of page removed
+from pagecache, VM_FAULT_NOPAGE will cause the fault handler to exit
+without doing anything, and the fault will be retried properly).
+
+This fixes core code, and converts btrfs as a template/example.  All other
+filesystems defining their own page_mkwrite should be fixed in a similar
+manner.
+
+Acked-by: Chris Mason <chris.mason@oracle.com>
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/btrfs/inode.c |   11 +++++++----
+ fs/buffer.c      |   12 ++++++++----
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4307,10 +4307,15 @@ int btrfs_page_mkwrite(struct vm_area_st
+       u64 page_end;
+       ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
+-      if (ret)
++      if (ret) {
++              if (ret == -ENOMEM)
++                      ret = VM_FAULT_OOM;
++              else /* -ENOSPC, -EIO, etc */
++                      ret = VM_FAULT_SIGBUS;
+               goto out;
++      }
+-      ret = -EINVAL;
++      ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+ again:
+       lock_page(page);
+       size = i_size_read(inode);
+@@ -4363,8 +4368,6 @@ again:
+ out_unlock:
+       unlock_page(page);
+ out:
+-      if (ret)
+-              ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2472,7 +2472,7 @@ block_page_mkwrite(struct vm_area_struct
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       unsigned long end;
+       loff_t size;
+-      int ret = -EINVAL;
++      int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+       lock_page(page);
+       size = i_size_read(inode);
+@@ -2492,10 +2492,14 @@ block_page_mkwrite(struct vm_area_struct
+       if (!ret)
+               ret = block_commit_write(page, 0, end);
+-out_unlock:
+-      if (ret)
+-              ret = VM_FAULT_SIGBUS;
++      if (unlikely(ret)) {
++              if (ret == -ENOMEM)
++                      ret = VM_FAULT_OOM;
++              else /* -ENOSPC, -EIO, etc */
++                      ret = VM_FAULT_SIGBUS;
++      }
++out_unlock:
+       unlock_page(page);
+       return ret;
+ }
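Both hunks map errors the same way: -ENOMEM becomes VM_FAULT_OOM, any other error becomes VM_FAULT_SIGBUS, and the page-went-away case is preset to VM_FAULT_NOPAGE so the fault is simply retried. A kernel-style sketch of that mapping (the helper name is made up for illustration):

#include <linux/errno.h>
#include <linux/mm.h>

static int errno_to_vm_fault(int err)
{
	if (!err)
		return 0;		/* success, no fault code */
	if (err == -ENOMEM)
		return VM_FAULT_OOM;	/* let the VM handle memory pressure */
	return VM_FAULT_SIGBUS;		/* -ENOSPC, -EIO, etc. */
}
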
diff --git a/queue-2.6.29/gfs2-fix-page_mkwrite-return-code.patch b/queue-2.6.29/gfs2-fix-page_mkwrite-return-code.patch
new file mode 100644 (file)
index 0000000..c2712df
--- /dev/null
@@ -0,0 +1,33 @@
+From e56985da455b9dc0591b8cb2006cc94b6f4fb0f4 Mon Sep 17 00:00:00 2001
+From: Steven Whitehouse <swhiteho@redhat.com>
+Date: Mon, 20 Apr 2009 09:45:54 +0100
+Subject: GFS2: Fix page_mkwrite() return code
+
+From: Steven Whitehouse <swhiteho@redhat.com>
+
+commit e56985da455b9dc0591b8cb2006cc94b6f4fb0f4 upstream.
+
+This allows for the possibility of returning VM_FAULT_OOM as
+well as VM_FAULT_SIGBUS. This ensures that the correct action
+is taken.
+
+Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/gfs2/ops_file.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/gfs2/ops_file.c
++++ b/fs/gfs2/ops_file.c
+@@ -410,7 +410,9 @@ out_unlock:
+       gfs2_glock_dq(&gh);
+ out:
+       gfs2_holder_uninit(&gh);
+-      if (ret)
++      if (ret == -ENOMEM)
++              ret = VM_FAULT_OOM;
++      else if (ret)
+               ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
diff --git a/queue-2.6.29/lsm-relocate-the-ipv4-security_inet_conn_request-hooks.patch b/queue-2.6.29/lsm-relocate-the-ipv4-security_inet_conn_request-hooks.patch
new file mode 100644 (file)
index 0000000..596ace3
--- /dev/null
@@ -0,0 +1,80 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:01:49 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:58:30 -0400
+Subject: lsm: Relocate the IPv4 security_inet_conn_request() hooks
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215830.12179.42336.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: present in Linus' tree as 284904aa79466a4736f4c775fdbe5c7407fa136c]
+
+The current placement of the security_inet_conn_request() hooks does not allow
+individual LSMs to override the IP options of the connection's request_sock.
+This is a problem as both SELinux and Smack have the ability to use labeled
+networking protocols which make use of IP options to carry security attributes
+and the inability to set the IP options at the start of the TCP handshake is
+problematic.
+
+This patch moves the IPv4 security_inet_conn_request() hooks past the code
+where the request_sock's IP options are set/reset so that the LSM can safely
+manipulate the IP options as needed.  This patch intentionally does not change
+the related IPv6 hooks as IPv6 based labeling protocols which use IPv6 options
+are not currently implemented; once they are, we will have a better idea of
+the correct placement for the IPv6 hooks.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/syncookies.c |    9 +++++----
+ net/ipv4/tcp_ipv4.c   |    7 ++++---
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -288,10 +288,6 @@ struct sock *cookie_v4_check(struct sock
+       if (!req)
+               goto out;
+-      if (security_inet_conn_request(sk, skb, req)) {
+-              reqsk_free(req);
+-              goto out;
+-      }
+       ireq = inet_rsk(req);
+       treq = tcp_rsk(req);
+       treq->rcv_isn           = ntohl(th->seq) - 1;
+@@ -322,6 +318,11 @@ struct sock *cookie_v4_check(struct sock
+               }
+       }
++      if (security_inet_conn_request(sk, skb, req)) {
++              reqsk_free(req);
++              goto out;
++      }
++
+       req->expires    = 0UL;
+       req->retrans    = 0;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1239,14 +1239,15 @@ int tcp_v4_conn_request(struct sock *sk,
+       tcp_openreq_init(req, &tmp_opt, skb);
+-      if (security_inet_conn_request(sk, skb, req))
+-              goto drop_and_free;
+-
+       ireq = inet_rsk(req);
+       ireq->loc_addr = daddr;
+       ireq->rmt_addr = saddr;
+       ireq->no_srccheck = inet_sk(sk)->transparent;
+       ireq->opt = tcp_v4_save_options(sk, skb);
++
++      if (security_inet_conn_request(sk, skb, req))
++              goto drop_and_free;
++
+       if (!want_cookie)
+               TCP_ECN_create_request(req, tcp_hdr(skb));
diff --git a/queue-2.6.29/mac80211-minstrel-fix-memory-corruption.patch b/queue-2.6.29/mac80211-minstrel-fix-memory-corruption.patch
new file mode 100644 (file)
index 0000000..fd0c154
--- /dev/null
@@ -0,0 +1,36 @@
+From 8e532175277d9a5eae49768ed086555081f741a7 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Mon, 4 May 2009 18:04:55 +0200
+Subject: mac80211: minstrel, fix memory corruption
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit 8e532175277d9a5eae49768ed086555081f741a7 upstream.
+
+In fact, minstrel doesn't compute the maximum rate count, since it doesn't
+use the loop variable `i' and hence allocates space only for the bitrates
+found in the first band.
+
+Fix it by using `i' as the band index so that the loop traverses all the
+bands and finds the real maximum bitrate count.
+
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Cc: Felix Fietkau <nbd@openwrt.org>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/rc80211_minstrel.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -476,7 +476,7 @@ minstrel_alloc_sta(void *priv, struct ie
+               return NULL;
+       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+-              sband = hw->wiphy->bands[hw->conf.channel->band];
++              sband = hw->wiphy->bands[i];
+               if (sband->n_bitrates > max_rates)
+                       max_rates = sband->n_bitrates;
+       }
diff --git a/queue-2.6.29/mac80211-pid-fix-memory-corruption.patch b/queue-2.6.29/mac80211-pid-fix-memory-corruption.patch
new file mode 100644 (file)
index 0000000..bb3f34c
--- /dev/null
@@ -0,0 +1,137 @@
+From 6909268dc93ae4b0b8e1ebb4b2fa70b1a47dd347 Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jirislaby@gmail.com>
+Date: Mon, 4 May 2009 18:10:28 +0200
+Subject: mac80211: pid, fix memory corruption
+
+From: Jiri Slaby <jirislaby@gmail.com>
+
+commit 6909268dc93ae4b0b8e1ebb4b2fa70b1a47dd347 upstream.
+
+pid doesn't account for a band having more bitrates than the one
+associated the first time.
+Fix that by finding the maximal available bitrate count across all
+bands and allocating a big enough space.
+
+Secondly, fix touching uninitialized memory, which causes panics:
+an index read from this random memory points nowhere valid.
+The fix is to sort the rates on each band change.
+
+Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/rc80211_pid_algo.c |   73 +++++++++++++++++++++-------------------
+ 1 file changed, 39 insertions(+), 34 deletions(-)
+
+--- a/net/mac80211/rc80211_pid_algo.c
++++ b/net/mac80211/rc80211_pid_algo.c
+@@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, s
+                          struct ieee80211_sta *sta, void *priv_sta)
+ {
+       struct rc_pid_sta_info *spinfo = priv_sta;
++      struct rc_pid_info *pinfo = priv;
++      struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
+       struct sta_info *si;
++      int i, j, tmp;
++      bool s;
+       /* TODO: This routine should consider using RSSI from previous packets
+        * as we need to have IEEE 802.1X auth succeed immediately after assoc..
+        * Until that method is implemented, we will use the lowest supported
+        * rate as a workaround. */
++      /* Sort the rates. This is optimized for the most common case (i.e.
++       * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
++       * mapping too. */
++      for (i = 0; i < sband->n_bitrates; i++) {
++              rinfo[i].index = i;
++              rinfo[i].rev_index = i;
++              if (RC_PID_FAST_START)
++                      rinfo[i].diff = 0;
++              else
++                      rinfo[i].diff = i * pinfo->norm_offset;
++      }
++      for (i = 1; i < sband->n_bitrates; i++) {
++              s = 0;
++              for (j = 0; j < sband->n_bitrates - i; j++)
++                      if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
++                                   sband->bitrates[rinfo[j + 1].index].bitrate)) {
++                              tmp = rinfo[j].index;
++                              rinfo[j].index = rinfo[j + 1].index;
++                              rinfo[j + 1].index = tmp;
++                              rinfo[rinfo[j].index].rev_index = j;
++                              rinfo[rinfo[j + 1].index].rev_index = j + 1;
++                              s = 1;
++                      }
++              if (!s)
++                      break;
++      }
++
+       spinfo->txrate_idx = rate_lowest_index(sband, sta);
+       /* HACK */
+       si = container_of(sta, struct sta_info, sta);
+@@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(stru
+       struct rc_pid_info *pinfo;
+       struct rc_pid_rateinfo *rinfo;
+       struct ieee80211_supported_band *sband;
+-      int i, j, tmp;
+-      bool s;
++      int i, max_rates = 0;
+ #ifdef CONFIG_MAC80211_DEBUGFS
+       struct rc_pid_debugfs_entries *de;
+ #endif
+-      sband = hw->wiphy->bands[hw->conf.channel->band];
+-
+       pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
+       if (!pinfo)
+               return NULL;
+-      /* We can safely assume that sband won't change unless we get
+-       * reinitialized. */
+-      rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
++      for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
++              sband = hw->wiphy->bands[i];
++              if (sband->n_bitrates > max_rates)
++                      max_rates = sband->n_bitrates;
++      }
++
++      rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
+       if (!rinfo) {
+               kfree(pinfo);
+               return NULL;
+@@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(stru
+       pinfo->rinfo = rinfo;
+       pinfo->oldrate = 0;
+-      /* Sort the rates. This is optimized for the most common case (i.e.
+-       * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
+-       * mapping too. */
+-      for (i = 0; i < sband->n_bitrates; i++) {
+-              rinfo[i].index = i;
+-              rinfo[i].rev_index = i;
+-              if (RC_PID_FAST_START)
+-                      rinfo[i].diff = 0;
+-              else
+-                      rinfo[i].diff = i * pinfo->norm_offset;
+-      }
+-      for (i = 1; i < sband->n_bitrates; i++) {
+-              s = 0;
+-              for (j = 0; j < sband->n_bitrates - i; j++)
+-                      if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
+-                                   sband->bitrates[rinfo[j + 1].index].bitrate)) {
+-                              tmp = rinfo[j].index;
+-                              rinfo[j].index = rinfo[j + 1].index;
+-                              rinfo[j + 1].index = tmp;
+-                              rinfo[rinfo[j].index].rev_index = j;
+-                              rinfo[rinfo[j + 1].index].rev_index = j + 1;
+-                              s = 1;
+-                      }
+-              if (!s)
+-                      break;
+-      }
+-
+ #ifdef CONFIG_MAC80211_DEBUGFS
+       de = &pinfo->dentries;
+       de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
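Both rate-control fixes size their per-station arrays the same way: scan every band and remember the largest n_bitrates, instead of trusting whichever band happened to be current at allocation time. A sketch of that scan (field names follow mac80211 of this era; the NULL check is extra caution, not part of the patches):

#include <net/mac80211.h>

static int max_bitrates_across_bands(struct ieee80211_hw *hw)
{
	int i, max_rates = 0;

	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		struct ieee80211_supported_band *sband = hw->wiphy->bands[i];

		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}
	return max_rates;
}
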
diff --git a/queue-2.6.29/mm-close-page_mkwrite-races.patch b/queue-2.6.29/mm-close-page_mkwrite-races.patch
new file mode 100644 (file)
index 0000000..3f7c926
--- /dev/null
@@ -0,0 +1,311 @@
+From b827e496c893de0c0f142abfaeb8730a2fd6b37f Mon Sep 17 00:00:00 2001
+From: Nick Piggin <npiggin@suse.de>
+Date: Thu, 30 Apr 2009 15:08:16 -0700
+Subject: mm: close page_mkwrite races
+
+From: Nick Piggin <npiggin@suse.de>
+
+commit b827e496c893de0c0f142abfaeb8730a2fd6b37f upstream.
+
+Change page_mkwrite to allow implementations to return with the page
+locked, and also change it's callers (in page fault paths) to hold the
+lock until the page is marked dirty.  This allows the filesystem to have
+full control of page dirtying events coming from the VM.
+
+Rather than simply hold the page locked over the page_mkwrite call, we
+call page_mkwrite with the page unlocked and allow callers to return with
+it locked, so filesystems can avoid LOR conditions with page lock.
+
+The problem with the current scheme is this: a filesystem that wants to
+associate some metadata with a page as long as the page is dirty, will
+perform this manipulation in its ->page_mkwrite.  It currently then must
+return with the page unlocked and may not hold any other locks (according
+to existing page_mkwrite convention).
+
+In this window, the VM could write out the page, clearing page-dirty.  The
+filesystem has no good way to detect that a dirty pte is about to be
+attached, so it will happily write out the page, at which point, the
+filesystem may manipulate the metadata to reflect that the page is no
+longer dirty.
+
+It is not always possible to perform the required metadata manipulation in
+->set_page_dirty, because that function cannot block or fail.  The
+filesystem may need to allocate some data structure, for example.
+
+And the VM cannot mark the pte dirty before page_mkwrite, because
+page_mkwrite is allowed to fail, so we must not allow any window where the
+page could be written to if page_mkwrite does fail.
+
+This solution of holding the page locked over the 3 critical operations
+(page_mkwrite, setting the pte dirty, and finally setting the page dirty)
+closes out races nicely, preventing page cleaning for writeout being
+initiated in that window.  This provides the filesystem with a strong
+synchronisation against the VM here.
+
+- Sage needs this race closed for ceph filesystem.
+- Trond for NFS (http://bugzilla.kernel.org/show_bug.cgi?id=12913).
+- I need it for fsblock.
+- I suspect other filesystems may need it too (eg. btrfs).
+- I have converted buffer.c to the new locking. Even simple block allocation
+  under dirty pages might be susceptible to i_size changing under partial page
+  at the end of file (we also have a buffer.c-side problem here, but it cannot
+  be fixed properly without this patch).
+- Other filesystems (eg. NFS, maybe btrfs) will need to change their
+  page_mkwrite functions themselves.
+
+[ This also moves page_mkwrite another step closer to fault, which should
+  eventually allow page_mkwrite to be moved into ->fault, and thus avoiding a
+  filesystem calldown and page lock/unlock cycle in __do_fault. ]
+
+[akpm@linux-foundation.org: fix derefs of NULL ->mapping]
+Cc: Sage Weil <sage@newdream.net>
+Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ Documentation/filesystems/Locking |   24 +++++---
+ fs/buffer.c                       |   10 ++-
+ mm/memory.c                       |  108 ++++++++++++++++++++++++++------------
+ 3 files changed, 98 insertions(+), 44 deletions(-)
+
+--- a/Documentation/filesystems/Locking
++++ b/Documentation/filesystems/Locking
+@@ -509,16 +509,24 @@ locking rules:
+               BKL     mmap_sem        PageLocked(page)
+ open:         no      yes
+ close:                no      yes
+-fault:                no      yes
+-page_mkwrite: no      yes             no
++fault:                no      yes             can return with page locked
++page_mkwrite: no      yes             can return with page locked
+ access:               no      yes
+-      ->page_mkwrite() is called when a previously read-only page is
+-about to become writeable. The file system is responsible for
+-protecting against truncate races. Once appropriate action has been
+-taking to lock out truncate, the page range should be verified to be
+-within i_size. The page mapping should also be checked that it is not
+-NULL.
++      ->fault() is called when a previously not present pte is about
++to be faulted in. The filesystem must find and return the page associated
++with the passed in "pgoff" in the vm_fault structure. If it is possible that
++the page may be truncated and/or invalidated, then the filesystem must lock
++the page, then ensure it is not already truncated (the page lock will block
++subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
++locked. The VM will unlock the page.
++
++      ->page_mkwrite() is called when a previously read-only pte is
++about to become writeable. The filesystem again must ensure that there are
++no truncate/invalidate races, and then return with the page locked. If
++the page has been truncated, the filesystem should not look up a new page
++like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
++will cause the VM to retry the fault.
+       ->access() is called when get_user_pages() fails in
+ acces_process_vm(), typically used to debug a process through
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2479,7 +2479,8 @@ block_page_mkwrite(struct vm_area_struct
+       if ((page->mapping != inode->i_mapping) ||
+           (page_offset(page) > size)) {
+               /* page got truncated out from underneath us */
+-              goto out_unlock;
++              unlock_page(page);
++              goto out;
+       }
+       /* page is wholly or partially inside EOF */
+@@ -2493,14 +2494,15 @@ block_page_mkwrite(struct vm_area_struct
+               ret = block_commit_write(page, 0, end);
+       if (unlikely(ret)) {
++              unlock_page(page);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else /* -ENOSPC, -EIO, etc */
+                       ret = VM_FAULT_SIGBUS;
+-      }
++      } else
++              ret = VM_FAULT_LOCKED;
+-out_unlock:
+-      unlock_page(page);
++out:
+       return ret;
+ }
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1966,6 +1966,15 @@ static int do_wp_page(struct mm_struct *
+                               ret = tmp;
+                               goto unwritable_page;
+                       }
++                      if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
++                              lock_page(old_page);
++                              if (!old_page->mapping) {
++                                      ret = 0; /* retry the fault */
++                                      unlock_page(old_page);
++                                      goto unwritable_page;
++                              }
++                      } else
++                              VM_BUG_ON(!PageLocked(old_page));
+                       /*
+                        * Since we dropped the lock we need to revalidate
+@@ -1975,9 +1984,11 @@ static int do_wp_page(struct mm_struct *
+                        */
+                       page_table = pte_offset_map_lock(mm, pmd, address,
+                                                        &ptl);
+-                      page_cache_release(old_page);
+-                      if (!pte_same(*page_table, orig_pte))
++                      if (!pte_same(*page_table, orig_pte)) {
++                              unlock_page(old_page);
++                              page_cache_release(old_page);
+                               goto unlock;
++                      }
+                       page_mkwrite = 1;
+               }
+@@ -2089,9 +2100,6 @@ gotten:
+ unlock:
+       pte_unmap_unlock(page_table, ptl);
+       if (dirty_page) {
+-              if (vma->vm_file)
+-                      file_update_time(vma->vm_file);
+-
+               /*
+                * Yes, Virginia, this is actually required to prevent a race
+                * with clear_page_dirty_for_io() from clearing the page dirty
+@@ -2100,16 +2108,41 @@ unlock:
+                *
+                * do_no_page is protected similarly.
+                */
+-              wait_on_page_locked(dirty_page);
+-              set_page_dirty_balance(dirty_page, page_mkwrite);
++              if (!page_mkwrite) {
++                      wait_on_page_locked(dirty_page);
++                      set_page_dirty_balance(dirty_page, page_mkwrite);
++              }
+               put_page(dirty_page);
++              if (page_mkwrite) {
++                      struct address_space *mapping = dirty_page->mapping;
++
++                      set_page_dirty(dirty_page);
++                      unlock_page(dirty_page);
++                      page_cache_release(dirty_page);
++                      if (mapping)    {
++                              /*
++                               * Some device drivers do not set page.mapping
++                               * but still dirty their pages
++                               */
++                              balance_dirty_pages_ratelimited(mapping);
++                      }
++              }
++
++              /* file_update_time outside page_lock */
++              if (vma->vm_file)
++                      file_update_time(vma->vm_file);
+       }
+       return ret;
+ oom_free_new:
+       page_cache_release(new_page);
+ oom:
+-      if (old_page)
++      if (old_page) {
++              if (page_mkwrite) {
++                      unlock_page(old_page);
++                      page_cache_release(old_page);
++              }
+               page_cache_release(old_page);
++      }
+       return VM_FAULT_OOM;
+ unwritable_page:
+@@ -2661,27 +2694,22 @@ static int __do_fault(struct mm_struct *
+                               int tmp;
+                               unlock_page(page);
+-                              vmf.flags |= FAULT_FLAG_MKWRITE;
++                              vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                               tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                               if (unlikely(tmp &
+                                         (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                                       ret = tmp;
+-                                      anon = 1; /* no anon but release vmf.page */
+-                                      goto out_unlocked;
+-                              }
+-                              lock_page(page);
+-                              /*
+-                               * XXX: this is not quite right (racy vs
+-                               * invalidate) to unlock and relock the page
+-                               * like this, however a better fix requires
+-                               * reworking page_mkwrite locking API, which
+-                               * is better done later.
+-                               */
+-                              if (!page->mapping) {
+-                                      ret = 0;
+-                                      anon = 1; /* no anon but release vmf.page */
+-                                      goto out;
++                                      goto unwritable_page;
+                               }
++                              if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
++                                      lock_page(page);
++                                      if (!page->mapping) {
++                                              ret = 0; /* retry the fault */
++                                              unlock_page(page);
++                                              goto unwritable_page;
++                                      }
++                              } else
++                                      VM_BUG_ON(!PageLocked(page));
+                               page_mkwrite = 1;
+                       }
+               }
+@@ -2733,19 +2761,35 @@ static int __do_fault(struct mm_struct *
+       pte_unmap_unlock(page_table, ptl);
+ out:
+-      unlock_page(vmf.page);
+-out_unlocked:
+-      if (anon)
+-              page_cache_release(vmf.page);
+-      else if (dirty_page) {
+-              if (vma->vm_file)
+-                      file_update_time(vma->vm_file);
++      if (dirty_page) {
++              struct address_space *mapping = page->mapping;
+-              set_page_dirty_balance(dirty_page, page_mkwrite);
++              if (set_page_dirty(dirty_page))
++                      page_mkwrite = 1;
++              unlock_page(dirty_page);
+               put_page(dirty_page);
++              if (page_mkwrite && mapping) {
++                      /*
++                       * Some device drivers do not set page.mapping but still
++                       * dirty their pages
++                       */
++                      balance_dirty_pages_ratelimited(mapping);
++              }
++
++              /* file_update_time outside page_lock */
++              if (vma->vm_file)
++                      file_update_time(vma->vm_file);
++      } else {
++              unlock_page(vmf.page);
++              if (anon)
++                      page_cache_release(vmf.page);
+       }
+       return ret;
++
++unwritable_page:
++      page_cache_release(page);
++      return ret;
+ }
+ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
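Under the convention this patch documents, a filesystem's ->page_mkwrite() may return with the page locked. A minimal hypothetical handler following that convention (not taken from any filesystem; it uses the vm_fault-based prototype introduced by the companion patch below):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Lock the page, verify it was not truncated while unlocked, and return
 * with it still locked via VM_FAULT_LOCKED; if it went away, return
 * VM_FAULT_NOPAGE so the VM retries the fault. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) > i_size_read(inode)) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;	/* truncated: retry the fault */
	}
	/* ...filesystem-specific dirty accounting would go here... */
	return VM_FAULT_LOCKED;		/* the VM will unlock the page */
}
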
diff --git a/queue-2.6.29/mm-page_mkwrite-change-prototype-to-match-fault.patch b/queue-2.6.29/mm-page_mkwrite-change-prototype-to-match-fault.patch
new file mode 100644 (file)
index 0000000..5cf1f84
--- /dev/null
@@ -0,0 +1,387 @@
+From c2ec175c39f62949438354f603f4aa170846aabb Mon Sep 17 00:00:00 2001
+From: Nick Piggin <npiggin@suse.de>
+Date: Tue, 31 Mar 2009 15:23:21 -0700
+Subject: mm: page_mkwrite change prototype to match fault
+
+From: Nick Piggin <npiggin@suse.de>
+
+commit c2ec175c39f62949438354f603f4aa170846aabb upstream.
+
+Change the page_mkwrite prototype to take a struct vm_fault, and return
+VM_FAULT_xxx flags.  There should be no functional change.
+
+This makes it possible to return much more detailed error information to
+the VM (and also can provide more information eg.  virtual_address to the
+driver, which might be important in some special cases).
+
+This is required for a subsequent fix.  And will also make it easier to
+merge page_mkwrite() with fault() in future.
+
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+Cc: Chris Mason <chris.mason@oracle.com>
+Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
+Cc: Miklos Szeredi <miklos@szeredi.hu>
+Cc: Steven Whitehouse <swhiteho@redhat.com>
+Cc: Mark Fasheh <mfasheh@suse.com>
+Cc: Joel Becker <joel.becker@oracle.com>
+Cc: Artem Bityutskiy <dedekind@infradead.org>
+Cc: Felix Blyakher <felixb@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ Documentation/filesystems/Locking |    2 +-
+ drivers/video/fb_defio.c          |    3 ++-
+ fs/btrfs/ctree.h                  |    2 +-
+ fs/btrfs/inode.c                  |    5 ++++-
+ fs/buffer.c                       |    6 +++++-
+ fs/ext4/ext4.h                    |    2 +-
+ fs/ext4/inode.c                   |    5 ++++-
+ fs/fuse/file.c                    |    3 ++-
+ fs/gfs2/ops_file.c                |    5 ++++-
+ fs/nfs/file.c                     |    5 ++++-
+ fs/ocfs2/mmap.c                   |    6 ++++--
+ fs/ubifs/file.c                   |    9 ++++++---
+ fs/xfs/linux-2.6/xfs_file.c       |    4 ++--
+ include/linux/buffer_head.h       |    2 +-
+ include/linux/mm.h                |    3 ++-
+ mm/memory.c                       |   26 ++++++++++++++++++++++----
+ 16 files changed, 65 insertions(+), 23 deletions(-)
+
+--- a/Documentation/filesystems/Locking
++++ b/Documentation/filesystems/Locking
+@@ -502,7 +502,7 @@ prototypes:
+       void (*open)(struct vm_area_struct*);
+       void (*close)(struct vm_area_struct*);
+       int (*fault)(struct vm_area_struct*, struct vm_fault *);
+-      int (*page_mkwrite)(struct vm_area_struct *, struct page *);
++      int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
+       int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
+ locking rules:
+--- a/drivers/video/fb_defio.c
++++ b/drivers/video/fb_defio.c
+@@ -85,8 +85,9 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
+ /* vm_ops->page_mkwrite handler */
+ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
+-                                struct page *page)
++                                struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct fb_info *info = vma->vm_private_data;
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct page *cur;
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2060,7 +2060,7 @@ int btrfs_merge_bio_hook(struct page *pa
+ unsigned long btrfs_force_ra(struct address_space *mapping,
+                             struct file_ra_state *ra, struct file *file,
+                             pgoff_t offset, pgoff_t last_index);
+-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
++int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int btrfs_readpage(struct file *file, struct page *page);
+ void btrfs_delete_inode(struct inode *inode);
+ void btrfs_put_inode(struct inode *inode);
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4292,8 +4292,9 @@ static void btrfs_invalidatepage(struct 
+  * beyond EOF, then the page is guaranteed safe against truncation until we
+  * unlock the page.
+  */
+-int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct inode *inode = fdentry(vma->vm_file)->d_inode;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+@@ -4362,6 +4363,8 @@ again:
+ out_unlock:
+       unlock_page(page);
+ out:
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2465,9 +2465,10 @@ int block_commit_write(struct page *page
+  * unlock the page.
+  */
+ int
+-block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
++block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                  get_block_t get_block)
+ {
++      struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       unsigned long end;
+       loff_t size;
+@@ -2492,6 +2493,9 @@ block_page_mkwrite(struct vm_area_struct
+               ret = block_commit_write(page, 0, end);
+ out_unlock:
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
++
+       unlock_page(page);
+       return ret;
+ }
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1097,7 +1097,7 @@ extern int ext4_meta_trans_blocks(struct
+ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+ extern int ext4_block_truncate_page(handle_t *handle,
+               struct address_space *mapping, loff_t from);
+-extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
++extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+ /* ioctl.c */
+ extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5116,8 +5116,9 @@ static int ext4_bh_unmapped(handle_t *ha
+       return !buffer_mapped(bh);
+ }
+-int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       loff_t size;
+       unsigned long len;
+       int ret = -EINVAL;
+@@ -5169,6 +5170,8 @@ int ext4_page_mkwrite(struct vm_area_str
+               goto out_unlock;
+       ret = 0;
+ out_unlock:
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
+       up_read(&inode->i_alloc_sem);
+       return ret;
+ }
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1234,8 +1234,9 @@ static void fuse_vma_close(struct vm_are
+  * - sync(2)
+  * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
+  */
+-static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       /*
+        * Don't use page->mapping as it may become NULL from a
+        * concurrent truncate.
+--- a/fs/gfs2/ops_file.c
++++ b/fs/gfs2/ops_file.c
+@@ -336,8 +336,9 @@ static int gfs2_allocate_page_backing(st
+  * blocks allocated on disk to back that page.
+  */
+-static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_sbd *sdp = GFS2_SB(inode);
+@@ -409,6 +410,8 @@ out_unlock:
+       gfs2_glock_dq(&gh);
+ out:
+       gfs2_holder_uninit(&gh);
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -451,8 +451,9 @@ const struct address_space_operations nf
+       .launder_page = nfs_launder_page,
+ };
+-static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct file *filp = vma->vm_file;
+       struct dentry *dentry = filp->f_path.dentry;
+       unsigned pagelen;
+@@ -483,6 +484,8 @@ static int nfs_vm_page_mkwrite(struct vm
+               ret = pagelen;
+ out_unlock:
+       unlock_page(page);
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
+--- a/fs/ocfs2/mmap.c
++++ b/fs/ocfs2/mmap.c
+@@ -154,8 +154,9 @@ out:
+       return ret;
+ }
+-static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct buffer_head *di_bh = NULL;
+       sigset_t blocked, oldset;
+@@ -196,7 +197,8 @@ out:
+       ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
+       if (ret2 < 0)
+               mlog_errno(ret2);
+-
++      if (ret)
++              ret = VM_FAULT_SIGBUS;
+       return ret;
+ }
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1434,8 +1434,9 @@ static int ubifs_releasepage(struct page
+  * mmap()d file has taken write protection fault and is being made
+  * writable. UBIFS must ensure page is budgeted for.
+  */
+-static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
++static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
++      struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct ubifs_info *c = inode->i_sb->s_fs_info;
+       struct timespec now = ubifs_current_time(inode);
+@@ -1447,7 +1448,7 @@ static int ubifs_vm_page_mkwrite(struct 
+       ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));
+       if (unlikely(c->ro_media))
+-              return -EROFS;
++              return VM_FAULT_SIGBUS; /* -EROFS */
+       /*
+        * We have not locked @page so far so we may budget for changing the
+@@ -1480,7 +1481,7 @@ static int ubifs_vm_page_mkwrite(struct 
+               if (err == -ENOSPC)
+                       ubifs_warn("out of space for mmapped file "
+                                  "(inode number %lu)", inode->i_ino);
+-              return err;
++              return VM_FAULT_SIGBUS;
+       }
+       lock_page(page);
+@@ -1520,6 +1521,8 @@ static int ubifs_vm_page_mkwrite(struct 
+ out_unlock:
+       unlock_page(page);
+       ubifs_release_budget(c, &req);
++      if (err)
++              err = VM_FAULT_SIGBUS;
+       return err;
+ }
+--- a/fs/xfs/linux-2.6/xfs_file.c
++++ b/fs/xfs/linux-2.6/xfs_file.c
+@@ -234,9 +234,9 @@ xfs_file_mmap(
+ STATIC int
+ xfs_vm_page_mkwrite(
+       struct vm_area_struct   *vma,
+-      struct page             *page)
++      struct vm_fault         *vmf)
+ {
+-      return block_page_mkwrite(vma, page, xfs_get_blocks);
++      return block_page_mkwrite(vma, vmf, xfs_get_blocks);
+ }
+ const struct file_operations xfs_file_operations = {
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -223,7 +223,7 @@ int cont_write_begin(struct file *, stru
+                       get_block_t *, loff_t *);
+ int generic_cont_expand_simple(struct inode *inode, loff_t size);
+ int block_commit_write(struct page *page, unsigned from, unsigned to);
+-int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
++int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                               get_block_t get_block);
+ void block_sync_page(struct page *);
+ sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -145,6 +145,7 @@ extern pgprot_t protection_map[16];
+ #define FAULT_FLAG_WRITE      0x01    /* Fault was a write access */
+ #define FAULT_FLAG_NONLINEAR  0x02    /* Fault was via a nonlinear mapping */
++#define FAULT_FLAG_MKWRITE    0x04    /* Fault was mkwrite of existing pte */
+ /*
+  * This interface is used by x86 PAT code to identify a pfn mapping that is
+@@ -197,7 +198,7 @@ struct vm_operations_struct {
+       /* notification that a previously read-only page is about to become
+        * writable, if an error is returned it will cause a SIGBUS */
+-      int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++      int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+       /* called by access_process_vm when get_user_pages() fails, typically
+        * for use by special VMAs that can switch between memory and hardware
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1940,6 +1940,15 @@ static int do_wp_page(struct mm_struct *
+                * get_user_pages(.write=1, .force=1).
+                */
+               if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
++                      struct vm_fault vmf;
++                      int tmp;
++
++                      vmf.virtual_address = (void __user *)(address &
++                                                              PAGE_MASK);
++                      vmf.pgoff = old_page->index;
++                      vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
++                      vmf.page = old_page;
++
+                       /*
+                        * Notify the address space that the page is about to
+                        * become writable so that it can prohibit this or wait
+@@ -1951,8 +1960,12 @@ static int do_wp_page(struct mm_struct *
+                       page_cache_get(old_page);
+                       pte_unmap_unlock(page_table, ptl);
+-                      if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
++                      tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
++                      if (unlikely(tmp &
++                                      (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
++                              ret = tmp;
+                               goto unwritable_page;
++                      }
+                       /*
+                        * Since we dropped the lock we need to revalidate
+@@ -2101,7 +2114,7 @@ oom:
+ unwritable_page:
+       page_cache_release(old_page);
+-      return VM_FAULT_SIGBUS;
++      return ret;
+ }
+ /*
+@@ -2645,9 +2658,14 @@ static int __do_fault(struct mm_struct *
+                        * to become writable
+                        */
+                       if (vma->vm_ops->page_mkwrite) {
++                              int tmp;
++
+                               unlock_page(page);
+-                              if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
+-                                      ret = VM_FAULT_SIGBUS;
++                              vmf.flags |= FAULT_FLAG_MKWRITE;
++                              tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
++                              if (unlikely(tmp &
++                                        (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
++                                      ret = tmp;
+                                       anon = 1; /* no anon but release vmf.page */
+                                       goto out_unlocked;
+                               }
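
Note (editorial aside, not part of the queued patch): the hunks above switch every in-tree ->page_mkwrite implementation from the old (vma, page) prototype to (vma, vmf) and from -errno returns to VM_FAULT_* codes. A purely illustrative sketch of what a minimal handler looks like under the new prototype follows; example_reserve_blocks() is a made-up helper standing in for a filesystem's real block-allocation step.

#include <linux/fs.h>
#include <linux/mm.h>

/* hypothetical stand-in for the filesystem's real allocation/budgeting work */
static int example_reserve_blocks(struct inode *inode, struct page *page)
{
        return 0;
}

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;  /* the faulting page now arrives via vmf */
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int err;

        lock_page(page);
        err = example_reserve_blocks(inode, page);
        unlock_page(page);

        /* callers now expect VM_FAULT_* codes, not -errno values */
        return err ? VM_FAULT_SIGBUS : 0;
}
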
diff --git a/queue-2.6.29/netlabel-add-cipso-set-del-attr-request_sock-functions.patch b/queue-2.6.29/netlabel-add-cipso-set-del-attr-request_sock-functions.patch
new file mode 100644 (file)
index 0000000..66ea0c8
--- /dev/null
@@ -0,0 +1,210 @@
+From linux-security-module-owner@vger.kernel.org  Tue May 12 14:02:42 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:58:36 -0400
+Subject: netlabel: Add CIPSO {set, del}attr request_sock functions
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215836.12179.12931.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 389fb800ac8be2832efedd19978a2b8ced37eb61]
+
+Add the cipso_v4_req_setattr() and cipso_v4_req_delattr() functions to set and
+delete the CIPSO security attributes on a request_sock used during an incoming
+connection request.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ include/net/cipso_ipv4.h |   17 ++++++
+ net/ipv4/cipso_ipv4.c    |  130 +++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 147 insertions(+)
+
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -40,6 +40,7 @@
+ #include <linux/net.h>
+ #include <linux/skbuff.h>
+ #include <net/netlabel.h>
++#include <net/request_sock.h>
+ #include <asm/atomic.h>
+ /* known doi values */
+@@ -215,6 +216,10 @@ int cipso_v4_sock_setattr(struct sock *s
+                         const struct netlbl_lsm_secattr *secattr);
+ void cipso_v4_sock_delattr(struct sock *sk);
+ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr);
++int cipso_v4_req_setattr(struct request_sock *req,
++                       const struct cipso_v4_doi *doi_def,
++                       const struct netlbl_lsm_secattr *secattr);
++void cipso_v4_req_delattr(struct request_sock *req);
+ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+                           const struct cipso_v4_doi *doi_def,
+                           const struct netlbl_lsm_secattr *secattr);
+@@ -247,6 +252,18 @@ static inline int cipso_v4_sock_getattr(
+       return -ENOSYS;
+ }
++static inline int cipso_v4_req_setattr(struct request_sock *req,
++                                     const struct cipso_v4_doi *doi_def,
++                                     const struct netlbl_lsm_secattr *secattr)
++{
++      return -ENOSYS;
++}
++
++static inline void cipso_v4_req_delattr(struct request_sock *req)
++{
++      return;
++}
++
+ static inline int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+                                     const struct cipso_v4_doi *doi_def,
+                                     const struct netlbl_lsm_secattr *secattr)
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1942,6 +1942,72 @@ socket_setattr_failure:
+ }
+ /**
++ * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
++ * @req: the connection request socket
++ * @doi_def: the CIPSO DOI to use
++ * @secattr: the specific security attributes of the socket
++ *
++ * Description:
++ * Set the CIPSO option on the given socket using the DOI definition and
++ * security attributes passed to the function.  Returns zero on success and
++ * negative values on failure.
++ *
++ */
++int cipso_v4_req_setattr(struct request_sock *req,
++                       const struct cipso_v4_doi *doi_def,
++                       const struct netlbl_lsm_secattr *secattr)
++{
++      int ret_val = -EPERM;
++      unsigned char *buf = NULL;
++      u32 buf_len;
++      u32 opt_len;
++      struct ip_options *opt = NULL;
++      struct inet_request_sock *req_inet;
++
++      /* We allocate the maximum CIPSO option size here so we are probably
++       * being a little wasteful, but it makes our life _much_ easier later
++       * on and after all we are only talking about 40 bytes. */
++      buf_len = CIPSO_V4_OPT_LEN_MAX;
++      buf = kmalloc(buf_len, GFP_ATOMIC);
++      if (buf == NULL) {
++              ret_val = -ENOMEM;
++              goto req_setattr_failure;
++      }
++
++      ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
++      if (ret_val < 0)
++              goto req_setattr_failure;
++      buf_len = ret_val;
++
++      /* We can't use ip_options_get() directly because it makes a call to
++       * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
++       * we won't always have CAP_NET_RAW even though we _always_ want to
++       * set the IPOPT_CIPSO option. */
++      opt_len = (buf_len + 3) & ~3;
++      opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
++      if (opt == NULL) {
++              ret_val = -ENOMEM;
++              goto req_setattr_failure;
++      }
++      memcpy(opt->__data, buf, buf_len);
++      opt->optlen = opt_len;
++      opt->cipso = sizeof(struct iphdr);
++      kfree(buf);
++      buf = NULL;
++
++      req_inet = inet_rsk(req);
++      opt = xchg(&req_inet->opt, opt);
++      kfree(opt);
++
++      return 0;
++
++req_setattr_failure:
++      kfree(buf);
++      kfree(opt);
++      return ret_val;
++}
++
++/**
+  * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
+  * @sk: the socket
+  *
+@@ -2016,6 +2082,70 @@ void cipso_v4_sock_delattr(struct sock *
+ }
+ /**
++ * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
++ * @req: the request socket
++ *
++ * Description:
++ * Removes the CIPSO option from a request socket, if present.
++ *
++ */
++void cipso_v4_req_delattr(struct request_sock *req)
++{
++      struct ip_options *opt;
++      struct inet_request_sock *req_inet;
++
++      req_inet = inet_rsk(req);
++      opt = req_inet->opt;
++      if (opt == NULL || opt->cipso == 0)
++              return;
++
++      if (opt->srr || opt->rr || opt->ts || opt->router_alert) {
++              u8 cipso_len;
++              u8 cipso_off;
++              unsigned char *cipso_ptr;
++              int iter;
++              int optlen_new;
++
++              cipso_off = opt->cipso - sizeof(struct iphdr);
++              cipso_ptr = &opt->__data[cipso_off];
++              cipso_len = cipso_ptr[1];
++
++              if (opt->srr > opt->cipso)
++                      opt->srr -= cipso_len;
++              if (opt->rr > opt->cipso)
++                      opt->rr -= cipso_len;
++              if (opt->ts > opt->cipso)
++                      opt->ts -= cipso_len;
++              if (opt->router_alert > opt->cipso)
++                      opt->router_alert -= cipso_len;
++              opt->cipso = 0;
++
++              memmove(cipso_ptr, cipso_ptr + cipso_len,
++                      opt->optlen - cipso_off - cipso_len);
++
++              /* determining the new total option length is tricky because of
++               * the padding necessary, the only thing i can think to do at
++               * this point is walk the options one-by-one, skipping the
++               * padding at the end to determine the actual option size and
++               * from there we can determine the new total option length */
++              iter = 0;
++              optlen_new = 0;
++              while (iter < opt->optlen)
++                      if (opt->__data[iter] != IPOPT_NOP) {
++                              iter += opt->__data[iter + 1];
++                              optlen_new = iter;
++                      } else
++                              iter++;
++              opt->optlen = (optlen_new + 3) & ~3;
++      } else {
++              /* only the cipso option was present on the socket so we can
++               * remove the entire option struct */
++              req_inet->opt = NULL;
++              kfree(opt);
++      }
++}
++
++/**
+  * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
+  * @cipso: the CIPSO v4 option
+  * @secattr: the security attributes
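
Note (editorial aside, not part of the queued patch): the 4-byte rounding in cipso_v4_req_setattr() above simply pads the generated CIPSO option out to whole 32-bit words of IP option space. A purely illustrative restatement of that arithmetic:

static inline u32 example_ip_opt_len(u32 cipso_len)
{
        return (cipso_len + 3) & ~3;    /* e.g. a 30-byte option occupies 32 bytes */
}
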
diff --git a/queue-2.6.29/netlabel-add-new-netlabel-kapi-interfaces-for-request_sock-security-attributes.patch b/queue-2.6.29/netlabel-add-new-netlabel-kapi-interfaces-for-request_sock-security-attributes.patch
new file mode 100644 (file)
index 0000000..bc3e7ff
--- /dev/null
@@ -0,0 +1,163 @@
+From linux-security-module-owner@vger.kernel.org  Tue May 12 14:02:57 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:58:43 -0400
+Subject: netlabel: Add new NetLabel KAPI interfaces for request_sock security attributes
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215843.12179.60635.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 389fb800ac8be2832efedd19978a2b8ced37eb61 and
+                07feee8f812f7327a46186f7604df312c8c81962]
+
+This patch adds the netlbl_req_setattr() and netlbl_req_delattr() functions
+which can be used by LSMs to set and remove the NetLabel security attributes
+from request_sock objects used in incoming connection requests.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ include/net/netlabel.h       |   15 +++++++
+ net/netlabel/netlabel_kapi.c |   84 +++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 98 insertions(+), 1 deletion(-)
+
+--- a/include/net/netlabel.h
++++ b/include/net/netlabel.h
+@@ -36,6 +36,7 @@
+ #include <linux/in.h>
+ #include <linux/in6.h>
+ #include <net/netlink.h>
++#include <net/request_sock.h>
+ #include <asm/atomic.h>
+ struct cipso_v4_doi;
+@@ -413,6 +414,9 @@ int netlbl_sock_getattr(struct sock *sk,
+ int netlbl_conn_setattr(struct sock *sk,
+                       struct sockaddr *addr,
+                       const struct netlbl_lsm_secattr *secattr);
++int netlbl_req_setattr(struct request_sock *req,
++                     const struct netlbl_lsm_secattr *secattr);
++void netlbl_req_delattr(struct request_sock *req);
+ int netlbl_skbuff_setattr(struct sk_buff *skb,
+                         u16 family,
+                         const struct netlbl_lsm_secattr *secattr);
+@@ -519,7 +523,7 @@ static inline int netlbl_enabled(void)
+       return 0;
+ }
+ static inline int netlbl_sock_setattr(struct sock *sk,
+-                                   const struct netlbl_lsm_secattr *secattr)
++                                    const struct netlbl_lsm_secattr *secattr)
+ {
+       return -ENOSYS;
+ }
+@@ -537,6 +541,15 @@ static inline int netlbl_conn_setattr(st
+ {
+       return -ENOSYS;
+ }
++static inline int netlbl_req_setattr(struct request_sock *req,
++                                   const struct netlbl_lsm_secattr *secattr)
++{
++      return -ENOSYS;
++}
++static inline void netlbl_req_delattr(struct request_sock *req)
++{
++      return;
++}
+ static inline int netlbl_skbuff_setattr(struct sk_buff *skb,
+                                     u16 family,
+                                     const struct netlbl_lsm_secattr *secattr)
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -757,6 +757,90 @@ conn_setattr_return:
+ }
+ /**
++ * netlbl_req_setattr - Label a request socket using the correct protocol
++ * @req: the request socket to label
++ * @secattr: the security attributes
++ *
++ * Description:
++ * Attach the correct label to the given socket using the security attributes
++ * specified in @secattr.  Returns zero on success, negative values on failure.
++ *
++ */
++int netlbl_req_setattr(struct request_sock *req,
++                     const struct netlbl_lsm_secattr *secattr)
++{
++      int ret_val;
++      struct netlbl_dom_map *dom_entry;
++      struct netlbl_domaddr4_map *af4_entry;
++      u32 proto_type;
++      struct cipso_v4_doi *proto_cv4;
++
++      rcu_read_lock();
++      dom_entry = netlbl_domhsh_getentry(secattr->domain);
++      if (dom_entry == NULL) {
++              ret_val = -ENOENT;
++              goto req_setattr_return;
++      }
++      switch (req->rsk_ops->family) {
++      case AF_INET:
++              if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) {
++                      struct inet_request_sock *req_inet = inet_rsk(req);
++                      af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
++                                                          req_inet->rmt_addr);
++                      if (af4_entry == NULL) {
++                              ret_val = -ENOENT;
++                              goto req_setattr_return;
++                      }
++                      proto_type = af4_entry->type;
++                      proto_cv4 = af4_entry->type_def.cipsov4;
++              } else {
++                      proto_type = dom_entry->type;
++                      proto_cv4 = dom_entry->type_def.cipsov4;
++              }
++              switch (proto_type) {
++              case NETLBL_NLTYPE_CIPSOV4:
++                      ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr);
++                      break;
++              case NETLBL_NLTYPE_UNLABELED:
++                      /* just delete the protocols we support for right now
++                       * but we could remove other protocols if needed */
++                      cipso_v4_req_delattr(req);
++                      ret_val = 0;
++                      break;
++              default:
++                      ret_val = -ENOENT;
++              }
++              break;
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++      case AF_INET6:
++              /* since we don't support any IPv6 labeling protocols right
++               * now we can optimize everything away until we do */
++              ret_val = 0;
++              break;
++#endif /* IPv6 */
++      default:
++              ret_val = -EPROTONOSUPPORT;
++      }
++
++req_setattr_return:
++      rcu_read_unlock();
++      return ret_val;
++}
++
++/**
++* netlbl_req_delattr - Delete all the NetLabel labels on a socket
++* @req: the socket
++*
++* Description:
++* Remove all the NetLabel labeling from @req.
++*
++*/
++void netlbl_req_delattr(struct request_sock *req)
++{
++      cipso_v4_req_delattr(req);
++}
++
++/**
+  * netlbl_skbuff_setattr - Label a packet using the correct protocol
+  * @skb: the packet
+  * @family: protocol family
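
Note (editorial aside, not part of the queued patch): the new netlbl_req_setattr()/netlbl_req_delattr() calls are intended to be driven from an LSM's inet_conn_request hook, which is what the SELinux and Smack patches later in this series do. A purely illustrative sketch; example_secid_to_secattr() is a made-up stand-in for the LSM's own secid-to-secattr conversion.

#include <net/netlabel.h>

/* hypothetical helper: convert the request_sock's secid to NetLabel attributes */
static int example_secid_to_secattr(u32 secid, struct netlbl_lsm_secattr *secattr)
{
        return 0;
}

static int example_inet_conn_request(struct request_sock *req)
{
        struct netlbl_lsm_secattr secattr;
        int rc;

        netlbl_secattr_init(&secattr);
        rc = example_secid_to_secattr(req->secid, &secattr);
        if (rc == 0)
                rc = netlbl_req_setattr(req, &secattr);
        netlbl_secattr_destroy(&secattr);
        return rc;
}
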
diff --git a/queue-2.6.29/nfs-close-page_mkwrite-races.patch b/queue-2.6.29/nfs-close-page_mkwrite-races.patch
new file mode 100644 (file)
index 0000000..627d69e
--- /dev/null
@@ -0,0 +1,38 @@
+From 7fdf523067666b0eaff330f362401ee50ce187c4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Fri, 24 Apr 2009 17:32:22 -0400
+Subject: NFS: Close page_mkwrite() races
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 7fdf523067666b0eaff330f362401ee50ce187c4 upstream.
+
+Follow up to Nick Piggin's patches to ensure that nfs_vm_page_mkwrite
+returns with the page lock held, and sets the VM_FAULT_LOCKED flag.
+
+See http://bugzilla.kernel.org/show_bug.cgi?id=12913
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/file.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -481,10 +481,10 @@ static int nfs_vm_page_mkwrite(struct vm
+       ret = nfs_updatepage(filp, page, 0, pagelen);
+ out_unlock:
++      if (!ret)
++              return VM_FAULT_LOCKED;
+       unlock_page(page);
+-      if (ret)
+-              ret = VM_FAULT_SIGBUS;
+-      return ret;
++      return VM_FAULT_SIGBUS;
+ }
+ static struct vm_operations_struct nfs_file_vm_ops = {
diff --git a/queue-2.6.29/nfs-fix-the-return-value-in-nfs_page_mkwrite.patch b/queue-2.6.29/nfs-fix-the-return-value-in-nfs_page_mkwrite.patch
new file mode 100644 (file)
index 0000000..a621be8
--- /dev/null
@@ -0,0 +1,32 @@
+From 2b2ec7554cf7ec5e4412f89a5af6abe8ce950700 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 7 Apr 2009 14:02:53 -0700
+Subject: NFS: Fix the return value in nfs_page_mkwrite()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 2b2ec7554cf7ec5e4412f89a5af6abe8ce950700 upstream.
+
+Commit c2ec175c39f62949438354f603f4aa170846aabb ("mm: page_mkwrite
+change prototype to match fault") exposed a bug in the NFS
+implementation of page_mkwrite.  We should be returning 0 on success...
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/file.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -480,8 +480,6 @@ static int nfs_vm_page_mkwrite(struct vm
+               goto out_unlock;
+       ret = nfs_updatepage(filp, page, 0, pagelen);
+-      if (ret == 0)
+-              ret = pagelen;
+ out_unlock:
+       unlock_page(page);
+       if (ret)
diff --git a/queue-2.6.29/selinux-add-new-netlabel-glue-code-to-handle-labeling-of-connection-requests.patch b/queue-2.6.29/selinux-add-new-netlabel-glue-code-to-handle-labeling-of-connection-requests.patch
new file mode 100644 (file)
index 0000000..77a3399
--- /dev/null
@@ -0,0 +1,232 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:03:14 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:58:49 -0400
+Subject: selinux: Add new NetLabel glue code to handle labeling of connection requests
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215849.12179.59059.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 389fb800ac8be2832efedd19978a2b8ced37eb61]
+
+This patch provides the missing functions to properly handle the labeling of
+responses to incoming connection requests within SELinux.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ security/selinux/hooks.c            |    4 -
+ security/selinux/include/netlabel.h |   17 +++--
+ security/selinux/netlabel.c         |  118 ++++++++++++++++++------------------
+ 3 files changed, 72 insertions(+), 67 deletions(-)
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3799,7 +3799,7 @@ static int selinux_socket_post_create(st
+               sksec = sock->sk->sk_security;
+               sksec->sid = isec->sid;
+               sksec->sclass = isec->sclass;
+-              err = selinux_netlbl_socket_post_create(sock);
++              err = selinux_netlbl_socket_post_create(sock->sk, family);
+       }
+       return err;
+@@ -4467,8 +4467,6 @@ static void selinux_inet_conn_establishe
+               family = PF_INET;
+       selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
+-
+-      selinux_netlbl_inet_conn_established(sk, family);
+ }
+ static void selinux_req_classify_flow(const struct request_sock *req,
+--- a/security/selinux/include/netlabel.h
++++ b/security/selinux/include/netlabel.h
+@@ -32,6 +32,7 @@
+ #include <linux/net.h>
+ #include <linux/skbuff.h>
+ #include <net/sock.h>
++#include <net/request_sock.h>
+ #include "avc.h"
+ #include "objsec.h"
+@@ -53,8 +54,9 @@ int selinux_netlbl_skbuff_setsid(struct 
+                                u16 family,
+                                u32 sid);
+-void selinux_netlbl_inet_conn_established(struct sock *sk, u16 family);
+-int selinux_netlbl_socket_post_create(struct socket *sock);
++int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family);
++void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
++int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
+ int selinux_netlbl_inode_permission(struct inode *inode, int mask);
+ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
+                               struct sk_buff *skb,
+@@ -113,12 +115,17 @@ static inline int selinux_netlbl_conn_se
+       return 0;
+ }
+-static inline void selinux_netlbl_inet_conn_established(struct sock *sk,
+-                                                      u16 family)
++static inline int selinux_netlbl_inet_conn_request(struct request_sock *req,
++                                                 u16 family)
++{
++      return 0;
++}
++static inline void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+ {
+       return;
+ }
+-static inline int selinux_netlbl_socket_post_create(struct socket *sock)
++static inline int selinux_netlbl_socket_post_create(struct sock *sk,
++                                                  u16 family)
+ {
+       return 0;
+ }
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -281,73 +281,52 @@ skbuff_setsid_return:
+ }
+ /**
+- * selinux_netlbl_inet_conn_established - Netlabel the newly accepted connection
+- * @sk: the new connection
++ * selinux_netlbl_inet_conn_request - Label an incoming stream connection
++ * @req: incoming connection request socket
+  *
+  * Description:
+- * A new connection has been established on @sk so make sure it is labeled
+- * correctly with the NetLabel susbsystem.
++ * A new incoming connection request is represented by @req, we need to label
++ * the new request_sock here and the stack will ensure the on-the-wire label
++ * will get preserved when a full sock is created once the connection handshake
++ * is complete.  Returns zero on success, negative values on failure.
+  *
+  */
+-void selinux_netlbl_inet_conn_established(struct sock *sk, u16 family)
++int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
+ {
+       int rc;
+-      struct sk_security_struct *sksec = sk->sk_security;
+-      struct netlbl_lsm_secattr *secattr;
+-      struct inet_sock *sk_inet = inet_sk(sk);
+-      struct sockaddr_in addr;
++      struct netlbl_lsm_secattr secattr;
+-      if (sksec->nlbl_state != NLBL_REQUIRE)
+-              return;
++      if (family != PF_INET)
++              return 0;
+-      secattr = selinux_netlbl_sock_genattr(sk);
+-      if (secattr == NULL)
+-              return;
++      netlbl_secattr_init(&secattr);
++      rc = security_netlbl_sid_to_secattr(req->secid, &secattr);
++      if (rc != 0)
++              goto inet_conn_request_return;
++      rc = netlbl_req_setattr(req, &secattr);
++inet_conn_request_return:
++      netlbl_secattr_destroy(&secattr);
++      return rc;
++}
+-      rc = netlbl_sock_setattr(sk, secattr);
+-      switch (rc) {
+-      case 0:
++/**
++ * selinux_netlbl_inet_csk_clone - Initialize the newly created sock
++ * @sk: the new sock
++ *
++ * Description:
++ * A new connection has been established using @sk, we've already labeled the
++ * socket via the request_sock struct in selinux_netlbl_inet_conn_request() but
++ * we need to set the NetLabel state here since we now have a sock structure.
++ *
++ */
++void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
++{
++      struct sk_security_struct *sksec = sk->sk_security;
++
++      if (family == PF_INET)
+               sksec->nlbl_state = NLBL_LABELED;
+-              break;
+-      case -EDESTADDRREQ:
+-              /* no PF_INET6 support yet because we don't support any IPv6
+-               * labeling protocols */
+-              if (family != PF_INET) {
+-                      sksec->nlbl_state = NLBL_UNSET;
+-                      return;
+-              }
+-
+-              addr.sin_family = family;
+-              addr.sin_addr.s_addr = sk_inet->daddr;
+-              if (netlbl_conn_setattr(sk, (struct sockaddr *)&addr,
+-                                      secattr) != 0) {
+-                      /* we failed to label the connected socket (could be
+-                       * for a variety of reasons, the actual "why" isn't
+-                       * important here) so we have to go to our backup plan,
+-                       * labeling the packets individually in the netfilter
+-                       * local output hook.  this is okay but we need to
+-                       * adjust the MSS of the connection to take into
+-                       * account any labeling overhead, since we don't know
+-                       * the exact overhead at this point we'll use the worst
+-                       * case value which is 40 bytes for IPv4 */
+-                      struct inet_connection_sock *sk_conn = inet_csk(sk);
+-                      sk_conn->icsk_ext_hdr_len += 40 -
+-                                    (sk_inet->opt ? sk_inet->opt->optlen : 0);
+-                      sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
+-
+-                      sksec->nlbl_state = NLBL_REQSKB;
+-              } else
+-                      sksec->nlbl_state = NLBL_CONNLABELED;
+-              break;
+-      default:
+-              /* note that we are failing to label the socket which could be
+-               * a bad thing since it means traffic could leave the system
+-               * without the desired labeling, however, all is not lost as
+-               * we have a check in selinux_netlbl_inode_permission() to
+-               * pick up the pieces that we might drop here because we can't
+-               * return an error code */
+-              break;
+-      }
++      else
++              sksec->nlbl_state = NLBL_UNSET;
+ }
+ /**
+@@ -359,9 +338,30 @@ void selinux_netlbl_inet_conn_establishe
+  * SID.  Returns zero values on success, negative values on failure.
+  *
+  */
+-int selinux_netlbl_socket_post_create(struct socket *sock)
++int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
+ {
+-      return selinux_netlbl_sock_setsid(sock->sk);
++      int rc;
++      struct sk_security_struct *sksec = sk->sk_security;
++      struct netlbl_lsm_secattr *secattr;
++
++      if (family != PF_INET)
++              return 0;
++
++      secattr = selinux_netlbl_sock_genattr(sk);
++      if (secattr == NULL)
++              return -ENOMEM;
++      rc = netlbl_sock_setattr(sk, secattr);
++      switch (rc) {
++      case 0:
++              sksec->nlbl_state = NLBL_LABELED;
++              break;
++      case -EDESTADDRREQ:
++              sksec->nlbl_state = NLBL_REQSKB;
++              rc = 0;
++              break;
++      }
++
++      return rc;
+ }
+ /**
diff --git a/queue-2.6.29/selinux-remove-dead-code-labeled-networking-code.patch b/queue-2.6.29/selinux-remove-dead-code-labeled-networking-code.patch
new file mode 100644 (file)
index 0000000..8969d32
--- /dev/null
@@ -0,0 +1,114 @@
+From linux-security-module-owner@vger.kernel.org  Tue May 12 14:04:00 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:59:02 -0400
+Subject: selinux: Remove dead code labeled networking code
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215902.12179.5594.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 389fb800ac8be2832efedd19978a2b8ced37eb61]
+
+Remove code that is no longer needed by NetLabel and/or SELinux.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ security/selinux/netlabel.c |   77 --------------------------------------------
+ 1 file changed, 77 deletions(-)
+
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -100,41 +100,6 @@ static struct netlbl_lsm_secattr *selinu
+ }
+ /**
+- * selinux_netlbl_sock_setsid - Label a socket using the NetLabel mechanism
+- * @sk: the socket to label
+- *
+- * Description:
+- * Attempt to label a socket using the NetLabel mechanism.  Returns zero values
+- * on success, negative values on failure.
+- *
+- */
+-static int selinux_netlbl_sock_setsid(struct sock *sk)
+-{
+-      int rc;
+-      struct sk_security_struct *sksec = sk->sk_security;
+-      struct netlbl_lsm_secattr *secattr;
+-
+-      if (sksec->nlbl_state != NLBL_REQUIRE)
+-              return 0;
+-
+-      secattr = selinux_netlbl_sock_genattr(sk);
+-      if (secattr == NULL)
+-              return -ENOMEM;
+-      rc = netlbl_sock_setattr(sk, secattr);
+-      switch (rc) {
+-      case 0:
+-              sksec->nlbl_state = NLBL_LABELED;
+-              break;
+-      case -EDESTADDRREQ:
+-              sksec->nlbl_state = NLBL_REQSKB;
+-              rc = 0;
+-              break;
+-      }
+-
+-      return rc;
+-}
+-
+-/**
+  * selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
+  *
+  * Description:
+@@ -361,48 +326,6 @@ int selinux_netlbl_socket_post_create(st
+ }
+ /**
+- * selinux_netlbl_inode_permission - Verify the socket is NetLabel labeled
+- * @inode: the file descriptor's inode
+- * @mask: the permission mask
+- *
+- * Description:
+- * Looks at a file's inode and if it is marked as a socket protected by
+- * NetLabel then verify that the socket has been labeled, if not try to label
+- * the socket now with the inode's SID.  Returns zero on success, negative
+- * values on failure.
+- *
+- */
+-int selinux_netlbl_inode_permission(struct inode *inode, int mask)
+-{
+-      int rc;
+-      struct sock *sk;
+-      struct socket *sock;
+-      struct sk_security_struct *sksec;
+-
+-      if (!S_ISSOCK(inode->i_mode) ||
+-          ((mask & (MAY_WRITE | MAY_APPEND)) == 0))
+-              return 0;
+-      sock = SOCKET_I(inode);
+-      sk = sock->sk;
+-      if (sk == NULL)
+-              return 0;
+-      sksec = sk->sk_security;
+-      if (sksec == NULL || sksec->nlbl_state != NLBL_REQUIRE)
+-              return 0;
+-
+-      local_bh_disable();
+-      bh_lock_sock_nested(sk);
+-      if (likely(sksec->nlbl_state == NLBL_REQUIRE))
+-              rc = selinux_netlbl_sock_setsid(sk);
+-      else
+-              rc = 0;
+-      bh_unlock_sock(sk);
+-      local_bh_enable();
+-
+-      return rc;
+-}
+-
+-/**
+  * selinux_netlbl_sock_rcv_skb - Do an inbound access check using NetLabel
+  * @sksec: the sock's sk_security_struct
+  * @skb: the packet
diff --git a/queue-2.6.29/selinux-set-the-proper-netlabel-security-attributes-for-connection-requests.patch b/queue-2.6.29/selinux-set-the-proper-netlabel-security-attributes-for-connection-requests.patch
new file mode 100644 (file)
index 0000000..727fe89
--- /dev/null
@@ -0,0 +1,172 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:03:40 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:58:56 -0400
+Subject: selinux: Set the proper NetLabel security attributes for connection requests
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215855.12179.53798.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 389fb800ac8be2832efedd19978a2b8ced37eb61]
+
+This patch ensures the correct labeling of incoming connection request
+responses via NetLabel by enabling the recent changes to NetLabel and the
+SELinux/Netlabel glue code.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ security/selinux/hooks.c            |   45 +++++++++---------------------------
+ security/selinux/include/netlabel.h |    6 +---
+ security/selinux/netlabel.c         |    8 +-----
+ 3 files changed, 16 insertions(+), 43 deletions(-)
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -311,7 +311,7 @@ static int sk_alloc_security(struct sock
+       ssec->sid = SECINITSID_UNLABELED;
+       sk->sk_security = ssec;
+-      selinux_netlbl_sk_security_reset(ssec, family);
++      selinux_netlbl_sk_security_reset(ssec);
+       return 0;
+ }
+@@ -2952,7 +2952,6 @@ static void selinux_inode_getsecid(const
+ static int selinux_revalidate_file_permission(struct file *file, int mask)
+ {
+       const struct cred *cred = current_cred();
+-      int rc;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       if (!mask) {
+@@ -2964,30 +2963,16 @@ static int selinux_revalidate_file_permi
+       if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
+               mask |= MAY_APPEND;
+-      rc = file_has_perm(cred, file,
+-                         file_mask_to_av(inode->i_mode, mask));
+-      if (rc)
+-              return rc;
+-
+-      return selinux_netlbl_inode_permission(inode, mask);
++      return file_has_perm(cred, file, file_mask_to_av(inode->i_mode, mask));
+ }
+ static int selinux_file_permission(struct file *file, int mask)
+ {
+-      struct inode *inode = file->f_path.dentry->d_inode;
+-      struct file_security_struct *fsec = file->f_security;
+-      struct inode_security_struct *isec = inode->i_security;
+-      u32 sid = current_sid();
+-
+       if (!mask) {
+               /* No permission to check.  Existence test. */
+               return 0;
+       }
+-      if (sid == fsec->sid && fsec->isid == isec->sid
+-          && fsec->pseqno == avc_policy_seqno())
+-              return selinux_netlbl_inode_permission(inode, mask);
+-
+       return selinux_revalidate_file_permission(file, mask);
+ }
+@@ -3990,13 +3975,7 @@ static int selinux_socket_accept(struct 
+ static int selinux_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+                                 int size)
+ {
+-      int rc;
+-
+-      rc = socket_has_perm(current, sock, SOCKET__WRITE);
+-      if (rc)
+-              return rc;
+-
+-      return selinux_netlbl_inode_permission(SOCK_INODE(sock), MAY_WRITE);
++      return socket_has_perm(current, sock, SOCKET__WRITE);
+ }
+ static int selinux_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+@@ -4384,7 +4363,7 @@ static void selinux_sk_clone_security(co
+       newssec->peer_sid = ssec->peer_sid;
+       newssec->sclass = ssec->sclass;
+-      selinux_netlbl_sk_security_reset(newssec, newsk->sk_family);
++      selinux_netlbl_sk_security_reset(newssec);
+ }
+ static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
+@@ -4429,15 +4408,15 @@ static int selinux_inet_conn_request(str
+               req->secid = sksec->sid;
+               req->peer_secid = SECSID_NULL;
+               return 0;
++      } else {
++              err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
++              if (err)
++                      return err;
++              req->secid = newsid;
++              req->peer_secid = peersid;
+       }
+-      err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
+-      if (err)
+-              return err;
+-
+-      req->secid = newsid;
+-      req->peer_secid = peersid;
+-      return 0;
++      return selinux_netlbl_inet_conn_request(req, family);
+ }
+ static void selinux_inet_csk_clone(struct sock *newsk,
+@@ -4454,7 +4433,7 @@ static void selinux_inet_csk_clone(struc
+       /* We don't need to take any sort of lock here as we are the only
+        * thread with access to newsksec */
+-      selinux_netlbl_sk_security_reset(newsksec, req->rsk_ops->family);
++      selinux_netlbl_inet_csk_clone(newsk, req->rsk_ops->family);
+ }
+ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
+--- a/security/selinux/include/netlabel.h
++++ b/security/selinux/include/netlabel.h
+@@ -43,8 +43,7 @@ void selinux_netlbl_cache_invalidate(voi
+ void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway);
+ void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec);
+-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
+-                                    int family);
++void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec);
+ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
+                                u16 family,
+@@ -87,8 +86,7 @@ static inline void selinux_netlbl_sk_sec
+ }
+ static inline void selinux_netlbl_sk_security_reset(
+-                                             struct sk_security_struct *ssec,
+-                                             int family)
++                                             struct sk_security_struct *ssec)
+ {
+       return;
+ }
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -188,13 +188,9 @@ void selinux_netlbl_sk_security_free(str
+  * The caller is responsibile for all the NetLabel sk_security_struct locking.
+  *
+  */
+-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec,
+-                                    int family)
++void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec)
+ {
+-      if (family == PF_INET)
+-              ssec->nlbl_state = NLBL_REQUIRE;
+-      else
+-              ssec->nlbl_state = NLBL_UNSET;
++      ssec->nlbl_state = NLBL_UNSET;
+ }
+ /**
index 35e9e1713e7d847c6867416f843b6e19b5895910..53ddcc38232384916f1a55ff5c872202f027c658 100644 (file)
@@ -14,3 +14,23 @@ i2c-algo-bit-fix-timeout-test.patch
 i2c-algo-pca-let-pca9564-recover-from-unacked-data-byte.patch
 dup2-fix-return-value-with-oldfd-newfd-and-invalid-fd.patch
 ne2k-pci-do-not-register-device-until-initialized.patch
+lsm-relocate-the-ipv4-security_inet_conn_request-hooks.patch
+netlabel-add-cipso-set-del-attr-request_sock-functions.patch
+netlabel-add-new-netlabel-kapi-interfaces-for-request_sock-security-attributes.patch
+selinux-add-new-netlabel-glue-code-to-handle-labeling-of-connection-requests.patch
+selinux-set-the-proper-netlabel-security-attributes-for-connection-requests.patch
+selinux-remove-dead-code-labeled-networking-code.patch
+smack-set-the-proper-netlabel-security-attributes-for-connection-requests.patch
+cifs-fix-buffer-size-for-tcon-nativefilesystem-field.patch
+cifs-increase-size-of-tmp_buf-in-cifs_readdir-to-avoid-potential-overflows.patch
+cifs-fix-incorrect-destination-buffer-size-in-cifs_strncpy_to_host.patch
+cifs-fix-buffer-size-in-cifs_convertucspath.patch
+cifs-fix-unicode-string-area-word-alignment-in-session-setup.patch
+mac80211-pid-fix-memory-corruption.patch
+mac80211-minstrel-fix-memory-corruption.patch
+mm-page_mkwrite-change-prototype-to-match-fault.patch
+fs-fix-page_mkwrite-error-cases-in-core-code-and-btrfs.patch
+mm-close-page_mkwrite-races.patch
+gfs2-fix-page_mkwrite-return-code.patch
+nfs-fix-the-return-value-in-nfs_page_mkwrite.patch
+nfs-close-page_mkwrite-races.patch
diff --git a/queue-2.6.29/smack-set-the-proper-netlabel-security-attributes-for-connection-requests.patch b/queue-2.6.29/smack-set-the-proper-netlabel-security-attributes-for-connection-requests.patch
new file mode 100644 (file)
index 0000000..c5457f0
--- /dev/null
@@ -0,0 +1,257 @@
+From stable-bounces@linux.kernel.org  Tue May 12 14:04:19 2009
+From: Paul Moore <paul.moore@hp.com>
+Date: Fri, 08 May 2009 17:59:09 -0400
+Subject: smack: Set the proper NetLabel security attributes for connection requests
+To: linux-security-module@vger.kernel.org
+Cc: stable@kernel.org
+Message-ID: <20090508215908.12179.80226.stgit@flek.lan>
+
+From: Paul Moore <paul.moore@hp.com>
+
+[NOTE: based on 07feee8f812f7327a46186f7604df312c8c81962]
+
+This patch ensures the correct labeling of new network connection requests
+using Smack and NetLabel.
+
+Signed-off-by: Paul Moore <paul.moore@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+
+ security/smack/smack.h     |    1 
+ security/smack/smack_lsm.c |  130 +++++++++++++++++++++++++--------------------
+ 2 files changed, 75 insertions(+), 56 deletions(-)
+
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -40,7 +40,6 @@ struct superblock_smack {
+ struct socket_smack {
+       char            *smk_out;                       /* outbound label */
+       char            *smk_in;                        /* inbound label */
+-      int             smk_labeled;                    /* label scheme */
+       char            smk_packet[SMK_LABELLEN];       /* TCP peer label */
+ };
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -7,6 +7,8 @@
+  *    Casey Schaufler <casey@schaufler-ca.com>
+  *
+  *  Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
++ *  Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
++ *                Paul Moore <paul.moore@hp.com>
+  *
+  *    This program is free software; you can redistribute it and/or modify
+  *    it under the terms of the GNU General Public License version 2,
+@@ -20,6 +22,7 @@
+ #include <linux/ext2_fs.h>
+ #include <linux/kd.h>
+ #include <asm/ioctls.h>
++#include <linux/ip.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/mutex.h>
+@@ -1279,7 +1282,6 @@ static int smack_sk_alloc_security(struc
+       ssp->smk_in = csp;
+       ssp->smk_out = csp;
+-      ssp->smk_labeled = SMACK_CIPSO_SOCKET;
+       ssp->smk_packet[0] = '\0';
+       sk->sk_security = ssp;
+@@ -1397,16 +1399,6 @@ static int smack_netlabel(struct sock *s
+       bh_unlock_sock(sk);
+       local_bh_enable();
+-      /*
+-       * Remember the label scheme used so that it is not
+-       * necessary to do the netlabel setting if it has not
+-       * changed the next time through.
+-       *
+-       * The -EDESTADDRREQ case is an indication that there's
+-       * a single level host involved.
+-       */
+-      if (rc == 0)
+-              ssp->smk_labeled = labeled;
+       return rc;
+ }
+@@ -1551,19 +1543,14 @@ static int smack_socket_connect(struct s
+               return -EINVAL;
+       hostsp = smack_host_label((struct sockaddr_in *)sap);
+-      if (hostsp == NULL) {
+-              if (ssp->smk_labeled != SMACK_CIPSO_SOCKET)
+-                      return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+-              return 0;
+-      }
++      if (hostsp == NULL)
++              return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+       rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE);
+       if (rc != 0)
+               return rc;
+-      if (ssp->smk_labeled != SMACK_UNLABELED_SOCKET)
+-              return smack_netlabel(sock->sk, SMACK_UNLABELED_SOCKET);
+-      return 0;
++      return smack_netlabel(sock->sk, SMACK_UNLABELED_SOCKET);
+ }
+ /**
+@@ -2275,21 +2262,14 @@ static int smack_socket_sendmsg(struct s
+               return 0;
+       hostsp = smack_host_label(sip);
+-      if (hostsp == NULL) {
+-              if (ssp->smk_labeled != SMACK_CIPSO_SOCKET)
+-                      return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+-              return 0;
+-      }
++      if (hostsp == NULL)
++              return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+       rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE);
+       if (rc != 0)
+               return rc;
+-      if (ssp->smk_labeled != SMACK_UNLABELED_SOCKET)
+-              return smack_netlabel(sock->sk, SMACK_UNLABELED_SOCKET);
+-
+-      return 0;
+-
++      return smack_netlabel(sock->sk, SMACK_UNLABELED_SOCKET);
+ }
+@@ -2504,22 +2484,14 @@ static int smack_socket_getpeersec_dgram
+ static void smack_sock_graft(struct sock *sk, struct socket *parent)
+ {
+       struct socket_smack *ssp;
+-      int rc;
+-      if (sk == NULL)
+-              return;
+-
+-      if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
++      if (sk == NULL ||
++          (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
+               return;
+       ssp = sk->sk_security;
+       ssp->smk_in = ssp->smk_out = current_security();
+-      ssp->smk_packet[0] = '\0';
+-
+-      rc = smack_netlabel(sk, SMACK_CIPSO_SOCKET);
+-      if (rc != 0)
+-              printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n",
+-                     __func__, -rc);
++      /* cssp->smk_packet is already set in smack_inet_csk_clone() */
+ }
+ /**
+@@ -2534,35 +2506,82 @@ static void smack_sock_graft(struct sock
+ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+                                  struct request_sock *req)
+ {
+-      struct netlbl_lsm_secattr skb_secattr;
++      u16 family = sk->sk_family;
+       struct socket_smack *ssp = sk->sk_security;
++      struct netlbl_lsm_secattr secattr;
++      struct sockaddr_in addr;
++      struct iphdr *hdr;
+       char smack[SMK_LABELLEN];
+       int rc;
+-      if (skb == NULL)
+-              return -EACCES;
++      /* handle mapped IPv4 packets arriving via IPv6 sockets */
++      if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
++              family = PF_INET;
+-      netlbl_secattr_init(&skb_secattr);
+-      rc = netlbl_skbuff_getattr(skb, sk->sk_family, &skb_secattr);
++      netlbl_secattr_init(&secattr);
++      rc = netlbl_skbuff_getattr(skb, family, &secattr);
+       if (rc == 0)
+-              smack_from_secattr(&skb_secattr, smack);
++              smack_from_secattr(&secattr, smack);
+       else
+               strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+-      netlbl_secattr_destroy(&skb_secattr);
++      netlbl_secattr_destroy(&secattr);
++
+       /*
+-       * Receiving a packet requires that the other end
+-       * be able to write here. Read access is not required.
+-       *
+-       * If the request is successful save the peer's label
+-       * so that SO_PEERCRED can report it.
+-       */
++      * Receiving a packet requires that the other end be able to write
++      * here. Read access is not required.
++      */
+       rc = smk_access(smack, ssp->smk_in, MAY_WRITE);
+-      if (rc == 0)
+-              strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
++      if (rc != 0)
++              return rc;
++
++      /*
++      * Save the peer's label in the request_sock so we can later setup
++      * smk_packet in the child socket so that SO_PEERCRED can report it.
++      */
++      req->peer_secid = smack_to_secid(smack);
++
++      /*
++      * We need to decide if we want to label the incoming connection here;
++      * if we do, we only need to label the request_sock and the stack will
++      * propagate the wire-label to the sock when it is created.
++      */
++      hdr = ip_hdr(skb);
++      addr.sin_addr.s_addr = hdr->saddr;
++      rcu_read_lock();
++      if (smack_host_label(&addr) == NULL) {
++              rcu_read_unlock();
++              netlbl_secattr_init(&secattr);
++              smack_to_secattr(smack, &secattr);
++              rc = netlbl_req_setattr(req, &secattr);
++              netlbl_secattr_destroy(&secattr);
++      } else {
++              rcu_read_unlock();
++              netlbl_req_delattr(req);
++      }
+       return rc;
+ }
++/**
++* smack_inet_csk_clone - Copy the connection information to the new socket
++* @sk: the new socket
++* @req: the connection's request_sock
++*
++* Transfer the connection's peer label to the newly created socket.
++*/
++static void smack_inet_csk_clone(struct sock *sk,
++                               const struct request_sock *req)
++{
++      struct socket_smack *ssp = sk->sk_security;
++      char *smack;
++
++      if (req->peer_secid != 0) {
++              smack = smack_from_secid(req->peer_secid);
++              strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
++      } else
++              ssp->smk_packet[0] = '\0';
++}
++
+ /*
+  * Key management security hooks
+  *
+@@ -2915,6 +2934,7 @@ struct security_operations smack_ops = {
+       .sk_free_security =             smack_sk_free_security,
+       .sock_graft =                   smack_sock_graft,
+       .inet_conn_request =            smack_inet_conn_request,
++      .inet_csk_clone =               smack_inet_csk_clone,
+  /* key management security hooks */
+ #ifdef CONFIG_KEYS