git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
crypto patches added to 2.6.24 queue
author    Chris Wright <chrisw@sous-sol.org>
          Wed, 12 Mar 2008 04:26:02 +0000 (21:26 -0700)
committer Chris Wright <chrisw@sous-sol.org>
          Wed, 12 Mar 2008 04:26:02 +0000 (21:26 -0700)
queue-2.6.24/crypto-xcbc-fix-crash-with-ipsec.patch [new file with mode: 0644]
queue-2.6.24/crypto-xts-use-proper-alignment.patch [new file with mode: 0644]
queue-2.6.24/series

diff --git a/queue-2.6.24/crypto-xcbc-fix-crash-with-ipsec.patch b/queue-2.6.24/crypto-xcbc-fix-crash-with-ipsec.patch
new file mode 100644
index 0000000..ce78bcb
--- /dev/null
+++ b/queue-2.6.24/crypto-xcbc-fix-crash-with-ipsec.patch
@@ -0,0 +1,52 @@
+From stable-bounces@linux.kernel.org  Tue Mar 11 21:18:58 2008
+Date: Wed, 12 Mar 2008 12:17:45 +0800
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: stable@kernel.org
+Message-ID: <20080312041745.GA27730@gondor.apana.org.au>
+Subject: CRYPTO xcbc: Fix crash with IPsec
+
+From: Joy Latten <latten@austin.ibm.com>
+
+[ Upstream commit: 2f40a178e70030c4712fe63807c883f34c3645eb ]
+
+When using aes-xcbc-mac for authentication in IPsec,
+the kernel crashes. It seems this algorithm doesn't
+account for the space IPsec may make in the scatterlist
+for the authtag. Thus when crypto_xcbc_digest_update2()
+gets called, nbytes may be less than sg[i].length.
+Since nbytes is an unsigned number, it wraps at the end
+of the loop, allowing us to go back into the loop and
+causing a crash in memcpy.
+
+I modeled this fix on the update function in digest.c.
+Please let me know if it looks ok.
+
+Signed-off-by: Joy Latten <latten@austin.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ crypto/xcbc.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/crypto/xcbc.c
++++ b/crypto/xcbc.c
+@@ -124,6 +124,11 @@ static int crypto_xcbc_digest_update2(st
+               unsigned int offset = sg[i].offset;
+               unsigned int slen = sg[i].length;
++              if (unlikely(slen > nbytes))
++                      slen = nbytes;
++
++              nbytes -= slen;
++
+               while (slen > 0) {
+                       unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+                       char *p = crypto_kmap(pg, 0) + offset;
+@@ -177,7 +182,6 @@ static int crypto_xcbc_digest_update2(st
+                       offset = 0;
+                       pg++;
+               }
+-              nbytes-=sg[i].length;
+               i++;
+       } while (nbytes>0);
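
The wrap described in the patch above is easy to reproduce in user space. The sketch below (illustrative values only, not taken from a real IPsec trace) shows why the unpatched subtraction keeps the do/while loop alive: an unsigned remainder cannot go negative, so it wraps to a huge value instead.

#include <stdio.h>

int main(void)
{
	unsigned int nbytes = 100; /* bytes the caller asked us to hash */
	unsigned int slen = 112;   /* sg[i].length, padded by IPsec for the authtag */

	/* Unpatched: subtracting the padded length wraps the unsigned
	 * remainder to a huge value, so "while (nbytes > 0)" keeps
	 * looping and the next memcpy reads past the buffer. */
	printf("unpatched remainder: %u\n", nbytes - slen);

	/* Patched: clamp slen to the bytes actually remaining first,
	 * so the remainder reaches exactly zero and the loop exits. */
	if (slen > nbytes)
		slen = nbytes;
	nbytes -= slen;
	printf("patched remainder: %u\n", nbytes);

	return 0;
}
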
diff --git a/queue-2.6.24/crypto-xts-use-proper-alignment.patch b/queue-2.6.24/crypto-xts-use-proper-alignment.patch
new file mode 100644
index 0000000..30c035a
--- /dev/null
+++ b/queue-2.6.24/crypto-xts-use-proper-alignment.patch
@@ -0,0 +1,79 @@
+From stable-bounces@linux.kernel.org  Tue Mar 11 21:20:01 2008
+Date: Wed, 12 Mar 2008 12:18:36 +0800
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: stable@kernel.org
+Message-ID: <20080312041836.GA27759@gondor.apana.org.au>
+Subject: CRYPTO xts: Use proper alignment
+
+From: Sebastian Siewior <sebastian@breakpoint.cc>
+
+[ Upstream commit: 6212f2c7f70c591efb0d9f3d50ad29112392fee2 ]
+
+The XTS block mode uses a copy of the IV which is saved
+on the stack and may or may not be properly aligned. If
+it is not, it will break hardware ciphers like the Geode
+or PadLock. This patch encrypts the IV in place so we
+don't have to worry about alignment.
+
+Signed-off-by: Sebastian Siewior <sebastian@breakpoint.cc>
+Tested-by: Stefan Hellermann <stefan@the2masters.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ crypto/xts.c |   13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -77,16 +77,16 @@ static int setkey(struct crypto_tfm *par
+ }
+ struct sinfo {
+-      be128 t;
++      be128 *t;
+       struct crypto_tfm *tfm;
+       void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ };
+ static inline void xts_round(struct sinfo *s, void *dst, const void *src)
+ {
+-      be128_xor(dst, &s->t, src);             /* PP <- T xor P */
++      be128_xor(dst, s->t, src);              /* PP <- T xor P */
+       s->fn(s->tfm, dst, dst);                /* CC <- E(Key1,PP) */
+-      be128_xor(dst, dst, &s->t);             /* C <- T xor CC */
++      be128_xor(dst, dst, s->t);              /* C <- T xor CC */
+ }
+ static int crypt(struct blkcipher_desc *d,
+@@ -101,7 +101,6 @@ static int crypt(struct blkcipher_desc *
+               .tfm = crypto_cipher_tfm(ctx->child),
+               .fn = fn
+       };
+-      be128 *iv;
+       u8 *wsrc;
+       u8 *wdst;
+@@ -109,20 +108,20 @@ static int crypt(struct blkcipher_desc *
+       if (!w->nbytes)
+               return err;
++      s.t = (be128 *)w->iv;
+       avail = w->nbytes;
+       wsrc = w->src.virt.addr;
+       wdst = w->dst.virt.addr;
+       /* calculate first value of T */
+-      iv = (be128 *)w->iv;
+-      tw(crypto_cipher_tfm(ctx->tweak), (void *)&s.t, w->iv);
++      tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
+       goto first;
+       for (;;) {
+               do {
+-                      gf128mul_x_ble(&s.t, &s.t);
++                      gf128mul_x_ble(s.t, s.t);
+ first:
+                       xts_round(&s, wdst, wsrc);
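
The alignment hazard this patch removes can also be sketched in user space. In the sketch below, struct iv_block and aligned_to are stand-ins invented for illustration, not kernel types: an IV copied to an arbitrary byte offset carries no alignment guarantee, while a properly declared object does, which is the property the in-place approach relies on (in the kernel, the walk's IV buffer is aligned by the blkcipher layer according to the cipher's alignmask).

#include <stdint.h>
#include <stdio.h>

struct iv_block {                /* stand-in for the kernel's be128 */
	uint64_t a, b;
};

static int aligned_to(const void *p, size_t align)
{
	return ((uintptr_t)p % align) == 0;
}

int main(void)
{
	/* An IV copied into raw bytes at an arbitrary offset, like the
	 * old on-stack copy: nothing guarantees its alignment. */
	unsigned char raw[sizeof(struct iv_block) + 1];
	struct iv_block *copy = (struct iv_block *)(raw + 1);

	/* A properly declared object: the compiler honours the type's
	 * natural alignment, as the walk's IV buffer does in the kernel. */
	struct iv_block in_place;

	printf("offset copy aligned to 8:     %d\n", aligned_to(copy, 8));
	printf("declared object aligned to 8: %d\n", aligned_to(&in_place, 8));
	return 0;
}
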
diff --git a/queue-2.6.24/series b/queue-2.6.24/series
index bb5bdcd4dd7b3791884176b2794d589c6671a46c..c675c7103ce7d3c108273fd845f534e4cc76bce5 100644
--- a/queue-2.6.24/series
+++ b/queue-2.6.24/series
@@ -20,3 +20,5 @@ iov_iter_advance-fix.patch
 drivers-fix-dma_get_required_mask.patch
 x86-adjust-enable_nmi_through_lvt0.patch
 scsi-ips-handle-scsi_add_host-failure-and-other-err-cleanups.patch
+crypto-xcbc-fix-crash-with-ipsec.patch
+crypto-xts-use-proper-alignment.patch