--- /dev/null
+From stable-bounces@linux.kernel.org Tue Mar 11 21:18:58 2008
+Date: Wed, 12 Mar 2008 12:17:45 +0800
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: stable@kernel.org
+Message-ID: <20080312041745.GA27730@gondor.apana.org.au>
+Subject: CRYPTO xcbc: Fix crash with IPsec
+
+From: Joy Latten <latten@austin.ibm.com>
+
+[ Upstream commit: 2f40a178e70030c4712fe63807c883f34c3645eb ]
+
+When using aes-xcbc-mac for authentication in IPsec,
+the kernel crashes. It seems this algorithm doesn't
+account for the space IPsec may reserve in the
+scatterlist for the authtag. Thus, when
+crypto_xcbc_digest_update2() gets called, nbytes may be
+less than sg[i].length. Since nbytes is unsigned, the
+subtraction at the end of the loop wraps around, letting
+us re-enter the loop and crash in memcpy().
+
+I used the update function in digest.c to model this fix.
+Please let me know if it looks ok.
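+
+For illustration, a minimal userspace sketch of the wraparound described
+above (this is not the kernel code; sg_entry, walk_old() and walk_new()
+are made-up names and the walk is heavily simplified). It shows how
+subtracting a full sg[i].length from a smaller unsigned nbytes wraps
+around and keeps the loop going, and how clamping slen first, as this
+patch does, makes the walk stop where it should:
+
+#include <stdio.h>
+
+/* Made-up stand-in for a scatterlist entry; not the kernel's struct. */
+struct sg_entry {
+	unsigned int length;
+};
+
+/* Old behaviour: subtract the full entry length. */
+static void walk_old(const struct sg_entry *sg, unsigned int n,
+		     unsigned int nbytes)
+{
+	unsigned int i = 0;
+
+	do {
+		unsigned int slen = sg[i].length;
+
+		printf("old: entry %u, slen %u, nbytes %u\n", i, slen, nbytes);
+		nbytes -= slen;	/* wraps when slen > nbytes */
+		i++;
+	} while (nbytes > 0 && i < n);	/* "i < n" only so the demo stops */
+
+	printf("old: nbytes left %u\n", nbytes);
+}
+
+/* New behaviour: clamp slen to nbytes first, as the patch does. */
+static void walk_new(const struct sg_entry *sg, unsigned int n,
+		     unsigned int nbytes)
+{
+	unsigned int i = 0;
+
+	do {
+		unsigned int slen = sg[i].length;
+
+		if (slen > nbytes)
+			slen = nbytes;
+		nbytes -= slen;
+		printf("new: entry %u, used %u, nbytes left %u\n", i, slen, nbytes);
+		i++;
+	} while (nbytes > 0 && i < n);
+}
+
+int main(void)
+{
+	/* Second entry is longer than the data actually hashed, as when
+	 * IPsec leaves room in the scatterlist for the authtag. */
+	const struct sg_entry sg[2] = { { .length = 16 }, { .length = 12 } };
+
+	walk_old(sg, 2, 20);	/* nbytes wraps to a huge value */
+	walk_new(sg, 2, 20);	/* stops after consuming 20 bytes */
+	return 0;
+}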
+
+Signed-off-by: Joy Latten <latten@austin.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ crypto/xcbc.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/crypto/xcbc.c
++++ b/crypto/xcbc.c
+@@ -124,6 +124,11 @@ static int crypto_xcbc_digest_update2(st
+ 		unsigned int offset = sg[i].offset;
+ 		unsigned int slen = sg[i].length;
+
++		if (unlikely(slen > nbytes))
++			slen = nbytes;
++
++		nbytes -= slen;
++
+ 		while (slen > 0) {
+ 			unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+ 			char *p = crypto_kmap(pg, 0) + offset;
+@@ -177,7 +182,6 @@ static int crypto_xcbc_digest_update2(st
+ 			offset = 0;
+ 			pg++;
+ 		}
+-		nbytes-=sg[i].length;
+ 		i++;
+ 	} while (nbytes>0);
+
--- /dev/null
+From stable-bounces@linux.kernel.org Tue Mar 11 21:20:01 2008
+Date: Wed, 12 Mar 2008 12:18:36 +0800
+From: Herbert Xu <herbert@gondor.apana.org.au>
+To: stable@kernel.org
+Message-ID: <20080312041836.GA27759@gondor.apana.org.au>
+Subject: CRYPTO xts: Use proper alignment
+
+From: Sebastian Siewior <sebastian@breakpoint.cc>
+
+[ Upstream commit: 6212f2c7f70c591efb0d9f3d50ad29112392fee2 ]
+
+The XTS blockmode uses a copy of the IV which is saved on the stack
+and may or may not be properly aligned. If it is not, it will break
+hardware ciphers like the Geode or PadLock.
+This patch encrypts the IV in place so we don't have to worry about
+alignment.
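+
+A rough userspace sketch of the alignment point (be128, sinfo_old and
+ok_for_alignmask() here are simplified stand-ins, not the kernel types;
+whether the stack copy ends up 16-byte aligned depends on the stack
+layout, which is exactly the problem):
+
+#include <stdint.h>
+#include <stdio.h>
+
+/* Simplified stand-in for the kernel's be128. */
+typedef struct {
+	uint64_t a;
+	uint64_t b;
+} be128;
+
+/* Tweak held by value, like the old "be128 t" member of struct sinfo. */
+struct sinfo_old {
+	char pad;	/* anything that can push t off a 16-byte boundary */
+	be128 t;
+};
+
+/* Roughly what a driver with a 15-byte alignmask expects of a buffer. */
+static int ok_for_alignmask(const void *p, unsigned long alignmask)
+{
+	return ((uintptr_t)p & alignmask) == 0;
+}
+
+int main(void)
+{
+	struct sinfo_old s;
+
+	/* Only be128's natural (8-byte) alignment is guaranteed here, so
+	 * a hardware cipher needing 16-byte buffers may get unlucky. */
+	printf("stack copy ok for alignmask 15? %d\n",
+	       ok_for_alignmask(&s.t, 15));
+	return 0;
+}
+
+The patch avoids the stack copy entirely: s.t becomes a pointer to
+w->iv and the tweak is encrypted in place in that buffer, which the
+blkcipher walk layer already sets up with the algorithm's alignmask
+in mind.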
+
+Signed-off-by: Sebastian Siewior <sebastian@breakpoint.cc>
+Tested-by: Stefan Hellermann <stefan@the2masters.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ crypto/xts.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -77,16 +77,16 @@ static int setkey(struct crypto_tfm *par
+ }
+
+ struct sinfo {
+-	be128 t;
++	be128 *t;
+ 	struct crypto_tfm *tfm;
+ 	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+ };
+
+ static inline void xts_round(struct sinfo *s, void *dst, const void *src)
+ {
+-	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
++	be128_xor(dst, s->t, src);		/* PP <- T xor P */
+ 	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
+-	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
++	be128_xor(dst, dst, s->t);		/* C <- T xor CC */
+ }
+
+ static int crypt(struct blkcipher_desc *d,
+@@ -101,7 +101,6 @@ static int crypt(struct blkcipher_desc *
+ 		.tfm = crypto_cipher_tfm(ctx->child),
+ 		.fn = fn
+ 	};
+-	be128 *iv;
+ 	u8 *wsrc;
+ 	u8 *wdst;
+
+@@ -109,20 +108,20 @@ static int crypt(struct blkcipher_desc *
+ 	if (!w->nbytes)
+ 		return err;
+
++	s.t = (be128 *)w->iv;
+ 	avail = w->nbytes;
+
+ 	wsrc = w->src.virt.addr;
+ 	wdst = w->dst.virt.addr;
+
+ 	/* calculate first value of T */
+-	iv = (be128 *)w->iv;
+-	tw(crypto_cipher_tfm(ctx->tweak), (void *)&s.t, w->iv);
++	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
+
+ 	goto first;
+
+ 	for (;;) {
+ 		do {
+-			gf128mul_x_ble(&s.t, &s.t);
++			gf128mul_x_ble(s.t, s.t);
+
+ first:
+ 			xts_round(&s, wdst, wsrc);
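+
+For reference, a simplified, self-contained sketch of the per-block flow
+that xts_round() and gf128mul_x_ble() implement above (blk128, mul_x()
+and toy_encrypt() are made-up helpers; the toy cipher is just an XOR so
+the sketch runs, not a real block cipher):
+
+#include <stdint.h>
+#include <stdio.h>
+
+/* 128-bit block as two 64-bit halves, low half first. */
+typedef struct {
+	uint64_t lo;
+	uint64_t hi;
+} blk128;
+
+static void xor128(blk128 *r, const blk128 *a, const blk128 *b)
+{
+	r->lo = a->lo ^ b->lo;
+	r->hi = a->hi ^ b->hi;
+}
+
+/* Multiply the tweak by x in GF(2^128), folding the carry back in with
+ * the XTS polynomial x^128 + x^7 + x^2 + x + 1; same role as
+ * gf128mul_x_ble() above. */
+static void mul_x(blk128 *t)
+{
+	uint64_t carry = t->lo >> 63;
+	uint64_t msb = t->hi >> 63;
+
+	t->hi = (t->hi << 1) | carry;
+	t->lo = (t->lo << 1) ^ (msb ? 0x87 : 0);
+}
+
+/* Toy single-block "cipher" standing in for E(Key1, .) and E(Key2, .). */
+static void toy_encrypt(blk128 *dst, const blk128 *src, uint64_t key)
+{
+	dst->lo = src->lo ^ key;
+	dst->hi = src->hi ^ ~key;
+}
+
+int main(void)
+{
+	blk128 iv = { .lo = 1, .hi = 0 };
+	blk128 pt[2] = { { 0x1111, 0x2222 }, { 0x3333, 0x4444 } };
+	blk128 ct[2], t, pp, cc;
+	unsigned int i;
+
+	/* calculate first value of T: T <- E(Key2, IV) */
+	toy_encrypt(&t, &iv, 0xfeed);
+
+	for (i = 0; i < 2; i++) {
+		if (i)
+			mul_x(&t);		/* next tweak: T <- T * x */
+		xor128(&pp, &t, &pt[i]);	/* PP <- T xor P     */
+		toy_encrypt(&cc, &pp, 0xbeef);	/* CC <- E(Key1, PP) */
+		xor128(&ct[i], &cc, &t);	/* C  <- CC xor T    */
+	}
+
+	for (i = 0; i < 2; i++)
+		printf("C%u = %016llx%016llx\n", i,
+		       (unsigned long long)ct[i].hi,
+		       (unsigned long long)ct[i].lo);
+	return 0;
+}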