git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
2.6.38 patches
authorGreg Kroah-Hartman <gregkh@suse.de>
Tue, 29 Mar 2011 03:22:25 +0000 (20:22 -0700)
committerGreg Kroah-Hartman <gregkh@suse.de>
Tue, 29 Mar 2011 03:22:25 +0000 (20:22 -0700)
queue-2.6.38/crypto-aesni-intel-fixed-problem-with-packets-that-are-not-multiple-of-64bytes.patch [new file with mode: 0644]
queue-2.6.38/series

diff --git a/queue-2.6.38/crypto-aesni-intel-fixed-problem-with-packets-that-are-not-multiple-of-64bytes.patch b/queue-2.6.38/crypto-aesni-intel-fixed-problem-with-packets-that-are-not-multiple-of-64bytes.patch
new file mode 100644 (file)
index 0000000..9cedba2
--- /dev/null
@@ -0,0 +1,112 @@
+From 60af520cf264ea26b2af3a6871bbd71850522aea Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Sun, 13 Mar 2011 16:56:17 +0800
+Subject: crypto: aesni-intel - fixed problem with packets that are not multiple of 64bytes
+
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+
+commit 60af520cf264ea26b2af3a6871bbd71850522aea upstream.
+
+This patch fixes problem with packets that are not multiple of 64bytes.
+
+Signed-off-by: Adrian Hoban <adrian.hoban@intel.com>
+Signed-off-by: Aidan O'Mahony <aidan.o.mahony@intel.com>
+Signed-off-by: Gabriele Paoloni <gabriele.paoloni@intel.com>
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/crypto/aesni-intel_asm.S  |    5 ++++-
+ arch/x86/crypto/aesni-intel_glue.c |   14 ++++++++++++--
+ 2 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt:
+         movdqa SHUF_MASK(%rip), %xmm10
+       PSHUFB_XMM %xmm10, %xmm0
++
+       ENCRYPT_SINGLE_BLOCK    %xmm0, %xmm1        # Encrypt(K, Yn)
+       sub $16, %r11
+       add %r13, %r11
+@@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt:
+       # GHASH computation for the last <16 byte block
+       sub     %r13, %r11
+       add     $16, %r11
+-      PSHUFB_XMM %xmm10, %xmm1
++
++      movdqa SHUF_MASK(%rip), %xmm10
++      PSHUFB_XMM %xmm10, %xmm0
+       # shuffle xmm0 back to output as ciphertext
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tf
+       struct cryptd_aead *cryptd_tfm;
+       struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
+               PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
++      struct crypto_aead *cryptd_child;
++      struct aesni_rfc4106_gcm_ctx *child_ctx;
+       cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+       if (IS_ERR(cryptd_tfm))
+               return PTR_ERR(cryptd_tfm);
++
++      cryptd_child = cryptd_aead_child(cryptd_tfm);
++      child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
++      memcpy(child_ctx, ctx, sizeof(*ctx));
+       ctx->cryptd_tfm = cryptd_tfm;
+       tfm->crt_aead.reqsize = sizeof(struct aead_request)
+               + crypto_aead_reqsize(&cryptd_tfm->base);
+@@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto
+       int ret = 0;
+       struct crypto_tfm *tfm = crypto_aead_tfm(parent);
+       struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
++      struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
++      struct aesni_rfc4106_gcm_ctx *child_ctx =
++                                 aesni_rfc4106_gcm_ctx_get(cryptd_child);
+       u8 *new_key_mem = NULL;
+       if (key_len < 4) {
+@@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto
+               goto exit;
+       }
+       ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
++      memcpy(child_ctx, ctx, sizeof(*ctx));
+ exit:
+       kfree(new_key_mem);
+       return ret;
+@@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_r
+       int ret;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+-      struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+       if (!irq_fpu_usable()) {
+               struct aead_request *cryptd_req =
+@@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_r
+               aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+               return crypto_aead_encrypt(cryptd_req);
+       } else {
++              struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+               kernel_fpu_begin();
+               ret = cryptd_child->base.crt_aead.encrypt(req);
+               kernel_fpu_end();
+@@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_r
+       int ret;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+-      struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+       if (!irq_fpu_usable()) {
+               struct aead_request *cryptd_req =
+@@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_r
+               aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+               return crypto_aead_decrypt(cryptd_req);
+       } else {
++              struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+               kernel_fpu_begin();
+               ret = cryptd_child->base.crt_aead.decrypt(req);
+               kernel_fpu_end();
index 345839aa7fcf3cb5652059c584cd93003ea16ee1..7f1fa22c176f45beefa8faa21c74adedcd7550b6 100644 (file)
@@ -14,3 +14,4 @@ pci-acpi-report-aspm-support-to-bios-if-not-disabled-from-command-line.patch
 x86-64-mm-put-early-page-table-high.patch
 ecryptfs-unlock-page-in-write_begin-error-path.patch
 ecryptfs-ecryptfs_keyring_auth_tok_for_sig-bug-fix.patch
+crypto-aesni-intel-fixed-problem-with-packets-that-are-not-multiple-of-64bytes.patch