git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.7-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Jan 2013 17:10:57 +0000 (09:10 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 9 Jan 2013 17:10:57 +0000 (09:10 -0800)
added patches:
sparc64-fix-aes-ctr-mode-block-size.patch
sparc64-fix-ecb-looping-constructs-in-aes-code.patch
sparc64-fix-unrolled-aes-256-bit-key-loops.patch
sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch
sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch
sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch
sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch

queue-3.7/series
queue-3.7/sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch [new file with mode: 0644]
queue-3.7/sparc64-fix-aes-ctr-mode-block-size.patch [new file with mode: 0644]
queue-3.7/sparc64-fix-ecb-looping-constructs-in-aes-code.patch [new file with mode: 0644]
queue-3.7/sparc64-fix-unrolled-aes-256-bit-key-loops.patch [new file with mode: 0644]
queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch [new file with mode: 0644]
queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch [new file with mode: 0644]
queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch [new file with mode: 0644]

diff --git a/queue-3.7/series b/queue-3.7/series
index 20aa401f20179328476f299b51efa1a5ac760610..a5b3603f531939c5bcc062a2440d6435f51603ca 100644
--- a/queue-3.7/series
+++ b/queue-3.7/series
@@ -74,3 +74,10 @@ mm-hugetlb-create-hugetlb-cgroup-file-in-hugetlb_init.patch
 staging-drm-omap-fix-include-error-during-make.patch
 smb3-mounts-fail-with-access-denied-to-some-servers.patch
 freezer-add-missing-mb-s-to-freezer_count-and-freezer_should_skip.patch
+sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch
+sparc64-fix-unrolled-aes-256-bit-key-loops.patch
+sparc64-fix-aes-ctr-mode-block-size.patch
+sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch
+sparc64-fix-ecb-looping-constructs-in-aes-code.patch
+sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch
+sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch
diff --git a/queue-3.7/sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch b/queue-3.7/sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch
new file mode 100644
index 0000000..320044a
--- /dev/null
+++ b/queue-3.7/sparc-huge_ptep_set_-functions-need-to-call-set_huge_pte_at.patch
@@ -0,0 +1,49 @@
+From 42a4fb9082d9fdc88c353fff81e7e5ee8a17dbde Mon Sep 17 00:00:00 2001
+From: Dave Kleikamp <dave.kleikamp@oracle.com>
+Date: Mon, 17 Dec 2012 11:52:47 -0600
+Subject: sparc: huge_ptep_set_* functions need to call set_huge_pte_at()
+
+
+From: Dave Kleikamp <dave.kleikamp@oracle.com>
+
+[ Upstream commit 6cb9c3697585c47977c42c5cc1b9fc49247ac530 ]
+
+Modifying the huge pte's requires that all the underlying pte's be
+modified.
+
+Version 2: added missing flush_tlb_page()
+
+Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: sparclinux@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/hugetlb.h |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(p
+ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+ {
+-      ptep_set_wrprotect(mm, addr, ptep);
++      pte_t old_pte = *ptep;
++      set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ }
+ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+ {
+-      return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
++      int changed = !pte_same(*ptep, pte);
++      if (changed) {
++              set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
++              flush_tlb_page(vma, addr);
++      }
++      return changed;
+ }
+ static inline pte_t huge_ptep_get(pte_t *ptep)
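
Why these helpers must go through set_huge_pte_at(): on sparc64 a huge page is
mapped by a whole run of ordinary PTEs, and set_huge_pte_at() is the routine
that walks that run, whereas ptep_set_wrprotect()/ptep_set_access_flags() touch
only a single entry. The following is a simplified, illustrative sketch of that
fan-out, not the actual arch/sparc/mm/hugetlbpage.c source:

/* Sketch only: apply one huge-pte update to every underlying PTE.
 * HPAGE_MASK and HUGETLB_PAGE_ORDER stand in for the real sparc64
 * definitions. */
static void sketch_set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t entry)
{
        int i;

        addr &= HPAGE_MASK;                     /* start of the huge page   */
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;                         /* next underlying PTE slot */
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;    /* next small-page frame    */
        }
}

Updating only the first entry, as the old single-pte helpers effectively did,
leaves the rest of the run with stale permissions; the flush_tlb_page() added
to huge_ptep_set_access_flags() then makes sure no stale translation lingers
in the TLB either.
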
diff --git a/queue-3.7/sparc64-fix-aes-ctr-mode-block-size.patch b/queue-3.7/sparc64-fix-aes-ctr-mode-block-size.patch
new file mode 100644
index 0000000..be5a8c7
--- /dev/null
+++ b/queue-3.7/sparc64-fix-aes-ctr-mode-block-size.patch
@@ -0,0 +1,81 @@
+From eaf6348355bd59598365ef09ebd31f934f202a29 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:20:23 -0800
+Subject: sparc64: Fix AES ctr mode block size.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit a8d97cef2168ffe5af1aeed6bf6cdc3ce53f3d0b ]
+
+Like the generic versions, we need to support a block size
+of '1' for CTR mode AES.
+
+This was discovered thanks to all of the new test cases added by
+Jussi Kivilinna.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/aes_glue.c |   27 ++++++++++++++++++++++++---
+ 1 file changed, 24 insertions(+), 3 deletions(-)
+
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -329,6 +329,22 @@ static int cbc_decrypt(struct blkcipher_
+       return err;
+ }
++static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
++                          struct blkcipher_walk *walk)
++{
++      u8 *ctrblk = walk->iv;
++      u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
++      u8 *src = walk->src.virt.addr;
++      u8 *dst = walk->dst.virt.addr;
++      unsigned int nbytes = walk->nbytes;
++
++      ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
++                            keystream, AES_BLOCK_SIZE);
++      crypto_xor((u8 *) keystream, src, nbytes);
++      memcpy(dst, keystream, nbytes);
++      crypto_inc(ctrblk, AES_BLOCK_SIZE);
++}
++
+ static int ctr_crypt(struct blkcipher_desc *desc,
+                    struct scatterlist *dst, struct scatterlist *src,
+                    unsigned int nbytes)
+@@ -338,10 +354,11 @@ static int ctr_crypt(struct blkcipher_de
+       int err;
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+-      err = blkcipher_walk_virt(desc, &walk);
++      err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+-      while ((nbytes = walk.nbytes)) {
++      while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+               unsigned int block_len = nbytes & AES_BLOCK_MASK;
+               if (likely(block_len)) {
+@@ -353,6 +370,10 @@ static int ctr_crypt(struct blkcipher_de
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
++      if (walk.nbytes) {
++              ctr_crypt_final(ctx, &walk);
++              err = blkcipher_walk_done(desc, &walk, 0);
++      }
+       fprs_write(0);
+       return err;
+ }
+@@ -418,7 +439,7 @@ static struct crypto_alg algs[] = { {
+       .cra_driver_name        = "ctr-aes-sparc64",
+       .cra_priority           = SPARC_CR_OPCODE_PRIORITY,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+-      .cra_blocksize          = AES_BLOCK_SIZE,
++      .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct crypto_sparc64_aes_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
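
Dropping .cra_blocksize to 1 is what allows the crypto layer to hand this
driver requests whose length is not a multiple of 16 bytes; the new
ctr_crypt_final() then encrypts one more counter block and XORs only the bytes
that remain, matching the behaviour of the generic ctr(aes) template. A small
self-contained illustration of that final-block handling, in plain C with a
placeholder block function rather than the kernel API:

#include <stdint.h>

#define BLOCK_SIZE 16

/* Placeholder "block cipher" so the example compiles on its own -- NOT AES.
 * In the driver this step is ctx->ops->ecb_encrypt() applied to the counter. */
static void encrypt_block(const uint8_t ctr[BLOCK_SIZE],
                          uint8_t keystream[BLOCK_SIZE])
{
        int i;

        for (i = 0; i < BLOCK_SIZE; i++)
                keystream[i] = (uint8_t)(ctr[i] ^ (0xA5 + i));
}

/* CTR tail handling: for a final partial block (nbytes < BLOCK_SIZE),
 * generate one keystream block and XOR just the bytes that are left. */
static void ctr_final(uint8_t ctr[BLOCK_SIZE], const uint8_t *src,
                      uint8_t *dst, unsigned int nbytes)
{
        uint8_t keystream[BLOCK_SIZE];
        unsigned int i;

        encrypt_block(ctr, keystream);
        for (i = 0; i < nbytes; i++)
                dst[i] = src[i] ^ keystream[i];
}

Because CTR encryption and decryption are the same XOR-with-keystream
operation, the single ctr_crypt() routine in the patch serves both directions.
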
diff --git a/queue-3.7/sparc64-fix-ecb-looping-constructs-in-aes-code.patch b/queue-3.7/sparc64-fix-ecb-looping-constructs-in-aes-code.patch
new file mode 100644
index 0000000..3ee7b96
--- /dev/null
+++ b/queue-3.7/sparc64-fix-ecb-looping-constructs-in-aes-code.patch
@@ -0,0 +1,29 @@
+From 8140e3d5ac6d77113f515221de4ffcf925d74196 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:30:07 -0800
+Subject: sparc64: Fix ECB looping constructs in AES code.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit ce6889515d5d481a5bd8ce5913dfed18f08310ea ]
+
+Things work better when you increment the source buffer pointer
+properly.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/des_asm.S |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/sparc/crypto/des_asm.S
++++ b/arch/sparc/crypto/des_asm.S
+@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt)
+ 1:    ldd     [%o1 + 0x00], %f60
+       DES3_LOOP_BODY(60)
+       std     %f60, [%o2 + 0x00]
++      add     %o1, 0x08, %o1
+       subcc   %o3, 0x08, %o3
+       bne,pt  %icc, 1b
+        add    %o2, 0x08, %o2
diff --git a/queue-3.7/sparc64-fix-unrolled-aes-256-bit-key-loops.patch b/queue-3.7/sparc64-fix-unrolled-aes-256-bit-key-loops.patch
new file mode 100644
index 0000000..3fa0664
--- /dev/null
+++ b/queue-3.7/sparc64-fix-unrolled-aes-256-bit-key-loops.patch
@@ -0,0 +1,92 @@
+From 436a669c393b665dc201a86973a6c72d6cce1796 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:19:11 -0800
+Subject: sparc64: Fix unrolled AES 256-bit key loops.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 9f28ffc03e93343ac04874fda9edb7affea45165 ]
+
+The basic scheme of the block mode assembler is that we start by
+enabling the FPU, loading the key into the floating point registers,
+then iterate calling the encrypt/decrypt routine for each block.
+
+For the 256-bit key cases, we run short on registers in the unrolled
+loops.
+
+So the {ENCRYPT,DECRYPT}_256_2() macros reload the key registers that
+get clobbered.
+
+The unrolled macros, {ENCRYPT,DECRYPT}_256(), are not mindful of this.
+
+So if we have a mix of multi-block and single-block calls, the
+single-block unrolled 256-bit encrypt/decrypt can run with some
+of the key registers clobbered.
+
+Handle this by always explicitly loading those registers before using
+the non-unrolled 256-bit macro.
+
+This was discovered thanks to all of the new test cases added by
+Jussi Kivilinna.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/aes_asm.S |   20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/arch/sparc/crypto/aes_asm.S
++++ b/arch/sparc/crypto/aes_asm.S
+@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256)
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+-10:   ldx             [%o1 + 0x00], %g3
++10:   ldd             [%o0 + 0xd0], %f56
++      ldd             [%o0 + 0xd8], %f58
++      ldd             [%o0 + 0xe0], %f60
++      ldd             [%o0 + 0xe8], %f62
++      ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
+       /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
+       ldx             [%o0 - 0x10], %g1
+       subcc           %o3, 0x10, %o3
++      ldx             [%o0 - 0x08], %g2
+       be              10f
+-       ldx            [%o0 - 0x08], %g2
+-      sub             %o0, 0xf0, %o0
++       sub            %o0, 0xf0, %o0
+ 1:    ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       ldx             [%o1 + 0x10], %o4
+@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256)
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+-10:   ldx             [%o1 + 0x00], %g3
++10:   ldd             [%o0 + 0x18], %f56
++      ldd             [%o0 + 0x10], %f58
++      ldd             [%o0 + 0x08], %f60
++      ldd             [%o0 + 0x00], %f62
++      ldx             [%o1 + 0x00], %g3
+       ldx             [%o1 + 0x08], %g7
+       xor             %g1, %g3, %g3
+       xor             %g2, %g7, %g7
+@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256)
+        add            %o2, 0x20, %o2
+       brlz,pt         %o3, 11f
+        nop
+-      ldd             [%o0 + 0xd0], %f56
++10:   ldd             [%o0 + 0xd0], %f56
+       ldd             [%o0 + 0xd8], %f58
+       ldd             [%o0 + 0xe0], %f60
+       ldd             [%o0 + 0xe8], %f62
+-10:   xor             %g1, %g3, %o5
++      xor             %g1, %g3, %o5
+       MOVXTOD_O5_F0
+       xor             %g2, %g7, %o5
+       MOVXTOD_O5_F2
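
The register-lifetime hazard described in the changelog can be modelled in a
few lines of plain C, with an array standing in for the FP registers that hold
the key schedule (the sizes and values below are made up for illustration; the
real fix is the ldd reloads of %f56-%f62 in the hunks above):

#include <stdio.h>
#include <string.h>

#define NKEY 16                           /* model of the key-holding FP regs */

static unsigned long fpregs[NKEY];        /* "registers", live for one call   */
static unsigned long expanded_key[NKEY];  /* key schedule kept in memory      */

static void load_keys(void)               /* the load_*_keys() step           */
{
        memcpy(fpregs, expanded_key, sizeof(fpregs));
}

/* Unrolled two-block path: borrows the last slots as round scratch (and
 * reloads them internally for its own rounds), so on return they no longer
 * hold key material. */
static void crypt_two_blocks(void)
{
        fpregs[NKEY - 2] = 0xdead;
        fpregs[NKEY - 1] = 0xbeef;
}

/* Plain single-block path: assumes every slot still holds key data. */
static unsigned long crypt_one_block(unsigned long in)
{
        return in ^ fpregs[NKEY - 1];
}

int main(void)
{
        unsigned long ref, got;
        int i;

        for (i = 0; i < NKEY; i++)
                expanded_key[i] = i + 1;

        load_keys();
        ref = crypt_one_block(0x1234);           /* clean key registers       */

        load_keys();
        crypt_two_blocks();                      /* multi-block work first... */
        got = crypt_one_block(0x1234);           /* ...then a lone block      */
        printf("without reload: %s\n", got == ref ? "ok" : "WRONG");

        /* The fix, mirroring the added ldd instructions: re-establish the
         * clobbered key slots before the non-unrolled path runs. */
        memcpy(&fpregs[NKEY - 2], &expanded_key[NKEY - 2], 2 * sizeof(fpregs[0]));
        got = crypt_one_block(0x1234);
        printf("with reload:    %s\n", got == ref ? "ok" : "WRONG");
        return 0;
}
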
diff --git a/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch
new file mode 100644
index 0000000..dda86e1
--- /dev/null
+++ b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-aes-code.patch
@@ -0,0 +1,53 @@
+From 638ac2bca11a3c1d90d0ba8effa2bad7f77ff8e8 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:22:03 -0800
+Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in AES code.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit b35d282ef7345320b594d48d8d70caedfa962a01 ]
+
+We use the FPU and therefore cannot sleep during the crypto
+loops.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/aes_glue.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+       while ((nbytes = walk.nbytes)) {
+@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->ops->load_decrypt_keys(&ctx->key[0]);
+       key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
+@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->ops->load_encrypt_keys(&ctx->key[0]);
+       while ((nbytes = walk.nbytes)) {
+@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       ctx->ops->load_decrypt_keys(&ctx->key[0]);
+       key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
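
The flag has to be cleared explicitly because the blkcipher layer copies the
caller's request flags into desc->flags, and the walk helpers consult
CRYPTO_TFM_REQ_MAY_SLEEP when deciding whether they may reschedule or perform
sleeping allocations between chunks. This driver keeps live FPU state from
load_encrypt_keys()/load_decrypt_keys() until fprs_write(0), and sleeping in
that window could lose it. Schematically, the shape each hunk above
establishes is (a sketch of the existing pattern, not a new driver):

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        /* The caller may have set CRYPTO_TFM_REQ_MAY_SLEEP; drop it so that
         * blkcipher_walk_done() never sleeps while FPU state is live. */
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        ctx->ops->load_encrypt_keys(&ctx->key[0]);       /* FPU state live */
        while ((nbytes = walk.nbytes)) {
                /* ... crypt walk.src.virt.addr into walk.dst.virt.addr ... */
                err = blkcipher_walk_done(desc, &walk,
                                          nbytes & (AES_BLOCK_SIZE - 1));
        }
        fprs_write(0);                                   /* FPU released   */

The two patches that follow apply the same one-line change to the Camellia and
DES glue code for the same reason.
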
diff --git a/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch
new file mode 100644
index 0000000..a5cb7c8
--- /dev/null
+++ b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-camellia-code.patch
@@ -0,0 +1,45 @@
+From 802a6fe1b7b7cf19450c29fb349017694f620776 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:44:31 -0800
+Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in CAMELLIA code.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 62ba63dc892cf836ecb9ce4fdb7644d45c95070b ]
+
+We use the FPU and therefore cannot sleep during the crypto
+loops.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/camellia_glue.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       if (encrypt)
+               key = &ctx->encrypt_key[0];
+@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       key = &ctx->encrypt_key[0];
+       camellia_sparc64_load_keys(key, ctx->key_len);
+@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       key = &ctx->decrypt_key[0];
+       camellia_sparc64_load_keys(key, ctx->key_len);
diff --git a/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch
new file mode 100644
index 0000000..d3f1b3a
--- /dev/null
+++ b/queue-3.7/sparc64-set-crypto_tfm_req_may_sleep-consistently-in-des-code.patch
@@ -0,0 +1,69 @@
+From b1c40d4a8e7a651f6a0ec1e4041bb04b79b7793d Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 19 Dec 2012 15:43:38 -0800
+Subject: sparc64: Set CRYPTO_TFM_REQ_MAY_SLEEP consistently in DES code.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit b3a37947074fa0a488d6c7ede58125b2278ab4e8 ]
+
+We use the FPU and therefore cannot sleep during the crypto
+loops.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/crypto/des_glue.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       if (encrypt)
+               des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
+@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
+       while ((nbytes = walk.nbytes)) {
+@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
+       while ((nbytes = walk.nbytes)) {
+@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       if (encrypt)
+               K = &ctx->encrypt_expkey[0];
+@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       K = &ctx->encrypt_expkey[0];
+       des3_ede_sparc64_load_keys(K);
+@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
++      desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       K = &ctx->decrypt_expkey[0];
+       des3_ede_sparc64_load_keys(K);