From 1c2c7029c008922d4d48902cc386250502e73d51 Mon Sep 17 00:00:00 2001
From: Harald Freudenberger <freude@linux.ibm.com>
Date: Mon, 27 May 2019 15:24:20 +0200
Subject: s390/crypto: fix possible sleep while spinlock is acquired

From: Harald Freudenberger <freude@linux.ibm.com>

commit 1c2c7029c008922d4d48902cc386250502e73d51 upstream.

This patch fixes a complaint about a possible sleep while a
spinlock is held:
"BUG: sleeping function called from invalid context at
include/crypto/algapi.h:426"
for the ctr(aes) and ctr(des) s390-specific ciphers.

Instead of using a spinlock, this patch introduces a mutex,
which is safe to hold in sleeping context. Please note that a
deadlock is not possible, as mutex_trylock() is used.
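
As an illustration only (not part of the patch), here is a minimal
userspace sketch of the trylock-with-fallback pattern described above;
names such as shared_ctrblk, local_ctrblk and process_blocks are
hypothetical. The shared counter-block buffer is used only if its lock
can be taken without blocking; otherwise a small local buffer is used,
so the caller never sleeps waiting for the lock and no deadlock can
occur.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Shared multi-block counter buffer, analogous to ctrblk in the patch. */
static unsigned char shared_ctrblk[4 * BLOCK_SIZE];
static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_blocks(const unsigned char *src, size_t len)
{
	unsigned char local_ctrblk[BLOCK_SIZE];
	unsigned char *ctrptr;
	int locked;

	/* Never blocks: either the shared buffer is ours or it is not. */
	locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);
	ctrptr = locked ? shared_ctrblk : local_ctrblk;

	/* ... use ctrptr as counter-block storage while processing src ... */
	(void)src;
	(void)len;
	memset(ctrptr, 0, BLOCK_SIZE);

	if (locked)
		pthread_mutex_unlock(&ctrblk_lock);
}

int main(void)
{
	unsigned char data[BLOCK_SIZE] = { 0 };

	process_blocks(data, sizeof(data));
	printf("done\n");
	return 0;
}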

Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/s390/crypto/aes_s390.c |    8 ++++----
 arch/s390/crypto/des_s390.c |    7 ++++---
 2 files changed, 8 insertions(+), 7 deletions(-)

--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/fips.h>
 #include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
 		    kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkciphe
 	unsigned int n, nbytes;
 	int ret, locked;
 
-	locked = spin_trylock(&ctrblk_lock);
+	locked = mutex_trylock(&ctrblk_lock);
 
 	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkciphe
 		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/fips.h>
+#include <linux/mutex.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
 #include <asm/cpacf.h>
@@ -21,7 +22,7 @@
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -387,7 +388,7 @@ static int ctr_desall_crypt(struct blkci
 	unsigned int n, nbytes;
 	int ret, locked;
 
-	locked = spin_trylock(&ctrblk_lock);
+	locked = mutex_trylock(&ctrblk_lock);
 
 	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -404,7 +405,7 @@ static int ctr_desall_crypt(struct blkci
 		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
 	if (nbytes) {
 		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,