From foo@baz Wed Nov 21 18:50:39 CET 2018
From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Date: Wed, 26 Apr 2017 17:11:32 +0100
Subject: crypto: arm64/sha - avoid non-standard inline asm tricks

From: Ard Biesheuvel <ard.biesheuvel@linaro.org>

commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.

Replace the inline asm which exports struct offsets as ELF symbols
with proper const variables exposing the same values. This works
around an issue with Clang which does not interpret the "i" (or "I")
constraints in the same way as GCC.

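For illustration only, a minimal standalone C sketch of the removed
construct and its replacement (the struct and symbol names here are
made up, not taken from the patched files). Built with GCC for arm64
the inline asm variant works because offsetof() is an integer constant
expression, but Clang may reject the "I" constraint:

  #include <stddef.h>

  struct state {
          unsigned long count;
          int finalize;
  };

  /* old scheme: export the offset as a global ELF symbol via inline asm */
  #define ASM_EXPORT(sym, val) \
          asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));

  void export_offsets(void)
  {
          ASM_EXPORT(old_offsetof_finalize,
                     offsetof(struct state, finalize));
  }

  /* new scheme: a const object the asm side loads at runtime via ldr_l */
  const unsigned int new_offsetof_finalize =
          offsetof(struct state, finalize);

The const-variable form costs one extra load at runtime (visible below
as the added ldr via the ldr_l macro), but it is plain C that both
compilers handle identically.
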
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Matthias Kaehlcke <mka@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/crypto/sha1-ce-core.S |    6 ++++--
 arch/arm64/crypto/sha1-ce-glue.c |   11 +++--------
 arch/arm64/crypto/sha2-ce-core.S |    6 ++++--
 arch/arm64/crypto/sha2-ce-glue.c |   13 +++++--------
 4 files changed, 16 insertions(+), 20 deletions(-)

--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
 	ldr	dgb, [x0, #16]
 
 	/* load sha1_ce_state::finalize */
-	ldr	w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
+	ldr_l	w4, sha1_ce_offsetof_finalize, x4
+	ldr	w4, [x0, x4]
 
 	/* load input */
 0:	ld1	{v8.4s-v11.4s}, [x1], #64
@@ -132,7 +133,8 @@ CPU_LE(	rev32	v11.16b, v11.16b	)
 	 * the padding is handled by the C code in that case.
 	 */
 	cbz	x4, 3f
-	ldr	x4, [x0, #:lo12:sha1_ce_offsetof_count]
+	ldr_l	w4, sha1_ce_offsetof_count, x4
+	ldr	x4, [x0, x4]
 	movi	v9.2d, #0
 	mov	x8, #0x80000000
 	movi	v10.2d, #0
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val)		\
-	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,9 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
 				  int blocks);
 
+const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
+const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
+
 static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 			  unsigned int len)
 {
@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_de
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	ASM_EXPORT(sha1_ce_offsetof_count,
-		   offsetof(struct sha1_ce_state, sst.count));
-	ASM_EXPORT(sha1_ce_offsetof_finalize,
-		   offsetof(struct sha1_ce_state, finalize));
-
 	/*
 	 * Allow the asm code to perform the finalization if there is no
 	 * partial data and the input is a round multiple of the block size.
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
 	ld1	{dgav.4s, dgbv.4s}, [x0]
 
 	/* load sha256_ce_state::finalize */
-	ldr	w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
+	ldr_l	w4, sha256_ce_offsetof_finalize, x4
+	ldr	w4, [x0, x4]
 
 	/* load input */
 0:	ld1	{v16.4s-v19.4s}, [x1], #64
@@ -136,7 +137,8 @@ CPU_LE(	rev32	v19.16b, v19.16b	)
 	 * the padding is handled by the C code in that case.
 	 */
 	cbz	x4, 3f
-	ldr	x4, [x0, #:lo12:sha256_ce_offsetof_count]
+	ldr_l	w4, sha256_ce_offsetof_count, x4
+	ldr	x4, [x0, x4]
 	movi	v17.2d, #0
 	mov	x8, #0x80000000
 	movi	v18.2d, #0
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val)		\
-	asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,11 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
+					      sst.count);
+const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
+					      finalize);
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	ASM_EXPORT(sha256_ce_offsetof_count,
-		   offsetof(struct sha256_ce_state, sst.count));
-	ASM_EXPORT(sha256_ce_offsetof_finalize,
-		   offsetof(struct sha256_ce_state, finalize));
-
 	/*
	 * Allow the asm code to perform the finalization if there is no
	 * partial data and the input is a round multiple of the block size.