From 6c24c4ca0ba16bcaf51d3a4ab363280b8570ee60 Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@google.com>
Date: Wed, 17 Oct 2018 21:37:58 -0700
Subject: crypto: aes_ti - disable interrupts while accessing S-box

[ Upstream commit 0a6a40c2a8c184a2fb467efacfb1cd338d719e0b ]

In the "aes-fixed-time" AES implementation, disable interrupts while
accessing the S-box, in order to make cache-timing attacks more
difficult.  Previously it was possible for the CPU to be interrupted
while the S-box was loaded into L1 cache, potentially evicting the
cachelines and causing later table lookups to be time-variant.

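Concretely, each encrypt/decrypt call now wraps the table accesses in
the usual local_irq_save()/local_irq_restore() pair. As a minimal
sketch of the pattern (illustrative only; the exact code is in the
diff below):

	unsigned long flags;

	local_irq_save(flags);     /* IRQs off: prefetched S-box lines stay in L1 */
	/* ... prefetch the S-box, then do the data-dependent lookups ... */
	local_irq_restore(flags);  /* restore the previously saved IRQ state */
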
In tests I did on x86 and ARM, this doesn't affect performance
significantly.  Responsiveness is potentially a concern, but interrupts
are only disabled for a single AES block.

Note that even after this change, the implementation still isn't
necessarily guaranteed to be constant-time; see
https://cr.yp.to/antiforgery/cachetiming-20050414.pdf for a discussion
of the many difficulties involved in writing truly constant-time AES
software.  But it's valuable to make such attacks more difficult.

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 crypto/Kconfig  |  3 ++-
 crypto/aes_ti.c | 18 ++++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 5579eb88d460..84f99f8eca4b 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -930,7 +930,8 @@ config CRYPTO_AES_TI
 	  8 for decryption), this implementation only uses just two S-boxes of
 	  256 bytes each, and attempts to eliminate data dependent latencies by
 	  prefetching the entire table into the cache at the start of each
-	  block.
+	  block. Interrupts are also disabled to avoid races where cachelines
+	  are evicted when the CPU is interrupted to do something else.
 
 config CRYPTO_AES_586
 	tristate "AES cipher algorithms (i586)"
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 03023b2290e8..1ff9785b30f5 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_enc + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
 	st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
 	st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	const u32 *rkp = ctx->key_dec + 4;
 	int rounds = 6 + ctx->key_length / 4;
 	u32 st0[4], st1[4];
+	unsigned long flags;
 	int round;
 
 	st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
 	st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
 
+	/*
+	 * Temporarily disable interrupts to avoid races where cachelines are
+	 * evicted when the CPU is interrupted to do something else.
+	 */
+	local_irq_save(flags);
+
 	st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
 	st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
 	st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
 	put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
 	put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+	local_irq_restore(flags);
 }
 
 static struct crypto_alg aes_alg = {
-- 
2.19.1