1 From 57fe93b374a6b8711995c2d466c502af9f3a08bb Mon Sep 17 00:00:00 2001
2 From: David S. Miller <davem@davemloft.net>
3 Date: Wed, 10 Nov 2010 10:38:24 -0800
4 Subject: filter: make sure filters don't read uninitialized memory
6 From: David S. Miller <davem@davemloft.net>
8 commit 57fe93b374a6b8711995c2d466c502af9f3a08bb upstream.
10 There is a possibility malicious users can get limited information about
11 uninitialized stack mem array. Even if sk_run_filter() result is bound
12 to packet length (0 .. 65535), we could imagine this can be used by hostile users.
15 Initializing mem[] array, like Dan Rosenberg suggested in his patch is
16 expensive since most filters don't even use this array.
18 It's hard to make the filter validation in sk_chk_filter(), because of
19 the jumps. This might be done later.
21 In this patch, I use a bitmap (a single long var) so that only filters
22 using mem[] loads/stores pay the price of added security checks.
24 For other filters, additional cost is a single instruction.
26 [ Since we access fentry->k a lot now, cache it in a local variable
27 and mark filter entry pointer as const. -DaveM ]
29 Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
30 Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
31 Signed-off-by: David S. Miller <davem@davemloft.net>
32 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
35 net/core/filter.c | 64 +++++++++++++++++++++++++++++-------------------------
36 1 file changed, 35 insertions(+), 29 deletions(-)
38 --- a/net/core/filter.c
39 +++ b/net/core/filter.c
40 @@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
42 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
44 - struct sock_filter *fentry; /* We walk down these */
46 u32 A = 0; /* Accumulator */
47 u32 X = 0; /* Index Register */
48 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
49 + unsigned long memvalid = 0;
54 + BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
56 * Process array of filter instructions.
58 for (pc = 0; pc < flen; pc++) {
59 - fentry = &filter[pc];
60 + const struct sock_filter *fentry = &filter[pc];
61 + u32 f_k = fentry->k;
63 switch (fentry->code) {
87 @@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buf
108 case BPF_S_ALU_LSH_X:
111 case BPF_S_ALU_LSH_K:
115 case BPF_S_ALU_RSH_X:
118 case BPF_S_ALU_RSH_K:
129 case BPF_S_JMP_JGT_K:
130 - pc += (A > fentry->k) ? fentry->jt : fentry->jf;
131 + pc += (A > f_k) ? fentry->jt : fentry->jf;
133 case BPF_S_JMP_JGE_K:
134 - pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
135 + pc += (A >= f_k) ? fentry->jt : fentry->jf;
137 case BPF_S_JMP_JEQ_K:
138 - pc += (A == fentry->k) ? fentry->jt : fentry->jf;
139 + pc += (A == f_k) ? fentry->jt : fentry->jf;
141 case BPF_S_JMP_JSET_K:
142 - pc += (A & fentry->k) ? fentry->jt : fentry->jf;
143 + pc += (A & f_k) ? fentry->jt : fentry->jf;
145 case BPF_S_JMP_JGT_X:
146 pc += (A > X) ? fentry->jt : fentry->jf;
147 @@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buf
148 pc += (A & X) ? fentry->jt : fentry->jf;
154 ptr = load_pointer(skb, k, 4, &tmp);
156 @@ -218,7 +220,7 @@ load_w:
163 ptr = load_pointer(skb, k, 2, &tmp);
165 @@ -227,7 +229,7 @@ load_h:
172 ptr = load_pointer(skb, k, 1, &tmp);
174 @@ -242,32 +244,34 @@ load_b:
189 case BPF_S_LDX_B_MSH:
190 - ptr = load_pointer(skb, fentry->k, 1, &tmp);
191 + ptr = load_pointer(skb, f_k, 1, &tmp);
193 X = (*(u8 *)ptr & 0xf) << 2;
206 - A = mem[fentry->k];
207 + A = (memvalid & (1UL << f_k)) ?
211 - X = mem[fentry->k];
212 + X = (memvalid & (1UL << f_k)) ?
217 @@ -276,14 +280,16 @@ load_b:
226 - mem[fentry->k] = A;
227 + memvalid |= 1UL << f_k;
231 - mem[fentry->k] = X;
232 + memvalid |= 1UL << f_k;