]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/2.6.36.2/filter-make-sure-filters-dont-read-uninitialized-memory.patch
Remove duplicated commits
[thirdparty/kernel/stable-queue.git] / releases / 2.6.36.2 / filter-make-sure-filters-dont-read-uninitialized-memory.patch
1 From 57fe93b374a6b8711995c2d466c502af9f3a08bb Mon Sep 17 00:00:00 2001
2 From: David S. Miller <davem@davemloft.net>
3 Date: Wed, 10 Nov 2010 10:38:24 -0800
4 Subject: filter: make sure filters dont read uninitialized memory
5
6 From: David S. Miller <davem@davemloft.net>
7
8 commit 57fe93b374a6b8711995c2d466c502af9f3a08bb upstream.
9
10 There is a possibility that malicious users can get limited information about
11 the uninitialized stack mem[] array. Even if the sk_run_filter() result is bound
12 to packet length (0 .. 65535), we could imagine this can be used by a
13 hostile user.
14
15 Initializing the mem[] array, like Dan Rosenberg suggested in his patch, is
16 expensive since most filters don't even use this array.
17
18 It's hard to perform this validation in sk_chk_filter(), because of
19 the jumps. This might be done later.
20
21 In this patch, I use a bitmap (a single long var) so that only filters
22 using mem[] loads/stores pay the price of added security checks.
23
24 For other filters, additional cost is a single instruction.
25
26 [ Since we access fentry->k a lot now, cache it in a local variable
27 and mark filter entry pointer as const. -DaveM ]
28
29 Reported-by: Dan Rosenberg <drosenberg@vsecurity.com>
30 Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
31 Signed-off-by: David S. Miller <davem@davemloft.net>
32 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
33
34 ---
35 net/core/filter.c | 64 +++++++++++++++++++++++++++++-------------------------
36 1 file changed, 35 insertions(+), 29 deletions(-)
37
38 --- a/net/core/filter.c
39 +++ b/net/core/filter.c
40 @@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
41 */
42 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
43 {
44 - struct sock_filter *fentry; /* We walk down these */
45 void *ptr;
46 u32 A = 0; /* Accumulator */
47 u32 X = 0; /* Index Register */
48 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
49 + unsigned long memvalid = 0;
50 u32 tmp;
51 int k;
52 int pc;
53
54 + BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
55 /*
56 * Process array of filter instructions.
57 */
58 for (pc = 0; pc < flen; pc++) {
59 - fentry = &filter[pc];
60 + const struct sock_filter *fentry = &filter[pc];
61 + u32 f_k = fentry->k;
62
63 switch (fentry->code) {
64 case BPF_S_ALU_ADD_X:
65 A += X;
66 continue;
67 case BPF_S_ALU_ADD_K:
68 - A += fentry->k;
69 + A += f_k;
70 continue;
71 case BPF_S_ALU_SUB_X:
72 A -= X;
73 continue;
74 case BPF_S_ALU_SUB_K:
75 - A -= fentry->k;
76 + A -= f_k;
77 continue;
78 case BPF_S_ALU_MUL_X:
79 A *= X;
80 continue;
81 case BPF_S_ALU_MUL_K:
82 - A *= fentry->k;
83 + A *= f_k;
84 continue;
85 case BPF_S_ALU_DIV_X:
86 if (X == 0)
87 @@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buf
88 A /= X;
89 continue;
90 case BPF_S_ALU_DIV_K:
91 - A /= fentry->k;
92 + A /= f_k;
93 continue;
94 case BPF_S_ALU_AND_X:
95 A &= X;
96 continue;
97 case BPF_S_ALU_AND_K:
98 - A &= fentry->k;
99 + A &= f_k;
100 continue;
101 case BPF_S_ALU_OR_X:
102 A |= X;
103 continue;
104 case BPF_S_ALU_OR_K:
105 - A |= fentry->k;
106 + A |= f_k;
107 continue;
108 case BPF_S_ALU_LSH_X:
109 A <<= X;
110 continue;
111 case BPF_S_ALU_LSH_K:
112 - A <<= fentry->k;
113 + A <<= f_k;
114 continue;
115 case BPF_S_ALU_RSH_X:
116 A >>= X;
117 continue;
118 case BPF_S_ALU_RSH_K:
119 - A >>= fentry->k;
120 + A >>= f_k;
121 continue;
122 case BPF_S_ALU_NEG:
123 A = -A;
124 continue;
125 case BPF_S_JMP_JA:
126 - pc += fentry->k;
127 + pc += f_k;
128 continue;
129 case BPF_S_JMP_JGT_K:
130 - pc += (A > fentry->k) ? fentry->jt : fentry->jf;
131 + pc += (A > f_k) ? fentry->jt : fentry->jf;
132 continue;
133 case BPF_S_JMP_JGE_K:
134 - pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
135 + pc += (A >= f_k) ? fentry->jt : fentry->jf;
136 continue;
137 case BPF_S_JMP_JEQ_K:
138 - pc += (A == fentry->k) ? fentry->jt : fentry->jf;
139 + pc += (A == f_k) ? fentry->jt : fentry->jf;
140 continue;
141 case BPF_S_JMP_JSET_K:
142 - pc += (A & fentry->k) ? fentry->jt : fentry->jf;
143 + pc += (A & f_k) ? fentry->jt : fentry->jf;
144 continue;
145 case BPF_S_JMP_JGT_X:
146 pc += (A > X) ? fentry->jt : fentry->jf;
147 @@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buf
148 pc += (A & X) ? fentry->jt : fentry->jf;
149 continue;
150 case BPF_S_LD_W_ABS:
151 - k = fentry->k;
152 + k = f_k;
153 load_w:
154 ptr = load_pointer(skb, k, 4, &tmp);
155 if (ptr != NULL) {
156 @@ -218,7 +220,7 @@ load_w:
157 }
158 break;
159 case BPF_S_LD_H_ABS:
160 - k = fentry->k;
161 + k = f_k;
162 load_h:
163 ptr = load_pointer(skb, k, 2, &tmp);
164 if (ptr != NULL) {
165 @@ -227,7 +229,7 @@ load_h:
166 }
167 break;
168 case BPF_S_LD_B_ABS:
169 - k = fentry->k;
170 + k = f_k;
171 load_b:
172 ptr = load_pointer(skb, k, 1, &tmp);
173 if (ptr != NULL) {
174 @@ -242,32 +244,34 @@ load_b:
175 X = skb->len;
176 continue;
177 case BPF_S_LD_W_IND:
178 - k = X + fentry->k;
179 + k = X + f_k;
180 goto load_w;
181 case BPF_S_LD_H_IND:
182 - k = X + fentry->k;
183 + k = X + f_k;
184 goto load_h;
185 case BPF_S_LD_B_IND:
186 - k = X + fentry->k;
187 + k = X + f_k;
188 goto load_b;
189 case BPF_S_LDX_B_MSH:
190 - ptr = load_pointer(skb, fentry->k, 1, &tmp);
191 + ptr = load_pointer(skb, f_k, 1, &tmp);
192 if (ptr != NULL) {
193 X = (*(u8 *)ptr & 0xf) << 2;
194 continue;
195 }
196 return 0;
197 case BPF_S_LD_IMM:
198 - A = fentry->k;
199 + A = f_k;
200 continue;
201 case BPF_S_LDX_IMM:
202 - X = fentry->k;
203 + X = f_k;
204 continue;
205 case BPF_S_LD_MEM:
206 - A = mem[fentry->k];
207 + A = (memvalid & (1UL << f_k)) ?
208 + mem[f_k] : 0;
209 continue;
210 case BPF_S_LDX_MEM:
211 - X = mem[fentry->k];
212 + X = (memvalid & (1UL << f_k)) ?
213 + mem[f_k] : 0;
214 continue;
215 case BPF_S_MISC_TAX:
216 X = A;
217 @@ -276,14 +280,16 @@ load_b:
218 A = X;
219 continue;
220 case BPF_S_RET_K:
221 - return fentry->k;
222 + return f_k;
223 case BPF_S_RET_A:
224 return A;
225 case BPF_S_ST:
226 - mem[fentry->k] = A;
227 + memvalid |= 1UL << f_k;
228 + mem[f_k] = A;
229 continue;
230 case BPF_S_STX:
231 - mem[fentry->k] = X;
232 + memvalid |= 1UL << f_k;
233 + mem[f_k] = X;
234 continue;
235 default:
236 WARN_ON(1);