]>
Commit | Line | Data |
---|---|---|
2cb7cef9 BS |
1 | From: Peter Zijlstra <a.p.zijlstra@chello.nl> |
2 | Subject: netvm: INET reserves. | |
3 | Patch-mainline: No | |
4 | References: FATE#303834 | |
5 | ||
6 | Add reserves for INET. | |
7 | ||
8 | The two big users seem to be the route cache and ip-fragment cache. | |
9 | ||
10 | Reserve the route cache under the generic RX reserve; its usage is bounded by | |
11 | the high reclaim watermark, and thus does not need further accounting. | |
12 | ||
13 | Reserve the ip-fragment caches under the SKB data reserve; these add to the | |
14 | SKB RX limit. By ensuring we can at least receive as much data as fits in | |
15 | the reassembly line we avoid fragment attack deadlocks. | |
16 | ||
17 | Adds to the reserve tree: | |
18 | ||
19 | total network reserve | |
20 | network TX reserve | |
21 | protocol TX pages | |
22 | network RX reserve | |
23 | + IPv6 route cache | |
24 | + IPv4 route cache | |
25 | SKB data reserve | |
26 | + IPv6 fragment cache | |
27 | + IPv4 fragment cache | |
28 | ||
29 | Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> | |
30 | Acked-by: Neil Brown <neilb@suse.de> | |
31 | Acked-by: Suresh Jayaraman <sjayaraman@suse.de> | |
32 | ||
33 | --- | |
34 | include/net/inet_frag.h | 7 +++ | |
35 | include/net/netns/ipv6.h | 4 ++ | |
36 | net/ipv4/inet_fragment.c | 3 + | |
37 | net/ipv4/ip_fragment.c | 87 +++++++++++++++++++++++++++++++++++++++++++++-- | |
38 | net/ipv4/route.c | 71 +++++++++++++++++++++++++++++++++++++- | |
39 | net/ipv6/reassembly.c | 86 +++++++++++++++++++++++++++++++++++++++++++++- | |
40 | net/ipv6/route.c | 78 +++++++++++++++++++++++++++++++++++++++++- | |
41 | 7 files changed, 329 insertions(+), 7 deletions(-) | |
42 | ||
43 | Index: linux-2.6.26/net/ipv4/ip_fragment.c | |
44 | =================================================================== | |
45 | --- linux-2.6.26.orig/net/ipv4/ip_fragment.c | |
46 | +++ linux-2.6.26/net/ipv4/ip_fragment.c | |
47 | @@ -42,6 +42,8 @@ | |
48 | #include <linux/udp.h> | |
49 | #include <linux/inet.h> | |
50 | #include <linux/netfilter_ipv4.h> | |
51 | +#include <linux/reserve.h> | |
52 | +#include <linux/nsproxy.h> | |
53 | ||
54 | /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 | |
55 | * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c | |
56 | @@ -599,6 +601,64 @@ int ip_defrag(struct sk_buff *skb, u32 u | |
57 | } | |
58 | ||
59 | #ifdef CONFIG_SYSCTL | |
60 | +static int proc_dointvec_fragment(struct ctl_table *table, int write, | |
61 | + struct file *filp, void __user *buffer, size_t *lenp, | |
62 | + loff_t *ppos) | |
63 | +{ | |
64 | + struct net *net = container_of(table->data, struct net, | |
65 | + ipv4.frags.high_thresh); | |
66 | + ctl_table tmp = *table; | |
67 | + int new_bytes, ret; | |
68 | + | |
69 | + mutex_lock(&net->ipv4.frags.lock); | |
70 | + if (write) { | |
71 | + tmp.data = &new_bytes; | |
72 | + table = &tmp; | |
73 | + } | |
74 | + | |
75 | + ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | |
76 | + | |
77 | + if (!ret && write) { | |
78 | + ret = mem_reserve_kmalloc_set(&net->ipv4.frags.reserve, | |
79 | + new_bytes); | |
80 | + if (!ret) | |
81 | + net->ipv4.frags.high_thresh = new_bytes; | |
82 | + } | |
83 | + mutex_unlock(&net->ipv4.frags.lock); | |
84 | + | |
85 | + return ret; | |
86 | +} | |
87 | + | |
88 | +static int sysctl_intvec_fragment(struct ctl_table *table, | |
89 | + int __user *name, int nlen, | |
90 | + void __user *oldval, size_t __user *oldlenp, | |
91 | + void __user *newval, size_t newlen) | |
92 | +{ | |
93 | + struct net *net = container_of(table->data, struct net, | |
94 | + ipv4.frags.high_thresh); | |
95 | + int write = (newval && newlen); | |
96 | + ctl_table tmp = *table; | |
97 | + int new_bytes, ret; | |
98 | + | |
99 | + mutex_lock(&net->ipv4.frags.lock); | |
100 | + if (write) { | |
101 | + tmp.data = &new_bytes; | |
102 | + table = &tmp; | |
103 | + } | |
104 | + | |
105 | + ret = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); | |
106 | + | |
107 | + if (!ret && write) { | |
108 | + ret = mem_reserve_kmalloc_set(&net->ipv4.frags.reserve, | |
109 | + new_bytes); | |
110 | + if (!ret) | |
111 | + net->ipv4.frags.high_thresh = new_bytes; | |
112 | + } | |
113 | + mutex_unlock(&net->ipv4.frags.lock); | |
114 | + | |
115 | + return ret; | |
116 | +} | |
117 | + | |
118 | static int zero; | |
119 | ||
120 | static struct ctl_table ip4_frags_ns_ctl_table[] = { | |
121 | @@ -608,7 +668,8 @@ static struct ctl_table ip4_frags_ns_ctl | |
122 | .data = &init_net.ipv4.frags.high_thresh, | |
123 | .maxlen = sizeof(int), | |
124 | .mode = 0644, | |
125 | - .proc_handler = &proc_dointvec | |
126 | + .proc_handler = &proc_dointvec_fragment, | |
127 | + .strategy = &sysctl_intvec_fragment, | |
128 | }, | |
129 | { | |
130 | .ctl_name = NET_IPV4_IPFRAG_LOW_THRESH, | |
131 | @@ -711,6 +772,8 @@ static inline void ip4_frags_ctl_registe | |
132 | ||
133 | static int ipv4_frags_init_net(struct net *net) | |
134 | { | |
135 | + int ret; | |
136 | + | |
137 | /* | |
138 | * Fragment cache limits. We will commit 256K at one time. Should we | |
139 | * cross that limit we will prune down to 192K. This should cope with | |
140 | @@ -728,11 +791,31 @@ static int ipv4_frags_init_net(struct ne | |
141 | ||
142 | inet_frags_init_net(&net->ipv4.frags); | |
143 | ||
144 | - return ip4_frags_ns_ctl_register(net); | |
145 | + ret = ip4_frags_ns_ctl_register(net); | |
146 | + if (ret) | |
147 | + goto out_reg; | |
148 | + | |
149 | + mem_reserve_init(&net->ipv4.frags.reserve, "IPv4 fragment cache", | |
150 | + &net_skb_reserve); | |
151 | + ret = mem_reserve_kmalloc_set(&net->ipv4.frags.reserve, | |
152 | + net->ipv4.frags.high_thresh); | |
153 | + if (ret) | |
154 | + goto out_reserve; | |
155 | + | |
156 | + return 0; | |
157 | + | |
158 | +out_reserve: | |
159 | + mem_reserve_disconnect(&net->ipv4.frags.reserve); | |
160 | + ip4_frags_ns_ctl_unregister(net); | |
161 | +out_reg: | |
162 | + inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); | |
163 | + | |
164 | + return ret; | |
165 | } | |
166 | ||
167 | static void ipv4_frags_exit_net(struct net *net) | |
168 | { | |
169 | + mem_reserve_disconnect(&net->ipv4.frags.reserve); | |
170 | ip4_frags_ns_ctl_unregister(net); | |
171 | inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); | |
172 | } | |
173 | Index: linux-2.6.26/net/ipv6/reassembly.c | |
174 | =================================================================== | |
175 | --- linux-2.6.26.orig/net/ipv6/reassembly.c | |
176 | +++ linux-2.6.26/net/ipv6/reassembly.c | |
177 | @@ -41,6 +41,7 @@ | |
178 | #include <linux/random.h> | |
179 | #include <linux/jhash.h> | |
180 | #include <linux/skbuff.h> | |
181 | +#include <linux/reserve.h> | |
182 | ||
183 | #include <net/sock.h> | |
184 | #include <net/snmp.h> | |
185 | @@ -632,6 +633,64 @@ static struct inet6_protocol frag_protoc | |
186 | }; | |
187 | ||
188 | #ifdef CONFIG_SYSCTL | |
189 | +static int proc_dointvec_fragment(struct ctl_table *table, int write, | |
190 | + struct file *filp, void __user *buffer, size_t *lenp, | |
191 | + loff_t *ppos) | |
192 | +{ | |
193 | + struct net *net = container_of(table->data, struct net, | |
194 | + ipv6.frags.high_thresh); | |
195 | + ctl_table tmp = *table; | |
196 | + int new_bytes, ret; | |
197 | + | |
198 | + mutex_lock(&net->ipv6.frags.lock); | |
199 | + if (write) { | |
200 | + tmp.data = &new_bytes; | |
201 | + table = &tmp; | |
202 | + } | |
203 | + | |
204 | + ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | |
205 | + | |
206 | + if (!ret && write) { | |
207 | + ret = mem_reserve_kmalloc_set(&net->ipv6.frags.reserve, | |
208 | + new_bytes); | |
209 | + if (!ret) | |
210 | + net->ipv6.frags.high_thresh = new_bytes; | |
211 | + } | |
212 | + mutex_unlock(&net->ipv6.frags.lock); | |
213 | + | |
214 | + return ret; | |
215 | +} | |
216 | + | |
217 | +static int sysctl_intvec_fragment(struct ctl_table *table, | |
218 | + int __user *name, int nlen, | |
219 | + void __user *oldval, size_t __user *oldlenp, | |
220 | + void __user *newval, size_t newlen) | |
221 | +{ | |
222 | + struct net *net = container_of(table->data, struct net, | |
223 | + ipv6.frags.high_thresh); | |
224 | + int write = (newval && newlen); | |
225 | + ctl_table tmp = *table; | |
226 | + int new_bytes, ret; | |
227 | + | |
228 | + mutex_lock(&net->ipv6.frags.lock); | |
229 | + if (write) { | |
230 | + tmp.data = &new_bytes; | |
231 | + table = &tmp; | |
232 | + } | |
233 | + | |
234 | + ret = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); | |
235 | + | |
236 | + if (!ret && write) { | |
237 | + ret = mem_reserve_kmalloc_set(&net->ipv6.frags.reserve, | |
238 | + new_bytes); | |
239 | + if (!ret) | |
240 | + net->ipv6.frags.high_thresh = new_bytes; | |
241 | + } | |
242 | + mutex_unlock(&net->ipv6.frags.lock); | |
243 | + | |
244 | + return ret; | |
245 | +} | |
246 | + | |
247 | static struct ctl_table ip6_frags_ns_ctl_table[] = { | |
248 | { | |
249 | .ctl_name = NET_IPV6_IP6FRAG_HIGH_THRESH, | |
250 | @@ -639,7 +698,8 @@ static struct ctl_table ip6_frags_ns_ctl | |
251 | .data = &init_net.ipv6.frags.high_thresh, | |
252 | .maxlen = sizeof(int), | |
253 | .mode = 0644, | |
254 | - .proc_handler = &proc_dointvec | |
255 | + .proc_handler = &proc_dointvec_fragment, | |
256 | + .strategy = &sysctl_intvec_fragment, | |
257 | }, | |
258 | { | |
259 | .ctl_name = NET_IPV6_IP6FRAG_LOW_THRESH, | |
260 | @@ -748,17 +808,39 @@ static inline void ip6_frags_sysctl_unre | |
261 | ||
262 | static int ipv6_frags_init_net(struct net *net) | |
263 | { | |
264 | + int ret; | |
265 | + | |
266 | net->ipv6.frags.high_thresh = 256 * 1024; | |
267 | net->ipv6.frags.low_thresh = 192 * 1024; | |
268 | net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; | |
269 | ||
270 | inet_frags_init_net(&net->ipv6.frags); | |
271 | ||
272 | - return ip6_frags_ns_sysctl_register(net); | |
273 | + ret = ip6_frags_ns_sysctl_register(net); | |
274 | + if (ret) | |
275 | + goto out_reg; | |
276 | + | |
277 | + mem_reserve_init(&net->ipv6.frags.reserve, "IPv6 fragment cache", | |
278 | + &net_skb_reserve); | |
279 | + ret = mem_reserve_kmalloc_set(&net->ipv6.frags.reserve, | |
280 | + net->ipv6.frags.high_thresh); | |
281 | + if (ret) | |
282 | + goto out_reserve; | |
283 | + | |
284 | + return 0; | |
285 | + | |
286 | +out_reserve: | |
287 | + mem_reserve_disconnect(&net->ipv6.frags.reserve); | |
288 | + ip6_frags_ns_sysctl_unregister(net); | |
289 | +out_reg: | |
290 | + inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); | |
291 | + | |
292 | + return ret; | |
293 | } | |
294 | ||
295 | static void ipv6_frags_exit_net(struct net *net) | |
296 | { | |
297 | + mem_reserve_disconnect(&net->ipv6.frags.reserve); | |
298 | ip6_frags_ns_sysctl_unregister(net); | |
299 | inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); | |
300 | } | |
301 | Index: linux-2.6.26/net/ipv4/route.c | |
302 | =================================================================== | |
303 | --- linux-2.6.26.orig/net/ipv4/route.c | |
304 | +++ linux-2.6.26/net/ipv4/route.c | |
305 | @@ -107,6 +107,7 @@ | |
306 | #ifdef CONFIG_SYSCTL | |
307 | #include <linux/sysctl.h> | |
308 | #endif | |
309 | +#include <linux/reserve.h> | |
310 | ||
311 | #define RT_FL_TOS(oldflp) \ | |
312 | ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) | |
313 | @@ -269,6 +270,8 @@ static inline int rt_genid(struct net *n | |
314 | return atomic_read(&net->ipv4.rt_genid); | |
315 | } | |
316 | ||
317 | +static struct mem_reserve ipv4_route_reserve; | |
318 | + | |
319 | #ifdef CONFIG_PROC_FS | |
320 | struct rt_cache_iter_state { | |
321 | struct seq_net_private p; | |
322 | @@ -393,6 +396,62 @@ static int rt_cache_seq_show(struct seq_ | |
323 | return 0; | |
324 | } | |
325 | ||
326 | +static struct mutex ipv4_route_lock; | |
327 | + | |
328 | +static int proc_dointvec_route(struct ctl_table *table, int write, | |
329 | + struct file *filp, void __user *buffer, size_t *lenp, | |
330 | + loff_t *ppos) | |
331 | +{ | |
332 | + ctl_table tmp = *table; | |
333 | + int new_size, ret; | |
334 | + | |
335 | + mutex_lock(&ipv4_route_lock); | |
336 | + if (write) { | |
337 | + tmp.data = &new_size; | |
338 | + table = &tmp; | |
339 | + } | |
340 | + | |
341 | + ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | |
342 | + | |
343 | + if (!ret && write) { | |
344 | + ret = mem_reserve_kmem_cache_set(&ipv4_route_reserve, | |
345 | + ipv4_dst_ops.kmem_cachep, new_size); | |
346 | + if (!ret) | |
347 | + ip_rt_max_size = new_size; | |
348 | + } | |
349 | + mutex_unlock(&ipv4_route_lock); | |
350 | + | |
351 | + return ret; | |
352 | +} | |
353 | + | |
354 | +static int sysctl_intvec_route(struct ctl_table *table, | |
355 | + int __user *name, int nlen, | |
356 | + void __user *oldval, size_t __user *oldlenp, | |
357 | + void __user *newval, size_t newlen) | |
358 | +{ | |
359 | + int write = (newval && newlen); | |
360 | + ctl_table tmp = *table; | |
361 | + int new_size, ret; | |
362 | + | |
363 | + mutex_lock(&ipv4_route_lock); | |
364 | + if (write) { | |
365 | + tmp.data = &new_size; | |
366 | + table = &tmp; | |
367 | + } | |
368 | + | |
369 | + ret = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); | |
370 | + | |
371 | + if (!ret && write) { | |
372 | + ret = mem_reserve_kmem_cache_set(&ipv4_route_reserve, | |
373 | + ipv4_dst_ops.kmem_cachep, new_size); | |
374 | + if (!ret) | |
375 | + ip_rt_max_size = new_size; | |
376 | + } | |
377 | + mutex_unlock(&ipv4_route_lock); | |
378 | + | |
379 | + return ret; | |
380 | +} | |
381 | + | |
382 | static const struct seq_operations rt_cache_seq_ops = { | |
383 | .start = rt_cache_seq_start, | |
384 | .next = rt_cache_seq_next, | |
385 | @@ -2991,7 +3050,8 @@ static ctl_table ipv4_route_table[] = { | |
386 | .data = &ip_rt_max_size, | |
387 | .maxlen = sizeof(int), | |
388 | .mode = 0644, | |
389 | - .proc_handler = &proc_dointvec, | |
390 | + .proc_handler = &proc_dointvec_route, | |
391 | + .strategy = &sysctl_intvec_route, | |
392 | }, | |
393 | { | |
394 | /* Deprecated. Use gc_min_interval_ms */ | |
395 | @@ -3270,6 +3330,15 @@ int __init ip_rt_init(void) | |
396 | ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1); | |
397 | ip_rt_max_size = (rt_hash_mask + 1) * 16; | |
398 | ||
399 | +#ifdef CONFIG_PROCFS | |
400 | + mutex_init(&ipv4_route_lock); | |
401 | +#endif | |
402 | + | |
403 | + mem_reserve_init(&ipv4_route_reserve, "IPv4 route cache", | |
404 | + &net_rx_reserve); | |
405 | + mem_reserve_kmem_cache_set(&ipv4_route_reserve, | |
406 | + ipv4_dst_ops.kmem_cachep, ip_rt_max_size); | |
407 | + | |
408 | devinet_init(); | |
409 | ip_fib_init(); | |
410 | ||
411 | Index: linux-2.6.26/net/ipv6/route.c | |
412 | =================================================================== | |
413 | --- linux-2.6.26.orig/net/ipv6/route.c | |
414 | +++ linux-2.6.26/net/ipv6/route.c | |
415 | @@ -37,6 +37,7 @@ | |
416 | #include <linux/mroute6.h> | |
417 | #include <linux/init.h> | |
418 | #include <linux/if_arp.h> | |
419 | +#include <linux/reserve.h> | |
420 | #include <linux/proc_fs.h> | |
421 | #include <linux/seq_file.h> | |
422 | #include <linux/nsproxy.h> | |
423 | @@ -2473,6 +2474,64 @@ int ipv6_sysctl_rtcache_flush(ctl_table | |
424 | return -EINVAL; | |
425 | } | |
426 | ||
427 | +static int proc_dointvec_route(struct ctl_table *table, int write, | |
428 | + struct file *filp, void __user *buffer, size_t *lenp, | |
429 | + loff_t *ppos) | |
430 | +{ | |
431 | + struct net *net = container_of(table->data, struct net, | |
432 | + ipv6.sysctl.ip6_rt_max_size); | |
433 | + ctl_table tmp = *table; | |
434 | + int new_size, ret; | |
435 | + | |
436 | + mutex_lock(&net->ipv6.sysctl.ip6_rt_lock); | |
437 | + if (write) { | |
438 | + tmp.data = &new_size; | |
439 | + table = &tmp; | |
440 | + } | |
441 | + | |
442 | + ret = proc_dointvec(table, write, filp, buffer, lenp, ppos); | |
443 | + | |
444 | + if (!ret && write) { | |
445 | + ret = mem_reserve_kmem_cache_set(&net->ipv6.ip6_rt_reserve, | |
446 | + net->ipv6.ip6_dst_ops->kmem_cachep, new_size); | |
447 | + if (!ret) | |
448 | + net->ipv6.sysctl.ip6_rt_max_size = new_size; | |
449 | + } | |
450 | + mutex_unlock(&net->ipv6.sysctl.ip6_rt_lock); | |
451 | + | |
452 | + return ret; | |
453 | +} | |
454 | + | |
455 | +static int sysctl_intvec_route(struct ctl_table *table, | |
456 | + int __user *name, int nlen, | |
457 | + void __user *oldval, size_t __user *oldlenp, | |
458 | + void __user *newval, size_t newlen) | |
459 | +{ | |
460 | + struct net *net = container_of(table->data, struct net, | |
461 | + ipv6.sysctl.ip6_rt_max_size); | |
462 | + int write = (newval && newlen); | |
463 | + ctl_table tmp = *table; | |
464 | + int new_size, ret; | |
465 | + | |
466 | + mutex_lock(&net->ipv6.sysctl.ip6_rt_lock); | |
467 | + if (write) { | |
468 | + tmp.data = &new_size; | |
469 | + table = &tmp; | |
470 | + } | |
471 | + | |
472 | + ret = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen); | |
473 | + | |
474 | + if (!ret && write) { | |
475 | + ret = mem_reserve_kmem_cache_set(&net->ipv6.ip6_rt_reserve, | |
476 | + net->ipv6.ip6_dst_ops->kmem_cachep, new_size); | |
477 | + if (!ret) | |
478 | + net->ipv6.sysctl.ip6_rt_max_size = new_size; | |
479 | + } | |
480 | + mutex_unlock(&net->ipv6.sysctl.ip6_rt_lock); | |
481 | + | |
482 | + return ret; | |
483 | +} | |
484 | + | |
485 | ctl_table ipv6_route_table_template[] = { | |
486 | { | |
487 | .procname = "flush", | |
488 | @@ -2495,7 +2554,8 @@ ctl_table ipv6_route_table_template[] = | |
489 | .data = &init_net.ipv6.sysctl.ip6_rt_max_size, | |
490 | .maxlen = sizeof(int), | |
491 | .mode = 0644, | |
492 | - .proc_handler = &proc_dointvec, | |
493 | + .proc_handler = &proc_dointvec_route, | |
494 | + .strategy = &sysctl_intvec_route, | |
495 | }, | |
496 | { | |
497 | .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL, | |
498 | @@ -2583,6 +2643,8 @@ struct ctl_table *ipv6_route_sysctl_init | |
499 | table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; | |
500 | } | |
501 | ||
502 | + mutex_init(&net->ipv6.sysctl.ip6_rt_lock); | |
503 | + | |
504 | return table; | |
505 | } | |
506 | #endif | |
507 | @@ -2636,6 +2698,14 @@ static int ip6_route_net_init(struct net | |
508 | net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ; | |
509 | net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40; | |
510 | ||
511 | + mem_reserve_init(&net->ipv6.ip6_rt_reserve, "IPv6 route cache", | |
512 | + &net_rx_reserve); | |
513 | + ret = mem_reserve_kmem_cache_set(&net->ipv6.ip6_rt_reserve, | |
514 | + net->ipv6.ip6_dst_ops->kmem_cachep, | |
515 | + net->ipv6.sysctl.ip6_rt_max_size); | |
516 | + if (ret) | |
517 | + goto out_reserve_fail; | |
518 | + | |
519 | #ifdef CONFIG_PROC_FS | |
520 | proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops); | |
521 | proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); | |
522 | @@ -2646,12 +2716,15 @@ static int ip6_route_net_init(struct net | |
523 | out: | |
524 | return ret; | |
525 | ||
526 | +out_reserve_fail: | |
527 | + mem_reserve_disconnect(&net->ipv6.ip6_rt_reserve); | |
528 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | |
529 | + kfree(net->ipv6.ip6_blk_hole_entry); | |
530 | out_ip6_prohibit_entry: | |
531 | kfree(net->ipv6.ip6_prohibit_entry); | |
532 | out_ip6_null_entry: | |
533 | - kfree(net->ipv6.ip6_null_entry); | |
534 | #endif | |
535 | + kfree(net->ipv6.ip6_null_entry); | |
536 | out_ip6_dst_ops: | |
537 | release_net(net->ipv6.ip6_dst_ops->dst_net); | |
538 | kfree(net->ipv6.ip6_dst_ops); | |
539 | @@ -2664,6 +2737,7 @@ static void ip6_route_net_exit(struct ne | |
540 | proc_net_remove(net, "ipv6_route"); | |
541 | proc_net_remove(net, "rt6_stats"); | |
542 | #endif | |
543 | + mem_reserve_disconnect(&net->ipv6.ip6_rt_reserve); | |
544 | kfree(net->ipv6.ip6_null_entry); | |
545 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | |
546 | kfree(net->ipv6.ip6_prohibit_entry); | |
547 | Index: linux-2.6.26/include/net/inet_frag.h | |
548 | =================================================================== | |
549 | --- linux-2.6.26.orig/include/net/inet_frag.h | |
550 | +++ linux-2.6.26/include/net/inet_frag.h | |
551 | @@ -1,6 +1,9 @@ | |
552 | #ifndef __NET_FRAG_H__ | |
553 | #define __NET_FRAG_H__ | |
554 | ||
555 | +#include <linux/reserve.h> | |
556 | +#include <linux/mutex.h> | |
557 | + | |
558 | struct netns_frags { | |
559 | int nqueues; | |
560 | atomic_t mem; | |
561 | @@ -10,6 +13,10 @@ struct netns_frags { | |
562 | int timeout; | |
563 | int high_thresh; | |
564 | int low_thresh; | |
565 | + | |
566 | + /* reserves */ | |
567 | + struct mutex lock; | |
568 | + struct mem_reserve reserve; | |
569 | }; | |
570 | ||
571 | struct inet_frag_queue { | |
572 | Index: linux-2.6.26/net/ipv4/inet_fragment.c | |
573 | =================================================================== | |
574 | --- linux-2.6.26.orig/net/ipv4/inet_fragment.c | |
575 | +++ linux-2.6.26/net/ipv4/inet_fragment.c | |
576 | @@ -19,6 +19,7 @@ | |
577 | #include <linux/random.h> | |
578 | #include <linux/skbuff.h> | |
579 | #include <linux/rtnetlink.h> | |
580 | +#include <linux/reserve.h> | |
581 | ||
582 | #include <net/inet_frag.h> | |
583 | ||
584 | @@ -74,6 +75,8 @@ void inet_frags_init_net(struct netns_fr | |
585 | nf->nqueues = 0; | |
586 | atomic_set(&nf->mem, 0); | |
587 | INIT_LIST_HEAD(&nf->lru_list); | |
588 | + mutex_init(&nf->lock); | |
589 | + mem_reserve_init(&nf->reserve, "IP fragement cache", NULL); | |
590 | } | |
591 | EXPORT_SYMBOL(inet_frags_init_net); | |
592 | ||
593 | Index: linux-2.6.26/include/net/netns/ipv6.h | |
594 | =================================================================== | |
595 | --- linux-2.6.26.orig/include/net/netns/ipv6.h | |
596 | +++ linux-2.6.26/include/net/netns/ipv6.h | |
597 | @@ -24,6 +24,8 @@ struct netns_sysctl_ipv6 { | |
598 | int ip6_rt_mtu_expires; | |
599 | int ip6_rt_min_advmss; | |
600 | int icmpv6_time; | |
601 | + | |
602 | + struct mutex ip6_rt_lock; | |
603 | }; | |
604 | ||
605 | struct netns_ipv6 { | |
606 | @@ -55,5 +57,7 @@ struct netns_ipv6 { | |
607 | struct sock *ndisc_sk; | |
608 | struct sock *tcp_sk; | |
609 | struct sock *igmp_sk; | |
610 | + | |
611 | + struct mem_reserve ip6_rt_reserve; | |
612 | }; | |
613 | #endif |