net/ipv4/inet_fragment.c
/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>

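/*
 * Timer callback: pick a fresh random seed and relink every queue onto
 * the hash chain the new seed dictates.  Re-keying every secret_interval
 * bounds how long a remote sender can deliberately keep hitting a single
 * hash chain (hash-collision DoS).
 */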
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
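
/*
 * Callers are expected to fill in the struct inet_frags callbacks before
 * calling inet_frags_init() above.  A minimal sketch, modeled on how the
 * ipv4 reassembly code of this era wires itself up (the ip4_* names are
 * that caller's own helpers, shown only for illustration):
 *
 *	ip4_frags.hashfn	= ip4_hashfn;
 *	ip4_frags.constructor	= ip4_frag_init;
 *	ip4_frags.destructor	= ip4_frag_free;
 *	ip4_frags.qsize		= sizeof(struct ipq);
 *	ip4_frags.match		= ip4_frag_match;
 *	ip4_frags.frag_expire	= ip_expire;
 *	ip4_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&ip4_frags);
 */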

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

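/*
 * Per-netns teardown: dropping low_thresh to 0 makes the forced evictor
 * pass below reclaim every queue still held by this namespace before the
 * percpu memory counter is destroyed.
 */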
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

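/*
 * Take a queue off all lookup structures.  A pending expiry timer holds
 * one reference and the hash/LRU membership holds another; both are
 * dropped here, leaving only the references callers still own.
 */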
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

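/*
 * Final teardown, reached once the last reference is gone: free every
 * queued fragment, give the accounted truesize (plus the queue head,
 * f->qsize) back to the memory limit, and let the protocol's destructor
 * release anything it attached.  The optional *work pointer lets the
 * evictor track how much memory each destroyed queue returned.
 */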
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

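/*
 * Walk the per-netns LRU list, killing the oldest queues until memory
 * usage falls back under low_thresh (or the list empties).  "force"
 * skips the high_thresh check so namespace teardown can flush everything.
 */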
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

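/*
 * Insert a freshly allocated queue into the hash under the write lock.
 * If another CPU raced us and interned an equivalent queue first, ours
 * is marked complete and dropped, and the winner is returned instead.
 */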
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we were without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get it.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash chain: an equivalent entry may
	 * have been created on another CPU in the window between dropping
	 * the read lock in inet_frag_find() and taking the write lock here.
	 */
	hlist_for_each_entry(qp, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);	/* the expiry timer holds a reference */

	atomic_inc(&qp->refcnt);		/* ...and so does the hash table */
	hlist_add_head(&qp->list, &f->hash[hash]);
	nf->nqueues++;
	write_unlock(&f->lock);
	inet_frag_lru_add(nf, qp);
	return qp;
}

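/*
 * GFP_ATOMIC: fragment reception runs in softirq context, so sleeping
 * allocations are off limits here.  The returned queue starts with a
 * refcount of 1, owned by the caller.
 */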
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

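/*
 * Lookup entry point.  Called with f->lock read-held; the lock is
 * released before returning (hence the __releases annotation), either
 * after a hit or before falling back to inet_frag_create().
 *
 * A caller therefore looks roughly like ipv4's ip_find() (sketch only,
 * names and arguments abridged):
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (q == NULL)
 *		return NULL;	\* lock already dropped on all paths *\
 */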
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;

	hlist_for_each_entry(q, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);