net/xfrm/xfrm_policy.c (people/arne_f/kernel.git, git.ipfire.org)
xfrm: fix a warning in xfrm_policy_insert_list
1 /*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/cpu.h>
28 #include <linux/audit.h>
29 #include <net/dst.h>
30 #include <net/flow.h>
31 #include <net/xfrm.h>
32 #include <net/ip.h>
33 #ifdef CONFIG_XFRM_STATISTICS
34 #include <net/snmp.h>
35 #endif
36
37 #include "xfrm_hash.h"
38
39 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
40 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
41 #define XFRM_MAX_QUEUE_LEN 100
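/* Descriptive note on the constants above: the per-policy hold-queue timer
 * starts at XFRM_QUEUE_TMO_MIN and is doubled on every retry in
 * xfrm_policy_queue_process() until it reaches XFRM_QUEUE_TMO_MAX, at which
 * point the queued packets are purged. Once the hold queue grows beyond
 * XFRM_MAX_QUEUE_LEN entries, further packets are dropped (see
 * xdst_queue_output()).
 */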
42
43 struct xfrm_flo {
44 struct dst_entry *dst_orig;
45 u8 flags;
46 };
47
48 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
49 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
50 __read_mostly;
51
52 static struct kmem_cache *xfrm_dst_cache __read_mostly;
53 static __read_mostly seqcount_t xfrm_policy_hash_generation;
54
55 static void xfrm_init_pmtu(struct dst_entry *dst);
56 static int stale_bundle(struct dst_entry *dst);
57 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
58 static void xfrm_policy_queue_process(unsigned long arg);
59
60 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
61 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
62 int dir);
63
64 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
65 {
66 return refcount_inc_not_zero(&policy->refcnt);
67 }
68
69 static inline bool
70 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
71 {
72 const struct flowi4 *fl4 = &fl->u.ip4;
73
74 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
75 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
76 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
77 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
78 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
79 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
80 }
81
82 static inline bool
83 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
84 {
85 const struct flowi6 *fl6 = &fl->u.ip6;
86
87 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
88 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
89 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
90 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
91 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
92 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
93 }
94
95 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
96 unsigned short family)
97 {
98 switch (family) {
99 case AF_INET:
100 return __xfrm4_selector_match(sel, fl);
101 case AF_INET6:
102 return __xfrm6_selector_match(sel, fl);
103 }
104 return false;
105 }
106
107 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
108 {
109 const struct xfrm_policy_afinfo *afinfo;
110
111 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
112 return NULL;
113 rcu_read_lock();
114 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
115 if (unlikely(!afinfo))
116 rcu_read_unlock();
117 return afinfo;
118 }
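/* Note on the helper above: when an afinfo is found, xfrm_policy_get_afinfo()
 * returns with rcu_read_lock() still held and the caller must drop it with
 * rcu_read_unlock() once it is done with the afinfo (see e.g.
 * __xfrm_dst_lookup() below). When NULL is returned, the RCU read lock is not
 * held.
 */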
119
120 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
121 const xfrm_address_t *saddr,
122 const xfrm_address_t *daddr,
123 int family, u32 mark)
124 {
125 const struct xfrm_policy_afinfo *afinfo;
126 struct dst_entry *dst;
127
128 afinfo = xfrm_policy_get_afinfo(family);
129 if (unlikely(afinfo == NULL))
130 return ERR_PTR(-EAFNOSUPPORT);
131
132 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
133
134 rcu_read_unlock();
135
136 return dst;
137 }
138 EXPORT_SYMBOL(__xfrm_dst_lookup);
139
140 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
141 int tos, int oif,
142 xfrm_address_t *prev_saddr,
143 xfrm_address_t *prev_daddr,
144 int family, u32 mark)
145 {
146 struct net *net = xs_net(x);
147 xfrm_address_t *saddr = &x->props.saddr;
148 xfrm_address_t *daddr = &x->id.daddr;
149 struct dst_entry *dst;
150
151 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
152 saddr = x->coaddr;
153 daddr = prev_daddr;
154 }
155 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
156 saddr = prev_saddr;
157 daddr = x->coaddr;
158 }
159
160 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
161
162 if (!IS_ERR(dst)) {
163 if (prev_saddr != saddr)
164 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
165 if (prev_daddr != daddr)
166 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
167 }
168
169 return dst;
170 }
171
172 static inline unsigned long make_jiffies(long secs)
173 {
174 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
175 return MAX_SCHEDULE_TIMEOUT-1;
176 else
177 return secs*HZ;
178 }
179
180 static void xfrm_policy_timer(unsigned long data)
181 {
182 struct xfrm_policy *xp = (struct xfrm_policy *)data;
183 unsigned long now = get_seconds();
184 long next = LONG_MAX;
185 int warn = 0;
186 int dir;
187
188 read_lock(&xp->lock);
189
190 if (unlikely(xp->walk.dead))
191 goto out;
192
193 dir = xfrm_policy_id2dir(xp->index);
194
195 if (xp->lft.hard_add_expires_seconds) {
196 long tmo = xp->lft.hard_add_expires_seconds +
197 xp->curlft.add_time - now;
198 if (tmo <= 0)
199 goto expired;
200 if (tmo < next)
201 next = tmo;
202 }
203 if (xp->lft.hard_use_expires_seconds) {
204 long tmo = xp->lft.hard_use_expires_seconds +
205 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
206 if (tmo <= 0)
207 goto expired;
208 if (tmo < next)
209 next = tmo;
210 }
211 if (xp->lft.soft_add_expires_seconds) {
212 long tmo = xp->lft.soft_add_expires_seconds +
213 xp->curlft.add_time - now;
214 if (tmo <= 0) {
215 warn = 1;
216 tmo = XFRM_KM_TIMEOUT;
217 }
218 if (tmo < next)
219 next = tmo;
220 }
221 if (xp->lft.soft_use_expires_seconds) {
222 long tmo = xp->lft.soft_use_expires_seconds +
223 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
224 if (tmo <= 0) {
225 warn = 1;
226 tmo = XFRM_KM_TIMEOUT;
227 }
228 if (tmo < next)
229 next = tmo;
230 }
231
232 if (warn)
233 km_policy_expired(xp, dir, 0, 0);
234 if (next != LONG_MAX &&
235 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
236 xfrm_pol_hold(xp);
237
238 out:
239 read_unlock(&xp->lock);
240 xfrm_pol_put(xp);
241 return;
242
243 expired:
244 read_unlock(&xp->lock);
245 if (!xfrm_policy_delete(xp, dir))
246 km_policy_expired(xp, dir, 1, 0);
247 xfrm_pol_put(xp);
248 }
249
250 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
251 * SPD calls.
252 */
253
254 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
255 {
256 struct xfrm_policy *policy;
257
258 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
259
260 if (policy) {
261 write_pnet(&policy->xp_net, net);
262 INIT_LIST_HEAD(&policy->walk.all);
263 INIT_HLIST_NODE(&policy->bydst);
264 INIT_HLIST_NODE(&policy->byidx);
265 rwlock_init(&policy->lock);
266 refcount_set(&policy->refcnt, 1);
267 skb_queue_head_init(&policy->polq.hold_queue);
268 setup_timer(&policy->timer, xfrm_policy_timer,
269 (unsigned long)policy);
270 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
271 (unsigned long)policy);
272 }
273 return policy;
274 }
275 EXPORT_SYMBOL(xfrm_policy_alloc);
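/* Illustrative sketch of how a key manager (e.g. pfkeyv2 or netlink SPD code)
 * is expected to use the API above; not taken verbatim from any caller, error
 * handling omitted:
 *
 *	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (xp == NULL)
 *		return -ENOMEM;
 *	xp->family = AF_INET;
 *	xp->action = XFRM_POLICY_ALLOW;
 *	xp->lft.soft_byte_limit = XFRM_INF;
 *	xp->lft.hard_byte_limit = XFRM_INF;
 *	... fill in xp->selector, xp->xfrm_vec[] and xp->xfrm_nr ...
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, xp, 0);
 */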
276
277 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
278 {
279 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
280
281 security_xfrm_policy_free(policy->security);
282 kfree(policy);
283 }
284
285 /* Destroy xfrm_policy: descendant resources must be released by this point. */
286
287 void xfrm_policy_destroy(struct xfrm_policy *policy)
288 {
289 BUG_ON(!policy->walk.dead);
290
291 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
292 BUG();
293
294 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
295 }
296 EXPORT_SYMBOL(xfrm_policy_destroy);
297
298 /* Rule must be locked. Release descendant resources, announce
299 * the entry dead. The rule must already be unlinked from the lists by this point.
300 */
301
302 static void xfrm_policy_kill(struct xfrm_policy *policy)
303 {
304 write_lock_bh(&policy->lock);
305 policy->walk.dead = 1;
306 write_unlock_bh(&policy->lock);
307
308 atomic_inc(&policy->genid);
309
310 if (del_timer(&policy->polq.hold_timer))
311 xfrm_pol_put(policy);
312 skb_queue_purge(&policy->polq.hold_queue);
313
314 if (del_timer(&policy->timer))
315 xfrm_pol_put(policy);
316
317 xfrm_pol_put(policy);
318 }
319
320 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
321
322 static inline unsigned int idx_hash(struct net *net, u32 index)
323 {
324 return __idx_hash(index, net->xfrm.policy_idx_hmask);
325 }
326
327 /* calculate policy hash thresholds */
328 static void __get_hash_thresh(struct net *net,
329 unsigned short family, int dir,
330 u8 *dbits, u8 *sbits)
331 {
332 switch (family) {
333 case AF_INET:
334 *dbits = net->xfrm.policy_bydst[dir].dbits4;
335 *sbits = net->xfrm.policy_bydst[dir].sbits4;
336 break;
337
338 case AF_INET6:
339 *dbits = net->xfrm.policy_bydst[dir].dbits6;
340 *sbits = net->xfrm.policy_bydst[dir].sbits6;
341 break;
342
343 default:
344 *dbits = 0;
345 *sbits = 0;
346 }
347 }
348
349 static struct hlist_head *policy_hash_bysel(struct net *net,
350 const struct xfrm_selector *sel,
351 unsigned short family, int dir)
352 {
353 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
354 unsigned int hash;
355 u8 dbits;
356 u8 sbits;
357
358 __get_hash_thresh(net, family, dir, &dbits, &sbits);
359 hash = __sel_hash(sel, family, hmask, dbits, sbits);
360
361 if (hash == hmask + 1)
362 return &net->xfrm.policy_inexact[dir];
363
364 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
365 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
366 }
367
368 static struct hlist_head *policy_hash_direct(struct net *net,
369 const xfrm_address_t *daddr,
370 const xfrm_address_t *saddr,
371 unsigned short family, int dir)
372 {
373 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
374 unsigned int hash;
375 u8 dbits;
376 u8 sbits;
377
378 __get_hash_thresh(net, family, dir, &dbits, &sbits);
379 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
380
381 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
382 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
383 }
384
385 static void xfrm_dst_hash_transfer(struct net *net,
386 struct hlist_head *list,
387 struct hlist_head *ndsttable,
388 unsigned int nhashmask,
389 int dir)
390 {
391 struct hlist_node *tmp, *entry0 = NULL;
392 struct xfrm_policy *pol;
393 unsigned int h0 = 0;
394 u8 dbits;
395 u8 sbits;
396
397 redo:
398 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
399 unsigned int h;
400
401 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
402 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
403 pol->family, nhashmask, dbits, sbits);
404 if (!entry0) {
405 hlist_del_rcu(&pol->bydst);
406 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
407 h0 = h;
408 } else {
409 if (h != h0)
410 continue;
411 hlist_del_rcu(&pol->bydst);
412 hlist_add_behind_rcu(&pol->bydst, entry0);
413 }
414 entry0 = &pol->bydst;
415 }
416 if (!hlist_empty(list)) {
417 entry0 = NULL;
418 goto redo;
419 }
420 }
421
422 static void xfrm_idx_hash_transfer(struct hlist_head *list,
423 struct hlist_head *nidxtable,
424 unsigned int nhashmask)
425 {
426 struct hlist_node *tmp;
427 struct xfrm_policy *pol;
428
429 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
430 unsigned int h;
431
432 h = __idx_hash(pol->index, nhashmask);
433 hlist_add_head(&pol->byidx, nidxtable+h);
434 }
435 }
436
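/* Double the number of hash buckets: e.g. an old hmask of 7 (8 buckets)
 * becomes a new hmask of 15 (16 buckets).
 */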
437 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
438 {
439 return ((old_hmask + 1) << 1) - 1;
440 }
441
442 static void xfrm_bydst_resize(struct net *net, int dir)
443 {
444 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
445 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
446 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
447 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
448 struct hlist_head *odst;
449 int i;
450
451 if (!ndst)
452 return;
453
454 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
455 write_seqcount_begin(&xfrm_policy_hash_generation);
456
457 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
458 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
462
463 for (i = hmask; i >= 0; i--)
464 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
465
466 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
467 net->xfrm.policy_bydst[dir].hmask = nhashmask;
468
469 write_seqcount_end(&xfrm_policy_hash_generation);
470 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
471
472 synchronize_rcu();
473
474 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
475 }
476
477 static void xfrm_byidx_resize(struct net *net, int total)
478 {
479 unsigned int hmask = net->xfrm.policy_idx_hmask;
480 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
481 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
482 struct hlist_head *oidx = net->xfrm.policy_byidx;
483 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
484 int i;
485
486 if (!nidx)
487 return;
488
489 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
490
491 for (i = hmask; i >= 0; i--)
492 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
493
494 net->xfrm.policy_byidx = nidx;
495 net->xfrm.policy_idx_hmask = nhashmask;
496
497 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
498
499 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
500 }
501
502 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
503 {
504 unsigned int cnt = net->xfrm.policy_count[dir];
505 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
506
507 if (total)
508 *total += cnt;
509
510 if ((hmask + 1) < xfrm_policy_hashmax &&
511 cnt > hmask)
512 return 1;
513
514 return 0;
515 }
516
517 static inline int xfrm_byidx_should_resize(struct net *net, int total)
518 {
519 unsigned int hmask = net->xfrm.policy_idx_hmask;
520
521 if ((hmask + 1) < xfrm_policy_hashmax &&
522 total > hmask)
523 return 1;
524
525 return 0;
526 }
527
528 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
529 {
530 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
531 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
532 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
533 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
534 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
535 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
536 si->spdhcnt = net->xfrm.policy_idx_hmask;
537 si->spdhmcnt = xfrm_policy_hashmax;
538 }
539 EXPORT_SYMBOL(xfrm_spd_getinfo);
540
541 static DEFINE_MUTEX(hash_resize_mutex);
542 static void xfrm_hash_resize(struct work_struct *work)
543 {
544 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
545 int dir, total;
546
547 mutex_lock(&hash_resize_mutex);
548
549 total = 0;
550 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
551 if (xfrm_bydst_should_resize(net, dir, &total))
552 xfrm_bydst_resize(net, dir);
553 }
554 if (xfrm_byidx_should_resize(net, total))
555 xfrm_byidx_resize(net, total);
556
557 mutex_unlock(&hash_resize_mutex);
558 }
559
560 static void xfrm_hash_rebuild(struct work_struct *work)
561 {
562 struct net *net = container_of(work, struct net,
563 xfrm.policy_hthresh.work);
564 unsigned int hmask;
565 struct xfrm_policy *pol;
566 struct xfrm_policy *policy;
567 struct hlist_head *chain;
568 struct hlist_head *odst;
569 struct hlist_node *newpos;
570 int i;
571 int dir;
572 unsigned seq;
573 u8 lbits4, rbits4, lbits6, rbits6;
574
575 mutex_lock(&hash_resize_mutex);
576
577 /* read selector prefixlen thresholds */
578 do {
579 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
580
581 lbits4 = net->xfrm.policy_hthresh.lbits4;
582 rbits4 = net->xfrm.policy_hthresh.rbits4;
583 lbits6 = net->xfrm.policy_hthresh.lbits6;
584 rbits6 = net->xfrm.policy_hthresh.rbits6;
585 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
586
587 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
588
589 /* reset the bydst and inexact table in all directions */
590 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
591 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
592 hmask = net->xfrm.policy_bydst[dir].hmask;
593 odst = net->xfrm.policy_bydst[dir].table;
594 for (i = hmask; i >= 0; i--)
595 INIT_HLIST_HEAD(odst + i);
596 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
597 /* dir out => dst = remote, src = local */
598 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
599 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
600 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
601 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
602 } else {
603 /* dir in/fwd => dst = local, src = remote */
604 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
605 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
606 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
607 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
608 }
609 }
610
611 /* re-insert all policies by order of creation */
612 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
613 if (policy->walk.dead ||
614 xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
615 /* skip socket policies */
616 continue;
617 }
618 newpos = NULL;
619 chain = policy_hash_bysel(net, &policy->selector,
620 policy->family,
621 xfrm_policy_id2dir(policy->index));
622 hlist_for_each_entry(pol, chain, bydst) {
623 if (policy->priority >= pol->priority)
624 newpos = &pol->bydst;
625 else
626 break;
627 }
628 if (newpos)
629 hlist_add_behind_rcu(&policy->bydst, newpos);
630 else
631 hlist_add_head_rcu(&policy->bydst, chain);
632 }
633
634 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
635
636 mutex_unlock(&hash_resize_mutex);
637 }
638
639 void xfrm_policy_hash_rebuild(struct net *net)
640 {
641 schedule_work(&net->xfrm.policy_hthresh.work);
642 }
643 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
644
645 /* Generate new index... KAME seems to generate them ordered by cost
646 * of an absolute unpredictability of ordering of rules. This will not pass. */
647 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
648 {
649 static u32 idx_generator;
650
651 for (;;) {
652 struct hlist_head *list;
653 struct xfrm_policy *p;
654 u32 idx;
655 int found;
656
657 if (!index) {
658 idx = (idx_generator | dir);
659 idx_generator += 8;
660 } else {
661 idx = index;
662 index = 0;
663 }
664
665 if (idx == 0)
666 idx = 8;
667 list = net->xfrm.policy_byidx + idx_hash(net, idx);
668 found = 0;
669 hlist_for_each_entry(p, list, byidx) {
670 if (p->index == idx) {
671 found = 1;
672 break;
673 }
674 }
675 if (!found)
676 return idx;
677 }
678 }
679
680 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
681 {
682 u32 *p1 = (u32 *) s1;
683 u32 *p2 = (u32 *) s2;
684 int len = sizeof(struct xfrm_selector) / sizeof(u32);
685 int i;
686
687 for (i = 0; i < len; i++) {
688 if (p1[i] != p2[i])
689 return 1;
690 }
691
692 return 0;
693 }
694
695 static void xfrm_policy_requeue(struct xfrm_policy *old,
696 struct xfrm_policy *new)
697 {
698 struct xfrm_policy_queue *pq = &old->polq;
699 struct sk_buff_head list;
700
701 if (skb_queue_empty(&pq->hold_queue))
702 return;
703
704 __skb_queue_head_init(&list);
705
706 spin_lock_bh(&pq->hold_queue.lock);
707 skb_queue_splice_init(&pq->hold_queue, &list);
708 if (del_timer(&pq->hold_timer))
709 xfrm_pol_put(old);
710 spin_unlock_bh(&pq->hold_queue.lock);
711
712 pq = &new->polq;
713
714 spin_lock_bh(&pq->hold_queue.lock);
715 skb_queue_splice(&list, &pq->hold_queue);
716 pq->timeout = XFRM_QUEUE_TMO_MIN;
717 if (!mod_timer(&pq->hold_timer, jiffies))
718 xfrm_pol_hold(new);
719 spin_unlock_bh(&pq->hold_queue.lock);
720 }
721
722 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
723 struct xfrm_policy *pol)
724 {
725 if (policy->mark.v == pol->mark.v &&
726 policy->priority == pol->priority)
727 return true;
728
729 return false;
730 }
731
732 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
733 {
734 struct net *net = xp_net(policy);
735 struct xfrm_policy *pol;
736 struct xfrm_policy *delpol;
737 struct hlist_head *chain;
738 struct hlist_node *newpos;
739
740 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
741 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
742 delpol = NULL;
743 newpos = NULL;
744 hlist_for_each_entry(pol, chain, bydst) {
745 if (pol->type == policy->type &&
746 !selector_cmp(&pol->selector, &policy->selector) &&
747 xfrm_policy_mark_match(policy, pol) &&
748 xfrm_sec_ctx_match(pol->security, policy->security) &&
749 !WARN_ON(delpol)) {
750 if (excl) {
751 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
752 return -EEXIST;
753 }
754 delpol = pol;
755 if (policy->priority > pol->priority)
756 continue;
757 } else if (policy->priority >= pol->priority) {
758 newpos = &pol->bydst;
759 continue;
760 }
761 if (delpol)
762 break;
763 }
764 if (newpos)
765 hlist_add_behind_rcu(&policy->bydst, newpos);
766 else
767 hlist_add_head_rcu(&policy->bydst, chain);
768 __xfrm_policy_link(policy, dir);
769
770 /* After previous checking, family can either be AF_INET or AF_INET6 */
771 if (policy->family == AF_INET)
772 rt_genid_bump_ipv4(net);
773 else
774 rt_genid_bump_ipv6(net);
775
776 if (delpol) {
777 xfrm_policy_requeue(delpol, policy);
778 __xfrm_policy_unlink(delpol, dir);
779 }
780 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
781 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
782 policy->curlft.add_time = get_seconds();
783 policy->curlft.use_time = 0;
784 if (!mod_timer(&policy->timer, jiffies + HZ))
785 xfrm_pol_hold(policy);
786 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
787
788 if (delpol)
789 xfrm_policy_kill(delpol);
790 else if (xfrm_bydst_should_resize(net, dir, NULL))
791 schedule_work(&net->xfrm.policy_hash_work);
792
793 return 0;
794 }
795 EXPORT_SYMBOL(xfrm_policy_insert);
796
797 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
798 int dir, struct xfrm_selector *sel,
799 struct xfrm_sec_ctx *ctx, int delete,
800 int *err)
801 {
802 struct xfrm_policy *pol, *ret;
803 struct hlist_head *chain;
804
805 *err = 0;
806 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
807 chain = policy_hash_bysel(net, sel, sel->family, dir);
808 ret = NULL;
809 hlist_for_each_entry(pol, chain, bydst) {
810 if (pol->type == type &&
811 (mark & pol->mark.m) == pol->mark.v &&
812 !selector_cmp(sel, &pol->selector) &&
813 xfrm_sec_ctx_match(ctx, pol->security)) {
814 xfrm_pol_hold(pol);
815 if (delete) {
816 *err = security_xfrm_policy_delete(
817 pol->security);
818 if (*err) {
819 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
820 return pol;
821 }
822 __xfrm_policy_unlink(pol, dir);
823 }
824 ret = pol;
825 break;
826 }
827 }
828 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
829
830 if (ret && delete)
831 xfrm_policy_kill(ret);
832 return ret;
833 }
834 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
835
836 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
837 int dir, u32 id, int delete, int *err)
838 {
839 struct xfrm_policy *pol, *ret;
840 struct hlist_head *chain;
841
842 *err = -ENOENT;
843 if (xfrm_policy_id2dir(id) != dir)
844 return NULL;
845
846 *err = 0;
847 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
848 chain = net->xfrm.policy_byidx + idx_hash(net, id);
849 ret = NULL;
850 hlist_for_each_entry(pol, chain, byidx) {
851 if (pol->type == type && pol->index == id &&
852 (mark & pol->mark.m) == pol->mark.v) {
853 xfrm_pol_hold(pol);
854 if (delete) {
855 *err = security_xfrm_policy_delete(
856 pol->security);
857 if (*err) {
858 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
859 return pol;
860 }
861 __xfrm_policy_unlink(pol, dir);
862 }
863 ret = pol;
864 break;
865 }
866 }
867 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
868
869 if (ret && delete)
870 xfrm_policy_kill(ret);
871 return ret;
872 }
873 EXPORT_SYMBOL(xfrm_policy_byid);
874
875 #ifdef CONFIG_SECURITY_NETWORK_XFRM
876 static inline int
877 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
878 {
879 int dir, err = 0;
880
881 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
882 struct xfrm_policy *pol;
883 int i;
884
885 hlist_for_each_entry(pol,
886 &net->xfrm.policy_inexact[dir], bydst) {
887 if (pol->type != type)
888 continue;
889 err = security_xfrm_policy_delete(pol->security);
890 if (err) {
891 xfrm_audit_policy_delete(pol, 0, task_valid);
892 return err;
893 }
894 }
895 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
896 hlist_for_each_entry(pol,
897 net->xfrm.policy_bydst[dir].table + i,
898 bydst) {
899 if (pol->type != type)
900 continue;
901 err = security_xfrm_policy_delete(
902 pol->security);
903 if (err) {
904 xfrm_audit_policy_delete(pol, 0,
905 task_valid);
906 return err;
907 }
908 }
909 }
910 }
911 return err;
912 }
913 #else
914 static inline int
915 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
916 {
917 return 0;
918 }
919 #endif
920
921 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
922 {
923 int dir, err = 0, cnt = 0;
924
925 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
926
927 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
928 if (err)
929 goto out;
930
931 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
932 struct xfrm_policy *pol;
933 int i;
934
935 again1:
936 hlist_for_each_entry(pol,
937 &net->xfrm.policy_inexact[dir], bydst) {
938 if (pol->type != type)
939 continue;
940 __xfrm_policy_unlink(pol, dir);
941 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
942 cnt++;
943
944 xfrm_audit_policy_delete(pol, 1, task_valid);
945
946 xfrm_policy_kill(pol);
947
948 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
949 goto again1;
950 }
951
952 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
953 again2:
954 hlist_for_each_entry(pol,
955 net->xfrm.policy_bydst[dir].table + i,
956 bydst) {
957 if (pol->type != type)
958 continue;
959 __xfrm_policy_unlink(pol, dir);
960 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
961 cnt++;
962
963 xfrm_audit_policy_delete(pol, 1, task_valid);
964 xfrm_policy_kill(pol);
965
966 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
967 goto again2;
968 }
969 }
970
971 }
972 if (!cnt)
973 err = -ESRCH;
974 out:
975 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
976 return err;
977 }
978 EXPORT_SYMBOL(xfrm_policy_flush);
979
980 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
981 int (*func)(struct xfrm_policy *, int, int, void*),
982 void *data)
983 {
984 struct xfrm_policy *pol;
985 struct xfrm_policy_walk_entry *x;
986 int error = 0;
987
988 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
989 walk->type != XFRM_POLICY_TYPE_ANY)
990 return -EINVAL;
991
992 if (list_empty(&walk->walk.all) && walk->seq != 0)
993 return 0;
994
995 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
996 if (list_empty(&walk->walk.all))
997 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
998 else
999 x = list_first_entry(&walk->walk.all,
1000 struct xfrm_policy_walk_entry, all);
1001
1002 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1003 if (x->dead)
1004 continue;
1005 pol = container_of(x, struct xfrm_policy, walk);
1006 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1007 walk->type != pol->type)
1008 continue;
1009 error = func(pol, xfrm_policy_id2dir(pol->index),
1010 walk->seq, data);
1011 if (error) {
1012 list_move_tail(&walk->walk.all, &x->all);
1013 goto out;
1014 }
1015 walk->seq++;
1016 }
1017 if (walk->seq == 0) {
1018 error = -ENOENT;
1019 goto out;
1020 }
1021 list_del_init(&walk->walk.all);
1022 out:
1023 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1024 return error;
1025 }
1026 EXPORT_SYMBOL(xfrm_policy_walk);
1027
1028 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1029 {
1030 INIT_LIST_HEAD(&walk->walk.all);
1031 walk->walk.dead = 1;
1032 walk->type = type;
1033 walk->seq = 0;
1034 }
1035 EXPORT_SYMBOL(xfrm_policy_walk_init);
1036
1037 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1038 {
1039 if (list_empty(&walk->walk.all))
1040 return;
1041
1042 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1043 list_del(&walk->walk.all);
1044 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1045 }
1046 EXPORT_SYMBOL(xfrm_policy_walk_done);
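/* Illustrative sketch of the policy walk API above, as used by SPD dump code
 * (dump_one_policy() is an assumed callback name, not from this file):
 *
 *	struct xfrm_policy_walk walk;
 *	int err;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	xfrm_policy_walk_done(&walk, net);
 *
 * The callback is invoked as func(policy, direction, walk sequence number,
 * data); a non-zero return stops the walk, is propagated to the caller, and
 * the walk position is remembered so a later call can resume from there.
 */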
1047
1048 /*
1049 * Find policy to apply to this flow.
1050 *
1051 * Returns 0 if policy found, else an -errno.
1052 */
1053 static int xfrm_policy_match(const struct xfrm_policy *pol,
1054 const struct flowi *fl,
1055 u8 type, u16 family, int dir)
1056 {
1057 const struct xfrm_selector *sel = &pol->selector;
1058 int ret = -ESRCH;
1059 bool match;
1060
1061 if (pol->family != family ||
1062 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1063 pol->type != type)
1064 return ret;
1065
1066 match = xfrm_selector_match(sel, fl, family);
1067 if (match)
1068 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1069 dir);
1070
1071 return ret;
1072 }
1073
1074 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1075 const struct flowi *fl,
1076 u16 family, u8 dir)
1077 {
1078 int err;
1079 struct xfrm_policy *pol, *ret;
1080 const xfrm_address_t *daddr, *saddr;
1081 struct hlist_head *chain;
1082 unsigned int sequence;
1083 u32 priority;
1084
1085 daddr = xfrm_flowi_daddr(fl, family);
1086 saddr = xfrm_flowi_saddr(fl, family);
1087 if (unlikely(!daddr || !saddr))
1088 return NULL;
1089
1090 rcu_read_lock();
1091 retry:
1092 do {
1093 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1094 chain = policy_hash_direct(net, daddr, saddr, family, dir);
1095 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1096
1097 priority = ~0U;
1098 ret = NULL;
1099 hlist_for_each_entry_rcu(pol, chain, bydst) {
1100 err = xfrm_policy_match(pol, fl, type, family, dir);
1101 if (err) {
1102 if (err == -ESRCH)
1103 continue;
1104 else {
1105 ret = ERR_PTR(err);
1106 goto fail;
1107 }
1108 } else {
1109 ret = pol;
1110 priority = ret->priority;
1111 break;
1112 }
1113 }
1114 chain = &net->xfrm.policy_inexact[dir];
1115 hlist_for_each_entry_rcu(pol, chain, bydst) {
1116 if ((pol->priority >= priority) && ret)
1117 break;
1118
1119 err = xfrm_policy_match(pol, fl, type, family, dir);
1120 if (err) {
1121 if (err == -ESRCH)
1122 continue;
1123 else {
1124 ret = ERR_PTR(err);
1125 goto fail;
1126 }
1127 } else {
1128 ret = pol;
1129 break;
1130 }
1131 }
1132
1133 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
1134 goto retry;
1135
1136 if (ret && !xfrm_pol_hold_rcu(ret))
1137 goto retry;
1138 fail:
1139 rcu_read_unlock();
1140
1141 return ret;
1142 }
1143
1144 static struct xfrm_policy *
1145 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1146 {
1147 #ifdef CONFIG_XFRM_SUB_POLICY
1148 struct xfrm_policy *pol;
1149
1150 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1151 if (pol != NULL)
1152 return pol;
1153 #endif
1154 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1155 }
1156
1157 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1158 const struct flowi *fl, u16 family)
1159 {
1160 struct xfrm_policy *pol;
1161
1162 rcu_read_lock();
1163 again:
1164 pol = rcu_dereference(sk->sk_policy[dir]);
1165 if (pol != NULL) {
1166 bool match;
1167 int err = 0;
1168
1169 if (pol->family != family) {
1170 pol = NULL;
1171 goto out;
1172 }
1173
1174 match = xfrm_selector_match(&pol->selector, fl, family);
1175 if (match) {
1176 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1177 pol = NULL;
1178 goto out;
1179 }
1180 err = security_xfrm_policy_lookup(pol->security,
1181 fl->flowi_secid,
1182 dir);
1183 if (!err) {
1184 if (!xfrm_pol_hold_rcu(pol))
1185 goto again;
1186 } else if (err == -ESRCH) {
1187 pol = NULL;
1188 } else {
1189 pol = ERR_PTR(err);
1190 }
1191 } else
1192 pol = NULL;
1193 }
1194 out:
1195 rcu_read_unlock();
1196 return pol;
1197 }
1198
1199 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1200 {
1201 struct net *net = xp_net(pol);
1202
1203 list_add(&pol->walk.all, &net->xfrm.policy_all);
1204 net->xfrm.policy_count[dir]++;
1205 xfrm_pol_hold(pol);
1206 }
1207
1208 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1209 int dir)
1210 {
1211 struct net *net = xp_net(pol);
1212
1213 if (list_empty(&pol->walk.all))
1214 return NULL;
1215
1216 /* Socket policies are not hashed. */
1217 if (!hlist_unhashed(&pol->bydst)) {
1218 hlist_del_rcu(&pol->bydst);
1219 hlist_del(&pol->byidx);
1220 }
1221
1222 list_del_init(&pol->walk.all);
1223 net->xfrm.policy_count[dir]--;
1224
1225 return pol;
1226 }
1227
1228 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1229 {
1230 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1231 }
1232
1233 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1234 {
1235 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1236 }
1237
1238 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1239 {
1240 struct net *net = xp_net(pol);
1241
1242 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1243 pol = __xfrm_policy_unlink(pol, dir);
1244 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1245 if (pol) {
1246 xfrm_policy_kill(pol);
1247 return 0;
1248 }
1249 return -ENOENT;
1250 }
1251 EXPORT_SYMBOL(xfrm_policy_delete);
1252
1253 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1254 {
1255 struct net *net = sock_net(sk);
1256 struct xfrm_policy *old_pol;
1257
1258 #ifdef CONFIG_XFRM_SUB_POLICY
1259 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1260 return -EINVAL;
1261 #endif
1262
1263 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1264 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1265 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1266 if (pol) {
1267 pol->curlft.add_time = get_seconds();
1268 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1269 xfrm_sk_policy_link(pol, dir);
1270 }
1271 rcu_assign_pointer(sk->sk_policy[dir], pol);
1272 if (old_pol) {
1273 if (pol)
1274 xfrm_policy_requeue(old_pol, pol);
1275
1276 /* Unlinking succeeds always. This is the only function
1277 * allowed to delete or replace socket policy.
1278 */
1279 xfrm_sk_policy_unlink(old_pol, dir);
1280 }
1281 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1282
1283 if (old_pol) {
1284 xfrm_policy_kill(old_pol);
1285 }
1286 return 0;
1287 }
1288
1289 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1290 {
1291 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1292 struct net *net = xp_net(old);
1293
1294 if (newp) {
1295 newp->selector = old->selector;
1296 if (security_xfrm_policy_clone(old->security,
1297 &newp->security)) {
1298 kfree(newp);
1299 return NULL; /* ENOMEM */
1300 }
1301 newp->lft = old->lft;
1302 newp->curlft = old->curlft;
1303 newp->mark = old->mark;
1304 newp->action = old->action;
1305 newp->flags = old->flags;
1306 newp->xfrm_nr = old->xfrm_nr;
1307 newp->index = old->index;
1308 newp->type = old->type;
1309 newp->family = old->family;
1310 memcpy(newp->xfrm_vec, old->xfrm_vec,
1311 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1312 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1313 xfrm_sk_policy_link(newp, dir);
1314 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1315 xfrm_pol_put(newp);
1316 }
1317 return newp;
1318 }
1319
1320 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1321 {
1322 const struct xfrm_policy *p;
1323 struct xfrm_policy *np;
1324 int i, ret = 0;
1325
1326 rcu_read_lock();
1327 for (i = 0; i < 2; i++) {
1328 p = rcu_dereference(osk->sk_policy[i]);
1329 if (p) {
1330 np = clone_policy(p, i);
1331 if (unlikely(!np)) {
1332 ret = -ENOMEM;
1333 break;
1334 }
1335 rcu_assign_pointer(sk->sk_policy[i], np);
1336 }
1337 }
1338 rcu_read_unlock();
1339 return ret;
1340 }
1341
1342 static int
1343 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1344 xfrm_address_t *remote, unsigned short family, u32 mark)
1345 {
1346 int err;
1347 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1348
1349 if (unlikely(afinfo == NULL))
1350 return -EINVAL;
1351 err = afinfo->get_saddr(net, oif, local, remote, mark);
1352 rcu_read_unlock();
1353 return err;
1354 }
1355
1356 /* Resolve list of templates for the flow, given policy. */
1357
1358 static int
1359 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1360 struct xfrm_state **xfrm, unsigned short family)
1361 {
1362 struct net *net = xp_net(policy);
1363 int nx;
1364 int i, error;
1365 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1366 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1367 xfrm_address_t tmp;
1368
1369 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1370 struct xfrm_state *x;
1371 xfrm_address_t *remote = daddr;
1372 xfrm_address_t *local = saddr;
1373 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1374
1375 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1376 tmpl->mode == XFRM_MODE_BEET) {
1377 remote = &tmpl->id.daddr;
1378 local = &tmpl->saddr;
1379 if (xfrm_addr_any(local, tmpl->encap_family)) {
1380 error = xfrm_get_saddr(net, fl->flowi_oif,
1381 &tmp, remote,
1382 tmpl->encap_family, 0);
1383 if (error)
1384 goto fail;
1385 local = &tmp;
1386 }
1387 }
1388
1389 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1390
1391 if (x && x->km.state == XFRM_STATE_VALID) {
1392 xfrm[nx++] = x;
1393 daddr = remote;
1394 saddr = local;
1395 continue;
1396 }
1397 if (x) {
1398 error = (x->km.state == XFRM_STATE_ERROR ?
1399 -EINVAL : -EAGAIN);
1400 xfrm_state_put(x);
1401 } else if (error == -ESRCH) {
1402 error = -EAGAIN;
1403 }
1404
1405 if (!tmpl->optional)
1406 goto fail;
1407 }
1408 return nx;
1409
1410 fail:
1411 for (nx--; nx >= 0; nx--)
1412 xfrm_state_put(xfrm[nx]);
1413 return error;
1414 }
1415
1416 static int
1417 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1418 struct xfrm_state **xfrm, unsigned short family)
1419 {
1420 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1421 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1422 int cnx = 0;
1423 int error;
1424 int ret;
1425 int i;
1426
1427 for (i = 0; i < npols; i++) {
1428 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1429 error = -ENOBUFS;
1430 goto fail;
1431 }
1432
1433 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1434 if (ret < 0) {
1435 error = ret;
1436 goto fail;
1437 } else
1438 cnx += ret;
1439 }
1440
1441 /* found states are sorted for outbound processing */
1442 if (npols > 1)
1443 xfrm_state_sort(xfrm, tpp, cnx, family);
1444
1445 return cnx;
1446
1447 fail:
1448 for (cnx--; cnx >= 0; cnx--)
1449 xfrm_state_put(tpp[cnx]);
1450 return error;
1451
1452 }
1453
1454 static int xfrm_get_tos(const struct flowi *fl, int family)
1455 {
1456 const struct xfrm_policy_afinfo *afinfo;
1457 int tos;
1458
1459 afinfo = xfrm_policy_get_afinfo(family);
1460 if (!afinfo)
1461 return 0;
1462
1463 tos = afinfo->get_tos(fl);
1464
1465 rcu_read_unlock();
1466
1467 return tos;
1468 }
1469
1470 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1471 {
1472 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1473 struct dst_ops *dst_ops;
1474 struct xfrm_dst *xdst;
1475
1476 if (!afinfo)
1477 return ERR_PTR(-EINVAL);
1478
1479 switch (family) {
1480 case AF_INET:
1481 dst_ops = &net->xfrm.xfrm4_dst_ops;
1482 break;
1483 #if IS_ENABLED(CONFIG_IPV6)
1484 case AF_INET6:
1485 dst_ops = &net->xfrm.xfrm6_dst_ops;
1486 break;
1487 #endif
1488 default:
1489 BUG();
1490 }
1491 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1492
1493 if (likely(xdst)) {
1494 struct dst_entry *dst = &xdst->u.dst;
1495
1496 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1497 } else
1498 xdst = ERR_PTR(-ENOBUFS);
1499
1500 rcu_read_unlock();
1501
1502 return xdst;
1503 }
1504
1505 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1506 int nfheader_len)
1507 {
1508 const struct xfrm_policy_afinfo *afinfo =
1509 xfrm_policy_get_afinfo(dst->ops->family);
1510 int err;
1511
1512 if (!afinfo)
1513 return -EINVAL;
1514
1515 err = afinfo->init_path(path, dst, nfheader_len);
1516
1517 rcu_read_unlock();
1518
1519 return err;
1520 }
1521
1522 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1523 const struct flowi *fl)
1524 {
1525 const struct xfrm_policy_afinfo *afinfo =
1526 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1527 int err;
1528
1529 if (!afinfo)
1530 return -EINVAL;
1531
1532 err = afinfo->fill_dst(xdst, dev, fl);
1533
1534 rcu_read_unlock();
1535
1536 return err;
1537 }
1538
1539
1540 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1541 * all the metrics... Shortly, bundle a bundle.
1542 */
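/* The chain built below looks like (illustrative):
 *
 *	dst0 (outermost xfrm_dst) -> ... -> last xfrm_dst -> dst (plain route)
 *
 * linked through ->child, with one xfrm_state attached to each xfrm_dst via
 * ->xfrm, ->route pointing at the routing entry captured at that level, and
 * header/trailer lengths accumulated so that the outermost entry reserves
 * room for every transform in the bundle.
 */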
1543
1544 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1545 struct xfrm_state **xfrm, int nx,
1546 const struct flowi *fl,
1547 struct dst_entry *dst)
1548 {
1549 struct net *net = xp_net(policy);
1550 unsigned long now = jiffies;
1551 struct net_device *dev;
1552 struct xfrm_mode *inner_mode;
1553 struct dst_entry *dst_prev = NULL;
1554 struct dst_entry *dst0 = NULL;
1555 int i = 0;
1556 int err;
1557 int header_len = 0;
1558 int nfheader_len = 0;
1559 int trailer_len = 0;
1560 int tos;
1561 int family = policy->selector.family;
1562 xfrm_address_t saddr, daddr;
1563
1564 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1565
1566 tos = xfrm_get_tos(fl, family);
1567
1568 dst_hold(dst);
1569
1570 for (; i < nx; i++) {
1571 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1572 struct dst_entry *dst1 = &xdst->u.dst;
1573
1574 err = PTR_ERR(xdst);
1575 if (IS_ERR(xdst)) {
1576 dst_release(dst);
1577 goto put_states;
1578 }
1579
1580 if (!dst_prev)
1581 dst0 = dst1;
1582 else
1583 /* Ref count is taken during xfrm_alloc_dst()
1584 * No need to do dst_clone() on dst1
1585 */
1586 dst_prev->child = dst1;
1587
1588 if (xfrm[i]->sel.family == AF_UNSPEC) {
1589 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1590 xfrm_af2proto(family));
1591 if (!inner_mode) {
1592 err = -EAFNOSUPPORT;
1593 dst_release(dst);
1594 goto put_states;
1595 }
1596 } else
1597 inner_mode = xfrm[i]->inner_mode;
1598
1599 xdst->route = dst;
1600 dst_copy_metrics(dst1, dst);
1601
1602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1603 family = xfrm[i]->props.family;
1604 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1605 &saddr, &daddr, family,
1606 xfrm[i]->props.output_mark);
1607 err = PTR_ERR(dst);
1608 if (IS_ERR(dst))
1609 goto put_states;
1610 } else
1611 dst_hold(dst);
1612
1613 dst1->xfrm = xfrm[i];
1614 xdst->xfrm_genid = xfrm[i]->genid;
1615
1616 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1617 dst1->flags |= DST_HOST;
1618 dst1->lastuse = now;
1619
1620 dst1->input = dst_discard;
1621 dst1->output = inner_mode->afinfo->output;
1622
1623 dst1->next = dst_prev;
1624 dst_prev = dst1;
1625
1626 header_len += xfrm[i]->props.header_len;
1627 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1628 nfheader_len += xfrm[i]->props.header_len;
1629 trailer_len += xfrm[i]->props.trailer_len;
1630 }
1631
1632 dst_prev->child = dst;
1633 dst0->path = dst;
1634
1635 err = -ENODEV;
1636 dev = dst->dev;
1637 if (!dev)
1638 goto free_dst;
1639
1640 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1641 xfrm_init_pmtu(dst_prev);
1642
1643 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1644 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1645
1646 err = xfrm_fill_dst(xdst, dev, fl);
1647 if (err)
1648 goto free_dst;
1649
1650 dst_prev->header_len = header_len;
1651 dst_prev->trailer_len = trailer_len;
1652 header_len -= xdst->u.dst.xfrm->props.header_len;
1653 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1654 }
1655
1656 out:
1657 return dst0;
1658
1659 put_states:
1660 for (; i < nx; i++)
1661 xfrm_state_put(xfrm[i]);
1662 free_dst:
1663 if (dst0)
1664 dst_release_immediate(dst0);
1665 dst0 = ERR_PTR(err);
1666 goto out;
1667 }
1668
1669 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1670 struct xfrm_policy **pols,
1671 int *num_pols, int *num_xfrms)
1672 {
1673 int i;
1674
1675 if (*num_pols == 0 || !pols[0]) {
1676 *num_pols = 0;
1677 *num_xfrms = 0;
1678 return 0;
1679 }
1680 if (IS_ERR(pols[0]))
1681 return PTR_ERR(pols[0]);
1682
1683 *num_xfrms = pols[0]->xfrm_nr;
1684
1685 #ifdef CONFIG_XFRM_SUB_POLICY
1686 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1687 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1688 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1689 XFRM_POLICY_TYPE_MAIN,
1690 fl, family,
1691 XFRM_POLICY_OUT);
1692 if (pols[1]) {
1693 if (IS_ERR(pols[1])) {
1694 xfrm_pols_put(pols, *num_pols);
1695 return PTR_ERR(pols[1]);
1696 }
1697 (*num_pols)++;
1698 (*num_xfrms) += pols[1]->xfrm_nr;
1699 }
1700 }
1701 #endif
1702 for (i = 0; i < *num_pols; i++) {
1703 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1704 *num_xfrms = -1;
1705 break;
1706 }
1707 }
1708
1709 return 0;
1710
1711 }
1712
1713 static struct xfrm_dst *
1714 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1715 const struct flowi *fl, u16 family,
1716 struct dst_entry *dst_orig)
1717 {
1718 struct net *net = xp_net(pols[0]);
1719 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1720 struct xfrm_dst *xdst;
1721 struct dst_entry *dst;
1722 int err;
1723
1724 /* Try to instantiate a bundle */
1725 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1726 if (err <= 0) {
1727 if (err == 0)
1728 return NULL;
1729
1730 if (err != -EAGAIN)
1731 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1732 return ERR_PTR(err);
1733 }
1734
1735 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1736 if (IS_ERR(dst)) {
1737 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1738 return ERR_CAST(dst);
1739 }
1740
1741 xdst = (struct xfrm_dst *)dst;
1742 xdst->num_xfrms = err;
1743 xdst->num_pols = num_pols;
1744 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1745 xdst->policy_genid = atomic_read(&pols[0]->genid);
1746
1747 return xdst;
1748 }
1749
1750 static void xfrm_policy_queue_process(unsigned long arg)
1751 {
1752 struct sk_buff *skb;
1753 struct sock *sk;
1754 struct dst_entry *dst;
1755 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1756 struct net *net = xp_net(pol);
1757 struct xfrm_policy_queue *pq = &pol->polq;
1758 struct flowi fl;
1759 struct sk_buff_head list;
1760
1761 spin_lock(&pq->hold_queue.lock);
1762 skb = skb_peek(&pq->hold_queue);
1763 if (!skb) {
1764 spin_unlock(&pq->hold_queue.lock);
1765 goto out;
1766 }
1767 dst = skb_dst(skb);
1768 sk = skb->sk;
1769 xfrm_decode_session(skb, &fl, dst->ops->family);
1770 spin_unlock(&pq->hold_queue.lock);
1771
1772 dst_hold(dst->path);
1773 dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1774 if (IS_ERR(dst))
1775 goto purge_queue;
1776
1777 if (dst->flags & DST_XFRM_QUEUE) {
1778 dst_release(dst);
1779
1780 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1781 goto purge_queue;
1782
1783 pq->timeout = pq->timeout << 1;
1784 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1785 xfrm_pol_hold(pol);
1786 goto out;
1787 }
1788
1789 dst_release(dst);
1790
1791 __skb_queue_head_init(&list);
1792
1793 spin_lock(&pq->hold_queue.lock);
1794 pq->timeout = 0;
1795 skb_queue_splice_init(&pq->hold_queue, &list);
1796 spin_unlock(&pq->hold_queue.lock);
1797
1798 while (!skb_queue_empty(&list)) {
1799 skb = __skb_dequeue(&list);
1800
1801 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1802 dst_hold(skb_dst(skb)->path);
1803 dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1804 if (IS_ERR(dst)) {
1805 kfree_skb(skb);
1806 continue;
1807 }
1808
1809 nf_reset(skb);
1810 skb_dst_drop(skb);
1811 skb_dst_set(skb, dst);
1812
1813 dst_output(net, skb->sk, skb);
1814 }
1815
1816 out:
1817 xfrm_pol_put(pol);
1818 return;
1819
1820 purge_queue:
1821 pq->timeout = 0;
1822 skb_queue_purge(&pq->hold_queue);
1823 xfrm_pol_put(pol);
1824 }
1825
1826 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1827 {
1828 unsigned long sched_next;
1829 struct dst_entry *dst = skb_dst(skb);
1830 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1831 struct xfrm_policy *pol = xdst->pols[0];
1832 struct xfrm_policy_queue *pq = &pol->polq;
1833
1834 if (unlikely(skb_fclone_busy(sk, skb))) {
1835 kfree_skb(skb);
1836 return 0;
1837 }
1838
1839 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1840 kfree_skb(skb);
1841 return -EAGAIN;
1842 }
1843
1844 skb_dst_force(skb);
1845
1846 spin_lock_bh(&pq->hold_queue.lock);
1847
1848 if (!pq->timeout)
1849 pq->timeout = XFRM_QUEUE_TMO_MIN;
1850
1851 sched_next = jiffies + pq->timeout;
1852
1853 if (del_timer(&pq->hold_timer)) {
1854 if (time_before(pq->hold_timer.expires, sched_next))
1855 sched_next = pq->hold_timer.expires;
1856 xfrm_pol_put(pol);
1857 }
1858
1859 __skb_queue_tail(&pq->hold_queue, skb);
1860 if (!mod_timer(&pq->hold_timer, sched_next))
1861 xfrm_pol_hold(pol);
1862
1863 spin_unlock_bh(&pq->hold_queue.lock);
1864
1865 return 0;
1866 }
1867
1868 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1869 struct xfrm_flo *xflo,
1870 const struct flowi *fl,
1871 int num_xfrms,
1872 u16 family)
1873 {
1874 int err;
1875 struct net_device *dev;
1876 struct dst_entry *dst;
1877 struct dst_entry *dst1;
1878 struct xfrm_dst *xdst;
1879
1880 xdst = xfrm_alloc_dst(net, family);
1881 if (IS_ERR(xdst))
1882 return xdst;
1883
1884 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
1885 net->xfrm.sysctl_larval_drop ||
1886 num_xfrms <= 0)
1887 return xdst;
1888
1889 dst = xflo->dst_orig;
1890 dst1 = &xdst->u.dst;
1891 dst_hold(dst);
1892 xdst->route = dst;
1893
1894 dst_copy_metrics(dst1, dst);
1895
1896 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1897 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1898 dst1->lastuse = jiffies;
1899
1900 dst1->input = dst_discard;
1901 dst1->output = xdst_queue_output;
1902
1903 dst_hold(dst);
1904 dst1->child = dst;
1905 dst1->path = dst;
1906
1907 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1908
1909 err = -ENODEV;
1910 dev = dst->dev;
1911 if (!dev)
1912 goto free_dst;
1913
1914 err = xfrm_fill_dst(xdst, dev, fl);
1915 if (err)
1916 goto free_dst;
1917
1918 out:
1919 return xdst;
1920
1921 free_dst:
1922 dst_release(dst1);
1923 xdst = ERR_PTR(err);
1924 goto out;
1925 }
1926
1927 static struct xfrm_dst *
1928 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
1929 {
1930 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1931 int num_pols = 0, num_xfrms = 0, err;
1932 struct xfrm_dst *xdst;
1933
1934 /* Resolve policies to use if we couldn't get them from
1935 * previous cache entry */
1936 num_pols = 1;
1937 pols[0] = xfrm_policy_lookup(net, fl, family, dir);
1938 err = xfrm_expand_policies(fl, family, pols,
1939 &num_pols, &num_xfrms);
1940 if (err < 0)
1941 goto inc_error;
1942 if (num_pols == 0)
1943 return NULL;
1944 if (num_xfrms <= 0)
1945 goto make_dummy_bundle;
1946
1947 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
1948 xflo->dst_orig);
1949 if (IS_ERR(xdst)) {
1950 err = PTR_ERR(xdst);
1951 if (err != -EAGAIN)
1952 goto error;
1953 goto make_dummy_bundle;
1954 } else if (xdst == NULL) {
1955 num_xfrms = 0;
1956 goto make_dummy_bundle;
1957 }
1958
1959 return xdst;
1960
1961 make_dummy_bundle:
1962 /* We found policies, but there are no bundles to instantiate:
1963 * either because the policy blocks, has no transformations, or
1964 * we could not build a template (no xfrm_states). */
1965 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
1966 if (IS_ERR(xdst)) {
1967 xfrm_pols_put(pols, num_pols);
1968 return ERR_CAST(xdst);
1969 }
1970 xdst->num_pols = num_pols;
1971 xdst->num_xfrms = num_xfrms;
1972 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1973
1974 return xdst;
1975
1976 inc_error:
1977 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1978 error:
1979 xfrm_pols_put(pols, num_pols);
1980 return ERR_PTR(err);
1981 }
1982
1983 static struct dst_entry *make_blackhole(struct net *net, u16 family,
1984 struct dst_entry *dst_orig)
1985 {
1986 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1987 struct dst_entry *ret;
1988
1989 if (!afinfo) {
1990 dst_release(dst_orig);
1991 return ERR_PTR(-EINVAL);
1992 } else {
1993 ret = afinfo->blackhole_route(net, dst_orig);
1994 }
1995 rcu_read_unlock();
1996
1997 return ret;
1998 }
1999
2000 /* Main function: finds/creates a bundle for given flow.
2001 *
2002 * At the moment we eat a raw IP route. Mostly to speed up lookups
2003 * on interfaces with disabled IPsec.
2004 */
2005 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2006 const struct flowi *fl,
2007 const struct sock *sk, int flags)
2008 {
2009 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2010 struct xfrm_dst *xdst;
2011 struct dst_entry *dst, *route;
2012 u16 family = dst_orig->ops->family;
2013 u8 dir = XFRM_POLICY_OUT;
2014 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2015
2016 dst = NULL;
2017 xdst = NULL;
2018 route = NULL;
2019
2020 sk = sk_const_to_full_sk(sk);
2021 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2022 num_pols = 1;
2023 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2024 err = xfrm_expand_policies(fl, family, pols,
2025 &num_pols, &num_xfrms);
2026 if (err < 0)
2027 goto dropdst;
2028
2029 if (num_pols) {
2030 if (num_xfrms <= 0) {
2031 drop_pols = num_pols;
2032 goto no_transform;
2033 }
2034
2035 xdst = xfrm_resolve_and_create_bundle(
2036 pols, num_pols, fl,
2037 family, dst_orig);
2038
2039 if (IS_ERR(xdst)) {
2040 xfrm_pols_put(pols, num_pols);
2041 err = PTR_ERR(xdst);
2042 goto dropdst;
2043 } else if (xdst == NULL) {
2044 num_xfrms = 0;
2045 drop_pols = num_pols;
2046 goto no_transform;
2047 }
2048
2049 route = xdst->route;
2050 }
2051 }
2052
2053 if (xdst == NULL) {
2054 struct xfrm_flo xflo;
2055
2056 xflo.dst_orig = dst_orig;
2057 xflo.flags = flags;
2058
2059 /* To accelerate a bit... */
2060 if ((dst_orig->flags & DST_NOXFRM) ||
2061 !net->xfrm.policy_count[XFRM_POLICY_OUT])
2062 goto nopol;
2063
2064 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
2065 if (xdst == NULL)
2066 goto nopol;
2067 if (IS_ERR(xdst)) {
2068 err = PTR_ERR(xdst);
2069 goto dropdst;
2070 }
2071
2072 num_pols = xdst->num_pols;
2073 num_xfrms = xdst->num_xfrms;
2074 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2075 route = xdst->route;
2076 }
2077
2078 dst = &xdst->u.dst;
2079 if (route == NULL && num_xfrms > 0) {
2080 /* The only case when xfrm_bundle_lookup() returns a
2081 * bundle with a null route is when the template could
2082 * not be resolved. It means policies are there, but the
2083 * bundle could not be created, since we don't yet
2084 * have the xfrm_states. We need to wait for the KM to
2085 * negotiate new SAs or bail out with an error. */
2086 if (net->xfrm.sysctl_larval_drop) {
2087 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2088 err = -EREMOTE;
2089 goto error;
2090 }
2091
2092 err = -EAGAIN;
2093
2094 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2095 goto error;
2096 }
2097
2098 no_transform:
2099 if (num_pols == 0)
2100 goto nopol;
2101
2102 if ((flags & XFRM_LOOKUP_ICMP) &&
2103 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2104 err = -ENOENT;
2105 goto error;
2106 }
2107
2108 for (i = 0; i < num_pols; i++)
2109 pols[i]->curlft.use_time = get_seconds();
2110
2111 if (num_xfrms < 0) {
2112 /* Prohibit the flow */
2113 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2114 err = -EPERM;
2115 goto error;
2116 } else if (num_xfrms > 0) {
2117 /* Flow transformed */
2118 dst_release(dst_orig);
2119 } else {
2120 /* Flow passes untransformed */
2121 dst_release(dst);
2122 dst = dst_orig;
2123 }
2124 ok:
2125 xfrm_pols_put(pols, drop_pols);
2126 if (dst && dst->xfrm &&
2127 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2128 dst->flags |= DST_XFRM_TUNNEL;
2129 return dst;
2130
2131 nopol:
2132 if (!(flags & XFRM_LOOKUP_ICMP)) {
2133 dst = dst_orig;
2134 goto ok;
2135 }
2136 err = -ENOENT;
2137 error:
2138 dst_release(dst);
2139 dropdst:
2140 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2141 dst_release(dst_orig);
2142 xfrm_pols_put(pols, drop_pols);
2143 return ERR_PTR(err);
2144 }
2145 EXPORT_SYMBOL(xfrm_lookup);
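/* A minimal usage sketch of the lookup above, assuming a caller that already
 * holds a plain routing result "rt" and a filled-in "fl4"; the variable names
 * are illustrative and not taken from this file:
 *
 *	dst = xfrm_lookup(net, &rt->dst, flowi4_to_flowi(&fl4), sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * On error the policy blocked the flow or bundle resolution failed; on
 * success dst is either dst_orig (flow passes untransformed) or an xfrm
 * bundle.
 */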
2146
2147 /* Callers of xfrm_lookup_route() must ensure that dst_output() is eventually
2148 * called on the returned dst; otherwise we may send out blackholed packets.
2149 */
2150 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2151 const struct flowi *fl,
2152 const struct sock *sk, int flags)
2153 {
2154 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2155 flags | XFRM_LOOKUP_QUEUE |
2156 XFRM_LOOKUP_KEEP_DST_REF);
2157
2158 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2159 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2160
2161 if (IS_ERR(dst))
2162 dst_release(dst_orig);
2163
2164 return dst;
2165 }
2166 EXPORT_SYMBOL(xfrm_lookup_route);
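/* An illustrative caller of xfrm_lookup_route(), per the requirement noted
 * above that dst_output() must eventually run on the result (variable names
 * are assumptions, not taken from this file):
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 *
 * A blackhole dst returned for the -EREMOTE case is then discarded by
 * dst_output() instead of leaking an unencrypted packet.
 */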
2167
2168 static inline int
2169 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2170 {
2171 struct xfrm_state *x;
2172
2173 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2174 return 0;
2175 x = skb->sp->xvec[idx];
2176 if (!x->type->reject)
2177 return 0;
2178 return x->type->reject(x, skb, fl);
2179 }
2180
2181 /* When an skb is transformed back to its "native" form, we have to
2182 * check policy restrictions. At the moment we do this in a maximally
2183 * stupid way. Shame on me. :-) Of course, connected sockets must
2184 * have the policy cached at them.
2185 */
2186
2187 static inline int
2188 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2189 unsigned short family)
2190 {
2191 if (xfrm_state_kern(x))
2192 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2193 return x->id.proto == tmpl->id.proto &&
2194 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2195 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2196 x->props.mode == tmpl->mode &&
2197 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2198 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2199 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2200 xfrm_state_addr_cmp(tmpl, x, family));
2201 }
2202
2203 /*
2204 * A value of 0 or more is returned when validation succeeds (either a bypass
2205 * because of an optional transport-mode template, or the next index after the
2206 * secpath state matched against the template).
2207 * -1 is returned when no matching template is found.
2208 * Otherwise "-2 - errored_index" is returned.
2209 */
2210 static inline int
2211 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2212 unsigned short family)
2213 {
2214 int idx = start;
2215
2216 if (tmpl->optional) {
2217 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2218 return start;
2219 } else
2220 start = -1;
2221 for (; idx < sp->len; idx++) {
2222 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2223 return ++idx;
2224 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2225 if (start == -1)
2226 start = -2-idx;
2227 break;
2228 }
2229 }
2230 return start;
2231 }
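/* A worked example of the return contract documented above, assuming a
 * secpath of two states { xvec[0] = ESP/transport, xvec[1] = ESP/tunnel }
 * and templates using wildcard spi and reqid:
 *
 *   - a required ESP transport-mode template with start == 0 matches xvec[0]
 *     and returns 1;
 *   - a required ESP tunnel-mode template tried next with start == 1 matches
 *     xvec[1] and returns 2;
 *   - a required AH template with start == 0 matches neither state; the scan
 *     stops at the non-transport xvec[1] and returns -2 - 1 == -3, flagging
 *     index 1 as the errored entry;
 *   - an optional transport-mode template simply returns start (bypass).
 */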
2232
2233 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2234 unsigned int family, int reverse)
2235 {
2236 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2237 int err;
2238
2239 if (unlikely(afinfo == NULL))
2240 return -EAFNOSUPPORT;
2241
2242 afinfo->decode_session(skb, fl, reverse);
2243 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2244 rcu_read_unlock();
2245 return err;
2246 }
2247 EXPORT_SYMBOL(__xfrm_decode_session);
2248
2249 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2250 {
2251 for (; k < sp->len; k++) {
2252 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2253 *idxp = k;
2254 return 1;
2255 }
2256 }
2257
2258 return 0;
2259 }
2260
2261 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2262 unsigned short family)
2263 {
2264 struct net *net = dev_net(skb->dev);
2265 struct xfrm_policy *pol;
2266 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2267 int npols = 0;
2268 int xfrm_nr;
2269 int pi;
2270 int reverse;
2271 struct flowi fl;
2272 int xerr_idx = -1;
2273
2274 reverse = dir & ~XFRM_POLICY_MASK;
2275 dir &= XFRM_POLICY_MASK;
2276
2277 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2278 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2279 return 0;
2280 }
2281
2282 nf_nat_decode_session(skb, &fl, family);
2283
2284 /* First, check the used SAs against their selectors. */
2285 if (skb->sp) {
2286 int i;
2287
2288 for (i = skb->sp->len-1; i >= 0; i--) {
2289 struct xfrm_state *x = skb->sp->xvec[i];
2290 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2291 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2292 return 0;
2293 }
2294 }
2295 }
2296
2297 pol = NULL;
2298 sk = sk_to_full_sk(sk);
2299 if (sk && sk->sk_policy[dir]) {
2300 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2301 if (IS_ERR(pol)) {
2302 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2303 return 0;
2304 }
2305 }
2306
2307 if (!pol)
2308 pol = xfrm_policy_lookup(net, &fl, family, dir);
2309
2310 if (IS_ERR(pol)) {
2311 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2312 return 0;
2313 }
2314
2315 if (!pol) {
2316 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2317 xfrm_secpath_reject(xerr_idx, skb, &fl);
2318 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2319 return 0;
2320 }
2321 return 1;
2322 }
2323
2324 pol->curlft.use_time = get_seconds();
2325
2326 pols[0] = pol;
2327 npols++;
2328 #ifdef CONFIG_XFRM_SUB_POLICY
2329 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2330 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2331 &fl, family,
2332 XFRM_POLICY_IN);
2333 if (pols[1]) {
2334 if (IS_ERR(pols[1])) {
2335 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2336 return 0;
2337 }
2338 pols[1]->curlft.use_time = get_seconds();
2339 npols++;
2340 }
2341 }
2342 #endif
2343
2344 if (pol->action == XFRM_POLICY_ALLOW) {
2345 struct sec_path *sp;
2346 static struct sec_path dummy;
2347 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2348 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2349 struct xfrm_tmpl **tpp = tp;
2350 int ti = 0;
2351 int i, k;
2352
2353 if ((sp = skb->sp) == NULL)
2354 sp = &dummy;
2355
2356 for (pi = 0; pi < npols; pi++) {
2357 if (pols[pi] != pol &&
2358 pols[pi]->action != XFRM_POLICY_ALLOW) {
2359 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2360 goto reject;
2361 }
2362 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2363 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2364 goto reject_error;
2365 }
2366 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2367 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2368 }
2369 xfrm_nr = ti;
2370 if (npols > 1) {
2371 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2372 tpp = stp;
2373 }
2374
2375 /* For each tunnel xfrm, find the first matching tmpl.
2376 * For each tmpl before that, find the corresponding xfrm.
2377 * Order is _important_. Later we will implement
2378 * some barriers, but at the moment barriers
2379 * are implied between each pair of transformations.
2380 */
2381 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2382 k = xfrm_policy_ok(tpp[i], sp, k, family);
2383 if (k < 0) {
2384 if (k < -1)
2385 /* "-2 - errored_index" returned */
2386 xerr_idx = -(2+k);
2387 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2388 goto reject;
2389 }
2390 }
2391
2392 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2393 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2394 goto reject;
2395 }
2396
2397 xfrm_pols_put(pols, npols);
2398 return 1;
2399 }
2400 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2401
2402 reject:
2403 xfrm_secpath_reject(xerr_idx, skb, &fl);
2404 reject_error:
2405 xfrm_pols_put(pols, npols);
2406 return 0;
2407 }
2408 EXPORT_SYMBOL(__xfrm_policy_check);
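/* A small worked example of the check above, under the assumption of a policy
 * holding a single required ESP tunnel-mode template: with a secpath carrying
 * one matching ESP tunnel state, xfrm_policy_ok() advances k to 1,
 * secpath_has_nontransport(sp, 1, ...) finds nothing left over, and the
 * packet is accepted.  With an empty secpath the same template yields -1 and
 * the packet is rejected, bumping LINUX_MIB_XFRMINTMPLMISMATCH.
 */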
2409
2410 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2411 {
2412 struct net *net = dev_net(skb->dev);
2413 struct flowi fl;
2414 struct dst_entry *dst;
2415 int res = 1;
2416
2417 if (xfrm_decode_session(skb, &fl, family) < 0) {
2418 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2419 return 0;
2420 }
2421
2422 skb_dst_force(skb);
2423 if (!skb_dst(skb)) {
2424 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2425 return 0;
2426 }
2427
2428 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2429 if (IS_ERR(dst)) {
2430 res = 0;
2431 dst = NULL;
2432 }
2433 skb_dst_set(skb, dst);
2434 return res;
2435 }
2436 EXPORT_SYMBOL(__xfrm_route_forward);
2437
2438 /* Optimize later using cookies and generation ids. */
2439
2440 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2441 {
2442 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2443 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2444 * get validated by dst_ops->check on every use. We do this
2445 * because when a normal route referenced by an XFRM dst is
2446 * obsoleted we do not go looking around for all parent
2447 * referencing XFRM dsts so that we can invalidate them. It
2448 * is just too much work. Instead we make the checks here on
2449 * every use. For example:
2450 *
2451 * XFRM dst A --> IPv4 dst X
2452 *
2453 * X is the "xdst->route" of A (X is also the "dst->path" of A
2454 * in this example). If X is marked obsolete, "A" will not
2455 * notice. That's what we are validating here via the
2456 * stale_bundle() check.
2457 *
2458 * When a dst is removed from the fib tree, it will be marked
2459 * DST_OBSOLETE_DEAD.
2460 * This forces stale_bundle() to fail on any xdst bundle that has
2461 * this dst linked into it.
2462 */
2463 if (dst->obsolete < 0 && !stale_bundle(dst))
2464 return dst;
2465
2466 return NULL;
2467 }
2468
2469 static int stale_bundle(struct dst_entry *dst)
2470 {
2471 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2472 }
2473
2474 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2475 {
2476 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2477 dst->dev = dev_net(dev)->loopback_dev;
2478 dev_hold(dst->dev);
2479 dev_put(dev);
2480 }
2481 }
2482 EXPORT_SYMBOL(xfrm_dst_ifdown);
2483
2484 static void xfrm_link_failure(struct sk_buff *skb)
2485 {
2486 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
2487 }
2488
2489 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2490 {
2491 if (dst) {
2492 if (dst->obsolete) {
2493 dst_release(dst);
2494 dst = NULL;
2495 }
2496 }
2497 return dst;
2498 }
2499
2500 static void xfrm_init_pmtu(struct dst_entry *dst)
2501 {
2502 do {
2503 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2504 u32 pmtu, route_mtu_cached;
2505
2506 pmtu = dst_mtu(dst->child);
2507 xdst->child_mtu_cached = pmtu;
2508
2509 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2510
2511 route_mtu_cached = dst_mtu(xdst->route);
2512 xdst->route_mtu_cached = route_mtu_cached;
2513
2514 if (pmtu > route_mtu_cached)
2515 pmtu = route_mtu_cached;
2516
2517 dst_metric_set(dst, RTAX_MTU, pmtu);
2518 } while ((dst = dst->next));
2519 }
2520
2521 /* Check that the bundle accepts the flow and that its components are
2522 * still valid.
2523 */
2524
2525 static int xfrm_bundle_ok(struct xfrm_dst *first)
2526 {
2527 struct dst_entry *dst = &first->u.dst;
2528 struct xfrm_dst *last;
2529 u32 mtu;
2530
2531 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2532 (dst->dev && !netif_running(dst->dev)))
2533 return 0;
2534
2535 if (dst->flags & DST_XFRM_QUEUE)
2536 return 1;
2537
2538 last = NULL;
2539
2540 do {
2541 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2542
2543 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2544 return 0;
2545 if (xdst->xfrm_genid != dst->xfrm->genid)
2546 return 0;
2547 if (xdst->num_pols > 0 &&
2548 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2549 return 0;
2550
2551 mtu = dst_mtu(dst->child);
2552 if (xdst->child_mtu_cached != mtu) {
2553 last = xdst;
2554 xdst->child_mtu_cached = mtu;
2555 }
2556
2557 if (!dst_check(xdst->route, xdst->route_cookie))
2558 return 0;
2559 mtu = dst_mtu(xdst->route);
2560 if (xdst->route_mtu_cached != mtu) {
2561 last = xdst;
2562 xdst->route_mtu_cached = mtu;
2563 }
2564
2565 dst = dst->child;
2566 } while (dst->xfrm);
2567
2568 if (likely(!last))
2569 return 1;
2570
2571 mtu = last->child_mtu_cached;
2572 for (;;) {
2573 dst = &last->u.dst;
2574
2575 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2576 if (mtu > last->route_mtu_cached)
2577 mtu = last->route_mtu_cached;
2578 dst_metric_set(dst, RTAX_MTU, mtu);
2579
2580 if (last == first)
2581 break;
2582
2583 last = (struct xfrm_dst *)last->u.dst.next;
2584 last->child_mtu_cached = mtu;
2585 }
2586
2587 return 1;
2588 }
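/* An illustrative pass through the MTU bookkeeping above: suppose the route
 * under a single-level ESP tunnel bundle reports an MTU of 1500 while the
 * cached value was stale.  The first loop records last = xdst and caches the
 * new child/route MTUs; the second loop then recomputes the usable MTU via
 * xfrm_state_mtu() (roughly 1500 minus the ESP overhead, the exact figure
 * depending on the algorithms in use), clamps it to route_mtu_cached and
 * stores it in RTAX_MTU, so a still-valid bundle keeps a consistent path MTU
 * without being torn down.
 */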
2589
2590 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2591 {
2592 return dst_metric_advmss(dst->path);
2593 }
2594
2595 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2596 {
2597 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2598
2599 return mtu ? : dst_mtu(dst->path);
2600 }
2601
2602 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2603 const void *daddr)
2604 {
2605 const struct dst_entry *path = dst->path;
2606
2607 for (; dst != path; dst = dst->child) {
2608 const struct xfrm_state *xfrm = dst->xfrm;
2609
2610 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2611 continue;
2612 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2613 daddr = xfrm->coaddr;
2614 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2615 daddr = &xfrm->id.daddr;
2616 }
2617 return daddr;
2618 }
2619
2620 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2621 struct sk_buff *skb,
2622 const void *daddr)
2623 {
2624 const struct dst_entry *path = dst->path;
2625
2626 if (!skb)
2627 daddr = xfrm_get_dst_nexthop(dst, daddr);
2628 return path->ops->neigh_lookup(path, skb, daddr);
2629 }
2630
2631 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2632 {
2633 const struct dst_entry *path = dst->path;
2634
2635 daddr = xfrm_get_dst_nexthop(dst, daddr);
2636 path->ops->confirm_neigh(path, daddr);
2637 }
2638
2639 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2640 {
2641 int err = 0;
2642
2643 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2644 return -EAFNOSUPPORT;
2645
2646 spin_lock(&xfrm_policy_afinfo_lock);
2647 if (unlikely(xfrm_policy_afinfo[family] != NULL))
2648 err = -EEXIST;
2649 else {
2650 struct dst_ops *dst_ops = afinfo->dst_ops;
2651 if (likely(dst_ops->kmem_cachep == NULL))
2652 dst_ops->kmem_cachep = xfrm_dst_cache;
2653 if (likely(dst_ops->check == NULL))
2654 dst_ops->check = xfrm_dst_check;
2655 if (likely(dst_ops->default_advmss == NULL))
2656 dst_ops->default_advmss = xfrm_default_advmss;
2657 if (likely(dst_ops->mtu == NULL))
2658 dst_ops->mtu = xfrm_mtu;
2659 if (likely(dst_ops->negative_advice == NULL))
2660 dst_ops->negative_advice = xfrm_negative_advice;
2661 if (likely(dst_ops->link_failure == NULL))
2662 dst_ops->link_failure = xfrm_link_failure;
2663 if (likely(dst_ops->neigh_lookup == NULL))
2664 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2665 if (likely(!dst_ops->confirm_neigh))
2666 dst_ops->confirm_neigh = xfrm_confirm_neigh;
2667 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2668 }
2669 spin_unlock(&xfrm_policy_afinfo_lock);
2670
2671 return err;
2672 }
2673 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
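/* A sketch of how a per-family module is expected to hook in here, loosely
 * modelled on the IPv4 side (member list abridged, names indicative only):
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	= &xfrm4_dst_ops_template,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		.decode_session	= _decode_session4,
 *		...
 *	};
 *
 *	err = xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Any dst_ops callbacks the module leaves NULL are filled in with the xfrm
 * defaults above before the afinfo pointer is published under RCU.
 */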
2674
2675 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2676 {
2677 struct dst_ops *dst_ops = afinfo->dst_ops;
2678 int i;
2679
2680 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2681 if (xfrm_policy_afinfo[i] != afinfo)
2682 continue;
2683 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2684 break;
2685 }
2686
2687 synchronize_rcu();
2688
2689 dst_ops->kmem_cachep = NULL;
2690 dst_ops->check = NULL;
2691 dst_ops->negative_advice = NULL;
2692 dst_ops->link_failure = NULL;
2693 }
2694 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2695
2696 #ifdef CONFIG_XFRM_STATISTICS
2697 static int __net_init xfrm_statistics_init(struct net *net)
2698 {
2699 int rv;
2700 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2701 if (!net->mib.xfrm_statistics)
2702 return -ENOMEM;
2703 rv = xfrm_proc_init(net);
2704 if (rv < 0)
2705 free_percpu(net->mib.xfrm_statistics);
2706 return rv;
2707 }
2708
2709 static void xfrm_statistics_fini(struct net *net)
2710 {
2711 xfrm_proc_fini(net);
2712 free_percpu(net->mib.xfrm_statistics);
2713 }
2714 #else
2715 static int __net_init xfrm_statistics_init(struct net *net)
2716 {
2717 return 0;
2718 }
2719
2720 static void xfrm_statistics_fini(struct net *net)
2721 {
2722 }
2723 #endif
2724
2725 static int __net_init xfrm_policy_init(struct net *net)
2726 {
2727 unsigned int hmask, sz;
2728 int dir;
2729
2730 if (net_eq(net, &init_net))
2731 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2732 sizeof(struct xfrm_dst),
2733 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2734 NULL);
2735
2736 hmask = 8 - 1;
2737 sz = (hmask+1) * sizeof(struct hlist_head);
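/* With the initial hmask of 7 this works out to 8 buckets, i.e.
 * sz = 8 * sizeof(struct hlist_head) (64 bytes on a 64-bit build);
 * the tables are grown later by the xfrm_hash_resize work.
 */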
2738
2739 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2740 if (!net->xfrm.policy_byidx)
2741 goto out_byidx;
2742 net->xfrm.policy_idx_hmask = hmask;
2743
2744 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2745 struct xfrm_policy_hash *htab;
2746
2747 net->xfrm.policy_count[dir] = 0;
2748 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2749 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2750
2751 htab = &net->xfrm.policy_bydst[dir];
2752 htab->table = xfrm_hash_alloc(sz);
2753 if (!htab->table)
2754 goto out_bydst;
2755 htab->hmask = hmask;
2756 htab->dbits4 = 32;
2757 htab->sbits4 = 32;
2758 htab->dbits6 = 128;
2759 htab->sbits6 = 128;
2760 }
2761 net->xfrm.policy_hthresh.lbits4 = 32;
2762 net->xfrm.policy_hthresh.rbits4 = 32;
2763 net->xfrm.policy_hthresh.lbits6 = 128;
2764 net->xfrm.policy_hthresh.rbits6 = 128;
2765
2766 seqlock_init(&net->xfrm.policy_hthresh.lock);
2767
2768 INIT_LIST_HEAD(&net->xfrm.policy_all);
2769 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2770 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2771 if (net_eq(net, &init_net))
2772 xfrm_dev_init();
2773 return 0;
2774
2775 out_bydst:
2776 for (dir--; dir >= 0; dir--) {
2777 struct xfrm_policy_hash *htab;
2778
2779 htab = &net->xfrm.policy_bydst[dir];
2780 xfrm_hash_free(htab->table, sz);
2781 }
2782 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2783 out_byidx:
2784 return -ENOMEM;
2785 }
2786
2787 static void xfrm_policy_fini(struct net *net)
2788 {
2789 unsigned int sz;
2790 int dir;
2791
2792 flush_work(&net->xfrm.policy_hash_work);
2793 #ifdef CONFIG_XFRM_SUB_POLICY
2794 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2795 #endif
2796 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2797
2798 WARN_ON(!list_empty(&net->xfrm.policy_all));
2799
2800 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2801 struct xfrm_policy_hash *htab;
2802
2803 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2804
2805 htab = &net->xfrm.policy_bydst[dir];
2806 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2807 WARN_ON(!hlist_empty(htab->table));
2808 xfrm_hash_free(htab->table, sz);
2809 }
2810
2811 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2812 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2813 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2814 }
2815
2816 static int __net_init xfrm_net_init(struct net *net)
2817 {
2818 int rv;
2819
2820 /* Initialize the per-net locks here */
2821 spin_lock_init(&net->xfrm.xfrm_state_lock);
2822 spin_lock_init(&net->xfrm.xfrm_policy_lock);
2823 mutex_init(&net->xfrm.xfrm_cfg_mutex);
2824
2825 rv = xfrm_statistics_init(net);
2826 if (rv < 0)
2827 goto out_statistics;
2828 rv = xfrm_state_init(net);
2829 if (rv < 0)
2830 goto out_state;
2831 rv = xfrm_policy_init(net);
2832 if (rv < 0)
2833 goto out_policy;
2834 rv = xfrm_sysctl_init(net);
2835 if (rv < 0)
2836 goto out_sysctl;
2837
2838 return 0;
2839
2840 out_sysctl:
2841 xfrm_policy_fini(net);
2842 out_policy:
2843 xfrm_state_fini(net);
2844 out_state:
2845 xfrm_statistics_fini(net);
2846 out_statistics:
2847 return rv;
2848 }
2849
2850 static void __net_exit xfrm_net_exit(struct net *net)
2851 {
2852 xfrm_sysctl_fini(net);
2853 xfrm_policy_fini(net);
2854 xfrm_state_fini(net);
2855 xfrm_statistics_fini(net);
2856 }
2857
2858 static struct pernet_operations __net_initdata xfrm_net_ops = {
2859 .init = xfrm_net_init,
2860 .exit = xfrm_net_exit,
2861 };
2862
2863 void __init xfrm_init(void)
2864 {
2865 register_pernet_subsys(&xfrm_net_ops);
2866 seqcount_init(&xfrm_policy_hash_generation);
2867 xfrm_input_init();
2868 }
2869
2870 #ifdef CONFIG_AUDITSYSCALL
2871 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2872 struct audit_buffer *audit_buf)
2873 {
2874 struct xfrm_sec_ctx *ctx = xp->security;
2875 struct xfrm_selector *sel = &xp->selector;
2876
2877 if (ctx)
2878 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2879 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2880
2881 switch (sel->family) {
2882 case AF_INET:
2883 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2884 if (sel->prefixlen_s != 32)
2885 audit_log_format(audit_buf, " src_prefixlen=%d",
2886 sel->prefixlen_s);
2887 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2888 if (sel->prefixlen_d != 32)
2889 audit_log_format(audit_buf, " dst_prefixlen=%d",
2890 sel->prefixlen_d);
2891 break;
2892 case AF_INET6:
2893 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2894 if (sel->prefixlen_s != 128)
2895 audit_log_format(audit_buf, " src_prefixlen=%d",
2896 sel->prefixlen_s);
2897 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2898 if (sel->prefixlen_d != 128)
2899 audit_log_format(audit_buf, " dst_prefixlen=%d",
2900 sel->prefixlen_d);
2901 break;
2902 }
2903 }
2904
2905 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
2906 {
2907 struct audit_buffer *audit_buf;
2908
2909 audit_buf = xfrm_audit_start("SPD-add");
2910 if (audit_buf == NULL)
2911 return;
2912 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2913 audit_log_format(audit_buf, " res=%u", result);
2914 xfrm_audit_common_policyinfo(xp, audit_buf);
2915 audit_log_end(audit_buf);
2916 }
2917 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2918
2919 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2920 bool task_valid)
2921 {
2922 struct audit_buffer *audit_buf;
2923
2924 audit_buf = xfrm_audit_start("SPD-delete");
2925 if (audit_buf == NULL)
2926 return;
2927 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2928 audit_log_format(audit_buf, " res=%u", result);
2929 xfrm_audit_common_policyinfo(xp, audit_buf);
2930 audit_log_end(audit_buf);
2931 }
2932 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2933 #endif
2934
2935 #ifdef CONFIG_XFRM_MIGRATE
2936 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2937 const struct xfrm_selector *sel_tgt)
2938 {
2939 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2940 if (sel_tgt->family == sel_cmp->family &&
2941 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
2942 sel_cmp->family) &&
2943 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
2944 sel_cmp->family) &&
2945 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2946 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2947 return true;
2948 }
2949 } else {
2950 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2951 return true;
2952 }
2953 }
2954 return false;
2955 }
2956
2957 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
2958 u8 dir, u8 type, struct net *net)
2959 {
2960 struct xfrm_policy *pol, *ret = NULL;
2961 struct hlist_head *chain;
2962 u32 priority = ~0U;
2963
2964 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2965 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
2966 hlist_for_each_entry(pol, chain, bydst) {
2967 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2968 pol->type == type) {
2969 ret = pol;
2970 priority = ret->priority;
2971 break;
2972 }
2973 }
2974 chain = &net->xfrm.policy_inexact[dir];
2975 hlist_for_each_entry(pol, chain, bydst) {
2976 if ((pol->priority >= priority) && ret)
2977 break;
2978
2979 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2980 pol->type == type) {
2981 ret = pol;
2982 break;
2983 }
2984 }
2985
2986 xfrm_pol_hold(ret);
2987
2988 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2989
2990 return ret;
2991 }
2992
2993 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
2994 {
2995 int match = 0;
2996
2997 if (t->mode == m->mode && t->id.proto == m->proto &&
2998 (m->reqid == 0 || t->reqid == m->reqid)) {
2999 switch (t->mode) {
3000 case XFRM_MODE_TUNNEL:
3001 case XFRM_MODE_BEET:
3002 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3003 m->old_family) &&
3004 xfrm_addr_equal(&t->saddr, &m->old_saddr,
3005 m->old_family)) {
3006 match = 1;
3007 }
3008 break;
3009 case XFRM_MODE_TRANSPORT:
3010 /* In transport mode the template does not store
3011 any IP addresses, hence we just compare the mode and
3012 the protocol. */
3013 match = 1;
3014 break;
3015 default:
3016 break;
3017 }
3018 }
3019 return match;
3020 }
3021
3022 /* update endpoint address(es) of template(s) */
3023 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3024 struct xfrm_migrate *m, int num_migrate)
3025 {
3026 struct xfrm_migrate *mp;
3027 int i, j, n = 0;
3028
3029 write_lock_bh(&pol->lock);
3030 if (unlikely(pol->walk.dead)) {
3031 /* target policy has been deleted */
3032 write_unlock_bh(&pol->lock);
3033 return -ENOENT;
3034 }
3035
3036 for (i = 0; i < pol->xfrm_nr; i++) {
3037 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3038 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3039 continue;
3040 n++;
3041 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3042 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3043 continue;
3044 /* update endpoints */
3045 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3046 sizeof(pol->xfrm_vec[i].id.daddr));
3047 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3048 sizeof(pol->xfrm_vec[i].saddr));
3049 pol->xfrm_vec[i].encap_family = mp->new_family;
3050 /* flush bundles */
3051 atomic_inc(&pol->genid);
3052 }
3053 }
3054
3055 write_unlock_bh(&pol->lock);
3056
3057 if (!n)
3058 return -ENODATA;
3059
3060 return 0;
3061 }
3062
3063 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3064 {
3065 int i, j;
3066
3067 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3068 return -EINVAL;
3069
3070 for (i = 0; i < num_migrate; i++) {
3071 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3072 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3073 return -EINVAL;
3074
3075 /* check if there is any duplicated entry */
3076 for (j = i + 1; j < num_migrate; j++) {
3077 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3078 sizeof(m[i].old_daddr)) &&
3079 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3080 sizeof(m[i].old_saddr)) &&
3081 m[i].proto == m[j].proto &&
3082 m[i].mode == m[j].mode &&
3083 m[i].reqid == m[j].reqid &&
3084 m[i].old_family == m[j].old_family)
3085 return -EINVAL;
3086 }
3087 }
3088
3089 return 0;
3090 }
3091
3092 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3093 struct xfrm_migrate *m, int num_migrate,
3094 struct xfrm_kmaddress *k, struct net *net,
3095 struct xfrm_encap_tmpl *encap)
3096 {
3097 int i, err, nx_cur = 0, nx_new = 0;
3098 struct xfrm_policy *pol = NULL;
3099 struct xfrm_state *x, *xc;
3100 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3101 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3102 struct xfrm_migrate *mp;
3103
3104 /* Stage 0 - sanity checks */
3105 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3106 goto out;
3107
3108 if (dir >= XFRM_POLICY_MAX) {
3109 err = -EINVAL;
3110 goto out;
3111 }
3112
3113 /* Stage 1 - find policy */
3114 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3115 err = -ENOENT;
3116 goto out;
3117 }
3118
3119 /* Stage 2 - find and update state(s) */
3120 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3121 if ((x = xfrm_migrate_state_find(mp, net))) {
3122 x_cur[nx_cur] = x;
3123 nx_cur++;
3124 xc = xfrm_state_migrate(x, mp, encap);
3125 if (xc) {
3126 x_new[nx_new] = xc;
3127 nx_new++;
3128 } else {
3129 err = -ENODATA;
3130 goto restore_state;
3131 }
3132 }
3133 }
3134
3135 /* Stage 3 - update policy */
3136 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3137 goto restore_state;
3138
3139 /* Stage 4 - delete old state(s) */
3140 if (nx_cur) {
3141 xfrm_states_put(x_cur, nx_cur);
3142 xfrm_states_delete(x_cur, nx_cur);
3143 }
3144
3145 /* Stage 5 - announce */
3146 km_migrate(sel, dir, type, m, num_migrate, k, encap);
3147
3148 xfrm_pol_put(pol);
3149
3150 return 0;
3151 out:
3152 return err;
3153
3154 restore_state:
3155 if (pol)
3156 xfrm_pol_put(pol);
3157 if (nx_cur)
3158 xfrm_states_put(x_cur, nx_cur);
3159 if (nx_new)
3160 xfrm_states_delete(x_new, nx_new);
3161
3162 return err;
3163 }
3164 EXPORT_SYMBOL(xfrm_migrate);
3165 #endif