1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * xfrm_policy.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * Kazunori MIYAZAWA @USAGI
11 * YOSHIFUJI Hideaki
12 * Split up af-specific portion
13 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
14 *
15 */
16
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <net/dst.h>
33 #include <net/flow.h>
34 #include <net/xfrm.h>
35 #include <net/ip.h>
36 #if IS_ENABLED(CONFIG_IPV6_MIP6)
37 #include <net/mip6.h>
38 #endif
39 #ifdef CONFIG_XFRM_STATISTICS
40 #include <net/snmp.h>
41 #endif
42
43 #include "xfrm_hash.h"
44
45 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
46 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
47 #define XFRM_MAX_QUEUE_LEN 100
48
49 struct xfrm_flo {
50 struct dst_entry *dst_orig;
51 u8 flags;
52 };
53
54 /* prefixes smaller than this are stored in lists, not trees. */
55 #define INEXACT_PREFIXLEN_IPV4 16
56 #define INEXACT_PREFIXLEN_IPV6 48
57
58 struct xfrm_pol_inexact_node {
59 struct rb_node node;
60 union {
61 xfrm_address_t addr;
62 struct rcu_head rcu;
63 };
64 u8 prefixlen;
65
66 struct rb_root root;
67
68         /* the policies matching this node; may be an empty list */
69 struct hlist_head hhead;
70 };
71
72 /* xfrm inexact policy search tree:
73 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
74 * |
75 * +---- root_d: sorted by daddr:prefix
76 * | |
77 * | xfrm_pol_inexact_node
78 * | |
79 * | +- root: sorted by saddr/prefix
80 * | | |
81 * | | xfrm_pol_inexact_node
82 * | | |
83 * | | + root: unused
84 * | | |
85 * | | + hhead: saddr:daddr policies
86 * | |
87 * | +- coarse policies and all any:daddr policies
88 * |
89 * +---- root_s: sorted by saddr:prefix
90 * | |
91 * | xfrm_pol_inexact_node
92 * | |
93 * | + root: unused
94 * | |
95 * | + hhead: saddr:any policies
96 * |
97 * +---- coarse policies and all any:any policies
98 *
99 * Lookups return four candidate lists:
100 * 1. any:any list from top-level xfrm_pol_inexact_bin
101 * 2. any:daddr list from daddr tree
102 * 3. saddr:daddr list from 2nd level daddr tree
103 * 4. saddr:any list from saddr tree
104 *
105  * This result set then needs to be searched for the policy with the lowest
106  * priority. If two results have the same priority, the one inserted first (lowest ->pos) wins.
107 */
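/* Illustrative example: an IPv6 policy with selector saddr 2001:db8:a::/64
 * and daddr 2001:db8:b::/64 is stored in root_d under the daddr node, inside
 * that node's saddr subtree (candidate list 3 above).  With saddr ::/0 it
 * would sit on the daddr node's hhead (list 2); a saddr-only policy goes
 * into root_s (list 4); with both addresses wildcarded it stays on the
 * bin's own hhead (list 1).
 */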
108
109 struct xfrm_pol_inexact_key {
110 possible_net_t net;
111 u32 if_id;
112 u16 family;
113 u8 dir, type;
114 };
115
116 struct xfrm_pol_inexact_bin {
117 struct xfrm_pol_inexact_key k;
118 struct rhash_head head;
119 /* list containing '*:*' policies */
120 struct hlist_head hhead;
121
122 seqcount_t count;
123 /* tree sorted by daddr/prefix */
124 struct rb_root root_d;
125
126 /* tree sorted by saddr/prefix */
127 struct rb_root root_s;
128
129 /* slow path below */
130 struct list_head inexact_bins;
131 struct rcu_head rcu;
132 };
133
134 enum xfrm_pol_inexact_candidate_type {
135 XFRM_POL_CAND_BOTH,
136 XFRM_POL_CAND_SADDR,
137 XFRM_POL_CAND_DADDR,
138 XFRM_POL_CAND_ANY,
139
140 XFRM_POL_CAND_MAX,
141 };
142
143 struct xfrm_pol_inexact_candidates {
144 struct hlist_head *res[XFRM_POL_CAND_MAX];
145 };
146
147 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
148 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
149
150 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
151 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
152 __read_mostly;
153
154 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
155 static __read_mostly seqcount_t xfrm_policy_hash_generation;
156
157 static struct rhashtable xfrm_policy_inexact_table;
158 static const struct rhashtable_params xfrm_pol_inexact_params;
159
160 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
161 static int stale_bundle(struct dst_entry *dst);
162 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
163 static void xfrm_policy_queue_process(struct timer_list *t);
164
165 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
166 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
167 int dir);
168
169 static struct xfrm_pol_inexact_bin *
170 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
171 u32 if_id);
172
173 static struct xfrm_pol_inexact_bin *
174 xfrm_policy_inexact_lookup_rcu(struct net *net,
175 u8 type, u16 family, u8 dir, u32 if_id);
176 static struct xfrm_policy *
177 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
178 bool excl);
179 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
180 struct xfrm_policy *policy);
181
182 static bool
183 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
184 struct xfrm_pol_inexact_bin *b,
185 const xfrm_address_t *saddr,
186 const xfrm_address_t *daddr);
187
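/* Take a reference on a policy found under RCU.  Returns false if the
 * refcount already dropped to zero, i.e. the policy is being freed and
 * must not be used.
 */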
188 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
189 {
190 return refcount_inc_not_zero(&policy->refcnt);
191 }
192
193 static inline bool
194 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
195 {
196 const struct flowi4 *fl4 = &fl->u.ip4;
197
198 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
199 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
200 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
201 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
202 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
203 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
204 }
205
206 static inline bool
207 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
208 {
209 const struct flowi6 *fl6 = &fl->u.ip6;
210
211 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
212 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
213 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
214 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
215 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
216 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
217 }
218
219 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
220 unsigned short family)
221 {
222 switch (family) {
223 case AF_INET:
224 return __xfrm4_selector_match(sel, fl);
225 case AF_INET6:
226 return __xfrm6_selector_match(sel, fl);
227 }
228 return false;
229 }
230
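/* Returns the afinfo for @family with rcu_read_lock() held on success;
 * the caller must drop it with rcu_read_unlock() when done.  On failure
 * (unsupported family) the lock is already released and NULL is returned.
 */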
231 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
232 {
233 const struct xfrm_policy_afinfo *afinfo;
234
235 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
236 return NULL;
237 rcu_read_lock();
238 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
239 if (unlikely(!afinfo))
240 rcu_read_unlock();
241 return afinfo;
242 }
243
244 /* Called with rcu_read_lock(). */
245 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
246 {
247 return rcu_dereference(xfrm_if_cb);
248 }
249
250 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
251 const xfrm_address_t *saddr,
252 const xfrm_address_t *daddr,
253 int family, u32 mark)
254 {
255 const struct xfrm_policy_afinfo *afinfo;
256 struct dst_entry *dst;
257
258 afinfo = xfrm_policy_get_afinfo(family);
259 if (unlikely(afinfo == NULL))
260 return ERR_PTR(-EAFNOSUPPORT);
261
262 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
263
264 rcu_read_unlock();
265
266 return dst;
267 }
268 EXPORT_SYMBOL(__xfrm_dst_lookup);
269
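/* Like __xfrm_dst_lookup(), but takes the addresses from the state.
 * For Mobile IPv6 state types the care-of address (x->coaddr) replaces
 * the local or remote address, depending on XFRM_TYPE_*_COADDR; the
 * address pair actually used is copied back to prev_saddr/prev_daddr.
 */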
270 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
271 int tos, int oif,
272 xfrm_address_t *prev_saddr,
273 xfrm_address_t *prev_daddr,
274 int family, u32 mark)
275 {
276 struct net *net = xs_net(x);
277 xfrm_address_t *saddr = &x->props.saddr;
278 xfrm_address_t *daddr = &x->id.daddr;
279 struct dst_entry *dst;
280
281 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
282 saddr = x->coaddr;
283 daddr = prev_daddr;
284 }
285 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
286 saddr = prev_saddr;
287 daddr = x->coaddr;
288 }
289
290 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
291
292 if (!IS_ERR(dst)) {
293 if (prev_saddr != saddr)
294 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
295 if (prev_daddr != daddr)
296 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
297 }
298
299 return dst;
300 }
301
302 static inline unsigned long make_jiffies(long secs)
303 {
304 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
305 return MAX_SCHEDULE_TIMEOUT-1;
306 else
307 return secs*HZ;
308 }
309
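/* Per-policy lifetime timer: a hard add/use expiry deletes the policy
 * and notifies the key manager; a soft expiry only notifies it
 * (km_policy_expired) and the timer is re-armed for the next deadline.
 */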
310 static void xfrm_policy_timer(struct timer_list *t)
311 {
312 struct xfrm_policy *xp = from_timer(xp, t, timer);
313 time64_t now = ktime_get_real_seconds();
314 time64_t next = TIME64_MAX;
315 int warn = 0;
316 int dir;
317
318 read_lock(&xp->lock);
319
320 if (unlikely(xp->walk.dead))
321 goto out;
322
323 dir = xfrm_policy_id2dir(xp->index);
324
325 if (xp->lft.hard_add_expires_seconds) {
326 time64_t tmo = xp->lft.hard_add_expires_seconds +
327 xp->curlft.add_time - now;
328 if (tmo <= 0)
329 goto expired;
330 if (tmo < next)
331 next = tmo;
332 }
333 if (xp->lft.hard_use_expires_seconds) {
334 time64_t tmo = xp->lft.hard_use_expires_seconds +
335 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
336 if (tmo <= 0)
337 goto expired;
338 if (tmo < next)
339 next = tmo;
340 }
341 if (xp->lft.soft_add_expires_seconds) {
342 time64_t tmo = xp->lft.soft_add_expires_seconds +
343 xp->curlft.add_time - now;
344 if (tmo <= 0) {
345 warn = 1;
346 tmo = XFRM_KM_TIMEOUT;
347 }
348 if (tmo < next)
349 next = tmo;
350 }
351 if (xp->lft.soft_use_expires_seconds) {
352 time64_t tmo = xp->lft.soft_use_expires_seconds +
353 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
354 if (tmo <= 0) {
355 warn = 1;
356 tmo = XFRM_KM_TIMEOUT;
357 }
358 if (tmo < next)
359 next = tmo;
360 }
361
362 if (warn)
363 km_policy_expired(xp, dir, 0, 0);
364 if (next != TIME64_MAX &&
365 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
366 xfrm_pol_hold(xp);
367
368 out:
369 read_unlock(&xp->lock);
370 xfrm_pol_put(xp);
371 return;
372
373 expired:
374 read_unlock(&xp->lock);
375 if (!xfrm_policy_delete(xp, dir))
376 km_policy_expired(xp, dir, 1, 0);
377 xfrm_pol_put(xp);
378 }
379
380 /* Allocate xfrm_policy. Not used within this file; it is meant to be
381  * used by pfkeyv2 and netlink SPD calls.
382  */
383
384 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
385 {
386 struct xfrm_policy *policy;
387
388 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
389
390 if (policy) {
391 write_pnet(&policy->xp_net, net);
392 INIT_LIST_HEAD(&policy->walk.all);
393 INIT_HLIST_NODE(&policy->bydst_inexact_list);
394 INIT_HLIST_NODE(&policy->bydst);
395 INIT_HLIST_NODE(&policy->byidx);
396 rwlock_init(&policy->lock);
397 refcount_set(&policy->refcnt, 1);
398 skb_queue_head_init(&policy->polq.hold_queue);
399 timer_setup(&policy->timer, xfrm_policy_timer, 0);
400 timer_setup(&policy->polq.hold_timer,
401 xfrm_policy_queue_process, 0);
402 }
403 return policy;
404 }
405 EXPORT_SYMBOL(xfrm_policy_alloc);
406
407 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
408 {
409 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
410
411 security_xfrm_policy_free(policy->security);
412 kfree(policy);
413 }
414
415 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
416
417 void xfrm_policy_destroy(struct xfrm_policy *policy)
418 {
419 BUG_ON(!policy->walk.dead);
420
421 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
422 BUG();
423
424 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
425 }
426 EXPORT_SYMBOL(xfrm_policy_destroy);
427
428 /* Rule must be locked. Release descendant resources, announce
429  * entry dead. The rule must already be unlinked from all lists by this point.
430  */
431
432 static void xfrm_policy_kill(struct xfrm_policy *policy)
433 {
434 policy->walk.dead = 1;
435
436 atomic_inc(&policy->genid);
437
438 if (del_timer(&policy->polq.hold_timer))
439 xfrm_pol_put(policy);
440 skb_queue_purge(&policy->polq.hold_queue);
441
442 if (del_timer(&policy->timer))
443 xfrm_pol_put(policy);
444
445 xfrm_pol_put(policy);
446 }
447
448 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
449
450 static inline unsigned int idx_hash(struct net *net, u32 index)
451 {
452 return __idx_hash(index, net->xfrm.policy_idx_hmask);
453 }
454
455 /* calculate policy hash thresholds */
456 static void __get_hash_thresh(struct net *net,
457 unsigned short family, int dir,
458 u8 *dbits, u8 *sbits)
459 {
460 switch (family) {
461 case AF_INET:
462 *dbits = net->xfrm.policy_bydst[dir].dbits4;
463 *sbits = net->xfrm.policy_bydst[dir].sbits4;
464 break;
465
466 case AF_INET6:
467 *dbits = net->xfrm.policy_bydst[dir].dbits6;
468 *sbits = net->xfrm.policy_bydst[dir].sbits6;
469 break;
470
471 default:
472 *dbits = 0;
473 *sbits = 0;
474 }
475 }
476
477 static struct hlist_head *policy_hash_bysel(struct net *net,
478 const struct xfrm_selector *sel,
479 unsigned short family, int dir)
480 {
481 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
482 unsigned int hash;
483 u8 dbits;
484 u8 sbits;
485
486 __get_hash_thresh(net, family, dir, &dbits, &sbits);
487 hash = __sel_hash(sel, family, hmask, dbits, sbits);
488
489 if (hash == hmask + 1)
490 return NULL;
491
492 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
493 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
494 }
495
496 static struct hlist_head *policy_hash_direct(struct net *net,
497 const xfrm_address_t *daddr,
498 const xfrm_address_t *saddr,
499 unsigned short family, int dir)
500 {
501 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
502 unsigned int hash;
503 u8 dbits;
504 u8 sbits;
505
506 __get_hash_thresh(net, family, dir, &dbits, &sbits);
507 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
508
509 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
510 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
511 }
512
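/* Move all entries of one old-table bucket into the new table.  The
 * first entry moved in a pass anchors a new bucket (h0); entries that
 * hash elsewhere are skipped and picked up on a later pass, so the
 * relative order of policies within each new bucket is preserved.
 */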
513 static void xfrm_dst_hash_transfer(struct net *net,
514 struct hlist_head *list,
515 struct hlist_head *ndsttable,
516 unsigned int nhashmask,
517 int dir)
518 {
519 struct hlist_node *tmp, *entry0 = NULL;
520 struct xfrm_policy *pol;
521 unsigned int h0 = 0;
522 u8 dbits;
523 u8 sbits;
524
525 redo:
526 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
527 unsigned int h;
528
529 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
530 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
531 pol->family, nhashmask, dbits, sbits);
532 if (!entry0) {
533 hlist_del_rcu(&pol->bydst);
534 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
535 h0 = h;
536 } else {
537 if (h != h0)
538 continue;
539 hlist_del_rcu(&pol->bydst);
540 hlist_add_behind_rcu(&pol->bydst, entry0);
541 }
542 entry0 = &pol->bydst;
543 }
544 if (!hlist_empty(list)) {
545 entry0 = NULL;
546 goto redo;
547 }
548 }
549
550 static void xfrm_idx_hash_transfer(struct hlist_head *list,
551 struct hlist_head *nidxtable,
552 unsigned int nhashmask)
553 {
554 struct hlist_node *tmp;
555 struct xfrm_policy *pol;
556
557 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
558 unsigned int h;
559
560 h = __idx_hash(pol->index, nhashmask);
561 hlist_add_head(&pol->byidx, nidxtable+h);
562 }
563 }
564
565 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
566 {
567 return ((old_hmask + 1) << 1) - 1;
568 }
569
570 static void xfrm_bydst_resize(struct net *net, int dir)
571 {
572 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
573 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
574 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
575 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
576 struct hlist_head *odst;
577 int i;
578
579 if (!ndst)
580 return;
581
582 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
583 write_seqcount_begin(&xfrm_policy_hash_generation);
584
585 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
586 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
587
591 for (i = hmask; i >= 0; i--)
592 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
593
594 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
595 net->xfrm.policy_bydst[dir].hmask = nhashmask;
596
597 write_seqcount_end(&xfrm_policy_hash_generation);
598 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
599
600 synchronize_rcu();
601
602 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
603 }
604
605 static void xfrm_byidx_resize(struct net *net, int total)
606 {
607 unsigned int hmask = net->xfrm.policy_idx_hmask;
608 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
609 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
610 struct hlist_head *oidx = net->xfrm.policy_byidx;
611 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
612 int i;
613
614 if (!nidx)
615 return;
616
617 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
618
619 for (i = hmask; i >= 0; i--)
620 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
621
622 net->xfrm.policy_byidx = nidx;
623 net->xfrm.policy_idx_hmask = nhashmask;
624
625 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
626
627 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
628 }
629
630 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
631 {
632 unsigned int cnt = net->xfrm.policy_count[dir];
633 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
634
635 if (total)
636 *total += cnt;
637
638 if ((hmask + 1) < xfrm_policy_hashmax &&
639 cnt > hmask)
640 return 1;
641
642 return 0;
643 }
644
645 static inline int xfrm_byidx_should_resize(struct net *net, int total)
646 {
647 unsigned int hmask = net->xfrm.policy_idx_hmask;
648
649 if ((hmask + 1) < xfrm_policy_hashmax &&
650 total > hmask)
651 return 1;
652
653 return 0;
654 }
655
656 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
657 {
658 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
659 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
660 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
661 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
662 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
663 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
664 si->spdhcnt = net->xfrm.policy_idx_hmask;
665 si->spdhmcnt = xfrm_policy_hashmax;
666 }
667 EXPORT_SYMBOL(xfrm_spd_getinfo);
668
669 static DEFINE_MUTEX(hash_resize_mutex);
670 static void xfrm_hash_resize(struct work_struct *work)
671 {
672 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
673 int dir, total;
674
675 mutex_lock(&hash_resize_mutex);
676
677 total = 0;
678 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
679 if (xfrm_bydst_should_resize(net, dir, &total))
680 xfrm_bydst_resize(net, dir);
681 }
682 if (xfrm_byidx_should_resize(net, total))
683 xfrm_byidx_resize(net, total);
684
685 mutex_unlock(&hash_resize_mutex);
686 }
687
688 /* Make sure *pol can be inserted into fastbin.
689  * Useful to check that later insert requests will be successful
690 * (provided xfrm_policy_lock is held throughout).
691 */
692 static struct xfrm_pol_inexact_bin *
693 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
694 {
695 struct xfrm_pol_inexact_bin *bin, *prev;
696 struct xfrm_pol_inexact_key k = {
697 .family = pol->family,
698 .type = pol->type,
699 .dir = dir,
700 .if_id = pol->if_id,
701 };
702 struct net *net = xp_net(pol);
703
704 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
705
706 write_pnet(&k.net, net);
707 bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
708 xfrm_pol_inexact_params);
709 if (bin)
710 return bin;
711
712 bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
713 if (!bin)
714 return NULL;
715
716 bin->k = k;
717 INIT_HLIST_HEAD(&bin->hhead);
718 bin->root_d = RB_ROOT;
719 bin->root_s = RB_ROOT;
720 seqcount_init(&bin->count);
721
722 prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
723 &bin->k, &bin->head,
724 xfrm_pol_inexact_params);
725 if (!prev) {
726 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
727 return bin;
728 }
729
730 kfree(bin);
731
732 return IS_ERR(prev) ? NULL : prev;
733 }
734
735 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
736 int family, u8 prefixlen)
737 {
738 if (xfrm_addr_any(addr, family))
739 return true;
740
741 if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
742 return true;
743
744 if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
745 return true;
746
747 return false;
748 }
749
750 static bool
751 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
752 {
753 const xfrm_address_t *addr;
754 bool saddr_any, daddr_any;
755 u8 prefixlen;
756
757 addr = &policy->selector.saddr;
758 prefixlen = policy->selector.prefixlen_s;
759
760 saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
761 policy->family,
762 prefixlen);
763 addr = &policy->selector.daddr;
764 prefixlen = policy->selector.prefixlen_d;
765 daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
766 policy->family,
767 prefixlen);
768 return saddr_any && daddr_any;
769 }
770
771 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
772 const xfrm_address_t *addr, u8 prefixlen)
773 {
774 node->addr = *addr;
775 node->prefixlen = prefixlen;
776 }
777
778 static struct xfrm_pol_inexact_node *
779 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
780 {
781 struct xfrm_pol_inexact_node *node;
782
783 node = kzalloc(sizeof(*node), GFP_ATOMIC);
784 if (node)
785 xfrm_pol_inexact_node_init(node, addr, prefixlen);
786
787 return node;
788 }
789
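/* Compare the first @prefixlen bits of @a and @b; returns <0, 0 or >0
 * like memcmp() and serves as the ordering function for the inexact
 * rbtrees.  Note the subtraction-based comparisons below can wrap around
 * for addresses that differ in the top bit; later kernels replace them
 * with explicit -1/0/1 comparisons (upstream "xfrm: Fix wraparound in
 * xfrm_policy_addr_delta()").
 */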
790 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
791 const xfrm_address_t *b,
792 u8 prefixlen, u16 family)
793 {
794 unsigned int pdw, pbi;
795 int delta = 0;
796
797 switch (family) {
798 case AF_INET:
799 if (sizeof(long) == 4 && prefixlen == 0)
800 return ntohl(a->a4) - ntohl(b->a4);
801 return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
802 (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
803 case AF_INET6:
804 pdw = prefixlen >> 5;
805 pbi = prefixlen & 0x1f;
806
807 if (pdw) {
808 delta = memcmp(a->a6, b->a6, pdw << 2);
809 if (delta)
810 return delta;
811 }
812 if (pbi) {
813 u32 mask = ~0u << (32 - pbi);
814
815 delta = (ntohl(a->a6[pdw]) & mask) -
816 (ntohl(b->a6[pdw]) & mask);
817 }
818 break;
819 default:
820 break;
821 }
822
823 return delta;
824 }
825
826 static void xfrm_policy_inexact_list_reinsert(struct net *net,
827 struct xfrm_pol_inexact_node *n,
828 u16 family)
829 {
830 unsigned int matched_s, matched_d;
831 struct xfrm_policy *policy, *p;
832
833 matched_s = 0;
834 matched_d = 0;
835
836 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
837 struct hlist_node *newpos = NULL;
838 bool matches_s, matches_d;
839
840 if (!policy->bydst_reinsert)
841 continue;
842
843 WARN_ON_ONCE(policy->family != family);
844
845 policy->bydst_reinsert = false;
846 hlist_for_each_entry(p, &n->hhead, bydst) {
847 if (policy->priority > p->priority)
848 newpos = &p->bydst;
849 else if (policy->priority == p->priority &&
850 policy->pos > p->pos)
851 newpos = &p->bydst;
852 else
853 break;
854 }
855
856 if (newpos)
857 hlist_add_behind_rcu(&policy->bydst, newpos);
858 else
859 hlist_add_head_rcu(&policy->bydst, &n->hhead);
860
861 /* paranoia checks follow.
862 * Check that the reinserted policy matches at least
863 * saddr or daddr for current node prefix.
864 *
865 * Matching both is fine, matching saddr in one policy
866 * (but not daddr) and then matching only daddr in another
867 * is a bug.
868 */
869 matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
870 &n->addr,
871 n->prefixlen,
872 family) == 0;
873 matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
874 &n->addr,
875 n->prefixlen,
876 family) == 0;
877 if (matches_s && matches_d)
878 continue;
879
880 WARN_ON_ONCE(!matches_s && !matches_d);
881 if (matches_s)
882 matched_s++;
883 if (matches_d)
884 matched_d++;
885 WARN_ON_ONCE(matched_s && matched_d);
886 }
887 }
888
889 static void xfrm_policy_inexact_node_reinsert(struct net *net,
890 struct xfrm_pol_inexact_node *n,
891 struct rb_root *new,
892 u16 family)
893 {
894 struct xfrm_pol_inexact_node *node;
895 struct rb_node **p, *parent;
896
897 /* we should not have another subtree here */
898 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
899 restart:
900 parent = NULL;
901 p = &new->rb_node;
902 while (*p) {
903 u8 prefixlen;
904 int delta;
905
906 parent = *p;
907 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
908
909 prefixlen = min(node->prefixlen, n->prefixlen);
910
911 delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
912 prefixlen, family);
913 if (delta < 0) {
914 p = &parent->rb_left;
915 } else if (delta > 0) {
916 p = &parent->rb_right;
917 } else {
918 struct xfrm_policy *tmp;
919
920 hlist_for_each_entry(tmp, &n->hhead, bydst) {
921 tmp->bydst_reinsert = true;
922 hlist_del_rcu(&tmp->bydst);
923 }
924
925 xfrm_policy_inexact_list_reinsert(net, node, family);
926
927 if (node->prefixlen == n->prefixlen) {
928 kfree_rcu(n, rcu);
929 return;
930 }
931
932 rb_erase(*p, new);
933 kfree_rcu(n, rcu);
934 n = node;
935 n->prefixlen = prefixlen;
936 goto restart;
937 }
938 }
939
940 rb_link_node_rcu(&n->node, parent, p);
941 rb_insert_color(&n->node, new);
942 }
943
944 /* merge nodes v and n */
945 static void xfrm_policy_inexact_node_merge(struct net *net,
946 struct xfrm_pol_inexact_node *v,
947 struct xfrm_pol_inexact_node *n,
948 u16 family)
949 {
950 struct xfrm_pol_inexact_node *node;
951 struct xfrm_policy *tmp;
952 struct rb_node *rnode;
953
954 /* To-be-merged node v has a subtree.
955 *
956 * Dismantle it and insert its nodes to n->root.
957 */
958 while ((rnode = rb_first(&v->root)) != NULL) {
959 node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
960 rb_erase(&node->node, &v->root);
961 xfrm_policy_inexact_node_reinsert(net, node, &n->root,
962 family);
963 }
964
965 hlist_for_each_entry(tmp, &v->hhead, bydst) {
966 tmp->bydst_reinsert = true;
967 hlist_del_rcu(&tmp->bydst);
968 }
969
970 xfrm_policy_inexact_list_reinsert(net, n, family);
971 }
972
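/* Insert a node for @addr/@prefixlen into @root.  If an existing node
 * turns out to be a subnet of the new prefix, it is removed, widened to
 * the new prefix and reused; any further nodes now covered by it are
 * merged into it and their policies reinserted.
 */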
973 static struct xfrm_pol_inexact_node *
974 xfrm_policy_inexact_insert_node(struct net *net,
975 struct rb_root *root,
976 xfrm_address_t *addr,
977 u16 family, u8 prefixlen, u8 dir)
978 {
979 struct xfrm_pol_inexact_node *cached = NULL;
980 struct rb_node **p, *parent = NULL;
981 struct xfrm_pol_inexact_node *node;
982
983 p = &root->rb_node;
984 while (*p) {
985 int delta;
986
987 parent = *p;
988 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
989
990 delta = xfrm_policy_addr_delta(addr, &node->addr,
991 node->prefixlen,
992 family);
993 if (delta == 0 && prefixlen >= node->prefixlen) {
994 WARN_ON_ONCE(cached); /* ipsec policies got lost */
995 return node;
996 }
997
998 if (delta < 0)
999 p = &parent->rb_left;
1000 else
1001 p = &parent->rb_right;
1002
1003 if (prefixlen < node->prefixlen) {
1004 delta = xfrm_policy_addr_delta(addr, &node->addr,
1005 prefixlen,
1006 family);
1007 if (delta)
1008 continue;
1009
1010 /* This node is a subnet of the new prefix. It needs
1011 * to be removed and re-inserted with the smaller
1012 * prefix and all nodes that are now also covered
1013 * by the reduced prefixlen.
1014 */
1015 rb_erase(&node->node, root);
1016
1017 if (!cached) {
1018 xfrm_pol_inexact_node_init(node, addr,
1019 prefixlen);
1020 cached = node;
1021 } else {
1022 /* This node also falls within the new
1023 * prefixlen. Merge the to-be-reinserted
1024 * node and this one.
1025 */
1026 xfrm_policy_inexact_node_merge(net, node,
1027 cached, family);
1028 kfree_rcu(node, rcu);
1029 }
1030
1031 /* restart */
1032 p = &root->rb_node;
1033 parent = NULL;
1034 }
1035 }
1036
1037 node = cached;
1038 if (!node) {
1039 node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1040 if (!node)
1041 return NULL;
1042 }
1043
1044 rb_link_node_rcu(&node->node, parent, p);
1045 rb_insert_color(&node->node, root);
1046
1047 return node;
1048 }
1049
1050 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1051 {
1052 struct xfrm_pol_inexact_node *node;
1053 struct rb_node *rn = rb_first(r);
1054
1055 while (rn) {
1056 node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1057
1058 xfrm_policy_inexact_gc_tree(&node->root, rm);
1059 rn = rb_next(rn);
1060
1061 if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1062 WARN_ON_ONCE(rm);
1063 continue;
1064 }
1065
1066 rb_erase(&node->node, r);
1067 kfree_rcu(node, rcu);
1068 }
1069 }
1070
1071 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1072 {
1073 write_seqcount_begin(&b->count);
1074 xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1075 xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1076 write_seqcount_end(&b->count);
1077
1078 if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1079 !hlist_empty(&b->hhead)) {
1080 WARN_ON_ONCE(net_exit);
1081 return;
1082 }
1083
1084 if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1085 xfrm_pol_inexact_params) == 0) {
1086 list_del(&b->inexact_bins);
1087 kfree_rcu(b, rcu);
1088 }
1089 }
1090
1091 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1092 {
1093 struct net *net = read_pnet(&b->k.net);
1094
1095 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1096 __xfrm_policy_inexact_prune_bin(b, false);
1097 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1098 }
1099
1100 static void __xfrm_policy_inexact_flush(struct net *net)
1101 {
1102 struct xfrm_pol_inexact_bin *bin, *t;
1103
1104 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1105
1106 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1107 __xfrm_policy_inexact_prune_bin(bin, false);
1108 }
1109
1110 static struct hlist_head *
1111 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1112 struct xfrm_policy *policy, u8 dir)
1113 {
1114 struct xfrm_pol_inexact_node *n;
1115 struct net *net;
1116
1117 net = xp_net(policy);
1118 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1119
1120 if (xfrm_policy_inexact_insert_use_any_list(policy))
1121 return &bin->hhead;
1122
1123 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1124 policy->family,
1125 policy->selector.prefixlen_d)) {
1126 write_seqcount_begin(&bin->count);
1127 n = xfrm_policy_inexact_insert_node(net,
1128 &bin->root_s,
1129 &policy->selector.saddr,
1130 policy->family,
1131 policy->selector.prefixlen_s,
1132 dir);
1133 write_seqcount_end(&bin->count);
1134 if (!n)
1135 return NULL;
1136
1137 return &n->hhead;
1138 }
1139
1140 /* daddr is fixed */
1141 write_seqcount_begin(&bin->count);
1142 n = xfrm_policy_inexact_insert_node(net,
1143 &bin->root_d,
1144 &policy->selector.daddr,
1145 policy->family,
1146 policy->selector.prefixlen_d, dir);
1147 write_seqcount_end(&bin->count);
1148 if (!n)
1149 return NULL;
1150
1151 /* saddr is wildcard */
1152 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1153 policy->family,
1154 policy->selector.prefixlen_s))
1155 return &n->hhead;
1156
1157 write_seqcount_begin(&bin->count);
1158 n = xfrm_policy_inexact_insert_node(net,
1159 &n->root,
1160 &policy->selector.saddr,
1161 policy->family,
1162 policy->selector.prefixlen_s, dir);
1163 write_seqcount_end(&bin->count);
1164 if (!n)
1165 return NULL;
1166
1167 return &n->hhead;
1168 }
1169
1170 static struct xfrm_policy *
1171 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1172 {
1173 struct xfrm_pol_inexact_bin *bin;
1174 struct xfrm_policy *delpol;
1175 struct hlist_head *chain;
1176 struct net *net;
1177
1178 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1179 if (!bin)
1180 return ERR_PTR(-ENOMEM);
1181
1182 net = xp_net(policy);
1183 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1184
1185 chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1186 if (!chain) {
1187 __xfrm_policy_inexact_prune_bin(bin, false);
1188 return ERR_PTR(-ENOMEM);
1189 }
1190
1191 delpol = xfrm_policy_insert_list(chain, policy, excl);
1192 if (delpol && excl) {
1193 __xfrm_policy_inexact_prune_bin(bin, false);
1194 return ERR_PTR(-EEXIST);
1195 }
1196
1197 chain = &net->xfrm.policy_inexact[dir];
1198 xfrm_policy_insert_inexact_list(chain, policy);
1199
1200 if (delpol)
1201 __xfrm_policy_inexact_prune_bin(bin, false);
1202
1203 return delpol;
1204 }
1205
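/* Rebuild the bydst hash tables after the prefixlen thresholds changed:
 * first make sure every policy that will become inexact can be re-added
 * to its bin (the allocations may fail), then reset the tables and
 * re-insert all policies in their original order of creation.
 */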
1206 static void xfrm_hash_rebuild(struct work_struct *work)
1207 {
1208 struct net *net = container_of(work, struct net,
1209 xfrm.policy_hthresh.work);
1210 unsigned int hmask;
1211 struct xfrm_policy *pol;
1212 struct xfrm_policy *policy;
1213 struct hlist_head *chain;
1214 struct hlist_head *odst;
1215 struct hlist_node *newpos;
1216 int i;
1217 int dir;
1218         unsigned int seq;
1219 u8 lbits4, rbits4, lbits6, rbits6;
1220
1221 mutex_lock(&hash_resize_mutex);
1222
1223 /* read selector prefixlen thresholds */
1224 do {
1225 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1226
1227 lbits4 = net->xfrm.policy_hthresh.lbits4;
1228 rbits4 = net->xfrm.policy_hthresh.rbits4;
1229 lbits6 = net->xfrm.policy_hthresh.lbits6;
1230 rbits6 = net->xfrm.policy_hthresh.rbits6;
1231 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1232
1233 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1234 write_seqcount_begin(&xfrm_policy_hash_generation);
1235
1236         /* make sure that we can insert the inexact policies again before
1237          * we start with destructive action.
1238          */
1239 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1240 struct xfrm_pol_inexact_bin *bin;
1241 u8 dbits, sbits;
1242
1243 dir = xfrm_policy_id2dir(policy->index);
1244 if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1245 continue;
1246
1247 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1248 if (policy->family == AF_INET) {
1249 dbits = rbits4;
1250 sbits = lbits4;
1251 } else {
1252 dbits = rbits6;
1253 sbits = lbits6;
1254 }
1255 } else {
1256 if (policy->family == AF_INET) {
1257 dbits = lbits4;
1258 sbits = rbits4;
1259 } else {
1260 dbits = lbits6;
1261 sbits = rbits6;
1262 }
1263 }
1264
1265 if (policy->selector.prefixlen_d < dbits ||
1266 policy->selector.prefixlen_s < sbits)
1267 continue;
1268
1269 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1270 if (!bin)
1271 goto out_unlock;
1272
1273 if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1274 goto out_unlock;
1275 }
1276
1277 /* reset the bydst and inexact table in all directions */
1278 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1279 struct hlist_node *n;
1280
1281 hlist_for_each_entry_safe(policy, n,
1282 &net->xfrm.policy_inexact[dir],
1283 bydst_inexact_list)
1284 hlist_del_init(&policy->bydst_inexact_list);
1285
1286 hmask = net->xfrm.policy_bydst[dir].hmask;
1287 odst = net->xfrm.policy_bydst[dir].table;
1288 for (i = hmask; i >= 0; i--)
1289 INIT_HLIST_HEAD(odst + i);
1290 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1291 /* dir out => dst = remote, src = local */
1292 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1293 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1294 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1295 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1296 } else {
1297 /* dir in/fwd => dst = local, src = remote */
1298 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1299 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1300 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1301 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1302 }
1303 }
1304
1305 /* re-insert all policies by order of creation */
1306 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1307 if (policy->walk.dead)
1308 continue;
1309 dir = xfrm_policy_id2dir(policy->index);
1310 if (dir >= XFRM_POLICY_MAX) {
1311 /* skip socket policies */
1312 continue;
1313 }
1314 newpos = NULL;
1315 chain = policy_hash_bysel(net, &policy->selector,
1316 policy->family, dir);
1317
1318 hlist_del_rcu(&policy->bydst);
1319
1320 if (!chain) {
1321 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1322
1323 WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1324 continue;
1325 }
1326
1327 hlist_for_each_entry(pol, chain, bydst) {
1328 if (policy->priority >= pol->priority)
1329 newpos = &pol->bydst;
1330 else
1331 break;
1332 }
1333 if (newpos)
1334 hlist_add_behind_rcu(&policy->bydst, newpos);
1335 else
1336 hlist_add_head_rcu(&policy->bydst, chain);
1337 }
1338
1339 out_unlock:
1340 __xfrm_policy_inexact_flush(net);
1341 write_seqcount_end(&xfrm_policy_hash_generation);
1342 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1343
1344 mutex_unlock(&hash_resize_mutex);
1345 }
1346
1347 void xfrm_policy_hash_rebuild(struct net *net)
1348 {
1349 schedule_work(&net->xfrm.policy_hthresh.work);
1350 }
1351 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1352
1353 /* Generate new index... KAME seems to generate them ordered by cost
1354  * of an absolute unpredictability of ordering of rules. This will not pass. */
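/* Note: xfrm_policy_id2dir() recovers the direction from the low bits of
 * the index, which is why idx_generator advances in steps of 8 and the
 * direction is OR'ed into each candidate index below.
 */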
1355 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1356 {
1357 static u32 idx_generator;
1358
1359 for (;;) {
1360 struct hlist_head *list;
1361 struct xfrm_policy *p;
1362 u32 idx;
1363 int found;
1364
1365 if (!index) {
1366 idx = (idx_generator | dir);
1367 idx_generator += 8;
1368 } else {
1369 idx = index;
1370 index = 0;
1371 }
1372
1373 if (idx == 0)
1374 idx = 8;
1375 list = net->xfrm.policy_byidx + idx_hash(net, idx);
1376 found = 0;
1377 hlist_for_each_entry(p, list, byidx) {
1378 if (p->index == idx) {
1379 found = 1;
1380 break;
1381 }
1382 }
1383 if (!found)
1384 return idx;
1385 }
1386 }
1387
1388 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1389 {
1390 u32 *p1 = (u32 *) s1;
1391 u32 *p2 = (u32 *) s2;
1392 int len = sizeof(struct xfrm_selector) / sizeof(u32);
1393 int i;
1394
1395 for (i = 0; i < len; i++) {
1396 if (p1[i] != p2[i])
1397 return 1;
1398 }
1399
1400 return 0;
1401 }
1402
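/* Move packets queued on the old policy's hold queue over to the new
 * policy and kick its hold timer so they are re-examined right away.
 */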
1403 static void xfrm_policy_requeue(struct xfrm_policy *old,
1404 struct xfrm_policy *new)
1405 {
1406 struct xfrm_policy_queue *pq = &old->polq;
1407 struct sk_buff_head list;
1408
1409 if (skb_queue_empty(&pq->hold_queue))
1410 return;
1411
1412 __skb_queue_head_init(&list);
1413
1414 spin_lock_bh(&pq->hold_queue.lock);
1415 skb_queue_splice_init(&pq->hold_queue, &list);
1416 if (del_timer(&pq->hold_timer))
1417 xfrm_pol_put(old);
1418 spin_unlock_bh(&pq->hold_queue.lock);
1419
1420 pq = &new->polq;
1421
1422 spin_lock_bh(&pq->hold_queue.lock);
1423 skb_queue_splice(&list, &pq->hold_queue);
1424 pq->timeout = XFRM_QUEUE_TMO_MIN;
1425 if (!mod_timer(&pq->hold_timer, jiffies))
1426 xfrm_pol_hold(new);
1427 spin_unlock_bh(&pq->hold_queue.lock);
1428 }
1429
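/* Two policies are treated as the same for insert/replace purposes when
 * their mark value/mask pairs are identical, or when the masked values
 * match and both have equal priority.
 */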
1430 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
1431 struct xfrm_policy *pol)
1432 {
1433 u32 mark = policy->mark.v & policy->mark.m;
1434
1435 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
1436 return true;
1437
1438 if ((mark & pol->mark.m) == pol->mark.v &&
1439 policy->priority == pol->priority)
1440 return true;
1441
1442 return false;
1443 }
1444
1445 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1446 {
1447 const struct xfrm_pol_inexact_key *k = data;
1448 u32 a = k->type << 24 | k->dir << 16 | k->family;
1449
1450 return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1451 seed);
1452 }
1453
1454 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1455 {
1456 const struct xfrm_pol_inexact_bin *b = data;
1457
1458 return xfrm_pol_bin_key(&b->k, 0, seed);
1459 }
1460
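/* rhashtable compare callback: returns 0 only when all key fields match.
 * The per-field XOR is just a cheap "differs" test; the sign of a
 * non-zero result carries no meaning.
 */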
1461 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1462 const void *ptr)
1463 {
1464 const struct xfrm_pol_inexact_key *key = arg->key;
1465 const struct xfrm_pol_inexact_bin *b = ptr;
1466 int ret;
1467
1468 if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1469 return -1;
1470
1471 ret = b->k.dir ^ key->dir;
1472 if (ret)
1473 return ret;
1474
1475 ret = b->k.type ^ key->type;
1476 if (ret)
1477 return ret;
1478
1479 ret = b->k.family ^ key->family;
1480 if (ret)
1481 return ret;
1482
1483 return b->k.if_id ^ key->if_id;
1484 }
1485
1486 static const struct rhashtable_params xfrm_pol_inexact_params = {
1487 .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
1488 .hashfn = xfrm_pol_bin_key,
1489 .obj_hashfn = xfrm_pol_bin_obj,
1490 .obj_cmpfn = xfrm_pol_bin_cmp,
1491 .automatic_shrinking = true,
1492 };
1493
1494 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1495 struct xfrm_policy *policy)
1496 {
1497 struct xfrm_policy *pol, *delpol = NULL;
1498 struct hlist_node *newpos = NULL;
1499 int i = 0;
1500
1501 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1502 if (pol->type == policy->type &&
1503 pol->if_id == policy->if_id &&
1504 !selector_cmp(&pol->selector, &policy->selector) &&
1505 xfrm_policy_mark_match(policy, pol) &&
1506 xfrm_sec_ctx_match(pol->security, policy->security) &&
1507 !WARN_ON(delpol)) {
1508 delpol = pol;
1509 if (policy->priority > pol->priority)
1510 continue;
1511 } else if (policy->priority >= pol->priority) {
1512 newpos = &pol->bydst_inexact_list;
1513 continue;
1514 }
1515 if (delpol)
1516 break;
1517 }
1518
1519 if (newpos)
1520 hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1521 else
1522 hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1523
1524 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1525 pol->pos = i;
1526 i++;
1527 }
1528 }
1529
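/* Insert @policy into the sorted @chain (ascending priority, newest
 * last within equal priority).  If an otherwise identical policy exists
 * it is returned so the caller can replace it, or an -EEXIST error
 * pointer if exclusive insertion was requested.
 */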
1530 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1531 struct xfrm_policy *policy,
1532 bool excl)
1533 {
1534 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1535
1536 hlist_for_each_entry(pol, chain, bydst) {
1537 if (pol->type == policy->type &&
1538 pol->if_id == policy->if_id &&
1539 !selector_cmp(&pol->selector, &policy->selector) &&
1540 xfrm_policy_mark_match(policy, pol) &&
1541 xfrm_sec_ctx_match(pol->security, policy->security) &&
1542 !WARN_ON(delpol)) {
1543 if (excl)
1544 return ERR_PTR(-EEXIST);
1545 delpol = pol;
1546 if (policy->priority > pol->priority)
1547 continue;
1548 } else if (policy->priority >= pol->priority) {
1549 newpos = pol;
1550 continue;
1551 }
1552 if (delpol)
1553 break;
1554 }
1555
1556 if (newpos)
1557 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1558 else
1559 hlist_add_head_rcu(&policy->bydst, chain);
1560
1561 return delpol;
1562 }
1563
1564 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1565 {
1566 struct net *net = xp_net(policy);
1567 struct xfrm_policy *delpol;
1568 struct hlist_head *chain;
1569
1570 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1571 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1572 if (chain)
1573 delpol = xfrm_policy_insert_list(chain, policy, excl);
1574 else
1575 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1576
1577 if (IS_ERR(delpol)) {
1578 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1579 return PTR_ERR(delpol);
1580 }
1581
1582 __xfrm_policy_link(policy, dir);
1583
1584 /* After previous checking, family can either be AF_INET or AF_INET6 */
1585 if (policy->family == AF_INET)
1586 rt_genid_bump_ipv4(net);
1587 else
1588 rt_genid_bump_ipv6(net);
1589
1590 if (delpol) {
1591 xfrm_policy_requeue(delpol, policy);
1592 __xfrm_policy_unlink(delpol, dir);
1593 }
1594 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1595 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1596 policy->curlft.add_time = ktime_get_real_seconds();
1597 policy->curlft.use_time = 0;
1598 if (!mod_timer(&policy->timer, jiffies + HZ))
1599 xfrm_pol_hold(policy);
1600 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1601
1602 if (delpol)
1603 xfrm_policy_kill(delpol);
1604 else if (xfrm_bydst_should_resize(net, dir, NULL))
1605 schedule_work(&net->xfrm.policy_hash_work);
1606
1607 return 0;
1608 }
1609 EXPORT_SYMBOL(xfrm_policy_insert);
1610
1611 static struct xfrm_policy *
1612 __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
1613 u8 type, int dir,
1614 struct xfrm_selector *sel,
1615 struct xfrm_sec_ctx *ctx)
1616 {
1617 struct xfrm_policy *pol;
1618
1619 if (!chain)
1620 return NULL;
1621
1622 hlist_for_each_entry(pol, chain, bydst) {
1623 if (pol->type == type &&
1624 pol->if_id == if_id &&
1625 (mark & pol->mark.m) == pol->mark.v &&
1626 !selector_cmp(sel, &pol->selector) &&
1627 xfrm_sec_ctx_match(ctx, pol->security))
1628 return pol;
1629 }
1630
1631 return NULL;
1632 }
1633
1634 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1635 u8 type, int dir,
1636 struct xfrm_selector *sel,
1637 struct xfrm_sec_ctx *ctx, int delete,
1638 int *err)
1639 {
1640 struct xfrm_pol_inexact_bin *bin = NULL;
1641 struct xfrm_policy *pol, *ret = NULL;
1642 struct hlist_head *chain;
1643
1644 *err = 0;
1645 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1646 chain = policy_hash_bysel(net, sel, sel->family, dir);
1647 if (!chain) {
1648 struct xfrm_pol_inexact_candidates cand;
1649 int i;
1650
1651 bin = xfrm_policy_inexact_lookup(net, type,
1652 sel->family, dir, if_id);
1653 if (!bin) {
1654 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1655 return NULL;
1656 }
1657
1658 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1659 &sel->saddr,
1660 &sel->daddr)) {
1661 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1662 return NULL;
1663 }
1664
1665 pol = NULL;
1666 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1667 struct xfrm_policy *tmp;
1668
1669 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1670 if_id, type, dir,
1671 sel, ctx);
1672 if (!tmp)
1673 continue;
1674
1675 if (!pol || tmp->pos < pol->pos)
1676 pol = tmp;
1677 }
1678 } else {
1679 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1680 sel, ctx);
1681 }
1682
1683 if (pol) {
1684 xfrm_pol_hold(pol);
1685 if (delete) {
1686 *err = security_xfrm_policy_delete(pol->security);
1687 if (*err) {
1688 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1689 return pol;
1690 }
1691 __xfrm_policy_unlink(pol, dir);
1692 }
1693 ret = pol;
1694 }
1695 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1696
1697 if (ret && delete)
1698 xfrm_policy_kill(ret);
1699 if (bin && delete)
1700 xfrm_policy_inexact_prune_bin(bin);
1701 return ret;
1702 }
1703 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1704
1705 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
1706 u8 type, int dir, u32 id, int delete,
1707 int *err)
1708 {
1709 struct xfrm_policy *pol, *ret;
1710 struct hlist_head *chain;
1711
1712 *err = -ENOENT;
1713 if (xfrm_policy_id2dir(id) != dir)
1714 return NULL;
1715
1716 *err = 0;
1717 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1718 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1719 ret = NULL;
1720 hlist_for_each_entry(pol, chain, byidx) {
1721 if (pol->type == type && pol->index == id &&
1722 pol->if_id == if_id &&
1723 (mark & pol->mark.m) == pol->mark.v) {
1724 xfrm_pol_hold(pol);
1725 if (delete) {
1726 *err = security_xfrm_policy_delete(
1727 pol->security);
1728 if (*err) {
1729 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1730 return pol;
1731 }
1732 __xfrm_policy_unlink(pol, dir);
1733 }
1734 ret = pol;
1735 break;
1736 }
1737 }
1738 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1739
1740 if (ret && delete)
1741 xfrm_policy_kill(ret);
1742 return ret;
1743 }
1744 EXPORT_SYMBOL(xfrm_policy_byid);
1745
1746 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1747 static inline int
1748 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1749 {
1750 struct xfrm_policy *pol;
1751 int err = 0;
1752
1753 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1754 if (pol->walk.dead ||
1755 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1756 pol->type != type)
1757 continue;
1758
1759 err = security_xfrm_policy_delete(pol->security);
1760 if (err) {
1761 xfrm_audit_policy_delete(pol, 0, task_valid);
1762 return err;
1763 }
1764 }
1765 return err;
1766 }
1767 #else
1768 static inline int
1769 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1770 {
1771 return 0;
1772 }
1773 #endif
1774
1775 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1776 {
1777 int dir, err = 0, cnt = 0;
1778 struct xfrm_policy *pol;
1779
1780 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1781
1782 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1783 if (err)
1784 goto out;
1785
1786 again:
1787 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1788 dir = xfrm_policy_id2dir(pol->index);
1789 if (pol->walk.dead ||
1790 dir >= XFRM_POLICY_MAX ||
1791 pol->type != type)
1792 continue;
1793
1794 __xfrm_policy_unlink(pol, dir);
1795 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1796 cnt++;
1797 xfrm_audit_policy_delete(pol, 1, task_valid);
1798 xfrm_policy_kill(pol);
1799 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1800 goto again;
1801 }
1802 if (cnt)
1803 __xfrm_policy_inexact_flush(net);
1804 else
1805 err = -ESRCH;
1806 out:
1807 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1808 return err;
1809 }
1810 EXPORT_SYMBOL(xfrm_policy_flush);
1811
1812 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1813 int (*func)(struct xfrm_policy *, int, int, void*),
1814 void *data)
1815 {
1816 struct xfrm_policy *pol;
1817 struct xfrm_policy_walk_entry *x;
1818 int error = 0;
1819
1820 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1821 walk->type != XFRM_POLICY_TYPE_ANY)
1822 return -EINVAL;
1823
1824 if (list_empty(&walk->walk.all) && walk->seq != 0)
1825 return 0;
1826
1827 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1828 if (list_empty(&walk->walk.all))
1829 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1830 else
1831 x = list_first_entry(&walk->walk.all,
1832 struct xfrm_policy_walk_entry, all);
1833
1834 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1835 if (x->dead)
1836 continue;
1837 pol = container_of(x, struct xfrm_policy, walk);
1838 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1839 walk->type != pol->type)
1840 continue;
1841 error = func(pol, xfrm_policy_id2dir(pol->index),
1842 walk->seq, data);
1843 if (error) {
1844 list_move_tail(&walk->walk.all, &x->all);
1845 goto out;
1846 }
1847 walk->seq++;
1848 }
1849 if (walk->seq == 0) {
1850 error = -ENOENT;
1851 goto out;
1852 }
1853 list_del_init(&walk->walk.all);
1854 out:
1855 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1856 return error;
1857 }
1858 EXPORT_SYMBOL(xfrm_policy_walk);
1859
1860 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1861 {
1862 INIT_LIST_HEAD(&walk->walk.all);
1863 walk->walk.dead = 1;
1864 walk->type = type;
1865 walk->seq = 0;
1866 }
1867 EXPORT_SYMBOL(xfrm_policy_walk_init);
1868
1869 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1870 {
1871 if (list_empty(&walk->walk.all))
1872 return;
1873
1874 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1875 list_del(&walk->walk.all);
1876 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1877 }
1878 EXPORT_SYMBOL(xfrm_policy_walk_done);
1879
1880 /*
1881 * Find policy to apply to this flow.
1882 *
1883  * Returns 0 if policy found, otherwise a negative errno.
1884 */
1885 static int xfrm_policy_match(const struct xfrm_policy *pol,
1886 const struct flowi *fl,
1887 u8 type, u16 family, int dir, u32 if_id)
1888 {
1889 const struct xfrm_selector *sel = &pol->selector;
1890 int ret = -ESRCH;
1891 bool match;
1892
1893 if (pol->family != family ||
1894 pol->if_id != if_id ||
1895 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1896 pol->type != type)
1897 return ret;
1898
1899 match = xfrm_selector_match(sel, fl, family);
1900 if (match)
1901 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1902 dir);
1903 return ret;
1904 }
1905
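/* Lockless rbtree descent under RCU.  The per-bin seqcount detects a
 * concurrent tree rebuild; if it changed during the walk, the descent
 * restarts from the root.
 */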
1906 static struct xfrm_pol_inexact_node *
1907 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1908 seqcount_t *count,
1909 const xfrm_address_t *addr, u16 family)
1910 {
1911 const struct rb_node *parent;
1912 int seq;
1913
1914 again:
1915 seq = read_seqcount_begin(count);
1916
1917 parent = rcu_dereference_raw(r->rb_node);
1918 while (parent) {
1919 struct xfrm_pol_inexact_node *node;
1920 int delta;
1921
1922 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1923
1924 delta = xfrm_policy_addr_delta(addr, &node->addr,
1925 node->prefixlen, family);
1926 if (delta < 0) {
1927 parent = rcu_dereference_raw(parent->rb_left);
1928 continue;
1929 } else if (delta > 0) {
1930 parent = rcu_dereference_raw(parent->rb_right);
1931 continue;
1932 }
1933
1934 return node;
1935 }
1936
1937 if (read_seqcount_retry(count, seq))
1938 goto again;
1939
1940 return NULL;
1941 }
1942
1943 static bool
1944 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1945 struct xfrm_pol_inexact_bin *b,
1946 const xfrm_address_t *saddr,
1947 const xfrm_address_t *daddr)
1948 {
1949 struct xfrm_pol_inexact_node *n;
1950 u16 family;
1951
1952 if (!b)
1953 return false;
1954
1955 family = b->k.family;
1956 memset(cand, 0, sizeof(*cand));
1957 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1958
1959 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1960 family);
1961 if (n) {
1962 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1963 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1964 family);
1965 if (n)
1966 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1967 }
1968
1969 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1970 family);
1971 if (n)
1972 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1973
1974 return true;
1975 }
1976
1977 static struct xfrm_pol_inexact_bin *
1978 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1979 u8 dir, u32 if_id)
1980 {
1981 struct xfrm_pol_inexact_key k = {
1982 .family = family,
1983 .type = type,
1984 .dir = dir,
1985 .if_id = if_id,
1986 };
1987
1988 write_pnet(&k.net, net);
1989
1990 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1991 xfrm_pol_inexact_params);
1992 }
1993
1994 static struct xfrm_pol_inexact_bin *
1995 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1996 u8 dir, u32 if_id)
1997 {
1998 struct xfrm_pol_inexact_bin *bin;
1999
2000 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2001
2002 rcu_read_lock();
2003 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2004 rcu_read_unlock();
2005
2006 return bin;
2007 }
2008
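/* Walk one candidate list.  The list is sorted by priority, so the scan
 * stops once entries exceed the priority of the currently preferred
 * policy; among equal-priority matches the older entry (smaller ->pos)
 * wins.
 */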
2009 static struct xfrm_policy *
2010 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2011 struct xfrm_policy *prefer,
2012 const struct flowi *fl,
2013 u8 type, u16 family, int dir, u32 if_id)
2014 {
2015 u32 priority = prefer ? prefer->priority : ~0u;
2016 struct xfrm_policy *pol;
2017
2018 if (!chain)
2019 return NULL;
2020
2021 hlist_for_each_entry_rcu(pol, chain, bydst) {
2022 int err;
2023
2024 if (pol->priority > priority)
2025 break;
2026
2027 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2028 if (err) {
2029 if (err != -ESRCH)
2030 return ERR_PTR(err);
2031
2032 continue;
2033 }
2034
2035 if (prefer) {
2036 /* matches. Is it older than *prefer? */
2037 if (pol->priority == priority &&
2038 prefer->pos < pol->pos)
2039 return prefer;
2040 }
2041
2042 return pol;
2043 }
2044
2045 return NULL;
2046 }
2047
2048 static struct xfrm_policy *
2049 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2050 struct xfrm_policy *prefer,
2051 const struct flowi *fl,
2052 u8 type, u16 family, int dir, u32 if_id)
2053 {
2054 struct xfrm_policy *tmp;
2055 int i;
2056
2057 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2058 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2059 prefer,
2060 fl, type, family, dir,
2061 if_id);
2062 if (!tmp)
2063 continue;
2064
2065 if (IS_ERR(tmp))
2066 return tmp;
2067 prefer = tmp;
2068 }
2069
2070 return prefer;
2071 }
2072
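/* Main policy lookup: search the exact (hashed) chains first, then the
 * inexact candidate lists, keeping the best match (lowest priority
 * value, then lowest ->pos).  The hash-generation seqcount guards
 * against a concurrent table resize; on change the lookup is retried.
 */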
2073 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2074 const struct flowi *fl,
2075 u16 family, u8 dir,
2076 u32 if_id)
2077 {
2078 struct xfrm_pol_inexact_candidates cand;
2079 const xfrm_address_t *daddr, *saddr;
2080 struct xfrm_pol_inexact_bin *bin;
2081 struct xfrm_policy *pol, *ret;
2082 struct hlist_head *chain;
2083 unsigned int sequence;
2084 int err;
2085
2086 daddr = xfrm_flowi_daddr(fl, family);
2087 saddr = xfrm_flowi_saddr(fl, family);
2088 if (unlikely(!daddr || !saddr))
2089 return NULL;
2090
2091 rcu_read_lock();
2092 retry:
2093 do {
2094 sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2095 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2096 } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2097
2098 ret = NULL;
2099 hlist_for_each_entry_rcu(pol, chain, bydst) {
2100 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2101 if (err) {
2102 if (err == -ESRCH)
2103 continue;
2104 else {
2105 ret = ERR_PTR(err);
2106 goto fail;
2107 }
2108 } else {
2109 ret = pol;
2110 break;
2111 }
2112 }
2113 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2114 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2115 daddr))
2116 goto skip_inexact;
2117
2118 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2119 family, dir, if_id);
2120 if (pol) {
2121 ret = pol;
2122 if (IS_ERR(pol))
2123 goto fail;
2124 }
2125
2126 skip_inexact:
2127 if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2128 goto retry;
2129
2130 if (ret && !xfrm_pol_hold_rcu(ret))
2131 goto retry;
2132 fail:
2133 rcu_read_unlock();
2134
2135 return ret;
2136 }
2137
2138 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2139 const struct flowi *fl,
2140 u16 family, u8 dir, u32 if_id)
2141 {
2142 #ifdef CONFIG_XFRM_SUB_POLICY
2143 struct xfrm_policy *pol;
2144
2145 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2146 dir, if_id);
2147 if (pol != NULL)
2148 return pol;
2149 #endif
2150 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2151 dir, if_id);
2152 }
2153
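/* Per-socket policy lookup, performed under RCU. The socket policy must
 * match the flow's family and selector as well as the mark and if_id,
 * and pass the LSM check. If the refcount cannot be taken because the
 * policy is already being released, the lookup restarts.
 */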
2154 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2155 const struct flowi *fl,
2156 u16 family, u32 if_id)
2157 {
2158 struct xfrm_policy *pol;
2159
2160 rcu_read_lock();
2161 again:
2162 pol = rcu_dereference(sk->sk_policy[dir]);
2163 if (pol != NULL) {
2164 bool match;
2165 int err = 0;
2166
2167 if (pol->family != family) {
2168 pol = NULL;
2169 goto out;
2170 }
2171
2172 match = xfrm_selector_match(&pol->selector, fl, family);
2173 if (match) {
2174 if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2175 pol->if_id != if_id) {
2176 pol = NULL;
2177 goto out;
2178 }
2179 err = security_xfrm_policy_lookup(pol->security,
2180 fl->flowi_secid,
2181 dir);
2182 if (!err) {
2183 if (!xfrm_pol_hold_rcu(pol))
2184 goto again;
2185 } else if (err == -ESRCH) {
2186 pol = NULL;
2187 } else {
2188 pol = ERR_PTR(err);
2189 }
2190 } else
2191 pol = NULL;
2192 }
2193 out:
2194 rcu_read_unlock();
2195 return pol;
2196 }
2197
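/* The link/unlink helpers below run with net->xfrm.xfrm_policy_lock
 * held. Socket policies are accounted at index XFRM_POLICY_MAX + dir,
 * keeping their counts separate from the global SPD entries.
 */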
2198 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2199 {
2200 struct net *net = xp_net(pol);
2201
2202 list_add(&pol->walk.all, &net->xfrm.policy_all);
2203 net->xfrm.policy_count[dir]++;
2204 xfrm_pol_hold(pol);
2205 }
2206
2207 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2208 int dir)
2209 {
2210 struct net *net = xp_net(pol);
2211
2212 if (list_empty(&pol->walk.all))
2213 return NULL;
2214
2215 /* Socket policies are not hashed. */
2216 if (!hlist_unhashed(&pol->bydst)) {
2217 hlist_del_rcu(&pol->bydst);
2218 hlist_del_init(&pol->bydst_inexact_list);
2219 hlist_del(&pol->byidx);
2220 }
2221
2222 list_del_init(&pol->walk.all);
2223 net->xfrm.policy_count[dir]--;
2224
2225 return pol;
2226 }
2227
2228 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2229 {
2230 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2231 }
2232
2233 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2234 {
2235 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2236 }
2237
2238 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2239 {
2240 struct net *net = xp_net(pol);
2241
2242 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2243 pol = __xfrm_policy_unlink(pol, dir);
2244 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2245 if (pol) {
2246 xfrm_policy_kill(pol);
2247 return 0;
2248 }
2249 return -ENOENT;
2250 }
2251 EXPORT_SYMBOL(xfrm_policy_delete);
2252
2253 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2254 {
2255 struct net *net = sock_net(sk);
2256 struct xfrm_policy *old_pol;
2257
2258 #ifdef CONFIG_XFRM_SUB_POLICY
2259 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2260 return -EINVAL;
2261 #endif
2262
2263 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2264 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2265 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2266 if (pol) {
2267 pol->curlft.add_time = ktime_get_real_seconds();
2268 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2269 xfrm_sk_policy_link(pol, dir);
2270 }
2271 rcu_assign_pointer(sk->sk_policy[dir], pol);
2272 if (old_pol) {
2273 if (pol)
2274 xfrm_policy_requeue(old_pol, pol);
2275
2276 /* Unlinking always succeeds. This is the only function
2277 * allowed to delete or replace a socket policy.
2278 */
2279 xfrm_sk_policy_unlink(old_pol, dir);
2280 }
2281 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2282
2283 if (old_pol) {
2284 xfrm_policy_kill(old_pol);
2285 }
2286 return 0;
2287 }
2288
2289 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2290 {
2291 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2292 struct net *net = xp_net(old);
2293
2294 if (newp) {
2295 newp->selector = old->selector;
2296 if (security_xfrm_policy_clone(old->security,
2297 &newp->security)) {
2298 kfree(newp);
2299 return NULL; /* ENOMEM */
2300 }
2301 newp->lft = old->lft;
2302 newp->curlft = old->curlft;
2303 newp->mark = old->mark;
2304 newp->if_id = old->if_id;
2305 newp->action = old->action;
2306 newp->flags = old->flags;
2307 newp->xfrm_nr = old->xfrm_nr;
2308 newp->index = old->index;
2309 newp->type = old->type;
2310 newp->family = old->family;
2311 memcpy(newp->xfrm_vec, old->xfrm_vec,
2312 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2313 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2314 xfrm_sk_policy_link(newp, dir);
2315 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2316 xfrm_pol_put(newp);
2317 }
2318 return newp;
2319 }
2320
2321 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2322 {
2323 const struct xfrm_policy *p;
2324 struct xfrm_policy *np;
2325 int i, ret = 0;
2326
2327 rcu_read_lock();
2328 for (i = 0; i < 2; i++) {
2329 p = rcu_dereference(osk->sk_policy[i]);
2330 if (p) {
2331 np = clone_policy(p, i);
2332 if (unlikely(!np)) {
2333 ret = -ENOMEM;
2334 break;
2335 }
2336 rcu_assign_pointer(sk->sk_policy[i], np);
2337 }
2338 }
2339 rcu_read_unlock();
2340 return ret;
2341 }
2342
2343 static int
2344 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2345 xfrm_address_t *remote, unsigned short family, u32 mark)
2346 {
2347 int err;
2348 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2349
2350 if (unlikely(afinfo == NULL))
2351 return -EINVAL;
2352 err = afinfo->get_saddr(net, oif, local, remote, mark);
2353 rcu_read_unlock();
2354 return err;
2355 }
2356
2357 /* Resolve the list of templates for the flow, given the policy. */
2358
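/* For each template in the policy: tunnel and BEET templates supply the
 * outer addresses (resolving a source address via the address family
 * when the template leaves it wildcarded), while transport templates
 * reuse the flow's own addresses. A missing state (-ESRCH) is mapped to
 * -EAGAIN so callers can wait for the key manager; on failure every
 * state reference acquired so far is dropped again.
 */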
2359 static int
2360 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2361 struct xfrm_state **xfrm, unsigned short family)
2362 {
2363 struct net *net = xp_net(policy);
2364 int nx;
2365 int i, error;
2366 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2367 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2368 xfrm_address_t tmp;
2369
2370 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2371 struct xfrm_state *x;
2372 xfrm_address_t *remote = daddr;
2373 xfrm_address_t *local = saddr;
2374 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2375
2376 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2377 tmpl->mode == XFRM_MODE_BEET) {
2378 remote = &tmpl->id.daddr;
2379 local = &tmpl->saddr;
2380 if (xfrm_addr_any(local, tmpl->encap_family)) {
2381 error = xfrm_get_saddr(net, fl->flowi_oif,
2382 &tmp, remote,
2383 tmpl->encap_family, 0);
2384 if (error)
2385 goto fail;
2386 local = &tmp;
2387 }
2388 }
2389
2390 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2391 family, policy->if_id);
2392
2393 if (x && x->km.state == XFRM_STATE_VALID) {
2394 xfrm[nx++] = x;
2395 daddr = remote;
2396 saddr = local;
2397 continue;
2398 }
2399 if (x) {
2400 error = (x->km.state == XFRM_STATE_ERROR ?
2401 -EINVAL : -EAGAIN);
2402 xfrm_state_put(x);
2403 } else if (error == -ESRCH) {
2404 error = -EAGAIN;
2405 }
2406
2407 if (!tmpl->optional)
2408 goto fail;
2409 }
2410 return nx;
2411
2412 fail:
2413 for (nx--; nx >= 0; nx--)
2414 xfrm_state_put(xfrm[nx]);
2415 return error;
2416 }
2417
2418 static int
2419 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2420 struct xfrm_state **xfrm, unsigned short family)
2421 {
2422 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2423 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2424 int cnx = 0;
2425 int error;
2426 int ret;
2427 int i;
2428
2429 for (i = 0; i < npols; i++) {
2430 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2431 error = -ENOBUFS;
2432 goto fail;
2433 }
2434
2435 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2436 if (ret < 0) {
2437 error = ret;
2438 goto fail;
2439 } else
2440 cnx += ret;
2441 }
2442
2443 /* The found states are sorted for outbound processing. */
2444 if (npols > 1)
2445 xfrm_state_sort(xfrm, tpp, cnx, family);
2446
2447 return cnx;
2448
2449 fail:
2450 for (cnx--; cnx >= 0; cnx--)
2451 xfrm_state_put(tpp[cnx]);
2452 return error;
2453
2454 }
2455
2456 static int xfrm_get_tos(const struct flowi *fl, int family)
2457 {
2458 if (family == AF_INET)
2459 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2460
2461 return 0;
2462 }
2463
2464 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2465 {
2466 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2467 struct dst_ops *dst_ops;
2468 struct xfrm_dst *xdst;
2469
2470 if (!afinfo)
2471 return ERR_PTR(-EINVAL);
2472
2473 switch (family) {
2474 case AF_INET:
2475 dst_ops = &net->xfrm.xfrm4_dst_ops;
2476 break;
2477 #if IS_ENABLED(CONFIG_IPV6)
2478 case AF_INET6:
2479 dst_ops = &net->xfrm.xfrm6_dst_ops;
2480 break;
2481 #endif
2482 default:
2483 BUG();
2484 }
2485 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2486
2487 if (likely(xdst)) {
2488 struct dst_entry *dst = &xdst->u.dst;
2489
2490 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2491 } else
2492 xdst = ERR_PTR(-ENOBUFS);
2493
2494 rcu_read_unlock();
2495
2496 return xdst;
2497 }
2498
2499 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2500 int nfheader_len)
2501 {
2502 if (dst->ops->family == AF_INET6) {
2503 struct rt6_info *rt = (struct rt6_info *)dst;
2504 path->path_cookie = rt6_get_cookie(rt);
2505 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2506 }
2507 }
2508
2509 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2510 const struct flowi *fl)
2511 {
2512 const struct xfrm_policy_afinfo *afinfo =
2513 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2514 int err;
2515
2516 if (!afinfo)
2517 return -EINVAL;
2518
2519 err = afinfo->fill_dst(xdst, dev, fl);
2520
2521 rcu_read_unlock();
2522
2523 return err;
2524 }
2525
2526
2527 /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
2528 * all the metrics... In short, build a bundle.
2529 */
2530
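/* The chain built below, outermost transform first, looks like
 *
 *	xdst0 -> xdst1 -> ... -> dst (the underlying route)
 *
 * with each level linked via xfrm_dst_set_child() and xdst0->path
 * pointing at the real route. Header/trailer space is summed on the
 * way in and written back top-down, so every level reserves room for
 * its own transform plus all inner ones.
 */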
2531 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2532 struct xfrm_state **xfrm,
2533 struct xfrm_dst **bundle,
2534 int nx,
2535 const struct flowi *fl,
2536 struct dst_entry *dst)
2537 {
2538 const struct xfrm_state_afinfo *afinfo;
2539 const struct xfrm_mode *inner_mode;
2540 struct net *net = xp_net(policy);
2541 unsigned long now = jiffies;
2542 struct net_device *dev;
2543 struct xfrm_dst *xdst_prev = NULL;
2544 struct xfrm_dst *xdst0 = NULL;
2545 int i = 0;
2546 int err;
2547 int header_len = 0;
2548 int nfheader_len = 0;
2549 int trailer_len = 0;
2550 int tos;
2551 int family = policy->selector.family;
2552 xfrm_address_t saddr, daddr;
2553
2554 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2555
2556 tos = xfrm_get_tos(fl, family);
2557
2558 dst_hold(dst);
2559
2560 for (; i < nx; i++) {
2561 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2562 struct dst_entry *dst1 = &xdst->u.dst;
2563
2564 err = PTR_ERR(xdst);
2565 if (IS_ERR(xdst)) {
2566 dst_release(dst);
2567 goto put_states;
2568 }
2569
2570 bundle[i] = xdst;
2571 if (!xdst_prev)
2572 xdst0 = xdst;
2573 else
2574 /* The ref count is taken during xfrm_alloc_dst();
2575 * no need to do dst_clone() on dst1.
2576 */
2577 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2578
2579 if (xfrm[i]->sel.family == AF_UNSPEC) {
2580 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2581 xfrm_af2proto(family));
2582 if (!inner_mode) {
2583 err = -EAFNOSUPPORT;
2584 dst_release(dst);
2585 goto put_states;
2586 }
2587 } else
2588 inner_mode = &xfrm[i]->inner_mode;
2589
2590 xdst->route = dst;
2591 dst_copy_metrics(dst1, dst);
2592
2593 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2594 __u32 mark = 0;
2595
2596 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2597 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2598
2599 family = xfrm[i]->props.family;
2600 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2601 &saddr, &daddr, family, mark);
2602 err = PTR_ERR(dst);
2603 if (IS_ERR(dst))
2604 goto put_states;
2605 } else
2606 dst_hold(dst);
2607
2608 dst1->xfrm = xfrm[i];
2609 xdst->xfrm_genid = xfrm[i]->genid;
2610
2611 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2612 dst1->flags |= DST_HOST;
2613 dst1->lastuse = now;
2614
2615 dst1->input = dst_discard;
2616
2617 rcu_read_lock();
2618 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2619 if (likely(afinfo))
2620 dst1->output = afinfo->output;
2621 else
2622 dst1->output = dst_discard_out;
2623 rcu_read_unlock();
2624
2625 xdst_prev = xdst;
2626
2627 header_len += xfrm[i]->props.header_len;
2628 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2629 nfheader_len += xfrm[i]->props.header_len;
2630 trailer_len += xfrm[i]->props.trailer_len;
2631 }
2632
2633 xfrm_dst_set_child(xdst_prev, dst);
2634 xdst0->path = dst;
2635
2636 err = -ENODEV;
2637 dev = dst->dev;
2638 if (!dev)
2639 goto free_dst;
2640
2641 xfrm_init_path(xdst0, dst, nfheader_len);
2642 xfrm_init_pmtu(bundle, nx);
2643
2644 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2645 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2646 err = xfrm_fill_dst(xdst_prev, dev, fl);
2647 if (err)
2648 goto free_dst;
2649
2650 xdst_prev->u.dst.header_len = header_len;
2651 xdst_prev->u.dst.trailer_len = trailer_len;
2652 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2653 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2654 }
2655
2656 return &xdst0->u.dst;
2657
2658 put_states:
2659 for (; i < nx; i++)
2660 xfrm_state_put(xfrm[i]);
2661 free_dst:
2662 if (xdst0)
2663 dst_release_immediate(&xdst0->u.dst);
2664
2665 return ERR_PTR(err);
2666 }
2667
2668 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2669 struct xfrm_policy **pols,
2670 int *num_pols, int *num_xfrms)
2671 {
2672 int i;
2673
2674 if (*num_pols == 0 || !pols[0]) {
2675 *num_pols = 0;
2676 *num_xfrms = 0;
2677 return 0;
2678 }
2679 if (IS_ERR(pols[0]))
2680 return PTR_ERR(pols[0]);
2681
2682 *num_xfrms = pols[0]->xfrm_nr;
2683
2684 #ifdef CONFIG_XFRM_SUB_POLICY
2685 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2686 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2687 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2688 XFRM_POLICY_TYPE_MAIN,
2689 fl, family,
2690 XFRM_POLICY_OUT,
2691 pols[0]->if_id);
2692 if (pols[1]) {
2693 if (IS_ERR(pols[1])) {
2694 xfrm_pols_put(pols, *num_pols);
2695 return PTR_ERR(pols[1]);
2696 }
2697 (*num_pols)++;
2698 (*num_xfrms) += pols[1]->xfrm_nr;
2699 }
2700 }
2701 #endif
2702 for (i = 0; i < *num_pols; i++) {
2703 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2704 *num_xfrms = -1;
2705 break;
2706 }
2707 }
2708
2709 return 0;
2710
2711 }
2712
2713 static struct xfrm_dst *
2714 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2715 const struct flowi *fl, u16 family,
2716 struct dst_entry *dst_orig)
2717 {
2718 struct net *net = xp_net(pols[0]);
2719 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2720 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2721 struct xfrm_dst *xdst;
2722 struct dst_entry *dst;
2723 int err;
2724
2725 /* Try to instantiate a bundle */
2726 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2727 if (err <= 0) {
2728 if (err == 0)
2729 return NULL;
2730
2731 if (err != -EAGAIN)
2732 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2733 return ERR_PTR(err);
2734 }
2735
2736 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2737 if (IS_ERR(dst)) {
2738 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2739 return ERR_CAST(dst);
2740 }
2741
2742 xdst = (struct xfrm_dst *)dst;
2743 xdst->num_xfrms = err;
2744 xdst->num_pols = num_pols;
2745 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2746 xdst->policy_genid = atomic_read(&pols[0]->genid);
2747
2748 return xdst;
2749 }
2750
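/* Hold-queue timer: re-resolve the flow of the head-of-queue packet.
 * If the lookup still returns a DST_XFRM_QUEUE bundle the states are
 * not ready, so the timeout is doubled and the timer re-armed until
 * XFRM_QUEUE_TMO_MAX is reached, at which point the queue is purged.
 * Once a real bundle exists, every queued skb is sent via dst_output().
 */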
2751 static void xfrm_policy_queue_process(struct timer_list *t)
2752 {
2753 struct sk_buff *skb;
2754 struct sock *sk;
2755 struct dst_entry *dst;
2756 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2757 struct net *net = xp_net(pol);
2758 struct xfrm_policy_queue *pq = &pol->polq;
2759 struct flowi fl;
2760 struct sk_buff_head list;
2761
2762 spin_lock(&pq->hold_queue.lock);
2763 skb = skb_peek(&pq->hold_queue);
2764 if (!skb) {
2765 spin_unlock(&pq->hold_queue.lock);
2766 goto out;
2767 }
2768 dst = skb_dst(skb);
2769 sk = skb->sk;
2770 xfrm_decode_session(skb, &fl, dst->ops->family);
2771 spin_unlock(&pq->hold_queue.lock);
2772
2773 dst_hold(xfrm_dst_path(dst));
2774 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2775 if (IS_ERR(dst))
2776 goto purge_queue;
2777
2778 if (dst->flags & DST_XFRM_QUEUE) {
2779 dst_release(dst);
2780
2781 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2782 goto purge_queue;
2783
2784 pq->timeout = pq->timeout << 1;
2785 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2786 xfrm_pol_hold(pol);
2787 goto out;
2788 }
2789
2790 dst_release(dst);
2791
2792 __skb_queue_head_init(&list);
2793
2794 spin_lock(&pq->hold_queue.lock);
2795 pq->timeout = 0;
2796 skb_queue_splice_init(&pq->hold_queue, &list);
2797 spin_unlock(&pq->hold_queue.lock);
2798
2799 while (!skb_queue_empty(&list)) {
2800 skb = __skb_dequeue(&list);
2801
2802 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2803 dst_hold(xfrm_dst_path(skb_dst(skb)));
2804 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2805 if (IS_ERR(dst)) {
2806 kfree_skb(skb);
2807 continue;
2808 }
2809
2810 nf_reset(skb);
2811 skb_dst_drop(skb);
2812 skb_dst_set(skb, dst);
2813
2814 dst_output(net, skb->sk, skb);
2815 }
2816
2817 out:
2818 xfrm_pol_put(pol);
2819 return;
2820
2821 purge_queue:
2822 pq->timeout = 0;
2823 skb_queue_purge(&pq->hold_queue);
2824 xfrm_pol_put(pol);
2825 }
2826
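/* Output hook of a dummy (DST_XFRM_QUEUE) bundle: park the skb on the
 * policy's hold queue and arm the hold timer, dropping packets once
 * the queue limit is exceeded. The timer callback above later flushes
 * or purges the queue.
 */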
2827 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2828 {
2829 unsigned long sched_next;
2830 struct dst_entry *dst = skb_dst(skb);
2831 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2832 struct xfrm_policy *pol = xdst->pols[0];
2833 struct xfrm_policy_queue *pq = &pol->polq;
2834
2835 if (unlikely(skb_fclone_busy(sk, skb))) {
2836 kfree_skb(skb);
2837 return 0;
2838 }
2839
2840 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2841 kfree_skb(skb);
2842 return -EAGAIN;
2843 }
2844
2845 skb_dst_force(skb);
2846
2847 spin_lock_bh(&pq->hold_queue.lock);
2848
2849 if (!pq->timeout)
2850 pq->timeout = XFRM_QUEUE_TMO_MIN;
2851
2852 sched_next = jiffies + pq->timeout;
2853
2854 if (del_timer(&pq->hold_timer)) {
2855 if (time_before(pq->hold_timer.expires, sched_next))
2856 sched_next = pq->hold_timer.expires;
2857 xfrm_pol_put(pol);
2858 }
2859
2860 __skb_queue_tail(&pq->hold_queue, skb);
2861 if (!mod_timer(&pq->hold_timer, sched_next))
2862 xfrm_pol_hold(pol);
2863
2864 spin_unlock_bh(&pq->hold_queue.lock);
2865
2866 return 0;
2867 }
2868
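/* Build a queueing bundle around xflo->dst_orig so that packets are
 * held instead of dropped while larval states resolve. The plain
 * xfrm_dst is returned unadorned when queueing was not requested, when
 * larval drop is enabled, or when there is nothing to wait for
 * (num_xfrms <= 0).
 */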
2869 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2870 struct xfrm_flo *xflo,
2871 const struct flowi *fl,
2872 int num_xfrms,
2873 u16 family)
2874 {
2875 int err;
2876 struct net_device *dev;
2877 struct dst_entry *dst;
2878 struct dst_entry *dst1;
2879 struct xfrm_dst *xdst;
2880
2881 xdst = xfrm_alloc_dst(net, family);
2882 if (IS_ERR(xdst))
2883 return xdst;
2884
2885 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2886 net->xfrm.sysctl_larval_drop ||
2887 num_xfrms <= 0)
2888 return xdst;
2889
2890 dst = xflo->dst_orig;
2891 dst1 = &xdst->u.dst;
2892 dst_hold(dst);
2893 xdst->route = dst;
2894
2895 dst_copy_metrics(dst1, dst);
2896
2897 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2898 dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2899 dst1->lastuse = jiffies;
2900
2901 dst1->input = dst_discard;
2902 dst1->output = xdst_queue_output;
2903
2904 dst_hold(dst);
2905 xfrm_dst_set_child(xdst, dst);
2906 xdst->path = dst;
2907
2908 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2909
2910 err = -ENODEV;
2911 dev = dst->dev;
2912 if (!dev)
2913 goto free_dst;
2914
2915 err = xfrm_fill_dst(xdst, dev, fl);
2916 if (err)
2917 goto free_dst;
2918
2919 out:
2920 return xdst;
2921
2922 free_dst:
2923 dst_release(dst1);
2924 xdst = ERR_PTR(err);
2925 goto out;
2926 }
2927
2928 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2929 const struct flowi *fl,
2930 u16 family, u8 dir,
2931 struct xfrm_flo *xflo, u32 if_id)
2932 {
2933 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2934 int num_pols = 0, num_xfrms = 0, err;
2935 struct xfrm_dst *xdst;
2936
2937 /* Resolve the policies to use if we couldn't get them from
2938 * the previous cache entry */
2939 num_pols = 1;
2940 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2941 err = xfrm_expand_policies(fl, family, pols,
2942 &num_pols, &num_xfrms);
2943 if (err < 0)
2944 goto inc_error;
2945 if (num_pols == 0)
2946 return NULL;
2947 if (num_xfrms <= 0)
2948 goto make_dummy_bundle;
2949
2950 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2951 xflo->dst_orig);
2952 if (IS_ERR(xdst)) {
2953 err = PTR_ERR(xdst);
2954 if (err == -EREMOTE) {
2955 xfrm_pols_put(pols, num_pols);
2956 return NULL;
2957 }
2958
2959 if (err != -EAGAIN)
2960 goto error;
2961 goto make_dummy_bundle;
2962 } else if (xdst == NULL) {
2963 num_xfrms = 0;
2964 goto make_dummy_bundle;
2965 }
2966
2967 return xdst;
2968
2969 make_dummy_bundle:
2970 /* We found policies, but there are no bundles to instantiate:
2971 * either the policy blocks, it has no transformations, or
2972 * we could not build a template (no xfrm_states). */
2973 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2974 if (IS_ERR(xdst)) {
2975 xfrm_pols_put(pols, num_pols);
2976 return ERR_CAST(xdst);
2977 }
2978 xdst->num_pols = num_pols;
2979 xdst->num_xfrms = num_xfrms;
2980 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2981
2982 return xdst;
2983
2984 inc_error:
2985 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2986 error:
2987 xfrm_pols_put(pols, num_pols);
2988 return ERR_PTR(err);
2989 }
2990
2991 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2992 struct dst_entry *dst_orig)
2993 {
2994 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2995 struct dst_entry *ret;
2996
2997 if (!afinfo) {
2998 dst_release(dst_orig);
2999 return ERR_PTR(-EINVAL);
3000 } else {
3001 ret = afinfo->blackhole_route(net, dst_orig);
3002 }
3003 rcu_read_unlock();
3004
3005 return ret;
3006 }
3007
3008 /* Finds/creates a bundle for the given flow and if_id.
3009 *
3010 * At the moment we eat a raw IP route, mostly to speed up lookups
3011 * on interfaces with disabled IPsec.
3012 *
3013 * xfrm_lookup() uses an if_id of 0 by default and is provided for
3014 * compatibility.
3015 */
3016 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3017 struct dst_entry *dst_orig,
3018 const struct flowi *fl,
3019 const struct sock *sk,
3020 int flags, u32 if_id)
3021 {
3022 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3023 struct xfrm_dst *xdst;
3024 struct dst_entry *dst, *route;
3025 u16 family = dst_orig->ops->family;
3026 u8 dir = XFRM_POLICY_OUT;
3027 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3028
3029 dst = NULL;
3030 xdst = NULL;
3031 route = NULL;
3032
3033 sk = sk_const_to_full_sk(sk);
3034 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3035 num_pols = 1;
3036 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3037 if_id);
3038 err = xfrm_expand_policies(fl, family, pols,
3039 &num_pols, &num_xfrms);
3040 if (err < 0)
3041 goto dropdst;
3042
3043 if (num_pols) {
3044 if (num_xfrms <= 0) {
3045 drop_pols = num_pols;
3046 goto no_transform;
3047 }
3048
3049 xdst = xfrm_resolve_and_create_bundle(
3050 pols, num_pols, fl,
3051 family, dst_orig);
3052
3053 if (IS_ERR(xdst)) {
3054 xfrm_pols_put(pols, num_pols);
3055 err = PTR_ERR(xdst);
3056 if (err == -EREMOTE)
3057 goto nopol;
3058
3059 goto dropdst;
3060 } else if (xdst == NULL) {
3061 num_xfrms = 0;
3062 drop_pols = num_pols;
3063 goto no_transform;
3064 }
3065
3066 route = xdst->route;
3067 }
3068 }
3069
3070 if (xdst == NULL) {
3071 struct xfrm_flo xflo;
3072
3073 xflo.dst_orig = dst_orig;
3074 xflo.flags = flags;
3075
3076 /* To accelerate a bit... */
3077 if ((dst_orig->flags & DST_NOXFRM) ||
3078 !net->xfrm.policy_count[XFRM_POLICY_OUT])
3079 goto nopol;
3080
3081 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3082 if (xdst == NULL)
3083 goto nopol;
3084 if (IS_ERR(xdst)) {
3085 err = PTR_ERR(xdst);
3086 goto dropdst;
3087 }
3088
3089 num_pols = xdst->num_pols;
3090 num_xfrms = xdst->num_xfrms;
3091 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3092 route = xdst->route;
3093 }
3094
3095 dst = &xdst->u.dst;
3096 if (route == NULL && num_xfrms > 0) {
3097 /* The only case in which xfrm_bundle_lookup() returns
3098 * a bundle with a null route is when the template could
3099 * not be resolved. It means the policies are there, but
3100 * the bundle could not be created, since we don't yet
3101 * have the xfrm_states. We need to wait for the KM to
3102 * negotiate new SAs or bail out with an error. */
3103 if (net->xfrm.sysctl_larval_drop) {
3104 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3105 err = -EREMOTE;
3106 goto error;
3107 }
3108
3109 err = -EAGAIN;
3110
3111 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3112 goto error;
3113 }
3114
3115 no_transform:
3116 if (num_pols == 0)
3117 goto nopol;
3118
3119 if ((flags & XFRM_LOOKUP_ICMP) &&
3120 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3121 err = -ENOENT;
3122 goto error;
3123 }
3124
3125 for (i = 0; i < num_pols; i++)
3126 pols[i]->curlft.use_time = ktime_get_real_seconds();
3127
3128 if (num_xfrms < 0) {
3129 /* Prohibit the flow */
3130 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3131 err = -EPERM;
3132 goto error;
3133 } else if (num_xfrms > 0) {
3134 /* Flow transformed */
3135 dst_release(dst_orig);
3136 } else {
3137 /* Flow passes untransformed */
3138 dst_release(dst);
3139 dst = dst_orig;
3140 }
3141 ok:
3142 xfrm_pols_put(pols, drop_pols);
3143 if (dst && dst->xfrm &&
3144 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3145 dst->flags |= DST_XFRM_TUNNEL;
3146 return dst;
3147
3148 nopol:
3149 if (!(flags & XFRM_LOOKUP_ICMP)) {
3150 dst = dst_orig;
3151 goto ok;
3152 }
3153 err = -ENOENT;
3154 error:
3155 dst_release(dst);
3156 dropdst:
3157 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3158 dst_release(dst_orig);
3159 xfrm_pols_put(pols, drop_pols);
3160 return ERR_PTR(err);
3161 }
3162 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3163
3164 /* Main function: finds/creates a bundle for the given flow.
3165 *
3166 * At the moment we eat a raw IP route, mostly to speed up lookups
3167 * on interfaces with disabled IPsec.
3168 */
3169 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3170 const struct flowi *fl, const struct sock *sk,
3171 int flags)
3172 {
3173 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3174 }
3175 EXPORT_SYMBOL(xfrm_lookup);
3176
3177 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3178 * Otherwise we may send out blackholed packets.
3179 */
3180 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3181 const struct flowi *fl,
3182 const struct sock *sk, int flags)
3183 {
3184 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3185 flags | XFRM_LOOKUP_QUEUE |
3186 XFRM_LOOKUP_KEEP_DST_REF);
3187
3188 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
3189 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3190
3191 if (IS_ERR(dst))
3192 dst_release(dst_orig);
3193
3194 return dst;
3195 }
3196 EXPORT_SYMBOL(xfrm_lookup_route);
3197
3198 static inline int
3199 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3200 {
3201 struct sec_path *sp = skb_sec_path(skb);
3202 struct xfrm_state *x;
3203
3204 if (!sp || idx < 0 || idx >= sp->len)
3205 return 0;
3206 x = sp->xvec[idx];
3207 if (!x->type->reject)
3208 return 0;
3209 return x->type->reject(x, skb, fl);
3210 }
3211
3212 /* When the skb is transformed back to its "native" form, we have to
3213 * check policy restrictions. At the moment we do this in a maximally
3214 * stupid way. Shame on me. :-) Of course, connected sockets must
3215 * have the policy cached on them.
3216 */
3217
3218 static inline int
3219 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3220 unsigned short family)
3221 {
3222 if (xfrm_state_kern(x))
3223 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3224 return x->id.proto == tmpl->id.proto &&
3225 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3226 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3227 x->props.mode == tmpl->mode &&
3228 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3229 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3230 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3231 xfrm_state_addr_cmp(tmpl, x, family));
3232 }
3233
3234 /*
3235 * Zero or a positive value is returned when validation succeeds (either a
3236 * bypass because of an optional transport-mode template, or the next index
3237 * of the secpath state matched against the template).
3238 * -1 is returned when no matching template is found.
3239 * Otherwise "-2 - errored_index" is returned.
3240 */
3241 static inline int
3242 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3243 unsigned short family)
3244 {
3245 int idx = start;
3246
3247 if (tmpl->optional) {
3248 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3249 return start;
3250 } else
3251 start = -1;
3252 for (; idx < sp->len; idx++) {
3253 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3254 return ++idx;
3255 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3256 if (start == -1)
3257 start = -2-idx;
3258 break;
3259 }
3260 }
3261 return start;
3262 }
3263
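/* Assemble a flowi4 from the packet headers for policy lookup; in
 * reverse mode the address and port roles are swapped. Upper-layer
 * selector fields (ports, ICMP type/code, SPI, GRE key) are extracted
 * only from non-fragmented packets whose headers can be pulled.
 */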
3264 static void
3265 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3266 {
3267 const struct iphdr *iph = ip_hdr(skb);
3268 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
3269 struct flowi4 *fl4 = &fl->u.ip4;
3270 int oif = 0;
3271
3272 if (skb_dst(skb))
3273 oif = skb_dst(skb)->dev->ifindex;
3274
3275 memset(fl4, 0, sizeof(struct flowi4));
3276 fl4->flowi4_mark = skb->mark;
3277 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3278
3279 if (!ip_is_fragment(iph)) {
3280 switch (iph->protocol) {
3281 case IPPROTO_UDP:
3282 case IPPROTO_UDPLITE:
3283 case IPPROTO_TCP:
3284 case IPPROTO_SCTP:
3285 case IPPROTO_DCCP:
3286 if (xprth + 4 < skb->data ||
3287 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3288 __be16 *ports;
3289
3290 xprth = skb_network_header(skb) + iph->ihl * 4;
3291 ports = (__be16 *)xprth;
3292
3293 fl4->fl4_sport = ports[!!reverse];
3294 fl4->fl4_dport = ports[!reverse];
3295 }
3296 break;
3297 case IPPROTO_ICMP:
3298 if (xprth + 2 < skb->data ||
3299 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3300 u8 *icmp;
3301
3302 xprth = skb_network_header(skb) + iph->ihl * 4;
3303 icmp = xprth;
3304
3305 fl4->fl4_icmp_type = icmp[0];
3306 fl4->fl4_icmp_code = icmp[1];
3307 }
3308 break;
3309 case IPPROTO_ESP:
3310 if (xprth + 4 < skb->data ||
3311 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3312 __be32 *ehdr;
3313
3314 xprth = skb_network_header(skb) + iph->ihl * 4;
3315 ehdr = (__be32 *)xprth;
3316
3317 fl4->fl4_ipsec_spi = ehdr[0];
3318 }
3319 break;
3320 case IPPROTO_AH:
3321 if (xprth + 8 < skb->data ||
3322 pskb_may_pull(skb, xprth + 8 - skb->data)) {
3323 __be32 *ah_hdr;
3324
3325 xprth = skb_network_header(skb) + iph->ihl * 4;
3326 ah_hdr = (__be32 *)xprth;
3327
3328 fl4->fl4_ipsec_spi = ah_hdr[1];
3329 }
3330 break;
3331 case IPPROTO_COMP:
3332 if (xprth + 4 < skb->data ||
3333 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3334 __be16 *ipcomp_hdr;
3335
3336 xprth = skb_network_header(skb) + iph->ihl * 4;
3337 ipcomp_hdr = (__be16 *)xprth;
3338
3339 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3340 }
3341 break;
3342 case IPPROTO_GRE:
3343 if (xprth + 12 < skb->data ||
3344 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3345 __be16 *greflags;
3346 __be32 *gre_hdr;
3347
3348 xprth = skb_network_header(skb) + iph->ihl * 4;
3349 greflags = (__be16 *)xprth;
3350 gre_hdr = (__be32 *)xprth;
3351
3352 if (greflags[0] & GRE_KEY) {
3353 if (greflags[0] & GRE_CSUM)
3354 gre_hdr++;
3355 fl4->fl4_gre_key = gre_hdr[1];
3356 }
3357 }
3358 break;
3359 default:
3360 fl4->fl4_ipsec_spi = 0;
3361 break;
3362 }
3363 }
3364 fl4->flowi4_proto = iph->protocol;
3365 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3366 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3367 fl4->flowi4_tos = iph->tos;
3368 }
3369
3370 #if IS_ENABLED(CONFIG_IPV6)
3371 static void
3372 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3373 {
3374 struct flowi6 *fl6 = &fl->u.ip6;
3375 int onlyproto = 0;
3376 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3377 u32 offset = sizeof(*hdr);
3378 struct ipv6_opt_hdr *exthdr;
3379 const unsigned char *nh = skb_network_header(skb);
3380 u16 nhoff = IP6CB(skb)->nhoff;
3381 int oif = 0;
3382 u8 nexthdr;
3383
3384 if (!nhoff)
3385 nhoff = offsetof(struct ipv6hdr, nexthdr);
3386
3387 nexthdr = nh[nhoff];
3388
3389 if (skb_dst(skb))
3390 oif = skb_dst(skb)->dev->ifindex;
3391
3392 memset(fl6, 0, sizeof(struct flowi6));
3393 fl6->flowi6_mark = skb->mark;
3394 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3395
3396 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3397 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3398
3399 while (nh + offset + sizeof(*exthdr) < skb->data ||
3400 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3401 nh = skb_network_header(skb);
3402 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3403
3404 switch (nexthdr) {
3405 case NEXTHDR_FRAGMENT:
3406 onlyproto = 1;
3407 /* fall through */
3408 case NEXTHDR_ROUTING:
3409 case NEXTHDR_HOP:
3410 case NEXTHDR_DEST:
3411 offset += ipv6_optlen(exthdr);
3412 nexthdr = exthdr->nexthdr;
3413 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3414 break;
3415 case IPPROTO_UDP:
3416 case IPPROTO_UDPLITE:
3417 case IPPROTO_TCP:
3418 case IPPROTO_SCTP:
3419 case IPPROTO_DCCP:
3420 if (!onlyproto && (nh + offset + 4 < skb->data ||
3421 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3422 __be16 *ports;
3423
3424 nh = skb_network_header(skb);
3425 ports = (__be16 *)(nh + offset);
3426 fl6->fl6_sport = ports[!!reverse];
3427 fl6->fl6_dport = ports[!reverse];
3428 }
3429 fl6->flowi6_proto = nexthdr;
3430 return;
3431 case IPPROTO_ICMPV6:
3432 if (!onlyproto && (nh + offset + 2 < skb->data ||
3433 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3434 u8 *icmp;
3435
3436 nh = skb_network_header(skb);
3437 icmp = (u8 *)(nh + offset);
3438 fl6->fl6_icmp_type = icmp[0];
3439 fl6->fl6_icmp_code = icmp[1];
3440 }
3441 fl6->flowi6_proto = nexthdr;
3442 return;
3443 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3444 case IPPROTO_MH:
3445 offset += ipv6_optlen(exthdr);
3446 if (!onlyproto && (nh + offset + 3 < skb->data ||
3447 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3448 struct ip6_mh *mh;
3449
3450 nh = skb_network_header(skb);
3451 mh = (struct ip6_mh *)(nh + offset);
3452 fl6->fl6_mh_type = mh->ip6mh_type;
3453 }
3454 fl6->flowi6_proto = nexthdr;
3455 return;
3456 #endif
3457 /* XXX Why are these headers here? */
3458 case IPPROTO_AH:
3459 case IPPROTO_ESP:
3460 case IPPROTO_COMP:
3461 default:
3462 fl6->fl6_ipsec_spi = 0;
3463 fl6->flowi6_proto = nexthdr;
3464 return;
3465 }
3466 }
3467 }
3468 #endif
3469
3470 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3471 unsigned int family, int reverse)
3472 {
3473 switch (family) {
3474 case AF_INET:
3475 decode_session4(skb, fl, reverse);
3476 break;
3477 #if IS_ENABLED(CONFIG_IPV6)
3478 case AF_INET6:
3479 decode_session6(skb, fl, reverse);
3480 break;
3481 #endif
3482 default:
3483 return -EAFNOSUPPORT;
3484 }
3485
3486 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3487 }
3488 EXPORT_SYMBOL(__xfrm_decode_session);
3489
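/* Return 1, recording the index in *idxp, if any secpath state at or
 * after position k uses a non-transport mode; return 0 otherwise.
 */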
3490 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3491 {
3492 for (; k < sp->len; k++) {
3493 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3494 *idxp = k;
3495 return 1;
3496 }
3497 }
3498
3499 return 0;
3500 }
3501
3502 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3503 unsigned short family)
3504 {
3505 struct net *net = dev_net(skb->dev);
3506 struct xfrm_policy *pol;
3507 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3508 int npols = 0;
3509 int xfrm_nr;
3510 int pi;
3511 int reverse;
3512 struct flowi fl;
3513 int xerr_idx = -1;
3514 const struct xfrm_if_cb *ifcb;
3515 struct sec_path *sp;
3516 struct xfrm_if *xi;
3517 u32 if_id = 0;
3518
3519 rcu_read_lock();
3520 ifcb = xfrm_if_get_cb();
3521
3522 if (ifcb) {
3523 xi = ifcb->decode_session(skb, family);
3524 if (xi) {
3525 if_id = xi->p.if_id;
3526 net = xi->net;
3527 }
3528 }
3529 rcu_read_unlock();
3530
3531 reverse = dir & ~XFRM_POLICY_MASK;
3532 dir &= XFRM_POLICY_MASK;
3533
3534 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3535 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3536 return 0;
3537 }
3538
3539 nf_nat_decode_session(skb, &fl, family);
3540
3541 /* First, check the used SAs against their selectors. */
3542 sp = skb_sec_path(skb);
3543 if (sp) {
3544 int i;
3545
3546 for (i = sp->len - 1; i >= 0; i--) {
3547 struct xfrm_state *x = sp->xvec[i];
3548 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3549 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3550 return 0;
3551 }
3552 }
3553 }
3554
3555 pol = NULL;
3556 sk = sk_to_full_sk(sk);
3557 if (sk && sk->sk_policy[dir]) {
3558 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3559 if (IS_ERR(pol)) {
3560 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3561 return 0;
3562 }
3563 }
3564
3565 if (!pol)
3566 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3567
3568 if (IS_ERR(pol)) {
3569 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3570 return 0;
3571 }
3572
3573 if (!pol) {
3574 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3575 xfrm_secpath_reject(xerr_idx, skb, &fl);
3576 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3577 return 0;
3578 }
3579 return 1;
3580 }
3581
3582 pol->curlft.use_time = ktime_get_real_seconds();
3583
3584 pols[0] = pol;
3585 npols++;
3586 #ifdef CONFIG_XFRM_SUB_POLICY
3587 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3588 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3589 &fl, family,
3590 XFRM_POLICY_IN, if_id);
3591 if (pols[1]) {
3592 if (IS_ERR(pols[1])) {
3593 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3594 return 0;
3595 }
3596 pols[1]->curlft.use_time = ktime_get_real_seconds();
3597 npols++;
3598 }
3599 }
3600 #endif
3601
3602 if (pol->action == XFRM_POLICY_ALLOW) {
3603 static struct sec_path dummy;
3604 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3605 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3606 struct xfrm_tmpl **tpp = tp;
3607 int ti = 0;
3608 int i, k;
3609
3610 sp = skb_sec_path(skb);
3611 if (!sp)
3612 sp = &dummy;
3613
3614 for (pi = 0; pi < npols; pi++) {
3615 if (pols[pi] != pol &&
3616 pols[pi]->action != XFRM_POLICY_ALLOW) {
3617 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3618 goto reject;
3619 }
3620 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3621 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3622 goto reject_error;
3623 }
3624 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3625 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3626 }
3627 xfrm_nr = ti;
3628 if (npols > 1) {
3629 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
3630 tpp = stp;
3631 }
3632
3633 /* For each tunnel xfrm, find the first matching tmpl.
3634 * For each tmpl before that, find the corresponding xfrm.
3635 * Order is _important_. Later we will implement
3636 * some barriers, but at the moment barriers
3637 * are implied between every two transformations.
3638 */
3639 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3640 k = xfrm_policy_ok(tpp[i], sp, k, family);
3641 if (k < 0) {
3642 if (k < -1)
3643 /* "-2 - errored_index" returned */
3644 xerr_idx = -(2+k);
3645 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3646 goto reject;
3647 }
3648 }
3649
3650 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3651 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3652 goto reject;
3653 }
3654
3655 xfrm_pols_put(pols, npols);
3656 return 1;
3657 }
3658 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3659
3660 reject:
3661 xfrm_secpath_reject(xerr_idx, skb, &fl);
3662 reject_error:
3663 xfrm_pols_put(pols, npols);
3664 return 0;
3665 }
3666 EXPORT_SYMBOL(__xfrm_policy_check);
3667
3668 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3669 {
3670 struct net *net = dev_net(skb->dev);
3671 struct flowi fl;
3672 struct dst_entry *dst;
3673 int res = 1;
3674
3675 if (xfrm_decode_session(skb, &fl, family) < 0) {
3676 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3677 return 0;
3678 }
3679
3680 skb_dst_force(skb);
3681 if (!skb_dst(skb)) {
3682 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3683 return 0;
3684 }
3685
3686 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3687 if (IS_ERR(dst)) {
3688 res = 0;
3689 dst = NULL;
3690 }
3691 skb_dst_set(skb, dst);
3692 return res;
3693 }
3694 EXPORT_SYMBOL(__xfrm_route_forward);
3695
3696 /* Optimize later using cookies and generation ids. */
3697
3698 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3699 {
3700 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3701 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3702 * get validated by dst_ops->check on every use. We do this
3703 * because when a normal route referenced by an XFRM dst is
3704 * obsoleted we do not go looking around for all parent
3705 * referencing XFRM dsts so that we can invalidate them. It
3706 * is just too much work. Instead we make the checks here on
3707 * every use. For example:
3708 *
3709 * XFRM dst A --> IPv4 dst X
3710 *
3711 * X is the "xdst->route" of A (X is also the "dst->path" of A
3712 * in this example). If X is marked obsolete, "A" will not
3713 * notice. That's what we are validating here via the
3714 * stale_bundle() check.
3715 *
3716 * When a dst is removed from the fib tree, it will be marked
3717 * DST_OBSOLETE_DEAD.
3718 * This will force stale_bundle() to fail on any xdst bundle with
3719 * this dst linked in it.
3720 */
3721 if (dst->obsolete < 0 && !stale_bundle(dst))
3722 return dst;
3723
3724 return NULL;
3725 }
3726
3727 static int stale_bundle(struct dst_entry *dst)
3728 {
3729 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3730 }
3731
3732 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3733 {
3734 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3735 dst->dev = dev_net(dev)->loopback_dev;
3736 dev_hold(dst->dev);
3737 dev_put(dev);
3738 }
3739 }
3740 EXPORT_SYMBOL(xfrm_dst_ifdown);
3741
3742 static void xfrm_link_failure(struct sk_buff *skb)
3743 {
3744 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3745 }
3746
3747 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3748 {
3749 if (dst) {
3750 if (dst->obsolete) {
3751 dst_release(dst);
3752 dst = NULL;
3753 }
3754 }
3755 return dst;
3756 }
3757
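/* Seed the cached MTUs of a freshly created bundle: walk it from the
 * innermost entry outwards, caching the child and route MTUs and
 * setting each level's RTAX_MTU to the smaller of the state-adjusted
 * child MTU and the route MTU.
 */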
3758 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3759 {
3760 while (nr--) {
3761 struct xfrm_dst *xdst = bundle[nr];
3762 u32 pmtu, route_mtu_cached;
3763 struct dst_entry *dst;
3764
3765 dst = &xdst->u.dst;
3766 pmtu = dst_mtu(xfrm_dst_child(dst));
3767 xdst->child_mtu_cached = pmtu;
3768
3769 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3770
3771 route_mtu_cached = dst_mtu(xdst->route);
3772 xdst->route_mtu_cached = route_mtu_cached;
3773
3774 if (pmtu > route_mtu_cached)
3775 pmtu = route_mtu_cached;
3776
3777 dst_metric_set(dst, RTAX_MTU, pmtu);
3778 }
3779 }
3780
3781 /* Check that the bundle accepts the flow and that its components
3782 * are still valid.
3783 */
3784
3785 static int xfrm_bundle_ok(struct xfrm_dst *first)
3786 {
3787 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3788 struct dst_entry *dst = &first->u.dst;
3789 struct xfrm_dst *xdst;
3790 int start_from, nr;
3791 u32 mtu;
3792
3793 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3794 (dst->dev && !netif_running(dst->dev)))
3795 return 0;
3796
3797 if (dst->flags & DST_XFRM_QUEUE)
3798 return 1;
3799
3800 start_from = nr = 0;
3801 do {
3802 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3803
3804 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3805 return 0;
3806 if (xdst->xfrm_genid != dst->xfrm->genid)
3807 return 0;
3808 if (xdst->num_pols > 0 &&
3809 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3810 return 0;
3811
3812 bundle[nr++] = xdst;
3813
3814 mtu = dst_mtu(xfrm_dst_child(dst));
3815 if (xdst->child_mtu_cached != mtu) {
3816 start_from = nr;
3817 xdst->child_mtu_cached = mtu;
3818 }
3819
3820 if (!dst_check(xdst->route, xdst->route_cookie))
3821 return 0;
3822 mtu = dst_mtu(xdst->route);
3823 if (xdst->route_mtu_cached != mtu) {
3824 start_from = nr;
3825 xdst->route_mtu_cached = mtu;
3826 }
3827
3828 dst = xfrm_dst_child(dst);
3829 } while (dst->xfrm);
3830
3831 if (likely(!start_from))
3832 return 1;
3833
3834 xdst = bundle[start_from - 1];
3835 mtu = xdst->child_mtu_cached;
3836 while (start_from--) {
3837 dst = &xdst->u.dst;
3838
3839 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3840 if (mtu > xdst->route_mtu_cached)
3841 mtu = xdst->route_mtu_cached;
3842 dst_metric_set(dst, RTAX_MTU, mtu);
3843 if (!start_from)
3844 break;
3845
3846 xdst = bundle[start_from - 1];
3847 xdst->child_mtu_cached = mtu;
3848 }
3849
3850 return 1;
3851 }
3852
3853 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3854 {
3855 return dst_metric_advmss(xfrm_dst_path(dst));
3856 }
3857
3858 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3859 {
3860 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3861
3862 return mtu ? : dst_mtu(xfrm_dst_path(dst));
3863 }
3864
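/* Walk down the bundle to find the address neighbour resolution should
 * target: transport-mode states keep the current daddr, while tunnel
 * states substitute their endpoint (or the care-of address when the
 * type has XFRM_TYPE_REMOTE_COADDR set).
 */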
3865 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3866 const void *daddr)
3867 {
3868 while (dst->xfrm) {
3869 const struct xfrm_state *xfrm = dst->xfrm;
3870
3871 dst = xfrm_dst_child(dst);
3872
3873 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3874 continue;
3875 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3876 daddr = xfrm->coaddr;
3877 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3878 daddr = &xfrm->id.daddr;
3879 }
3880 return daddr;
3881 }
3882
3883 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3884 struct sk_buff *skb,
3885 const void *daddr)
3886 {
3887 const struct dst_entry *path = xfrm_dst_path(dst);
3888
3889 if (!skb)
3890 daddr = xfrm_get_dst_nexthop(dst, daddr);
3891 return path->ops->neigh_lookup(path, skb, daddr);
3892 }
3893
3894 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3895 {
3896 const struct dst_entry *path = xfrm_dst_path(dst);
3897
3898 daddr = xfrm_get_dst_nexthop(dst, daddr);
3899 path->ops->confirm_neigh(path, daddr);
3900 }
3901
3902 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3903 {
3904 int err = 0;
3905
3906 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3907 return -EAFNOSUPPORT;
3908
3909 spin_lock(&xfrm_policy_afinfo_lock);
3910 if (unlikely(xfrm_policy_afinfo[family] != NULL))
3911 err = -EEXIST;
3912 else {
3913 struct dst_ops *dst_ops = afinfo->dst_ops;
3914 if (likely(dst_ops->kmem_cachep == NULL))
3915 dst_ops->kmem_cachep = xfrm_dst_cache;
3916 if (likely(dst_ops->check == NULL))
3917 dst_ops->check = xfrm_dst_check;
3918 if (likely(dst_ops->default_advmss == NULL))
3919 dst_ops->default_advmss = xfrm_default_advmss;
3920 if (likely(dst_ops->mtu == NULL))
3921 dst_ops->mtu = xfrm_mtu;
3922 if (likely(dst_ops->negative_advice == NULL))
3923 dst_ops->negative_advice = xfrm_negative_advice;
3924 if (likely(dst_ops->link_failure == NULL))
3925 dst_ops->link_failure = xfrm_link_failure;
3926 if (likely(dst_ops->neigh_lookup == NULL))
3927 dst_ops->neigh_lookup = xfrm_neigh_lookup;
3928 if (likely(!dst_ops->confirm_neigh))
3929 dst_ops->confirm_neigh = xfrm_confirm_neigh;
3930 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3931 }
3932 spin_unlock(&xfrm_policy_afinfo_lock);
3933
3934 return err;
3935 }
3936 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3937
3938 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3939 {
3940 struct dst_ops *dst_ops = afinfo->dst_ops;
3941 int i;
3942
3943 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3944 if (xfrm_policy_afinfo[i] != afinfo)
3945 continue;
3946 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3947 break;
3948 }
3949
3950 synchronize_rcu();
3951
3952 dst_ops->kmem_cachep = NULL;
3953 dst_ops->check = NULL;
3954 dst_ops->negative_advice = NULL;
3955 dst_ops->link_failure = NULL;
3956 }
3957 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3958
3959 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3960 {
3961 spin_lock(&xfrm_if_cb_lock);
3962 rcu_assign_pointer(xfrm_if_cb, ifcb);
3963 spin_unlock(&xfrm_if_cb_lock);
3964 }
3965 EXPORT_SYMBOL(xfrm_if_register_cb);
3966
3967 void xfrm_if_unregister_cb(void)
3968 {
3969 RCU_INIT_POINTER(xfrm_if_cb, NULL);
3970 synchronize_rcu();
3971 }
3972 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3973
3974 #ifdef CONFIG_XFRM_STATISTICS
3975 static int __net_init xfrm_statistics_init(struct net *net)
3976 {
3977 int rv;
3978 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3979 if (!net->mib.xfrm_statistics)
3980 return -ENOMEM;
3981 rv = xfrm_proc_init(net);
3982 if (rv < 0)
3983 free_percpu(net->mib.xfrm_statistics);
3984 return rv;
3985 }
3986
3987 static void xfrm_statistics_fini(struct net *net)
3988 {
3989 xfrm_proc_fini(net);
3990 free_percpu(net->mib.xfrm_statistics);
3991 }
3992 #else
3993 static int __net_init xfrm_statistics_init(struct net *net)
3994 {
3995 return 0;
3996 }
3997
3998 static void xfrm_statistics_fini(struct net *net)
3999 {
4000 }
4001 #endif
4002
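/* Per-netns policy setup. The bydst and byidx hash tables start out
 * with eight buckets each; the hash resize worker grows them later as
 * policies are inserted. The dst kmem cache and the inexact-bin
 * rhashtable are global, so they are set up for the initial namespace
 * only.
 */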
4003 static int __net_init xfrm_policy_init(struct net *net)
4004 {
4005 unsigned int hmask, sz;
4006 int dir, err;
4007
4008 if (net_eq(net, &init_net)) {
4009 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4010 sizeof(struct xfrm_dst),
4011 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4012 NULL);
4013 err = rhashtable_init(&xfrm_policy_inexact_table,
4014 &xfrm_pol_inexact_params);
4015 BUG_ON(err);
4016 }
4017
4018 hmask = 8 - 1;
4019 sz = (hmask+1) * sizeof(struct hlist_head);
4020
4021 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4022 if (!net->xfrm.policy_byidx)
4023 goto out_byidx;
4024 net->xfrm.policy_idx_hmask = hmask;
4025
4026 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4027 struct xfrm_policy_hash *htab;
4028
4029 net->xfrm.policy_count[dir] = 0;
4030 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4031 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4032
4033 htab = &net->xfrm.policy_bydst[dir];
4034 htab->table = xfrm_hash_alloc(sz);
4035 if (!htab->table)
4036 goto out_bydst;
4037 htab->hmask = hmask;
4038 htab->dbits4 = 32;
4039 htab->sbits4 = 32;
4040 htab->dbits6 = 128;
4041 htab->sbits6 = 128;
4042 }
4043 net->xfrm.policy_hthresh.lbits4 = 32;
4044 net->xfrm.policy_hthresh.rbits4 = 32;
4045 net->xfrm.policy_hthresh.lbits6 = 128;
4046 net->xfrm.policy_hthresh.rbits6 = 128;
4047
4048 seqlock_init(&net->xfrm.policy_hthresh.lock);
4049
4050 INIT_LIST_HEAD(&net->xfrm.policy_all);
4051 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4052 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4053 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4054 return 0;
4055
4056 out_bydst:
4057 for (dir--; dir >= 0; dir--) {
4058 struct xfrm_policy_hash *htab;
4059
4060 htab = &net->xfrm.policy_bydst[dir];
4061 xfrm_hash_free(htab->table, sz);
4062 }
4063 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4064 out_byidx:
4065 return -ENOMEM;
4066 }
4067
4068 static void xfrm_policy_fini(struct net *net)
4069 {
4070 struct xfrm_pol_inexact_bin *b, *t;
4071 unsigned int sz;
4072 int dir;
4073
4074 flush_work(&net->xfrm.policy_hash_work);
4075 #ifdef CONFIG_XFRM_SUB_POLICY
4076 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4077 #endif
4078 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4079
4080 WARN_ON(!list_empty(&net->xfrm.policy_all));
4081
4082 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4083 struct xfrm_policy_hash *htab;
4084
4085 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4086
4087 htab = &net->xfrm.policy_bydst[dir];
4088 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4089 WARN_ON(!hlist_empty(htab->table));
4090 xfrm_hash_free(htab->table, sz);
4091 }
4092
4093 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4094 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4095 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4096
4097 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4098 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4099 __xfrm_policy_inexact_prune_bin(b, true);
4100 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4101 }
4102
4103 static int __net_init xfrm_net_init(struct net *net)
4104 {
4105 int rv;
4106
4107 /* Initialize the per-net locks here */
4108 spin_lock_init(&net->xfrm.xfrm_state_lock);
4109 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4110 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4111
4112 rv = xfrm_statistics_init(net);
4113 if (rv < 0)
4114 goto out_statistics;
4115 rv = xfrm_state_init(net);
4116 if (rv < 0)
4117 goto out_state;
4118 rv = xfrm_policy_init(net);
4119 if (rv < 0)
4120 goto out_policy;
4121 rv = xfrm_sysctl_init(net);
4122 if (rv < 0)
4123 goto out_sysctl;
4124
4125 return 0;
4126
4127 out_sysctl:
4128 xfrm_policy_fini(net);
4129 out_policy:
4130 xfrm_state_fini(net);
4131 out_state:
4132 xfrm_statistics_fini(net);
4133 out_statistics:
4134 return rv;
4135 }
4136
4137 static void __net_exit xfrm_net_exit(struct net *net)
4138 {
4139 xfrm_sysctl_fini(net);
4140 xfrm_policy_fini(net);
4141 xfrm_state_fini(net);
4142 xfrm_statistics_fini(net);
4143 }
4144
4145 static struct pernet_operations __net_initdata xfrm_net_ops = {
4146 .init = xfrm_net_init,
4147 .exit = xfrm_net_exit,
4148 };
4149
4150 void __init xfrm_init(void)
4151 {
4152 register_pernet_subsys(&xfrm_net_ops);
4153 xfrm_dev_init();
4154 seqcount_init(&xfrm_policy_hash_generation);
4155 xfrm_input_init();
4156
4157 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4158 synchronize_rcu();
4159 }
4160
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

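/*
 * Emit an audit record for an SPD insertion.  Judging from the format
 * strings above, a successful add might produce a record roughly like
 * (illustrative only; the surrounding fields come from
 * xfrm_audit_start() and the usrinfo helper):
 *
 *	op=SPD-add ... res=1 src=10.0.0.0 src_prefixlen=8 dst=10.1.0.1
 */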
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (!audit_buf)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (!audit_buf)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

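/*
 * XFRM_MIGRATE (used e.g. by Mobile IPv6 / MOBIKE key managers) moves
 * existing states and policies to new endpoint addresses without a
 * full renegotiation.  A selector comparison matches either exactly
 * (memcmp) or, when the comparison selector carries IPSEC_ULPROTO_ANY,
 * on family, addresses and prefix lengths alone.
 */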
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s)
			return true;
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0)
			return true;
	}
	return false;
}

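/*
 * Locate the policy a migration applies to: first probe the exact
 * bydst hash chain, then fall back to the inexact list, preferring
 * whichever match has the numerically lowest (i.e. strongest)
 * priority.  The returned policy is refcounted via xfrm_pol_hold().
 */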
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

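/*
 * A template matches a migration entry when mode and protocol agree
 * and the reqid matches (a reqid of 0 in the request acts as a
 * wildcard).  Tunnel and BEET templates must additionally match the
 * old endpoint addresses; transport mode templates carry no addresses.
 */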
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

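/*
 * Runs under pol->lock; returns -ENOENT if the target policy has
 * already been deleted and -ENODATA if no template matched.  Bumping
 * pol->genid below forces cached bundles to be rebuilt with the new
 * endpoints.
 */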
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

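/*
 * Validate a migration request: between 1 and XFRM_MAX_DEPTH entries,
 * no wildcard (all-zero) new addresses, and no two entries naming the
 * same old flow (addresses, proto, mode, reqid and family).
 */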
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

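/*
 * Entry point for a migration request, reached via the key manager
 * interfaces.  The stages below mirror the comments in the body:
 * validate the request, find the affected policy, migrate each
 * matching state, update the policy templates, delete the old states
 * and finally announce the result through km_migrate().  On failure
 * after states were already cloned, restore_state undoes the partial
 * work.
 */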
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	err = xfrm_migrate_check(m, num_migrate);
	if (err < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	pol = xfrm_migrate_policy_find(sel, dir, type, net);
	if (!pol) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		x = xfrm_migrate_state_find(mp, net);
		if (x) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	err = xfrm_policy_migrate(pol, m, num_migrate);
	if (err < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif