// SPDX-License-Identifier: GPL-2.0-only
/*
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *	Kazunori MIYAZAWA @USAGI
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/inet_ecn.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100
	struct dst_entry *dst_orig;

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};
/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |        |
 * |                 |  xfrm_pol_inexact_node
 * |                 |        |
 * |                 |        + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
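/* Illustrative sketch (added for clarity, not part of the original source):
 * assume four inbound policies whose selectors are
 *   A: 10.1.2.0/24 -> 192.168.1.0/24
 *   B: any         -> 192.168.1.0/24
 *   C: 10.1.2.0/24 -> any
 *   D: any         -> any
 * A lookup for a flow 10.1.2.3 -> 192.168.1.9 visits one bin and collects
 * A from the saddr:daddr list, B from the daddr tree node, C from the
 * saddr tree node and D from the bin's own hhead, then scans those four
 * candidate lists for the entry with the lowest priority value.
 */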
struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}
static bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
static bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}
static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}
692 /* Make sure *pol can be inserted into fastbin.
693 * Useful to check that later insert requests will be successful
694 * (provided xfrm_policy_lock is held throughout).
696 static struct xfrm_pol_inexact_bin
*
697 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy
*pol
, u8 dir
)
699 struct xfrm_pol_inexact_bin
*bin
, *prev
;
700 struct xfrm_pol_inexact_key k
= {
701 .family
= pol
->family
,
706 struct net
*net
= xp_net(pol
);
708 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
710 write_pnet(&k
.net
, net
);
711 bin
= rhashtable_lookup_fast(&xfrm_policy_inexact_table
, &k
,
712 xfrm_pol_inexact_params
);
716 bin
= kzalloc(sizeof(*bin
), GFP_ATOMIC
);
721 INIT_HLIST_HEAD(&bin
->hhead
);
722 bin
->root_d
= RB_ROOT
;
723 bin
->root_s
= RB_ROOT
;
724 seqcount_spinlock_init(&bin
->count
, &net
->xfrm
.xfrm_policy_lock
);
726 prev
= rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table
,
728 xfrm_pol_inexact_params
);
730 list_add(&bin
->inexact_bins
, &net
->xfrm
.inexact_bins
);
736 return IS_ERR(prev
) ? NULL
: prev
;
739 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t
*addr
,
740 int family
, u8 prefixlen
)
742 if (xfrm_addr_any(addr
, family
))
745 if (family
== AF_INET6
&& prefixlen
< INEXACT_PREFIXLEN_IPV6
)
748 if (family
== AF_INET
&& prefixlen
< INEXACT_PREFIXLEN_IPV4
)
755 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy
*policy
)
757 const xfrm_address_t
*addr
;
758 bool saddr_any
, daddr_any
;
761 addr
= &policy
->selector
.saddr
;
762 prefixlen
= policy
->selector
.prefixlen_s
;
764 saddr_any
= xfrm_pol_inexact_addr_use_any_list(addr
,
767 addr
= &policy
->selector
.daddr
;
768 prefixlen
= policy
->selector
.prefixlen_d
;
769 daddr_any
= xfrm_pol_inexact_addr_use_any_list(addr
,
772 return saddr_any
&& daddr_any
;
775 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node
*node
,
776 const xfrm_address_t
*addr
, u8 prefixlen
)
779 node
->prefixlen
= prefixlen
;
782 static struct xfrm_pol_inexact_node
*
783 xfrm_pol_inexact_node_alloc(const xfrm_address_t
*addr
, u8 prefixlen
)
785 struct xfrm_pol_inexact_node
*node
;
787 node
= kzalloc(sizeof(*node
), GFP_ATOMIC
);
789 xfrm_pol_inexact_node_init(node
, addr
, prefixlen
);
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			u32 mask = ~0U << (32 - pbi);

			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
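/* Worked example (added for clarity, not part of the original source):
 * for AF_INET with prefixlen 24, a = 10.1.2.3 and b = 10.1.2.200 the mask is
 * 0xffffff00, so both addresses reduce to 0x0a010200 and the delta is 0
 * (same subtree); against b = 10.1.3.1 the masked value is 0x0a010300, the
 * comparison yields -1 and the rbtree walk descends to the left child.
 */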
840 static void xfrm_policy_inexact_list_reinsert(struct net
*net
,
841 struct xfrm_pol_inexact_node
*n
,
844 unsigned int matched_s
, matched_d
;
845 struct xfrm_policy
*policy
, *p
;
850 list_for_each_entry_reverse(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
851 struct hlist_node
*newpos
= NULL
;
852 bool matches_s
, matches_d
;
854 if (!policy
->bydst_reinsert
)
857 WARN_ON_ONCE(policy
->family
!= family
);
859 policy
->bydst_reinsert
= false;
860 hlist_for_each_entry(p
, &n
->hhead
, bydst
) {
861 if (policy
->priority
> p
->priority
)
863 else if (policy
->priority
== p
->priority
&&
864 policy
->pos
> p
->pos
)
870 if (newpos
&& policy
->xdo
.type
!= XFRM_DEV_OFFLOAD_PACKET
)
871 hlist_add_behind_rcu(&policy
->bydst
, newpos
);
873 hlist_add_head_rcu(&policy
->bydst
, &n
->hhead
);
875 /* paranoia checks follow.
876 * Check that the reinserted policy matches at least
877 * saddr or daddr for current node prefix.
879 * Matching both is fine, matching saddr in one policy
880 * (but not daddr) and then matching only daddr in another
883 matches_s
= xfrm_policy_addr_delta(&policy
->selector
.saddr
,
887 matches_d
= xfrm_policy_addr_delta(&policy
->selector
.daddr
,
891 if (matches_s
&& matches_d
)
894 WARN_ON_ONCE(!matches_s
&& !matches_d
);
899 WARN_ON_ONCE(matched_s
&& matched_d
);
903 static void xfrm_policy_inexact_node_reinsert(struct net
*net
,
904 struct xfrm_pol_inexact_node
*n
,
908 struct xfrm_pol_inexact_node
*node
;
909 struct rb_node
**p
, *parent
;
911 /* we should not have another subtree here */
912 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n
->root
));
921 node
= rb_entry(*p
, struct xfrm_pol_inexact_node
, node
);
923 prefixlen
= min(node
->prefixlen
, n
->prefixlen
);
925 delta
= xfrm_policy_addr_delta(&n
->addr
, &node
->addr
,
928 p
= &parent
->rb_left
;
929 } else if (delta
> 0) {
930 p
= &parent
->rb_right
;
932 bool same_prefixlen
= node
->prefixlen
== n
->prefixlen
;
933 struct xfrm_policy
*tmp
;
935 hlist_for_each_entry(tmp
, &n
->hhead
, bydst
) {
936 tmp
->bydst_reinsert
= true;
937 hlist_del_rcu(&tmp
->bydst
);
940 node
->prefixlen
= prefixlen
;
942 xfrm_policy_inexact_list_reinsert(net
, node
, family
);
944 if (same_prefixlen
) {
956 rb_link_node_rcu(&n
->node
, parent
, p
);
957 rb_insert_color(&n
->node
, new);
960 /* merge nodes v and n */
961 static void xfrm_policy_inexact_node_merge(struct net
*net
,
962 struct xfrm_pol_inexact_node
*v
,
963 struct xfrm_pol_inexact_node
*n
,
966 struct xfrm_pol_inexact_node
*node
;
967 struct xfrm_policy
*tmp
;
968 struct rb_node
*rnode
;
970 /* To-be-merged node v has a subtree.
972 * Dismantle it and insert its nodes to n->root.
974 while ((rnode
= rb_first(&v
->root
)) != NULL
) {
975 node
= rb_entry(rnode
, struct xfrm_pol_inexact_node
, node
);
976 rb_erase(&node
->node
, &v
->root
);
977 xfrm_policy_inexact_node_reinsert(net
, node
, &n
->root
,
981 hlist_for_each_entry(tmp
, &v
->hhead
, bydst
) {
982 tmp
->bydst_reinsert
= true;
983 hlist_del_rcu(&tmp
->bydst
);
986 xfrm_policy_inexact_list_reinsert(net
, n
, family
);
989 static struct xfrm_pol_inexact_node
*
990 xfrm_policy_inexact_insert_node(struct net
*net
,
991 struct rb_root
*root
,
992 xfrm_address_t
*addr
,
993 u16 family
, u8 prefixlen
, u8 dir
)
995 struct xfrm_pol_inexact_node
*cached
= NULL
;
996 struct rb_node
**p
, *parent
= NULL
;
997 struct xfrm_pol_inexact_node
*node
;
1004 node
= rb_entry(*p
, struct xfrm_pol_inexact_node
, node
);
1006 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
1009 if (delta
== 0 && prefixlen
>= node
->prefixlen
) {
1010 WARN_ON_ONCE(cached
); /* ipsec policies got lost */
1015 p
= &parent
->rb_left
;
1017 p
= &parent
->rb_right
;
1019 if (prefixlen
< node
->prefixlen
) {
1020 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
1026 /* This node is a subnet of the new prefix. It needs
1027 * to be removed and re-inserted with the smaller
1028 * prefix and all nodes that are now also covered
1029 * by the reduced prefixlen.
1031 rb_erase(&node
->node
, root
);
1034 xfrm_pol_inexact_node_init(node
, addr
,
1038 /* This node also falls within the new
1039 * prefixlen. Merge the to-be-reinserted
1040 * node and this one.
1042 xfrm_policy_inexact_node_merge(net
, node
,
1044 kfree_rcu(node
, rcu
);
1055 node
= xfrm_pol_inexact_node_alloc(addr
, prefixlen
);
1060 rb_link_node_rcu(&node
->node
, parent
, p
);
1061 rb_insert_color(&node
->node
, root
);
1066 static void xfrm_policy_inexact_gc_tree(struct rb_root
*r
, bool rm
)
1068 struct xfrm_pol_inexact_node
*node
;
1069 struct rb_node
*rn
= rb_first(r
);
1072 node
= rb_entry(rn
, struct xfrm_pol_inexact_node
, node
);
1074 xfrm_policy_inexact_gc_tree(&node
->root
, rm
);
1077 if (!hlist_empty(&node
->hhead
) || !RB_EMPTY_ROOT(&node
->root
)) {
1082 rb_erase(&node
->node
, r
);
1083 kfree_rcu(node
, rcu
);
1087 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin
*b
, bool net_exit
)
1089 write_seqcount_begin(&b
->count
);
1090 xfrm_policy_inexact_gc_tree(&b
->root_d
, net_exit
);
1091 xfrm_policy_inexact_gc_tree(&b
->root_s
, net_exit
);
1092 write_seqcount_end(&b
->count
);
1094 if (!RB_EMPTY_ROOT(&b
->root_d
) || !RB_EMPTY_ROOT(&b
->root_s
) ||
1095 !hlist_empty(&b
->hhead
)) {
1096 WARN_ON_ONCE(net_exit
);
1100 if (rhashtable_remove_fast(&xfrm_policy_inexact_table
, &b
->head
,
1101 xfrm_pol_inexact_params
) == 0) {
1102 list_del(&b
->inexact_bins
);
1107 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin
*b
)
1109 struct net
*net
= read_pnet(&b
->k
.net
);
1111 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1112 __xfrm_policy_inexact_prune_bin(b
, false);
1113 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1116 static void __xfrm_policy_inexact_flush(struct net
*net
)
1118 struct xfrm_pol_inexact_bin
*bin
, *t
;
1120 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1122 list_for_each_entry_safe(bin
, t
, &net
->xfrm
.inexact_bins
, inexact_bins
)
1123 __xfrm_policy_inexact_prune_bin(bin
, false);
1126 static struct hlist_head
*
1127 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin
*bin
,
1128 struct xfrm_policy
*policy
, u8 dir
)
1130 struct xfrm_pol_inexact_node
*n
;
1133 net
= xp_net(policy
);
1134 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1136 if (xfrm_policy_inexact_insert_use_any_list(policy
))
1139 if (xfrm_pol_inexact_addr_use_any_list(&policy
->selector
.daddr
,
1141 policy
->selector
.prefixlen_d
)) {
1142 write_seqcount_begin(&bin
->count
);
1143 n
= xfrm_policy_inexact_insert_node(net
,
1145 &policy
->selector
.saddr
,
1147 policy
->selector
.prefixlen_s
,
1149 write_seqcount_end(&bin
->count
);
1156 /* daddr is fixed */
1157 write_seqcount_begin(&bin
->count
);
1158 n
= xfrm_policy_inexact_insert_node(net
,
1160 &policy
->selector
.daddr
,
1162 policy
->selector
.prefixlen_d
, dir
);
1163 write_seqcount_end(&bin
->count
);
1167 /* saddr is wildcard */
1168 if (xfrm_pol_inexact_addr_use_any_list(&policy
->selector
.saddr
,
1170 policy
->selector
.prefixlen_s
))
1173 write_seqcount_begin(&bin
->count
);
1174 n
= xfrm_policy_inexact_insert_node(net
,
1176 &policy
->selector
.saddr
,
1178 policy
->selector
.prefixlen_s
, dir
);
1179 write_seqcount_end(&bin
->count
);
1186 static struct xfrm_policy
*
1187 xfrm_policy_inexact_insert(struct xfrm_policy
*policy
, u8 dir
, int excl
)
1189 struct xfrm_pol_inexact_bin
*bin
;
1190 struct xfrm_policy
*delpol
;
1191 struct hlist_head
*chain
;
1194 bin
= xfrm_policy_inexact_alloc_bin(policy
, dir
);
1196 return ERR_PTR(-ENOMEM
);
1198 net
= xp_net(policy
);
1199 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
1201 chain
= xfrm_policy_inexact_alloc_chain(bin
, policy
, dir
);
1203 __xfrm_policy_inexact_prune_bin(bin
, false);
1204 return ERR_PTR(-ENOMEM
);
1207 delpol
= xfrm_policy_insert_list(chain
, policy
, excl
);
1208 if (delpol
&& excl
) {
1209 __xfrm_policy_inexact_prune_bin(bin
, false);
1210 return ERR_PTR(-EEXIST
);
1213 chain
= &net
->xfrm
.policy_inexact
[dir
];
1214 xfrm_policy_insert_inexact_list(chain
, policy
);
1217 __xfrm_policy_inexact_prune_bin(bin
, false);
1222 static void xfrm_hash_rebuild(struct work_struct
*work
)
1224 struct net
*net
= container_of(work
, struct net
,
1225 xfrm
.policy_hthresh
.work
);
1227 struct xfrm_policy
*pol
;
1228 struct xfrm_policy
*policy
;
1229 struct hlist_head
*chain
;
1230 struct hlist_head
*odst
;
1231 struct hlist_node
*newpos
;
1235 u8 lbits4
, rbits4
, lbits6
, rbits6
;
1237 mutex_lock(&hash_resize_mutex
);
1239 /* read selector prefixlen thresholds */
1241 seq
= read_seqbegin(&net
->xfrm
.policy_hthresh
.lock
);
1243 lbits4
= net
->xfrm
.policy_hthresh
.lbits4
;
1244 rbits4
= net
->xfrm
.policy_hthresh
.rbits4
;
1245 lbits6
= net
->xfrm
.policy_hthresh
.lbits6
;
1246 rbits6
= net
->xfrm
.policy_hthresh
.rbits6
;
1247 } while (read_seqretry(&net
->xfrm
.policy_hthresh
.lock
, seq
));
1249 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1250 write_seqcount_begin(&net
->xfrm
.xfrm_policy_hash_generation
);
1252 /* make sure that we can insert the indirect policies again before
1253 * we start with destructive action.
1255 list_for_each_entry(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
1256 struct xfrm_pol_inexact_bin
*bin
;
1259 dir
= xfrm_policy_id2dir(policy
->index
);
1260 if (policy
->walk
.dead
|| dir
>= XFRM_POLICY_MAX
)
1263 if ((dir
& XFRM_POLICY_MASK
) == XFRM_POLICY_OUT
) {
1264 if (policy
->family
== AF_INET
) {
1272 if (policy
->family
== AF_INET
) {
1281 if (policy
->selector
.prefixlen_d
< dbits
||
1282 policy
->selector
.prefixlen_s
< sbits
)
1285 bin
= xfrm_policy_inexact_alloc_bin(policy
, dir
);
1289 if (!xfrm_policy_inexact_alloc_chain(bin
, policy
, dir
))
1293 /* reset the bydst and inexact table in all directions */
1294 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
1295 struct hlist_node
*n
;
1297 hlist_for_each_entry_safe(policy
, n
,
1298 &net
->xfrm
.policy_inexact
[dir
],
1299 bydst_inexact_list
) {
1300 hlist_del_rcu(&policy
->bydst
);
1301 hlist_del_init(&policy
->bydst_inexact_list
);
1304 hmask
= net
->xfrm
.policy_bydst
[dir
].hmask
;
1305 odst
= net
->xfrm
.policy_bydst
[dir
].table
;
1306 for (i
= hmask
; i
>= 0; i
--) {
1307 hlist_for_each_entry_safe(policy
, n
, odst
+ i
, bydst
)
1308 hlist_del_rcu(&policy
->bydst
);
1310 if ((dir
& XFRM_POLICY_MASK
) == XFRM_POLICY_OUT
) {
1311 /* dir out => dst = remote, src = local */
1312 net
->xfrm
.policy_bydst
[dir
].dbits4
= rbits4
;
1313 net
->xfrm
.policy_bydst
[dir
].sbits4
= lbits4
;
1314 net
->xfrm
.policy_bydst
[dir
].dbits6
= rbits6
;
1315 net
->xfrm
.policy_bydst
[dir
].sbits6
= lbits6
;
1317 /* dir in/fwd => dst = local, src = remote */
1318 net
->xfrm
.policy_bydst
[dir
].dbits4
= lbits4
;
1319 net
->xfrm
.policy_bydst
[dir
].sbits4
= rbits4
;
1320 net
->xfrm
.policy_bydst
[dir
].dbits6
= lbits6
;
1321 net
->xfrm
.policy_bydst
[dir
].sbits6
= rbits6
;
1325 /* re-insert all policies by order of creation */
1326 list_for_each_entry_reverse(policy
, &net
->xfrm
.policy_all
, walk
.all
) {
1327 if (policy
->walk
.dead
)
1329 dir
= xfrm_policy_id2dir(policy
->index
);
1330 if (dir
>= XFRM_POLICY_MAX
) {
1331 /* skip socket policies */
1335 chain
= policy_hash_bysel(net
, &policy
->selector
,
1336 policy
->family
, dir
);
1339 void *p
= xfrm_policy_inexact_insert(policy
, dir
, 0);
1341 WARN_ONCE(IS_ERR(p
), "reinsert: %ld\n", PTR_ERR(p
));
1345 hlist_for_each_entry(pol
, chain
, bydst
) {
1346 if (policy
->priority
>= pol
->priority
)
1347 newpos
= &pol
->bydst
;
1351 if (newpos
&& policy
->xdo
.type
!= XFRM_DEV_OFFLOAD_PACKET
)
1352 hlist_add_behind_rcu(&policy
->bydst
, newpos
);
1354 hlist_add_head_rcu(&policy
->bydst
, chain
);
1358 __xfrm_policy_inexact_flush(net
);
1359 write_seqcount_end(&net
->xfrm
.xfrm_policy_hash_generation
);
1360 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1362 mutex_unlock(&hash_resize_mutex
);
1365 void xfrm_policy_hash_rebuild(struct net
*net
)
1367 schedule_work(&net
->xfrm
.policy_hthresh
.work
);
1369 EXPORT_SYMBOL(xfrm_policy_hash_rebuild
);
1371 /* Generate new index... KAME seems to generate them ordered by cost
1372 * of an absolute inpredictability of ordering of rules. This will not pass. */
1373 static u32
xfrm_gen_index(struct net
*net
, int dir
, u32 index
)
1375 static u32 idx_generator
;
1378 struct hlist_head
*list
;
1379 struct xfrm_policy
*p
;
1384 idx
= (idx_generator
| dir
);
1393 list
= net
->xfrm
.policy_byidx
+ idx_hash(net
, idx
);
1395 hlist_for_each_entry(p
, list
, byidx
) {
1396 if (p
->index
== idx
) {
1406 static inline int selector_cmp(struct xfrm_selector
*s1
, struct xfrm_selector
*s2
)
1408 u32
*p1
= (u32
*) s1
;
1409 u32
*p2
= (u32
*) s2
;
1410 int len
= sizeof(struct xfrm_selector
) / sizeof(u32
);
1413 for (i
= 0; i
< len
; i
++) {
1421 static void xfrm_policy_requeue(struct xfrm_policy
*old
,
1422 struct xfrm_policy
*new)
1424 struct xfrm_policy_queue
*pq
= &old
->polq
;
1425 struct sk_buff_head list
;
1427 if (skb_queue_empty(&pq
->hold_queue
))
1430 __skb_queue_head_init(&list
);
1432 spin_lock_bh(&pq
->hold_queue
.lock
);
1433 skb_queue_splice_init(&pq
->hold_queue
, &list
);
1434 if (del_timer(&pq
->hold_timer
))
1436 spin_unlock_bh(&pq
->hold_queue
.lock
);
1440 spin_lock_bh(&pq
->hold_queue
.lock
);
1441 skb_queue_splice(&list
, &pq
->hold_queue
);
1442 pq
->timeout
= XFRM_QUEUE_TMO_MIN
;
1443 if (!mod_timer(&pq
->hold_timer
, jiffies
))
1445 spin_unlock_bh(&pq
->hold_queue
.lock
);
1448 static inline bool xfrm_policy_mark_match(const struct xfrm_mark
*mark
,
1449 struct xfrm_policy
*pol
)
1451 return mark
->v
== pol
->mark
.v
&& mark
->m
== pol
->mark
.m
;
1454 static u32
xfrm_pol_bin_key(const void *data
, u32 len
, u32 seed
)
1456 const struct xfrm_pol_inexact_key
*k
= data
;
1457 u32 a
= k
->type
<< 24 | k
->dir
<< 16 | k
->family
;
1459 return jhash_3words(a
, k
->if_id
, net_hash_mix(read_pnet(&k
->net
)),
1463 static u32
xfrm_pol_bin_obj(const void *data
, u32 len
, u32 seed
)
1465 const struct xfrm_pol_inexact_bin
*b
= data
;
1467 return xfrm_pol_bin_key(&b
->k
, 0, seed
);
1470 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg
*arg
,
1473 const struct xfrm_pol_inexact_key
*key
= arg
->key
;
1474 const struct xfrm_pol_inexact_bin
*b
= ptr
;
1477 if (!net_eq(read_pnet(&b
->k
.net
), read_pnet(&key
->net
)))
1480 ret
= b
->k
.dir
^ key
->dir
;
1484 ret
= b
->k
.type
^ key
->type
;
1488 ret
= b
->k
.family
^ key
->family
;
1492 return b
->k
.if_id
^ key
->if_id
;
1495 static const struct rhashtable_params xfrm_pol_inexact_params
= {
1496 .head_offset
= offsetof(struct xfrm_pol_inexact_bin
, head
),
1497 .hashfn
= xfrm_pol_bin_key
,
1498 .obj_hashfn
= xfrm_pol_bin_obj
,
1499 .obj_cmpfn
= xfrm_pol_bin_cmp
,
1500 .automatic_shrinking
= true,
1503 static void xfrm_policy_insert_inexact_list(struct hlist_head
*chain
,
1504 struct xfrm_policy
*policy
)
1506 struct xfrm_policy
*pol
, *delpol
= NULL
;
1507 struct hlist_node
*newpos
= NULL
;
1510 hlist_for_each_entry(pol
, chain
, bydst_inexact_list
) {
1511 if (pol
->type
== policy
->type
&&
1512 pol
->if_id
== policy
->if_id
&&
1513 !selector_cmp(&pol
->selector
, &policy
->selector
) &&
1514 xfrm_policy_mark_match(&policy
->mark
, pol
) &&
1515 xfrm_sec_ctx_match(pol
->security
, policy
->security
) &&
1518 if (policy
->priority
> pol
->priority
)
1520 } else if (policy
->priority
>= pol
->priority
) {
1521 newpos
= &pol
->bydst_inexact_list
;
1528 if (newpos
&& policy
->xdo
.type
!= XFRM_DEV_OFFLOAD_PACKET
)
1529 hlist_add_behind_rcu(&policy
->bydst_inexact_list
, newpos
);
1531 hlist_add_head_rcu(&policy
->bydst_inexact_list
, chain
);
1533 hlist_for_each_entry(pol
, chain
, bydst_inexact_list
) {
1539 static struct xfrm_policy
*xfrm_policy_insert_list(struct hlist_head
*chain
,
1540 struct xfrm_policy
*policy
,
1543 struct xfrm_policy
*pol
, *newpos
= NULL
, *delpol
= NULL
;
1545 hlist_for_each_entry(pol
, chain
, bydst
) {
1546 if (pol
->type
== policy
->type
&&
1547 pol
->if_id
== policy
->if_id
&&
1548 !selector_cmp(&pol
->selector
, &policy
->selector
) &&
1549 xfrm_policy_mark_match(&policy
->mark
, pol
) &&
1550 xfrm_sec_ctx_match(pol
->security
, policy
->security
) &&
1553 return ERR_PTR(-EEXIST
);
1555 if (policy
->priority
> pol
->priority
)
1557 } else if (policy
->priority
>= pol
->priority
) {
1565 if (newpos
&& policy
->xdo
.type
!= XFRM_DEV_OFFLOAD_PACKET
)
1566 hlist_add_behind_rcu(&policy
->bydst
, &newpos
->bydst
);
1568 /* Packet offload policies enter to the head
1569 * to speed-up lookups.
1571 hlist_add_head_rcu(&policy
->bydst
, chain
);
1576 int xfrm_policy_insert(int dir
, struct xfrm_policy
*policy
, int excl
)
1578 struct net
*net
= xp_net(policy
);
1579 struct xfrm_policy
*delpol
;
1580 struct hlist_head
*chain
;
1582 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1583 chain
= policy_hash_bysel(net
, &policy
->selector
, policy
->family
, dir
);
1585 delpol
= xfrm_policy_insert_list(chain
, policy
, excl
);
1587 delpol
= xfrm_policy_inexact_insert(policy
, dir
, excl
);
1589 if (IS_ERR(delpol
)) {
1590 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1591 return PTR_ERR(delpol
);
1594 __xfrm_policy_link(policy
, dir
);
1596 /* After previous checking, family can either be AF_INET or AF_INET6 */
1597 if (policy
->family
== AF_INET
)
1598 rt_genid_bump_ipv4(net
);
1600 rt_genid_bump_ipv6(net
);
1603 xfrm_policy_requeue(delpol
, policy
);
1604 __xfrm_policy_unlink(delpol
, dir
);
1606 policy
->index
= delpol
? delpol
->index
: xfrm_gen_index(net
, dir
, policy
->index
);
1607 hlist_add_head(&policy
->byidx
, net
->xfrm
.policy_byidx
+idx_hash(net
, policy
->index
));
1608 policy
->curlft
.add_time
= ktime_get_real_seconds();
1609 policy
->curlft
.use_time
= 0;
1610 if (!mod_timer(&policy
->timer
, jiffies
+ HZ
))
1611 xfrm_pol_hold(policy
);
1612 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1615 xfrm_policy_kill(delpol
);
1616 else if (xfrm_bydst_should_resize(net
, dir
, NULL
))
1617 schedule_work(&net
->xfrm
.policy_hash_work
);
1621 EXPORT_SYMBOL(xfrm_policy_insert
);
1623 static struct xfrm_policy
*
1624 __xfrm_policy_bysel_ctx(struct hlist_head
*chain
, const struct xfrm_mark
*mark
,
1625 u32 if_id
, u8 type
, int dir
, struct xfrm_selector
*sel
,
1626 struct xfrm_sec_ctx
*ctx
)
1628 struct xfrm_policy
*pol
;
1633 hlist_for_each_entry(pol
, chain
, bydst
) {
1634 if (pol
->type
== type
&&
1635 pol
->if_id
== if_id
&&
1636 xfrm_policy_mark_match(mark
, pol
) &&
1637 !selector_cmp(sel
, &pol
->selector
) &&
1638 xfrm_sec_ctx_match(ctx
, pol
->security
))
1645 struct xfrm_policy
*
1646 xfrm_policy_bysel_ctx(struct net
*net
, const struct xfrm_mark
*mark
, u32 if_id
,
1647 u8 type
, int dir
, struct xfrm_selector
*sel
,
1648 struct xfrm_sec_ctx
*ctx
, int delete, int *err
)
1650 struct xfrm_pol_inexact_bin
*bin
= NULL
;
1651 struct xfrm_policy
*pol
, *ret
= NULL
;
1652 struct hlist_head
*chain
;
1655 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1656 chain
= policy_hash_bysel(net
, sel
, sel
->family
, dir
);
1658 struct xfrm_pol_inexact_candidates cand
;
1661 bin
= xfrm_policy_inexact_lookup(net
, type
,
1662 sel
->family
, dir
, if_id
);
1664 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1668 if (!xfrm_policy_find_inexact_candidates(&cand
, bin
,
1671 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1676 for (i
= 0; i
< ARRAY_SIZE(cand
.res
); i
++) {
1677 struct xfrm_policy
*tmp
;
1679 tmp
= __xfrm_policy_bysel_ctx(cand
.res
[i
], mark
,
1685 if (!pol
|| tmp
->pos
< pol
->pos
)
1689 pol
= __xfrm_policy_bysel_ctx(chain
, mark
, if_id
, type
, dir
,
1696 *err
= security_xfrm_policy_delete(pol
->security
);
1698 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1701 __xfrm_policy_unlink(pol
, dir
);
1705 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1708 xfrm_policy_kill(ret
);
1710 xfrm_policy_inexact_prune_bin(bin
);
1713 EXPORT_SYMBOL(xfrm_policy_bysel_ctx
);
1715 struct xfrm_policy
*
1716 xfrm_policy_byid(struct net
*net
, const struct xfrm_mark
*mark
, u32 if_id
,
1717 u8 type
, int dir
, u32 id
, int delete, int *err
)
1719 struct xfrm_policy
*pol
, *ret
;
1720 struct hlist_head
*chain
;
1723 if (xfrm_policy_id2dir(id
) != dir
)
1727 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1728 chain
= net
->xfrm
.policy_byidx
+ idx_hash(net
, id
);
1730 hlist_for_each_entry(pol
, chain
, byidx
) {
1731 if (pol
->type
== type
&& pol
->index
== id
&&
1732 pol
->if_id
== if_id
&& xfrm_policy_mark_match(mark
, pol
)) {
1735 *err
= security_xfrm_policy_delete(
1738 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1741 __xfrm_policy_unlink(pol
, dir
);
1747 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1750 xfrm_policy_kill(ret
);
1753 EXPORT_SYMBOL(xfrm_policy_byid
);
1755 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1757 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, bool task_valid
)
1759 struct xfrm_policy
*pol
;
1762 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1763 if (pol
->walk
.dead
||
1764 xfrm_policy_id2dir(pol
->index
) >= XFRM_POLICY_MAX
||
1768 err
= security_xfrm_policy_delete(pol
->security
);
1770 xfrm_audit_policy_delete(pol
, 0, task_valid
);
1777 static inline int xfrm_dev_policy_flush_secctx_check(struct net
*net
,
1778 struct net_device
*dev
,
1781 struct xfrm_policy
*pol
;
1784 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1785 if (pol
->walk
.dead
||
1786 xfrm_policy_id2dir(pol
->index
) >= XFRM_POLICY_MAX
||
1787 pol
->xdo
.dev
!= dev
)
1790 err
= security_xfrm_policy_delete(pol
->security
);
1792 xfrm_audit_policy_delete(pol
, 0, task_valid
);
1800 xfrm_policy_flush_secctx_check(struct net
*net
, u8 type
, bool task_valid
)
1805 static inline int xfrm_dev_policy_flush_secctx_check(struct net
*net
,
1806 struct net_device
*dev
,
1813 int xfrm_policy_flush(struct net
*net
, u8 type
, bool task_valid
)
1815 int dir
, err
= 0, cnt
= 0;
1816 struct xfrm_policy
*pol
;
1818 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1820 err
= xfrm_policy_flush_secctx_check(net
, type
, task_valid
);
1825 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1826 dir
= xfrm_policy_id2dir(pol
->index
);
1827 if (pol
->walk
.dead
||
1828 dir
>= XFRM_POLICY_MAX
||
1832 __xfrm_policy_unlink(pol
, dir
);
1833 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1834 xfrm_dev_policy_delete(pol
);
1836 xfrm_audit_policy_delete(pol
, 1, task_valid
);
1837 xfrm_policy_kill(pol
);
1838 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1842 __xfrm_policy_inexact_flush(net
);
1846 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1849 EXPORT_SYMBOL(xfrm_policy_flush
);
1851 int xfrm_dev_policy_flush(struct net
*net
, struct net_device
*dev
,
1854 int dir
, err
= 0, cnt
= 0;
1855 struct xfrm_policy
*pol
;
1857 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1859 err
= xfrm_dev_policy_flush_secctx_check(net
, dev
, task_valid
);
1864 list_for_each_entry(pol
, &net
->xfrm
.policy_all
, walk
.all
) {
1865 dir
= xfrm_policy_id2dir(pol
->index
);
1866 if (pol
->walk
.dead
||
1867 dir
>= XFRM_POLICY_MAX
||
1868 pol
->xdo
.dev
!= dev
)
1871 __xfrm_policy_unlink(pol
, dir
);
1872 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1873 xfrm_dev_policy_delete(pol
);
1875 xfrm_audit_policy_delete(pol
, 1, task_valid
);
1876 xfrm_policy_kill(pol
);
1877 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1881 __xfrm_policy_inexact_flush(net
);
1885 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1888 EXPORT_SYMBOL(xfrm_dev_policy_flush
);
1890 int xfrm_policy_walk(struct net
*net
, struct xfrm_policy_walk
*walk
,
1891 int (*func
)(struct xfrm_policy
*, int, int, void*),
1894 struct xfrm_policy
*pol
;
1895 struct xfrm_policy_walk_entry
*x
;
1898 if (walk
->type
>= XFRM_POLICY_TYPE_MAX
&&
1899 walk
->type
!= XFRM_POLICY_TYPE_ANY
)
1902 if (list_empty(&walk
->walk
.all
) && walk
->seq
!= 0)
1905 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
1906 if (list_empty(&walk
->walk
.all
))
1907 x
= list_first_entry(&net
->xfrm
.policy_all
, struct xfrm_policy_walk_entry
, all
);
1909 x
= list_first_entry(&walk
->walk
.all
,
1910 struct xfrm_policy_walk_entry
, all
);
1912 list_for_each_entry_from(x
, &net
->xfrm
.policy_all
, all
) {
1915 pol
= container_of(x
, struct xfrm_policy
, walk
);
1916 if (walk
->type
!= XFRM_POLICY_TYPE_ANY
&&
1917 walk
->type
!= pol
->type
)
1919 error
= func(pol
, xfrm_policy_id2dir(pol
->index
),
1922 list_move_tail(&walk
->walk
.all
, &x
->all
);
1927 if (walk
->seq
== 0) {
1931 list_del_init(&walk
->walk
.all
);
1933 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1936 EXPORT_SYMBOL(xfrm_policy_walk
);
1938 void xfrm_policy_walk_init(struct xfrm_policy_walk
*walk
, u8 type
)
1940 INIT_LIST_HEAD(&walk
->walk
.all
);
1941 walk
->walk
.dead
= 1;
1945 EXPORT_SYMBOL(xfrm_policy_walk_init
);
1947 void xfrm_policy_walk_done(struct xfrm_policy_walk
*walk
, struct net
*net
)
1949 if (list_empty(&walk
->walk
.all
))
1952 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
); /*FIXME where is net? */
1953 list_del(&walk
->walk
.all
);
1954 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
1956 EXPORT_SYMBOL(xfrm_policy_walk_done
);
1959 * Find policy to apply to this flow.
1961 * Returns 0 if policy found, else an -errno.
1963 static int xfrm_policy_match(const struct xfrm_policy
*pol
,
1964 const struct flowi
*fl
,
1965 u8 type
, u16 family
, u32 if_id
)
1967 const struct xfrm_selector
*sel
= &pol
->selector
;
1971 if (pol
->family
!= family
||
1972 pol
->if_id
!= if_id
||
1973 (fl
->flowi_mark
& pol
->mark
.m
) != pol
->mark
.v
||
1977 match
= xfrm_selector_match(sel
, fl
, family
);
1979 ret
= security_xfrm_policy_lookup(pol
->security
, fl
->flowi_secid
);
1983 static struct xfrm_pol_inexact_node
*
1984 xfrm_policy_lookup_inexact_addr(const struct rb_root
*r
,
1985 seqcount_spinlock_t
*count
,
1986 const xfrm_address_t
*addr
, u16 family
)
1988 const struct rb_node
*parent
;
1992 seq
= read_seqcount_begin(count
);
1994 parent
= rcu_dereference_raw(r
->rb_node
);
1996 struct xfrm_pol_inexact_node
*node
;
1999 node
= rb_entry(parent
, struct xfrm_pol_inexact_node
, node
);
2001 delta
= xfrm_policy_addr_delta(addr
, &node
->addr
,
2002 node
->prefixlen
, family
);
2004 parent
= rcu_dereference_raw(parent
->rb_left
);
2006 } else if (delta
> 0) {
2007 parent
= rcu_dereference_raw(parent
->rb_right
);
2014 if (read_seqcount_retry(count
, seq
))
2021 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates
*cand
,
2022 struct xfrm_pol_inexact_bin
*b
,
2023 const xfrm_address_t
*saddr
,
2024 const xfrm_address_t
*daddr
)
2026 struct xfrm_pol_inexact_node
*n
;
2032 family
= b
->k
.family
;
2033 memset(cand
, 0, sizeof(*cand
));
2034 cand
->res
[XFRM_POL_CAND_ANY
] = &b
->hhead
;
2036 n
= xfrm_policy_lookup_inexact_addr(&b
->root_d
, &b
->count
, daddr
,
2039 cand
->res
[XFRM_POL_CAND_DADDR
] = &n
->hhead
;
2040 n
= xfrm_policy_lookup_inexact_addr(&n
->root
, &b
->count
, saddr
,
2043 cand
->res
[XFRM_POL_CAND_BOTH
] = &n
->hhead
;
2046 n
= xfrm_policy_lookup_inexact_addr(&b
->root_s
, &b
->count
, saddr
,
2049 cand
->res
[XFRM_POL_CAND_SADDR
] = &n
->hhead
;
2054 static struct xfrm_pol_inexact_bin
*
2055 xfrm_policy_inexact_lookup_rcu(struct net
*net
, u8 type
, u16 family
,
2058 struct xfrm_pol_inexact_key k
= {
2065 write_pnet(&k
.net
, net
);
2067 return rhashtable_lookup(&xfrm_policy_inexact_table
, &k
,
2068 xfrm_pol_inexact_params
);
2071 static struct xfrm_pol_inexact_bin
*
2072 xfrm_policy_inexact_lookup(struct net
*net
, u8 type
, u16 family
,
2075 struct xfrm_pol_inexact_bin
*bin
;
2077 lockdep_assert_held(&net
->xfrm
.xfrm_policy_lock
);
2080 bin
= xfrm_policy_inexact_lookup_rcu(net
, type
, family
, dir
, if_id
);
2086 static struct xfrm_policy
*
2087 __xfrm_policy_eval_candidates(struct hlist_head
*chain
,
2088 struct xfrm_policy
*prefer
,
2089 const struct flowi
*fl
,
2090 u8 type
, u16 family
, u32 if_id
)
2092 u32 priority
= prefer
? prefer
->priority
: ~0u;
2093 struct xfrm_policy
*pol
;
2098 hlist_for_each_entry_rcu(pol
, chain
, bydst
) {
2101 if (pol
->priority
> priority
)
2104 err
= xfrm_policy_match(pol
, fl
, type
, family
, if_id
);
2107 return ERR_PTR(err
);
2113 /* matches. Is it older than *prefer? */
2114 if (pol
->priority
== priority
&&
2115 prefer
->pos
< pol
->pos
)
2125 static struct xfrm_policy
*
2126 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates
*cand
,
2127 struct xfrm_policy
*prefer
,
2128 const struct flowi
*fl
,
2129 u8 type
, u16 family
, u32 if_id
)
2131 struct xfrm_policy
*tmp
;
2134 for (i
= 0; i
< ARRAY_SIZE(cand
->res
); i
++) {
2135 tmp
= __xfrm_policy_eval_candidates(cand
->res
[i
],
2137 fl
, type
, family
, if_id
);
2149 static struct xfrm_policy
*xfrm_policy_lookup_bytype(struct net
*net
, u8 type
,
2150 const struct flowi
*fl
,
2154 struct xfrm_pol_inexact_candidates cand
;
2155 const xfrm_address_t
*daddr
, *saddr
;
2156 struct xfrm_pol_inexact_bin
*bin
;
2157 struct xfrm_policy
*pol
, *ret
;
2158 struct hlist_head
*chain
;
2159 unsigned int sequence
;
2162 daddr
= xfrm_flowi_daddr(fl
, family
);
2163 saddr
= xfrm_flowi_saddr(fl
, family
);
2164 if (unlikely(!daddr
|| !saddr
))
2170 sequence
= read_seqcount_begin(&net
->xfrm
.xfrm_policy_hash_generation
);
2171 chain
= policy_hash_direct(net
, daddr
, saddr
, family
, dir
);
2172 } while (read_seqcount_retry(&net
->xfrm
.xfrm_policy_hash_generation
, sequence
));
2175 hlist_for_each_entry_rcu(pol
, chain
, bydst
) {
2176 err
= xfrm_policy_match(pol
, fl
, type
, family
, if_id
);
2189 if (ret
&& ret
->xdo
.type
== XFRM_DEV_OFFLOAD_PACKET
)
2192 bin
= xfrm_policy_inexact_lookup_rcu(net
, type
, family
, dir
, if_id
);
2193 if (!bin
|| !xfrm_policy_find_inexact_candidates(&cand
, bin
, saddr
,
2197 pol
= xfrm_policy_eval_candidates(&cand
, ret
, fl
, type
,
2206 if (read_seqcount_retry(&net
->xfrm
.xfrm_policy_hash_generation
, sequence
))
2209 if (ret
&& !xfrm_pol_hold_rcu(ret
))
2217 static struct xfrm_policy
*xfrm_policy_lookup(struct net
*net
,
2218 const struct flowi
*fl
,
2219 u16 family
, u8 dir
, u32 if_id
)
2221 #ifdef CONFIG_XFRM_SUB_POLICY
2222 struct xfrm_policy
*pol
;
2224 pol
= xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_SUB
, fl
, family
,
2229 return xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_MAIN
, fl
, family
,
2233 static struct xfrm_policy
*xfrm_sk_policy_lookup(const struct sock
*sk
, int dir
,
2234 const struct flowi
*fl
,
2235 u16 family
, u32 if_id
)
2237 struct xfrm_policy
*pol
;
2241 pol
= rcu_dereference(sk
->sk_policy
[dir
]);
2246 if (pol
->family
!= family
) {
2251 match
= xfrm_selector_match(&pol
->selector
, fl
, family
);
2253 if ((READ_ONCE(sk
->sk_mark
) & pol
->mark
.m
) != pol
->mark
.v
||
2254 pol
->if_id
!= if_id
) {
2258 err
= security_xfrm_policy_lookup(pol
->security
,
2261 if (!xfrm_pol_hold_rcu(pol
))
2263 } else if (err
== -ESRCH
) {
2276 static void __xfrm_policy_link(struct xfrm_policy
*pol
, int dir
)
2278 struct net
*net
= xp_net(pol
);
2280 list_add(&pol
->walk
.all
, &net
->xfrm
.policy_all
);
2281 net
->xfrm
.policy_count
[dir
]++;
2285 static struct xfrm_policy
*__xfrm_policy_unlink(struct xfrm_policy
*pol
,
2288 struct net
*net
= xp_net(pol
);
2290 if (list_empty(&pol
->walk
.all
))
2293 /* Socket policies are not hashed. */
2294 if (!hlist_unhashed(&pol
->bydst
)) {
2295 hlist_del_rcu(&pol
->bydst
);
2296 hlist_del_init(&pol
->bydst_inexact_list
);
2297 hlist_del(&pol
->byidx
);
2300 list_del_init(&pol
->walk
.all
);
2301 net
->xfrm
.policy_count
[dir
]--;
2306 static void xfrm_sk_policy_link(struct xfrm_policy
*pol
, int dir
)
2308 __xfrm_policy_link(pol
, XFRM_POLICY_MAX
+ dir
);
2311 static void xfrm_sk_policy_unlink(struct xfrm_policy
*pol
, int dir
)
2313 __xfrm_policy_unlink(pol
, XFRM_POLICY_MAX
+ dir
);
2316 int xfrm_policy_delete(struct xfrm_policy
*pol
, int dir
)
2318 struct net
*net
= xp_net(pol
);
2320 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2321 pol
= __xfrm_policy_unlink(pol
, dir
);
2322 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2324 xfrm_dev_policy_delete(pol
);
2325 xfrm_policy_kill(pol
);
2330 EXPORT_SYMBOL(xfrm_policy_delete
);
2332 int xfrm_sk_policy_insert(struct sock
*sk
, int dir
, struct xfrm_policy
*pol
)
2334 struct net
*net
= sock_net(sk
);
2335 struct xfrm_policy
*old_pol
;
2337 #ifdef CONFIG_XFRM_SUB_POLICY
2338 if (pol
&& pol
->type
!= XFRM_POLICY_TYPE_MAIN
)
2342 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2343 old_pol
= rcu_dereference_protected(sk
->sk_policy
[dir
],
2344 lockdep_is_held(&net
->xfrm
.xfrm_policy_lock
));
2346 pol
->curlft
.add_time
= ktime_get_real_seconds();
2347 pol
->index
= xfrm_gen_index(net
, XFRM_POLICY_MAX
+dir
, 0);
2348 xfrm_sk_policy_link(pol
, dir
);
2350 rcu_assign_pointer(sk
->sk_policy
[dir
], pol
);
2353 xfrm_policy_requeue(old_pol
, pol
);
2355 /* Unlinking succeeds always. This is the only function
2356 * allowed to delete or replace socket policy.
2358 xfrm_sk_policy_unlink(old_pol
, dir
);
2360 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2363 xfrm_policy_kill(old_pol
);
2368 static struct xfrm_policy
*clone_policy(const struct xfrm_policy
*old
, int dir
)
2370 struct xfrm_policy
*newp
= xfrm_policy_alloc(xp_net(old
), GFP_ATOMIC
);
2371 struct net
*net
= xp_net(old
);
2374 newp
->selector
= old
->selector
;
2375 if (security_xfrm_policy_clone(old
->security
,
2378 return NULL
; /* ENOMEM */
2380 newp
->lft
= old
->lft
;
2381 newp
->curlft
= old
->curlft
;
2382 newp
->mark
= old
->mark
;
2383 newp
->if_id
= old
->if_id
;
2384 newp
->action
= old
->action
;
2385 newp
->flags
= old
->flags
;
2386 newp
->xfrm_nr
= old
->xfrm_nr
;
2387 newp
->index
= old
->index
;
2388 newp
->type
= old
->type
;
2389 newp
->family
= old
->family
;
2390 memcpy(newp
->xfrm_vec
, old
->xfrm_vec
,
2391 newp
->xfrm_nr
*sizeof(struct xfrm_tmpl
));
2392 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
2393 xfrm_sk_policy_link(newp
, dir
);
2394 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
2400 int __xfrm_sk_clone_policy(struct sock
*sk
, const struct sock
*osk
)
2402 const struct xfrm_policy
*p
;
2403 struct xfrm_policy
*np
;
2407 for (i
= 0; i
< 2; i
++) {
2408 p
= rcu_dereference(osk
->sk_policy
[i
]);
2410 np
= clone_policy(p
, i
);
2411 if (unlikely(!np
)) {
2415 rcu_assign_pointer(sk
->sk_policy
[i
], np
);
2423 xfrm_get_saddr(struct net
*net
, int oif
, xfrm_address_t
*local
,
2424 xfrm_address_t
*remote
, unsigned short family
, u32 mark
)
2427 const struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
2429 if (unlikely(afinfo
== NULL
))
2431 err
= afinfo
->get_saddr(net
, oif
, local
, remote
, mark
);
2436 /* Resolve list of templates for the flow, given policy. */
2439 xfrm_tmpl_resolve_one(struct xfrm_policy
*policy
, const struct flowi
*fl
,
2440 struct xfrm_state
**xfrm
, unsigned short family
)
2442 struct net
*net
= xp_net(policy
);
2445 xfrm_address_t
*daddr
= xfrm_flowi_daddr(fl
, family
);
2446 xfrm_address_t
*saddr
= xfrm_flowi_saddr(fl
, family
);
2449 for (nx
= 0, i
= 0; i
< policy
->xfrm_nr
; i
++) {
2450 struct xfrm_state
*x
;
2451 xfrm_address_t
*remote
= daddr
;
2452 xfrm_address_t
*local
= saddr
;
2453 struct xfrm_tmpl
*tmpl
= &policy
->xfrm_vec
[i
];
2455 if (tmpl
->mode
== XFRM_MODE_TUNNEL
||
2456 tmpl
->mode
== XFRM_MODE_BEET
) {
2457 remote
= &tmpl
->id
.daddr
;
2458 local
= &tmpl
->saddr
;
2459 if (xfrm_addr_any(local
, tmpl
->encap_family
)) {
2460 error
= xfrm_get_saddr(net
, fl
->flowi_oif
,
2462 tmpl
->encap_family
, 0);
2469 x
= xfrm_state_find(remote
, local
, fl
, tmpl
, policy
, &error
,
2470 family
, policy
->if_id
);
2472 if (x
&& x
->km
.state
== XFRM_STATE_VALID
) {
2479 error
= (x
->km
.state
== XFRM_STATE_ERROR
?
2482 } else if (error
== -ESRCH
) {
2486 if (!tmpl
->optional
)
2492 for (nx
--; nx
>= 0; nx
--)
2493 xfrm_state_put(xfrm
[nx
]);
2498 xfrm_tmpl_resolve(struct xfrm_policy
**pols
, int npols
, const struct flowi
*fl
,
2499 struct xfrm_state
**xfrm
, unsigned short family
)
2501 struct xfrm_state
*tp
[XFRM_MAX_DEPTH
];
2502 struct xfrm_state
**tpp
= (npols
> 1) ? tp
: xfrm
;
2508 for (i
= 0; i
< npols
; i
++) {
2509 if (cnx
+ pols
[i
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
2514 ret
= xfrm_tmpl_resolve_one(pols
[i
], fl
, &tpp
[cnx
], family
);
2522 /* found states are sorted for outbound processing */
2524 xfrm_state_sort(xfrm
, tpp
, cnx
, family
);
2529 for (cnx
--; cnx
>= 0; cnx
--)
2530 xfrm_state_put(tpp
[cnx
]);
2535 static int xfrm_get_tos(const struct flowi
*fl
, int family
)
2537 if (family
== AF_INET
)
2538 return IPTOS_RT_MASK
& fl
->u
.ip4
.flowi4_tos
;
2543 static inline struct xfrm_dst
*xfrm_alloc_dst(struct net
*net
, int family
)
2545 const struct xfrm_policy_afinfo
*afinfo
= xfrm_policy_get_afinfo(family
);
2546 struct dst_ops
*dst_ops
;
2547 struct xfrm_dst
*xdst
;
2550 return ERR_PTR(-EINVAL
);
2554 dst_ops
= &net
->xfrm
.xfrm4_dst_ops
;
2556 #if IS_ENABLED(CONFIG_IPV6)
2558 dst_ops
= &net
->xfrm
.xfrm6_dst_ops
;
2564 xdst
= dst_alloc(dst_ops
, NULL
, 1, DST_OBSOLETE_NONE
, 0);
2567 memset_after(xdst
, 0, u
.dst
);
2569 xdst
= ERR_PTR(-ENOBUFS
);
2576 static void xfrm_init_path(struct xfrm_dst
*path
, struct dst_entry
*dst
,
2579 if (dst
->ops
->family
== AF_INET6
) {
2580 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
2581 path
->path_cookie
= rt6_get_cookie(rt
);
2582 path
->u
.rt6
.rt6i_nfheader_len
= nfheader_len
;
2586 static inline int xfrm_fill_dst(struct xfrm_dst
*xdst
, struct net_device
*dev
,
2587 const struct flowi
*fl
)
2589 const struct xfrm_policy_afinfo
*afinfo
=
2590 xfrm_policy_get_afinfo(xdst
->u
.dst
.ops
->family
);
2596 err
= afinfo
->fill_dst(xdst
, dev
, fl
);
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	const struct xfrm_state_afinfo *afinfo;
	const struct xfrm_mode *inner_mode;
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		bundle[i] = xdst;
		if (!xdst_prev)
			xdst0 = xdst;
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = &xfrm[i]->inner_mode;

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			__u32 mark = 0;
			int oif;

			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			family = xfrm[i]->props.family;
			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
					      &saddr, &daddr, family, mark);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->lastuse = now;

		dst1->input = dst_discard;

		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
		if (likely(afinfo))
			dst1->output = afinfo->output;
		else
			dst1->output = dst_discard_out;
		rcu_read_unlock();

		xdst_prev = xdst;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	xfrm_dst_set_child(xdst_prev, dst);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
		if (err)
			goto free_dst;

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);

	return ERR_PTR(err);
}
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0])) {
		*num_pols = 0;
		return PTR_ERR(pols[0]);
	}

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT,
						    pols[0]->if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				*num_pols = 0;
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;
	__u32 skb_mark;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;

	/* Fixup the mark to support VTI. */
	skb_mark = skb->mark;
	skb->mark = pol->mark.v;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	skb->mark = skb_mark;
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		/* Fixup the mark to support VTI. */
		skb_mark = skb->mark;
		skb->mark = pol->mark.v;
		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		skb->mark = skb_mark;

		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}
static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					    const struct flowi *fl,
					    u16 family, u8 dir,
					    struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build template (no xfrm_states).*/
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	rcu_read_unlock();

	return ret;
}
/* Finds/creates a bundle for given flow and if_id
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility.
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit...  */
		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
		if (xdst == NULL)
			goto nopol;
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = ktime_get_real_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
		err = -EPERM;
		goto error;
	}
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup_with_ifid);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags)
{
	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
}
EXPORT_SYMBOL(xfrm_lookup);
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    const struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	if (IS_ERR(dst))
		dst_release(dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct sec_path *sp = skb_sec_path(skb);
	struct xfrm_state *x;

	if (!sp || idx < 0 || idx >= sp->len)
		return 0;
	x = sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family, u32 if_id)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family)) &&
		(if_id == 0 || if_id == x->if_id);
}
/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */

static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family, u32 if_id)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (idx < sp->verified_cnt) {
				/* Secpath entry previously verified, consider optional and
				 * continue searching
				 */
				continue;
			}

			if (start == -1)
				start = -2-(long)sp->xvec[idx];
			return start;
		}
	}
	return start;
}
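/* Illustrative sketch (not part of the kernel source): how a caller can
 * interpret the return convention documented above xfrm_policy_ok().
 * Values >= 0 are the next secpath index to continue from, -1 means no
 * matching template, and anything below -1 encodes "-2 - errored_index".
 * The helper is hypothetical; __xfrm_policy_check() open-codes this.
 */
static int example_decode_policy_ok(int ret, int *errored_idx)
{
	if (ret >= 0)
		return 0;		/* matched; ret is the next index */
	if (ret == -1)
		return -1;		/* no matching template */
	*errored_idx = -2 - ret;	/* recover the offending index */
	return -2;
}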
3370 decode_session4(struct sk_buff
*skb
, struct flowi
*fl
, bool reverse
)
3372 const struct iphdr
*iph
= ip_hdr(skb
);
3374 u8
*xprth
= skb_network_header(skb
) + ihl
* 4;
3375 struct flowi4
*fl4
= &fl
->u
.ip4
;
3378 if (skb_dst(skb
) && skb_dst(skb
)->dev
)
3379 oif
= skb_dst(skb
)->dev
->ifindex
;
3381 memset(fl4
, 0, sizeof(struct flowi4
));
3382 fl4
->flowi4_mark
= skb
->mark
;
3383 fl4
->flowi4_oif
= reverse
? skb
->skb_iif
: oif
;
3385 fl4
->flowi4_proto
= iph
->protocol
;
3386 fl4
->daddr
= reverse
? iph
->saddr
: iph
->daddr
;
3387 fl4
->saddr
= reverse
? iph
->daddr
: iph
->saddr
;
3388 fl4
->flowi4_tos
= iph
->tos
& ~INET_ECN_MASK
;
3390 if (!ip_is_fragment(iph
)) {
3391 switch (iph
->protocol
) {
3393 case IPPROTO_UDPLITE
:
3397 if (xprth
+ 4 < skb
->data
||
3398 pskb_may_pull(skb
, xprth
+ 4 - skb
->data
)) {
3401 xprth
= skb_network_header(skb
) + ihl
* 4;
3402 ports
= (__be16
*)xprth
;
3404 fl4
->fl4_sport
= ports
[!!reverse
];
3405 fl4
->fl4_dport
= ports
[!reverse
];
3409 if (xprth
+ 2 < skb
->data
||
3410 pskb_may_pull(skb
, xprth
+ 2 - skb
->data
)) {
3413 xprth
= skb_network_header(skb
) + ihl
* 4;
3416 fl4
->fl4_icmp_type
= icmp
[0];
3417 fl4
->fl4_icmp_code
= icmp
[1];
3421 if (xprth
+ 12 < skb
->data
||
3422 pskb_may_pull(skb
, xprth
+ 12 - skb
->data
)) {
3426 xprth
= skb_network_header(skb
) + ihl
* 4;
3427 greflags
= (__be16
*)xprth
;
3428 gre_hdr
= (__be32
*)xprth
;
3430 if (greflags
[0] & GRE_KEY
) {
3431 if (greflags
[0] & GRE_CSUM
)
3433 fl4
->fl4_gre_key
= gre_hdr
[1];
3443 #if IS_ENABLED(CONFIG_IPV6)
3445 decode_session6(struct sk_buff
*skb
, struct flowi
*fl
, bool reverse
)
3447 struct flowi6
*fl6
= &fl
->u
.ip6
;
3449 const struct ipv6hdr
*hdr
= ipv6_hdr(skb
);
3450 u32 offset
= sizeof(*hdr
);
3451 struct ipv6_opt_hdr
*exthdr
;
3452 const unsigned char *nh
= skb_network_header(skb
);
3453 u16 nhoff
= IP6CB(skb
)->nhoff
;
3458 nhoff
= offsetof(struct ipv6hdr
, nexthdr
);
3460 nexthdr
= nh
[nhoff
];
3462 if (skb_dst(skb
) && skb_dst(skb
)->dev
)
3463 oif
= skb_dst(skb
)->dev
->ifindex
;
3465 memset(fl6
, 0, sizeof(struct flowi6
));
3466 fl6
->flowi6_mark
= skb
->mark
;
3467 fl6
->flowi6_oif
= reverse
? skb
->skb_iif
: oif
;
3469 fl6
->daddr
= reverse
? hdr
->saddr
: hdr
->daddr
;
3470 fl6
->saddr
= reverse
? hdr
->daddr
: hdr
->saddr
;
3472 while (nh
+ offset
+ sizeof(*exthdr
) < skb
->data
||
3473 pskb_may_pull(skb
, nh
+ offset
+ sizeof(*exthdr
) - skb
->data
)) {
3474 nh
= skb_network_header(skb
);
3475 exthdr
= (struct ipv6_opt_hdr
*)(nh
+ offset
);
3478 case NEXTHDR_FRAGMENT
:
3481 case NEXTHDR_ROUTING
:
3484 offset
+= ipv6_optlen(exthdr
);
3485 nexthdr
= exthdr
->nexthdr
;
3488 case IPPROTO_UDPLITE
:
3492 if (!onlyproto
&& (nh
+ offset
+ 4 < skb
->data
||
3493 pskb_may_pull(skb
, nh
+ offset
+ 4 - skb
->data
))) {
3496 nh
= skb_network_header(skb
);
3497 ports
= (__be16
*)(nh
+ offset
);
3498 fl6
->fl6_sport
= ports
[!!reverse
];
3499 fl6
->fl6_dport
= ports
[!reverse
];
3501 fl6
->flowi6_proto
= nexthdr
;
3503 case IPPROTO_ICMPV6
:
3504 if (!onlyproto
&& (nh
+ offset
+ 2 < skb
->data
||
3505 pskb_may_pull(skb
, nh
+ offset
+ 2 - skb
->data
))) {
3508 nh
= skb_network_header(skb
);
3509 icmp
= (u8
*)(nh
+ offset
);
3510 fl6
->fl6_icmp_type
= icmp
[0];
3511 fl6
->fl6_icmp_code
= icmp
[1];
3513 fl6
->flowi6_proto
= nexthdr
;
3517 (nh
+ offset
+ 12 < skb
->data
||
3518 pskb_may_pull(skb
, nh
+ offset
+ 12 - skb
->data
))) {
3519 struct gre_base_hdr
*gre_hdr
;
3522 nh
= skb_network_header(skb
);
3523 gre_hdr
= (struct gre_base_hdr
*)(nh
+ offset
);
3524 gre_key
= (__be32
*)(gre_hdr
+ 1);
3526 if (gre_hdr
->flags
& GRE_KEY
) {
3527 if (gre_hdr
->flags
& GRE_CSUM
)
3529 fl6
->fl6_gre_key
= *gre_key
;
3532 fl6
->flowi6_proto
= nexthdr
;
3535 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3537 offset
+= ipv6_optlen(exthdr
);
3538 if (!onlyproto
&& (nh
+ offset
+ 3 < skb
->data
||
3539 pskb_may_pull(skb
, nh
+ offset
+ 3 - skb
->data
))) {
3542 nh
= skb_network_header(skb
);
3543 mh
= (struct ip6_mh
*)(nh
+ offset
);
3544 fl6
->fl6_mh_type
= mh
->ip6mh_type
;
3546 fl6
->flowi6_proto
= nexthdr
;
3550 fl6
->flowi6_proto
= nexthdr
;
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
3589 int __xfrm_policy_check(struct sock
*sk
, int dir
, struct sk_buff
*skb
,
3590 unsigned short family
)
3592 struct net
*net
= dev_net(skb
->dev
);
3593 struct xfrm_policy
*pol
;
3594 struct xfrm_policy
*pols
[XFRM_POLICY_TYPE_MAX
];
3601 const struct xfrm_if_cb
*ifcb
;
3602 struct sec_path
*sp
;
3606 ifcb
= xfrm_if_get_cb();
3609 struct xfrm_if_decode_session_result r
;
3611 if (ifcb
->decode_session(skb
, family
, &r
)) {
3618 reverse
= dir
& ~XFRM_POLICY_MASK
;
3619 dir
&= XFRM_POLICY_MASK
;
3621 if (__xfrm_decode_session(skb
, &fl
, family
, reverse
) < 0) {
3622 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINHDRERROR
);
3626 nf_nat_decode_session(skb
, &fl
, family
);
3628 /* First, check used SA against their selectors. */
3629 sp
= skb_sec_path(skb
);
3633 for (i
= sp
->len
- 1; i
>= 0; i
--) {
3634 struct xfrm_state
*x
= sp
->xvec
[i
];
3635 if (!xfrm_selector_match(&x
->sel
, &fl
, family
)) {
3636 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINSTATEMISMATCH
);
3643 sk
= sk_to_full_sk(sk
);
3644 if (sk
&& sk
->sk_policy
[dir
]) {
3645 pol
= xfrm_sk_policy_lookup(sk
, dir
, &fl
, family
, if_id
);
3647 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
3653 pol
= xfrm_policy_lookup(net
, &fl
, family
, dir
, if_id
);
3656 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
3661 if (net
->xfrm
.policy_default
[dir
] == XFRM_USERPOLICY_BLOCK
) {
3662 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINNOPOLS
);
3666 if (sp
&& secpath_has_nontransport(sp
, 0, &xerr_idx
)) {
3667 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
3668 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINNOPOLS
);
3674 /* This lockless write can happen from different cpus. */
3675 WRITE_ONCE(pol
->curlft
.use_time
, ktime_get_real_seconds());
3679 #ifdef CONFIG_XFRM_SUB_POLICY
3680 if (pols
[0]->type
!= XFRM_POLICY_TYPE_MAIN
) {
3681 pols
[1] = xfrm_policy_lookup_bytype(net
, XFRM_POLICY_TYPE_MAIN
,
3683 XFRM_POLICY_IN
, if_id
);
3685 if (IS_ERR(pols
[1])) {
3686 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLERROR
);
3687 xfrm_pol_put(pols
[0]);
3690 /* This write can happen from different cpus. */
3691 WRITE_ONCE(pols
[1]->curlft
.use_time
,
3692 ktime_get_real_seconds());
3698 if (pol
->action
== XFRM_POLICY_ALLOW
) {
3699 static struct sec_path dummy
;
3700 struct xfrm_tmpl
*tp
[XFRM_MAX_DEPTH
];
3701 struct xfrm_tmpl
*stp
[XFRM_MAX_DEPTH
];
3702 struct xfrm_tmpl
**tpp
= tp
;
3706 sp
= skb_sec_path(skb
);
3710 for (pi
= 0; pi
< npols
; pi
++) {
3711 if (pols
[pi
] != pol
&&
3712 pols
[pi
]->action
!= XFRM_POLICY_ALLOW
) {
3713 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLBLOCK
);
3716 if (ti
+ pols
[pi
]->xfrm_nr
>= XFRM_MAX_DEPTH
) {
3717 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINBUFFERERROR
);
3720 for (i
= 0; i
< pols
[pi
]->xfrm_nr
; i
++)
3721 tpp
[ti
++] = &pols
[pi
]->xfrm_vec
[i
];
3726 xfrm_tmpl_sort(stp
, tpp
, xfrm_nr
, family
);
3730 /* For each tunnel xfrm, find the first matching tmpl.
3731 * For each tmpl before that, find corresponding xfrm.
3732 * Order is _important_. Later we will implement
3733 * some barriers, but at the moment barriers
3734 * are implied between each two transformations.
3735 * Upon success, marks secpath entries as having been
3736 * verified to allow them to be skipped in future policy
3737 * checks (e.g. nested tunnels).
3739 for (i
= xfrm_nr
-1, k
= 0; i
>= 0; i
--) {
3740 k
= xfrm_policy_ok(tpp
[i
], sp
, k
, family
, if_id
);
3743 /* "-2 - errored_index" returned */
3745 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINTMPLMISMATCH
);
3750 if (secpath_has_nontransport(sp
, k
, &xerr_idx
)) {
3751 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINTMPLMISMATCH
);
3755 xfrm_pols_put(pols
, npols
);
3756 sp
->verified_cnt
= k
;
3760 XFRM_INC_STATS(net
, LINUX_MIB_XFRMINPOLBLOCK
);
3763 xfrm_secpath_reject(xerr_idx
, skb
, &fl
);
3765 xfrm_pols_put(pols
, npols
);
3768 EXPORT_SYMBOL(__xfrm_policy_check
);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = blackhole_netdev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}
static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
4105 static int __net_init
xfrm_policy_init(struct net
*net
)
4107 unsigned int hmask
, sz
;
4110 if (net_eq(net
, &init_net
)) {
4111 xfrm_dst_cache
= kmem_cache_create("xfrm_dst_cache",
4112 sizeof(struct xfrm_dst
),
4113 0, SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
4115 err
= rhashtable_init(&xfrm_policy_inexact_table
,
4116 &xfrm_pol_inexact_params
);
4121 sz
= (hmask
+1) * sizeof(struct hlist_head
);
4123 net
->xfrm
.policy_byidx
= xfrm_hash_alloc(sz
);
4124 if (!net
->xfrm
.policy_byidx
)
4126 net
->xfrm
.policy_idx_hmask
= hmask
;
4128 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
4129 struct xfrm_policy_hash
*htab
;
4131 net
->xfrm
.policy_count
[dir
] = 0;
4132 net
->xfrm
.policy_count
[XFRM_POLICY_MAX
+ dir
] = 0;
4133 INIT_HLIST_HEAD(&net
->xfrm
.policy_inexact
[dir
]);
4135 htab
= &net
->xfrm
.policy_bydst
[dir
];
4136 htab
->table
= xfrm_hash_alloc(sz
);
4139 htab
->hmask
= hmask
;
4145 net
->xfrm
.policy_hthresh
.lbits4
= 32;
4146 net
->xfrm
.policy_hthresh
.rbits4
= 32;
4147 net
->xfrm
.policy_hthresh
.lbits6
= 128;
4148 net
->xfrm
.policy_hthresh
.rbits6
= 128;
4150 seqlock_init(&net
->xfrm
.policy_hthresh
.lock
);
4152 INIT_LIST_HEAD(&net
->xfrm
.policy_all
);
4153 INIT_LIST_HEAD(&net
->xfrm
.inexact_bins
);
4154 INIT_WORK(&net
->xfrm
.policy_hash_work
, xfrm_hash_resize
);
4155 INIT_WORK(&net
->xfrm
.policy_hthresh
.work
, xfrm_hash_rebuild
);
4159 for (dir
--; dir
>= 0; dir
--) {
4160 struct xfrm_policy_hash
*htab
;
4162 htab
= &net
->xfrm
.policy_bydst
[dir
];
4163 xfrm_hash_free(htab
->table
, sz
);
4165 xfrm_hash_free(net
->xfrm
.policy_byidx
, sz
);
4170 static void xfrm_policy_fini(struct net
*net
)
4172 struct xfrm_pol_inexact_bin
*b
, *t
;
4176 flush_work(&net
->xfrm
.policy_hash_work
);
4177 #ifdef CONFIG_XFRM_SUB_POLICY
4178 xfrm_policy_flush(net
, XFRM_POLICY_TYPE_SUB
, false);
4180 xfrm_policy_flush(net
, XFRM_POLICY_TYPE_MAIN
, false);
4182 WARN_ON(!list_empty(&net
->xfrm
.policy_all
));
4184 for (dir
= 0; dir
< XFRM_POLICY_MAX
; dir
++) {
4185 struct xfrm_policy_hash
*htab
;
4187 WARN_ON(!hlist_empty(&net
->xfrm
.policy_inexact
[dir
]));
4189 htab
= &net
->xfrm
.policy_bydst
[dir
];
4190 sz
= (htab
->hmask
+ 1) * sizeof(struct hlist_head
);
4191 WARN_ON(!hlist_empty(htab
->table
));
4192 xfrm_hash_free(htab
->table
, sz
);
4195 sz
= (net
->xfrm
.policy_idx_hmask
+ 1) * sizeof(struct hlist_head
);
4196 WARN_ON(!hlist_empty(net
->xfrm
.policy_byidx
));
4197 xfrm_hash_free(net
->xfrm
.policy_byidx
, sz
);
4199 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
4200 list_for_each_entry_safe(b
, t
, &net
->xfrm
.inexact_bins
, inexact_bins
)
4201 __xfrm_policy_inexact_prune_bin(b
, true);
4202 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
4268 static void xfrm_audit_common_policyinfo(struct xfrm_policy
*xp
,
4269 struct audit_buffer
*audit_buf
)
4271 struct xfrm_sec_ctx
*ctx
= xp
->security
;
4272 struct xfrm_selector
*sel
= &xp
->selector
;
4275 audit_log_format(audit_buf
, " sec_alg=%u sec_doi=%u sec_obj=%s",
4276 ctx
->ctx_alg
, ctx
->ctx_doi
, ctx
->ctx_str
);
4278 switch (sel
->family
) {
4280 audit_log_format(audit_buf
, " src=%pI4", &sel
->saddr
.a4
);
4281 if (sel
->prefixlen_s
!= 32)
4282 audit_log_format(audit_buf
, " src_prefixlen=%d",
4284 audit_log_format(audit_buf
, " dst=%pI4", &sel
->daddr
.a4
);
4285 if (sel
->prefixlen_d
!= 32)
4286 audit_log_format(audit_buf
, " dst_prefixlen=%d",
4290 audit_log_format(audit_buf
, " src=%pI6", sel
->saddr
.a6
);
4291 if (sel
->prefixlen_s
!= 128)
4292 audit_log_format(audit_buf
, " src_prefixlen=%d",
4294 audit_log_format(audit_buf
, " dst=%pI6", sel
->daddr
.a6
);
4295 if (sel
->prefixlen_d
!= 128)
4296 audit_log_format(audit_buf
, " dst_prefixlen=%d",
4302 void xfrm_audit_policy_add(struct xfrm_policy
*xp
, int result
, bool task_valid
)
4304 struct audit_buffer
*audit_buf
;
4306 audit_buf
= xfrm_audit_start("SPD-add");
4307 if (audit_buf
== NULL
)
4309 xfrm_audit_helper_usrinfo(task_valid
, audit_buf
);
4310 audit_log_format(audit_buf
, " res=%u", result
);
4311 xfrm_audit_common_policyinfo(xp
, audit_buf
);
4312 audit_log_end(audit_buf
);
4314 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add
);
4316 void xfrm_audit_policy_delete(struct xfrm_policy
*xp
, int result
,
4319 struct audit_buffer
*audit_buf
;
4321 audit_buf
= xfrm_audit_start("SPD-delete");
4322 if (audit_buf
== NULL
)
4324 xfrm_audit_helper_usrinfo(task_valid
, audit_buf
);
4325 audit_log_format(audit_buf
, " res=%u", result
);
4326 xfrm_audit_common_policyinfo(xp
, audit_buf
);
4327 audit_log_end(audit_buf
);
4329 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete
);
4332 #ifdef CONFIG_XFRM_MIGRATE
4333 static bool xfrm_migrate_selector_match(const struct xfrm_selector
*sel_cmp
,
4334 const struct xfrm_selector
*sel_tgt
)
4336 if (sel_cmp
->proto
== IPSEC_ULPROTO_ANY
) {
4337 if (sel_tgt
->family
== sel_cmp
->family
&&
4338 xfrm_addr_equal(&sel_tgt
->daddr
, &sel_cmp
->daddr
,
4340 xfrm_addr_equal(&sel_tgt
->saddr
, &sel_cmp
->saddr
,
4342 sel_tgt
->prefixlen_d
== sel_cmp
->prefixlen_d
&&
4343 sel_tgt
->prefixlen_s
== sel_cmp
->prefixlen_s
) {
4347 if (memcmp(sel_tgt
, sel_cmp
, sizeof(*sel_tgt
)) == 0) {
4354 static struct xfrm_policy
*xfrm_migrate_policy_find(const struct xfrm_selector
*sel
,
4355 u8 dir
, u8 type
, struct net
*net
, u32 if_id
)
4357 struct xfrm_policy
*pol
, *ret
= NULL
;
4358 struct hlist_head
*chain
;
4361 spin_lock_bh(&net
->xfrm
.xfrm_policy_lock
);
4362 chain
= policy_hash_direct(net
, &sel
->daddr
, &sel
->saddr
, sel
->family
, dir
);
4363 hlist_for_each_entry(pol
, chain
, bydst
) {
4364 if ((if_id
== 0 || pol
->if_id
== if_id
) &&
4365 xfrm_migrate_selector_match(sel
, &pol
->selector
) &&
4366 pol
->type
== type
) {
4368 priority
= ret
->priority
;
4372 chain
= &net
->xfrm
.policy_inexact
[dir
];
4373 hlist_for_each_entry(pol
, chain
, bydst_inexact_list
) {
4374 if ((pol
->priority
>= priority
) && ret
)
4377 if ((if_id
== 0 || pol
->if_id
== if_id
) &&
4378 xfrm_migrate_selector_match(sel
, &pol
->selector
) &&
4379 pol
->type
== type
) {
4387 spin_unlock_bh(&net
->xfrm
.xfrm_policy_lock
);
4392 static int migrate_tmpl_match(const struct xfrm_migrate
*m
, const struct xfrm_tmpl
*t
)
4396 if (t
->mode
== m
->mode
&& t
->id
.proto
== m
->proto
&&
4397 (m
->reqid
== 0 || t
->reqid
== m
->reqid
)) {
4399 case XFRM_MODE_TUNNEL
:
4400 case XFRM_MODE_BEET
:
4401 if (xfrm_addr_equal(&t
->id
.daddr
, &m
->old_daddr
,
4403 xfrm_addr_equal(&t
->saddr
, &m
->old_saddr
,
4408 case XFRM_MODE_TRANSPORT
:
4409 /* in case of transport mode, template does not store
4410 any IP addresses, hence we just compare mode and
4421 /* update endpoint address(es) of template(s) */
4422 static int xfrm_policy_migrate(struct xfrm_policy
*pol
,
4423 struct xfrm_migrate
*m
, int num_migrate
,
4424 struct netlink_ext_ack
*extack
)
4426 struct xfrm_migrate
*mp
;
4429 write_lock_bh(&pol
->lock
);
4430 if (unlikely(pol
->walk
.dead
)) {
4431 /* target policy has been deleted */
4432 NL_SET_ERR_MSG(extack
, "Target policy not found");
4433 write_unlock_bh(&pol
->lock
);
4437 for (i
= 0; i
< pol
->xfrm_nr
; i
++) {
4438 for (j
= 0, mp
= m
; j
< num_migrate
; j
++, mp
++) {
4439 if (!migrate_tmpl_match(mp
, &pol
->xfrm_vec
[i
]))
4442 if (pol
->xfrm_vec
[i
].mode
!= XFRM_MODE_TUNNEL
&&
4443 pol
->xfrm_vec
[i
].mode
!= XFRM_MODE_BEET
)
4445 /* update endpoints */
4446 memcpy(&pol
->xfrm_vec
[i
].id
.daddr
, &mp
->new_daddr
,
4447 sizeof(pol
->xfrm_vec
[i
].id
.daddr
));
4448 memcpy(&pol
->xfrm_vec
[i
].saddr
, &mp
->new_saddr
,
4449 sizeof(pol
->xfrm_vec
[i
].saddr
));
4450 pol
->xfrm_vec
[i
].encap_family
= mp
->new_family
;
4452 atomic_inc(&pol
->genid
);
4456 write_unlock_bh(&pol
->lock
);
4464 static int xfrm_migrate_check(const struct xfrm_migrate
*m
, int num_migrate
,
4465 struct netlink_ext_ack
*extack
)
4469 if (num_migrate
< 1 || num_migrate
> XFRM_MAX_DEPTH
) {
4470 NL_SET_ERR_MSG(extack
, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4474 for (i
= 0; i
< num_migrate
; i
++) {
4475 if (xfrm_addr_any(&m
[i
].new_daddr
, m
[i
].new_family
) ||
4476 xfrm_addr_any(&m
[i
].new_saddr
, m
[i
].new_family
)) {
4477 NL_SET_ERR_MSG(extack
, "Addresses in the MIGRATE attribute's list cannot be null");
4481 /* check if there is any duplicated entry */
4482 for (j
= i
+ 1; j
< num_migrate
; j
++) {
4483 if (!memcmp(&m
[i
].old_daddr
, &m
[j
].old_daddr
,
4484 sizeof(m
[i
].old_daddr
)) &&
4485 !memcmp(&m
[i
].old_saddr
, &m
[j
].old_saddr
,
4486 sizeof(m
[i
].old_saddr
)) &&
4487 m
[i
].proto
== m
[j
].proto
&&
4488 m
[i
].mode
== m
[j
].mode
&&
4489 m
[i
].reqid
== m
[j
].reqid
&&
4490 m
[i
].old_family
== m
[j
].old_family
) {
4491 NL_SET_ERR_MSG(extack
, "Entries in the MIGRATE attribute's list must be unique");
4500 int xfrm_migrate(const struct xfrm_selector
*sel
, u8 dir
, u8 type
,
4501 struct xfrm_migrate
*m
, int num_migrate
,
4502 struct xfrm_kmaddress
*k
, struct net
*net
,
4503 struct xfrm_encap_tmpl
*encap
, u32 if_id
,
4504 struct netlink_ext_ack
*extack
)
4506 int i
, err
, nx_cur
= 0, nx_new
= 0;
4507 struct xfrm_policy
*pol
= NULL
;
4508 struct xfrm_state
*x
, *xc
;
4509 struct xfrm_state
*x_cur
[XFRM_MAX_DEPTH
];
4510 struct xfrm_state
*x_new
[XFRM_MAX_DEPTH
];
4511 struct xfrm_migrate
*mp
;
4513 /* Stage 0 - sanity checks */
4514 err
= xfrm_migrate_check(m
, num_migrate
, extack
);
4518 if (dir
>= XFRM_POLICY_MAX
) {
4519 NL_SET_ERR_MSG(extack
, "Invalid policy direction");
4524 /* Stage 1 - find policy */
4525 pol
= xfrm_migrate_policy_find(sel
, dir
, type
, net
, if_id
);
4527 NL_SET_ERR_MSG(extack
, "Target policy not found");
4532 /* Stage 2 - find and update state(s) */
4533 for (i
= 0, mp
= m
; i
< num_migrate
; i
++, mp
++) {
4534 if ((x
= xfrm_migrate_state_find(mp
, net
, if_id
))) {
4537 xc
= xfrm_state_migrate(x
, mp
, encap
);
4548 /* Stage 3 - update policy */
4549 err
= xfrm_policy_migrate(pol
, m
, num_migrate
, extack
);
4553 /* Stage 4 - delete old state(s) */
4555 xfrm_states_put(x_cur
, nx_cur
);
4556 xfrm_states_delete(x_cur
, nx_cur
);
4559 /* Stage 5 - announce */
4560 km_migrate(sel
, dir
, type
, m
, num_migrate
, k
, encap
);
4572 xfrm_states_put(x_cur
, nx_cur
);
4574 xfrm_states_delete(x_new
, nx_new
);
4578 EXPORT_SYMBOL(xfrm_migrate
);