/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, hold_net(net));
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(ib_net(tb));
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	atomic_inc(&hashinfo->bsockets);

	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	atomic_dec(&hashinfo->bsockets);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

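/* Make a child socket (one being created as a connection is accepted) share
 * the parent's local port: look up or create the bind bucket for the port
 * the child inherited and hash the child onto it.
 */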
int __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

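/* Score how well a listening socket matches an incoming packet: the socket
 * must be in the right namespace, bound to the right local port and not be
 * IPv6-only.  Sockets bound to a specific local address or device score
 * higher, and a mismatch on either disqualifies the socket entirely.
 */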
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
	}
	return score;
}

/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (((u64)phash * matches) >> 32 == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

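/* RCU lookup in the established hash.  sk_hash is compared first as a cheap
 * filter, then INET_MATCH checks the full four-tuple.  Because the socket
 * slab is SLAB_DESTROY_BY_RCU, an entry can be freed and reused while we
 * walk the chain, so we take a reference with atomic_inc_not_zero() and then
 * re-check the match before returning the socket.
 */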
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

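/* Used while choosing a port for connect(): verify that the four-tuple the
 * caller wants is not already present in the established hash.  A matching
 * TIME_WAIT socket may be recycled when twsk_unique() allows it; any other
 * match means the tuple is already in use.
 */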
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;
	int twrefcnt = 0;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		twrefcnt = inet_twsk_unhash(tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	if (twrefcnt)
		inet_twsk_put(tw);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);

		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

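/* Per-destination starting offset for the ephemeral port search, derived
 * from the connection addresses and the destination port so that different
 * destinations walk the local port range in different orders.
 */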
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	struct inet_ehash_bucket *head;
	int twrefcnt = 0;

	WARN_ON(!sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, list);
	if (tw) {
		WARN_ON(sk->sk_hash != tw->tw_hash);
		twrefcnt = inet_twsk_unhash(tw);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

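/* Listening sockets go into the small listening_hash, keyed by local port
 * and protected by a per-bucket spinlock; everything else is inserted into
 * the established hash via __inet_hash_nolisten().
 */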
static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk, NULL);
		return;
	}

	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

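/* Pick a local port for a connecting socket and hash the socket.  When no
 * port is bound yet, walk the local port range starting at a per-destination
 * offset, skip reserved ports, and accept a port only once
 * check_established() confirms the resulting four-tuple is unique (possibly
 * recycling a TIME_WAIT socket).  When a port is already bound, only the
 * uniqueness check is needed.
 */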
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);
	int twrefcnt = 1;

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_reserved_local_port(port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			twrefcnt += hash(sk, tw);
		}
		if (tw)
			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			while (twrefcnt) {
				twrefcnt--;
				inet_twsk_put(tw);
			}
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb  = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
EXPORT_SYMBOL_GPL(__inet_hash_connect);

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

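/* Initialize the listening hash buckets.  Each nulls list ends in a
 * per-bucket marker (i + LISTENING_NULLS_BASE), which lets lockless RCU
 * lookups detect that they finished on a different chain than the one they
 * started on and restart.
 */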
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	atomic_set(&h->bsockets, 0);
	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);