/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_thin_linear_timeouts __read_mostly;

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

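/* A rough sketch of what tcp_check_oom() above checks (see
 * tcp_too_many_orphans() and tcp_out_of_memory() in include/net/tcp.h for
 * the exact tests): the socket is considered too expensive when the orphan
 * count, shifted left by 'shift', exceeds net.ipv4.tcp_max_orphans, or when
 * TCP memory usage is above its hard limit (tcp_mem[2]). Each penalty point
 * in 'shift' therefore halves the number of orphans the host will tolerate
 * before killing the socket.
 */
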
/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

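/* Worked example for the fallback above, assuming the usual
 * TCP_RTO_MIN = HZ/5 = 200 ms: eight exponentially backed-off
 * retransmissions add up to ((2 << 8) - 1) * 200 ms = 102.2 s, just over
 * the "100 seconds" quoted in the comment; see retransmits_timed_out()
 * below for the formula.
 */
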
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);

	/* Black hole detection */
	if (net->ipv4.sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

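/* Rough illustration of the search step above (numbers are indicative and
 * ignore TCP options): with icsk_mtup.search_low at an MTU of 1500,
 * tcp_mtu_to_mss() yields roughly a 1460-byte MSS; halving gives 730, which
 * the tcp_base_mss, "68 - header" and tcp_min_snd_mss clamps leave
 * untouched, so the next probe target becomes an MTU of about 770 bytes.
 * Repeated timeouts keep halving until a working MTU is found or a floor
 * is hit.
 */
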
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	const unsigned int rto_base = TCP_RTO_MIN;
	unsigned int linear_backoff_thresh, start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (unlikely(!start_ts))
		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}

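/* Worked example for the default timeout computed above, assuming
 * TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s, which make
 * linear_backoff_thresh = ilog2(600) = 9:
 *
 *   boundary <= 9:  timeout = (2^(boundary + 1) - 1) * 200 ms
 *   boundary  > 9:  timeout = 1023 * 200 ms + (boundary - 9) * 120 s
 *
 * For the default net.ipv4.tcp_retries2 = 15 this gives
 * 204.6 s + 6 * 120 s = 924.6 s, the figure quoted in
 * Documentation/networking/ip-sysctl.txt.
 */
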
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
			if (tp->syn_fastopen || tp->syn_data)
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
			if (tp->syn_data && icsk->icsk_retransmits == 1)
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		} else if (!tp->syn_data && !tp->syn_fastopen) {
			sk_rethink_txhash(sk);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Some middle-boxes may black-hole Fast Open _after_
			 * the handshake. Therefore we conservatively disable
			 * Fast Open on this path on recurring timeouts after
			 * successful Fast Open.
			 */
			if (tp->syn_data_acked) {
				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
			}
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	}
	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

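/* The limits used above come from per-netns sysctls. With the defaults
 * documented in ip-sysctl.txt (distributions may tune them):
 * net.ipv4.tcp_syn_retries = 6, so an unanswered SYN is abandoned after
 * about 127 s (1 + 2 + 4 + ... + 64 s of backed-off retransmissions);
 * net.ipv4.tcp_retries1 = 3, after which the route is re-validated and
 * PMTU black-hole probing may kick in; and net.ipv4.tcp_retries2 = 15,
 * after which an established connection is aborted, roughly 924.6 s after
 * the first unanswered retransmission (see the note following
 * retransmits_timed_out() above).
 */
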
/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

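/* A short sketch of the ATO handling above: TCP_ATO_MIN is about 40 ms. When
 * the delayed-ACK timer actually expires (nothing carried the ACK sooner), a
 * socket not in interactive "pingpong" mode doubles its ACK timeout,
 * 40 ms -> 80 ms -> 160 ms and so on, capped at the current RTO; a pingpong
 * socket instead leaves pingpong mode and starts again from TCP_ATO_MIN.
 */
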
/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @data:  Pointer to the current socket. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;
	u32 start_ts;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond probes. We support this by
	 * default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceeds the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
	if (!start_ts)
		tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_time_stamp(tp) - start_ts) >
		 jiffies_to_msecs(icsk->icsk_user_timeout))
		goto abort;

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

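/* Illustration of the abort logic above: a socket that still has an owner and
 * no TCP_USER_TIMEOUT keeps probing indefinitely as long as the peer answers
 * the zero-window probes (icsk_probes_out is reset by incoming ACKs). With
 * TCP_USER_TIMEOUT set to, say, 30000 ms, the connection is aborted once the
 * head of the write queue has been pending for more than 30 s; note that
 * tcp_time_stamp() and jiffies_to_msecs() put both sides of that comparison
 * into milliseconds.
 */
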
/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct request_sock *req;

	req = tcp_sk(sk)->fastopen_rsk;
	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

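/* With the default net.ipv4.tcp_synack_retries of 5, the "+ 1" above allows
 * a Fast Open child to retransmit its SYN-ACK up to 6 times, spaced as
 * TCP_TIMEOUT_INIT << num_timeout (1 s, 2 s, 4 s, ...), before
 * tcp_write_err() gives up on it.
 */
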
/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->fastopen_rsk) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}
	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
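	/* Concretely, assuming the common case of TCP_TIMEOUT_INIT = 1 s and
	 * TCP_RTO_MAX = 120 s: a flow whose current RTO is about 1 s backs
	 * off through 2 s, 4 s, ... 64 s and is then clamped to 120 s for
	 * every further attempt, until tcp_write_timeout() above declares
	 * the connection dead.
	 */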
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
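	/* For reference: tcp_stream_is_thin() (include/net/tcp.h) treats a
	 * flow with fewer than four packets in flight, once past initial
	 * slow start, as thin, and TCP_THIN_LINEAR_RETRIES is 6, so at most
	 * the first six retransmissions of such a flow keep a flat,
	 * non-doubling RTO.
	 */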
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer()
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

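/* Worked example for the keepalive machinery below, using the documented
 * defaults (net.ipv4.tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s,
 * tcp_keepalive_probes = 9) on a socket with SO_KEEPALIVE enabled: the first
 * probe goes out after two hours of idleness, then one probe every 75 s; if
 * none of the nine probes is answered, the connection is reset roughly
 * 7875 s (about 2 h 11 min) after the last activity, via
 * tcp_send_active_reset() and tcp_write_err().
 */
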
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
}