/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Stop MTU probing once the search range is narrower than this (bytes) */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale value per RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC	120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock smaller than 1 MHz.
 * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60	/* Per-host timestamps are invalidated
				 * after this time. It should be equal to
				 * (or greater than) TCP_TIMEWAIT_LEN
				 * to provide reliability equal to that
				 * provided by the timewait state.
				 */
#define TCP_PAWS_WINDOW	1	/* Replay window for per-host
				 * timestamps. It must be less than
				 * minimal timewait lifetime.
				 */
/*
 * TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE	1
#define TFO_SERVER_ENABLE	2
#define TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)
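
/* Worked example (illustrative): the signed-delta trick keeps ordering
 * correct across the 2^32 wrap, as long as the two sequence numbers are
 * less than 2^31 apart:
 *
 *	before(0xfffffff0, 0x00000010) -> (__s32)0xffffffe0 < 0 -> true
 *	before(0x00000010, 0xfffffff0) -> (__s32)0x00000020 < 0 -> false
 *
 * so 0x10 is correctly seen as "after" 0xfffffff0 across the wrap.
 */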

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
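
/* Worked example (illustrative): between() also survives wraparound,
 * because both sides are reduced modulo 2^32 relative to seq2.  With
 * seq2 = 0xfffffffa and seq3 = 0x0000000a (a 16-byte circular range):
 *
 *	between(0x00000005, 0xfffffffa, 0x0000000a) -> 16 >= 11 -> true
 *	between(0x0000000b, 0xfffffffa, 0x0000000a) -> 16 >= 17 -> false
 */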

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define TCP_ECN_MODE_RFC3168	BIT(0)
#define TCP_ECN_QUEUE_CWR	BIT(1)
#define TCP_ECN_DEMAND_CWR	BIT(2)
#define TCP_ECN_SEEN		BIT(3)
#define TCP_ECN_MODE_ACCECN	BIT(4)

#define TCP_ECN_DISABLED	0
#define TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 * BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 * TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
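
/* Worked example (illustrative): with HZ == 1000, TCP_SYNCOOKIE_PERIOD is
 * 60000 jiffies, so tcp_cookie_time() advances once per minute.  A cookie
 * minted while the counter is N is accepted while the counter is still N
 * or N + 1 (delta < MAX_SYNCOOKIE_AGE), i.e. for 60-120 seconds depending
 * on how soon after minting the counter ticks.
 */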

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
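
/* Worked example (illustrative): a peer advertising max_window = 1000
 * (> TCP_MSS_DEFAULT) gives cutoff = 500, so a 1400 byte pktsize is
 * clamped to 500.  With a tiny max_window of 400 the whole window (400)
 * is used instead.  The max_t() floor keeps at least 68 - tcp_header_len
 * bytes of payload, 68 being the minimum IPv4 MTU.
 */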

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
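
/* Worked example (illustrative): srtt_us stores the smoothed RTT
 * left-shifted by 3, and rttvar_us carries the smoothed variance term
 * (cf. RFC 6298, where RTO = SRTT + max(G, 4*RTTVAR); the factor of four
 * is folded into how the estimator maintains rttvar_us).  With
 * srtt_us = 800000 (SRTT = 100 ms) and rttvar_us = 200000, this yields
 * usecs_to_jiffies(300000), i.e. a 300 ms RTO.
 */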

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
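
/* Worked example (illustrative): pred_flags mirrors the fourth 32-bit
 * word of the expected TCP header (doff, flags, window).  Note that
 * tcp_header_len << 26 equals (tcp_header_len / 4) << 28, i.e. the data
 * offset in words, so for a 32-byte header (timestamps in use) with only
 * ACK set and a scaled window W:
 *
 *	pred_flags == htonl(0x80100000 | W)
 *
 * Header prediction compares this word against incoming segments to
 * decide whether the receive fast path may be taken.
 */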

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
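
/* Worked example (illustrative): if we last advertised a 64000 byte
 * window when rcv_nxt was 1000 (so rcv_wup = 1000, rcv_wnd = 64000) and
 * have since received 24000 more bytes (rcv_nxt = 25000), the window
 * still open to the peer is 1000 + 64000 - 25000 = 40000 bytes.  A peer
 * pushing past the offer would drive win negative, hence the clamp to 0.
 */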

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or other
 * resolution, as the route attribute could change anytime.
 * Each flow must stick to initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
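
/* Note (illustrative) the wrap periods implied by the resolution choice:
 * a u32 millisecond clock wraps every 2^32 ms (~49.7 days), a u32
 * microsecond clock every 2^32 us (~71.6 minutes).  Mixing resolutions
 * mid-flow would make TSval arithmetic meaningless, hence the per-flow
 * stickiness described above.
 */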

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN	BIT(0)
#define TCPHDR_SYN	BIT(1)
#define TCPHDR_RST	BIT(2)
#define TCPHDR_PSH	BIT(3)
#define TCPHDR_ACK	BIT(4)
#define TCPHDR_URG	BIT(5)
#define TCPHDR_ECE	BIT(6)
#define TCPHDR_CWR	BIT(7)
#define TCPHDR_AE	BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			     TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

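/* Worked example (illustrative): tcp_flags_ntohs() folds the AE bit and
 * the classic flag byte into one host-order value masked to
 * TCPHDR_FLAGS_MASK (0x1ff).  A SYN-ACK reads back as
 * TCPHDR_SYN | TCPHDR_ACK == 0x012, and an RFC 3168 ECN-setup SYN as
 * TCPHDR_SYN_ECN == 0x0c2.
 */
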
/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

1060#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
1061
9b9e2f25
ED
1062extern const struct inet_connection_sock_af_ops ipv4_specific;
1063
815afe17 1064#if IS_ENABLED(CONFIG_IPV6)
870c3151
ED
1065/* This is the variant of inet6_iif() that must be used by TCP,
1066 * as TCP moves IP6CB into a different location in skb->cb[]
1067 */
1068static inline int tcp_v6_iif(const struct sk_buff *skb)
24b711ed
DA
1069{
1070 return TCP_SKB_CB(skb)->header.h6.iif;
1071}
1072
1073static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
870c3151 1074{
a04a480d 1075 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
74b20582
DA
1076
1077 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
870c3151 1078}
4297a0ef
DA
1079
1080/* TCP_SKB_CB reference means this can not be used from early demux */
1081static inline int tcp_v6_sdif(const struct sk_buff *skb)
1082{
1083#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1084 if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
1085 return TCP_SKB_CB(skb)->header.h6.iif;
1086#endif
1087 return 0;
1088}
dd2e0b86 1089
b03d2142
ED
1090extern const struct inet_connection_sock_af_ops ipv6_specific;
1091
dd2e0b86 1092INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
243600ee 1093INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
11052589 1094void tcp_v6_early_demux(struct sk_buff *skb);
dd2e0b86 1095
815afe17 1096#endif
870c3151 1097
3fa6f616
DA
1098/* TCP_SKB_CB reference means this can not be used from early demux */
1099static inline int tcp_v4_sdif(struct sk_buff *skb)
1100{
1101#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1102 if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
1103 return TCP_SKB_CB(skb)->header.h4.iif;
1104#endif
1105 return 0;
1106}
1107
1da177e4
LT
1108/* Due to TSO, an SKB can be composed of multiple actual
1109 * packets. To keep these tracked properly, we use this.
bd14b1b2 1110 */
1da177e4 1111static inline int tcp_skb_pcount(const struct sk_buff *skb)
bd14b1b2 1112{
cd7d8498
ED
1113 return TCP_SKB_CB(skb)->tcp_gso_segs;
1114}
bd14b1b2 1115
cd7d8498
ED
1116static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
1117{
1118 TCP_SKB_CB(skb)->tcp_gso_segs = segs;
bd14b1b2
ED
1119}
1120
cd7d8498 1121static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
1da177e4 1122{
cd7d8498 1123 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
1da177e4
LT
1124}
1125
f69ad292 1126/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
1da177e4
LT
1127static inline int tcp_skb_mss(const struct sk_buff *skb)
1128{
f69ad292 1129 return TCP_SKB_CB(skb)->tcp_gso_size;
1da177e4
LT
1130}
1131
c134ecb8
MKL
1132static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
1133{
1134 return likely(!TCP_SKB_CB(skb)->eor);
1135}
1136
85712484
MM
1137static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
1138 const struct sk_buff *from)
1139{
1be68a87 1140 /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
85712484 1141 return likely(tcp_skb_can_collapse_to(to) &&
9b65b17d 1142 mptcp_skb_can_collapse(to, from) &&
65249feb
MA
1143 skb_pure_zcopy_same(to, from) &&
1144 skb_frags_readable(to) == skb_frags_readable(from));
85712484
MM
1145}
1146
07111530
JK
1147static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
1148 const struct sk_buff *from)
1149{
1150 return likely(mptcp_skb_can_collapse(to, from) &&
1151 !skb_cmp_decrypted(to, from));
1152}
1153
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED	BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	BIT(1)
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
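
/* Illustration (not a prescribed usage): a cong_control module can turn a
 * valid sample into a delivery rate estimate, roughly
 * rate = rs->delivered / rs->interval_us.  E.g. if an ACK advances
 * "delivered" by 10 packets over interval_us = 5000, the path delivered
 * ~2000 packets/sec (scale by MSS for bytes/sec).  BBR-style users
 * additionally let an app-limited sample (is_app_limited) only raise,
 * never lower, their estimate.
 */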

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);

	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);


	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
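
/* Worked example (illustrative): with packets_out = 100, sacked_out = 10,
 * lost_out = 5 and retrans_out = 3, tcp_packets_in_flight() is
 * 100 - (10 + 5) + 3 = 88: retransmissions re-enter the network and so
 * count back in.
 */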

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}
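
/* Worked example (illustrative): (cwnd >> 1) + (cwnd >> 2) is 3/4 of
 * cwnd, so outside of CWR/Recovery a socket with snd_cwnd = 40 and
 * snd_ssthresh = 20 reports a current ssthresh of max(20, 30) = 30.
 */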

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage application to
 * either send more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited process to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					bool pace_delay)
{
	if (pace_delay)
		when += tcp_pacing_delay(sk);
	inet_csk_reset_xmit_timer(sk, what, when,
				  tcp_rto_max(sk));
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1522 u64 when = (u64)tcp_probe0_base(sk) << backoff;
21c8fe99
ED
1523
1524 return (unsigned long)min_t(u64, when, max_when);
1525}
1526
1527static inline void tcp_check_probe_timer(struct sock *sk)
1528{
1529 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
3f80e08f 1530 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
7baa0301 1531 tcp_probe0_base(sk), true);
1da177e4
LT
1532}
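/* Illustrative backoff for tcp_probe0_when(), assuming TCP_RTO_MIN is the
 * 200ms floor mentioned above: with icsk_rto at or below that floor and
 * icsk_backoff = 3, the probe fires at 200ms << 3 = 1.6s, clamped by both
 * the ilog2-derived shift limit and @max_when.
 */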
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);

int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
{
	s64 scaled_space = (s64)space * scaling_ratio;

	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
}

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
}

/* inverse of __tcp_win_from_space() */
static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
{
	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;

	do_div(val, scaling_ratio);
	return val;
}

static inline int tcp_space_from_win(const struct sock *sk, int win)
{
	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}

/* Assume a 50% default for skb->len/skb->truesize ratio.
 * This may be adjusted later in tcp_measure_rcv_mss().
 */
#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

static inline void tcp_scaling_ratio_init(struct sock *sk)
{
	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
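/* Illustrative arithmetic, assuming TCP_RMEM_TO_WIN_SCALE is 8: the default
 * ratio is 1 << 7 = 128, i.e. 128/256 = 50%, so
 * __tcp_win_from_space(128, 65536) = (65536 * 128) >> 8 = 32768, and
 * __tcp_space_from_win() maps that 32KB window back to the 64KB buffer.
 */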
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * the SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * a len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}
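/* E.g. (illustrative): rcvbuf = 256KB gives threshold = 256KB - 32KB = 224KB,
 * which is the 7/8 mark; pressure is reported once sk_rmem_alloc exceeds it.
 */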
static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_intvl);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
	val = READ_ONCE(tp->keepalive_time);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_probes);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
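/* The lower bound above works out to (4 - 0.5) * RTO = 3.5 * RTO; e.g.
 * (illustrative) rto = 200ms enforces a FIN timeout of at least 700ms.
 */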
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else a 'negative' tsval might prevent us from accepting
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
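/* Illustrative walk-through: ts_recent = 1000, rcv_tsval = 995 and
 * paws_win = 0 give (s32)(1000 - 995) = 5 > 0, so the segment is only
 * accepted if ts_recent is stale (older than TCP_PAWS_WRAP) or zero.
 */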
1776
a2a385d6
ED
1777static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1778 int rst)
c887e6d2
IJ
1779{
1780 if (tcp_paws_check(rx_opt, 0))
a2a385d6 1781 return false;
1da177e4
LT
1782
1783 /* RST segments are not recommended to carry timestamp,
1784 and, if they do, it is recommended to ignore PAWS because
1785 "their cleanup function should take precedence over timestamps."
1786 Certainly, it is mistake. It is necessary to understand the reasons
1787 of this constraint to relax it: if peer reboots, clock may go
1788 out-of-sync and half-open connections will not be reset.
1789 Actually, the problem would be not existing if all
1790 the implementations followed draft about maintaining clock
1791 via reboots. Linux-2.2 DOES NOT!
1792
1793 However, we can relax time bounds for RST segments to MSL.
1794 */
cca9bab1
AB
1795 if (rst && !time_before32(ktime_get_seconds(),
1796 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
a2a385d6
ED
1797 return false;
1798 return true;
1da177e4
LT
1799}
1800
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}
#define tcp_md5_addr tcp_ao_addr

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};
/*
 * struct tcp_sigpool - per-CPU pool of ahash_requests
 * @scratch: per-CPU temporary area, that can be used between
 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
 *	     a crypto request
 * @req: pre-allocated ahash request
 */
struct tcp_sigpool {
	void *scratch;
	struct ahash_request *req;
};

int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
void tcp_sigpool_get(unsigned int id);
void tcp_sigpool_release(unsigned int id);
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len);

/**
 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @c: returned tcp_sigpool for usage (uninitialized on failure)
 *
 * Returns: 0 on success, error otherwise.
 */
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
 */
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
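/* Typical tcp_sigpool call sequence (illustrative sketch built only from the
 * prototypes above; error handling mostly elided):
 *
 *	int id = tcp_sigpool_alloc_ahash("md5", scratch_size);
 *	struct tcp_sigpool hp;
 *
 *	if (id >= 0 && !tcp_sigpool_start(id, &hp)) {
 *		tcp_sigpool_hash_skb_data(&hp, skb, header_len);
 *		tcp_sigpool_end(&hp);
 *	}
 *	tcp_sigpool_release(id);
 */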
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

#define tcp_twsk_md5_key(twsk)	NULL
#endif

int tcp_md5_alloc_sigpool(void);
void tcp_md5_release_sigpool(void);
void tcp_md5_add_sigpool(void);
extern int tcp_md5_sigpool_id;

int tcp_md5_hash_key(struct tcp_sigpool *hp,
		     const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}
/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed, because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}
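/* The save/restore macros above open and close a C scope, so they must be
 * used as a pair, e.g. (illustrative sketch of the usage pattern):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = queue_xmit(sk, skb, &fl);
 *	} tcp_skb_tsorted_restore(skb);
 */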
void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	tcp_wmem_free_skb(sk, skb);
}

static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or when the caller has ensured
 * validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_test_bit(TRANSPARENT, sk);
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
#ifdef CONFIG_INET
void tcp_gro_complete(struct sk_buff *skb);
#else
static inline void tcp_gro_complete(struct sk_buff *skb) { }
#endif

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	u32 val;

	val = READ_ONCE(tp->notsent_lowat);

	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);
/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup)(const struct sock *sk,
					       const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     sockptr_t optval,
				     int optlen);
#endif
#ifdef CONFIG_TCP_AO
	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct sock *addr_sk,
					int sndid, int rcvid);
	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
			      const struct sock *sk,
			      __be32 sisn, __be32 disn, bool send);
	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
			    const struct sock *sk, const struct sk_buff *skb,
			    const u8 *tkey, int hash_offset, u32 sne);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
			      struct request_sock *req, const struct sk_buff *skb,
			      int hash_offset, u32 sne);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req,
				       u32 tw_isn);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif
struct tcp_key {
	union {
		struct {
			struct tcp_ao_key *ao_key;
			char *traffic_key;
			u32 sne;
			u8 rcv_next;
		};
		struct tcp_md5sig_key *md5_key;
	};
	enum {
		TCP_KEY_NONE = 0,
		TCP_KEY_MD5,
		TCP_KEY_AO,
	} type;
};

static inline void tcp_get_current_key(const struct sock *sk,
				       struct tcp_key *out)
{
#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
	const struct tcp_sock *tp = tcp_sk(sk);
#endif

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key)) {
		struct tcp_ao_info *ao;

		ao = rcu_dereference_protected(tp->ao_info,
					       lockdep_sock_is_held(sk));
		if (ao) {
			out->ao_key = READ_ONCE(ao->current_key);
			out->type = TCP_KEY_AO;
			return;
		}
	}
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
		if (out->md5_key) {
			out->type = TCP_KEY_MD5;
			return;
		}
	}
#endif
	out->type = TCP_KEY_NONE;
}

static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
	if (static_branch_tcp_md5())
		return key->type == TCP_KEY_MD5;
	return false;
}

static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
	if (static_branch_tcp_ao())
		return key->type == TCP_KEY_AO;
	return false;
}
int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

/* tcp_plb.c */

/*
 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state()
 * expects cong_ratio, which represents the fraction of traffic that
 * experienced congestion over a single RTT. In order to avoid floating
 * point operations, this fraction should be mapped to (1 << TCP_PLB_SCALE)
 * and passed in.
 */
#define TCP_PLB_SCALE 8

/* State for PLB (Protective Load Balancing) for a single TCP connection. */
struct tcp_plb_state {
	u8	consec_cong_rounds:5, /* consecutive congested rounds */
		unused:3;
	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
};

static inline void tcp_plb_init(const struct sock *sk,
				struct tcp_plb_state *plb)
{
	plb->consec_cong_rounds = 0;
	plb->pause_until = 0;
}
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
			  const int cong_ratio);
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
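/* E.g. (illustrative): if half of the traffic in an RTT saw congestion, the
 * caller maps the fraction 0.5 to (1 << TCP_PLB_SCALE) / 2 = 128 and passes
 * that as cong_ratio to tcp_plb_update_state().
 */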
static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
{
	WARN_ONCE(cond,
		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
		  str,
		  tcp_snd_cwnd(tcp_sk(sk)),
		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
		  inet_csk(sk)->icsk_ca_state,
		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
		  inet_csk(sk)->icsk_pmtu_cookie);
}

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;

	if (likely(skb)) {
		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
	} else {
		tcp_warn_once(sk, 1, "rtx queue empty: ");
		return jiffies_to_usecs(rto);
	}
}
/*
 * Save and compile IPv4 options, return a pointer to it
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* We update these fields while other threads might
	 * read them from tcp_get_info()
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}
/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
	size_t (*get_info_size)(const struct sock *sk, bool net_admin);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
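/* Minimal ULP registration sketch (illustrative only; the "demo" name and
 * its init/release callbacks are hypothetical, not an in-tree ULP):
 *
 *	static struct tcp_ulp_ops demo_ulp_ops = {
 *		.name		= "demo",
 *		.owner		= THIS_MODULE,
 *		.init		= demo_ulp_init,
 *		.release	= demo_ulp_release,
 *	};
 *
 *	tcp_register_ulp(&demo_ulp_ops);
 *
 * Userspace would then select it via the TCP_ULP socket option, which
 * reaches tcp_set_ulp() with the requested name.
 */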
#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#ifdef CONFIG_BPF_STREAM_PARSER
struct strparser;
int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
			   sk_read_actor_t recv_actor);
#endif /* CONFIG_BPF_STREAM_PARSER */
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_ops.is_locked_tcp_sock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif
static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct tcp_sock *tp,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct tcp_sock *tp);
void clean_acked_data_flush(void);
#endif
DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

static inline int tcp_parse_auth_options(const struct tcphdr *th,
	const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
{
	const u8 *md5_tmp, *ao_tmp;
	int ret;

	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
	if (ret)
		return ret;

	if (md5_hash)
		*md5_hash = md5_tmp;

	if (aoh) {
		if (!ao_tmp)
			*aoh = NULL;
		else
			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
	}

	return 0;
}
static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
				   int family, int l3index, bool stat_inc)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;
	struct tcp_ao_key *ao_key;

	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
					lockdep_sock_is_held(sk));
	if (!ao_info)
		return false;

	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
	if (ao_info->ao_required || ao_key) {
		if (stat_inc) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
			atomic64_inc(&ao_info->counters.ao_required);
		}
		return true;
	}
#endif
	return false;
}

enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
		const struct request_sock *req, const struct sk_buff *skb,
		const void *saddr, const void *daddr,
		int family, int dif, int sdif);

#endif /* _TCP_H */