]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - net/mptcp/ctrl.c
Merge tag 'pci-v6.16-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci
[thirdparty/kernel/linux.git] / net / mptcp / ctrl.c
CommitLineData
784325e9
MB
1// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2019, Tessares SA.
5 */
6
804c72ee 7#ifdef CONFIG_SYSCTL
784325e9 8#include <linux/sysctl.h>
804c72ee 9#endif
784325e9
MB
10
11#include <net/net_namespace.h>
12#include <net/netns/generic.h>
13
14#include "protocol.h"
6982826f 15#include "mib.h"
784325e9
MB
16
17#define MPTCP_SYSCTL_PATH "net/mptcp"
18
19static int mptcp_pernet_id;
6bb63ccc
MM
20
21#ifdef CONFIG_SYSCTL
22static int mptcp_pm_type_max = __MPTCP_PM_TYPE_MAX;
23#endif
24
/* Per-network-namespace MPTCP state, allocated by the pernet core
 * (see mptcp_pernet_ops) and exposed, under CONFIG_SYSCTL, through the
 * net.mptcp.* sysctl table.
 */
struct mptcp_pernet {
#ifdef CONFIG_SYSCTL
	struct ctl_table_header *ctl_table_hdr;	/* net.mptcp.* registration handle */
#endif

	unsigned int add_addr_timeout;		/* net.mptcp.add_addr_timeout (jiffies) */
	unsigned int blackhole_timeout;		/* base active-disable period in seconds; 0 = detection off */
	unsigned int close_timeout;		/* net.mptcp.close_timeout (jiffies) */
	unsigned int stale_loss_cnt;		/* net.mptcp.stale_loss_cnt */
	atomic_t active_disable_times;		/* consecutive blackhole detections (drives backoff) */
	u8 syn_retrans_before_tcp_fallback;	/* SYN+MPC retransmits allowed before TCP fallback */
	unsigned long active_disable_stamp;	/* jiffies of last blackhole detection */
	u8 mptcp_enabled;			/* net.mptcp.enabled (0/1) */
	u8 checksum_enabled;			/* net.mptcp.checksum_enabled (0/1) */
	u8 allow_join_initial_addr_port;	/* net.mptcp.allow_join_initial_addr_port (0/1) */
	u8 pm_type;				/* net.mptcp.pm_type; kept coherent with path_manager */
	char scheduler[MPTCP_SCHED_NAME_MAX];	/* current packet scheduler name */
	char path_manager[MPTCP_PM_NAME_MAX];	/* current path manager name */
};
44
6da14d74 45static struct mptcp_pernet *mptcp_get_pernet(const struct net *net)
784325e9
MB
46{
47 return net_generic(net, mptcp_pernet_id);
48}
49
6da14d74 50int mptcp_is_enabled(const struct net *net)
784325e9
MB
51{
52 return mptcp_get_pernet(net)->mptcp_enabled;
53}
54
6da14d74 55unsigned int mptcp_get_add_addr_timeout(const struct net *net)
93f323b9
GT
56{
57 return mptcp_get_pernet(net)->add_addr_timeout;
58}
59
6da14d74 60int mptcp_is_checksum_enabled(const struct net *net)
fc3c82ee
GT
61{
62 return mptcp_get_pernet(net)->checksum_enabled;
63}
64
6da14d74 65int mptcp_allow_join_id0(const struct net *net)
d2f77960
GT
66{
67 return mptcp_get_pernet(net)->allow_join_initial_addr_port;
68}
69
ff5a0b42
PA
70unsigned int mptcp_stale_loss_cnt(const struct net *net)
71{
72 return mptcp_get_pernet(net)->stale_loss_cnt;
73}
74
d866ae9a
PA
75unsigned int mptcp_close_timeout(const struct sock *sk)
76{
77 if (sock_flag(sk, SOCK_DEAD))
78 return TCP_TIMEWAIT_LEN;
79 return mptcp_get_pernet(sock_net(sk))->close_timeout;
80}
81
6bb63ccc
MM
82int mptcp_get_pm_type(const struct net *net)
83{
84 return mptcp_get_pernet(net)->pm_type;
85}
86
595c26d1
GT
87const char *mptcp_get_path_manager(const struct net *net)
88{
89 return mptcp_get_pernet(net)->path_manager;
90}
91
e3b2870b
GT
92const char *mptcp_get_scheduler(const struct net *net)
93{
94 return mptcp_get_pernet(net)->scheduler;
95}
96
804c72ee
MB
97static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
98{
99 pernet->mptcp_enabled = 1;
100 pernet->add_addr_timeout = TCP_RTO_MAX;
27069e7c 101 pernet->blackhole_timeout = 3600;
5b4fd353 102 pernet->syn_retrans_before_tcp_fallback = 2;
27069e7c 103 atomic_set(&pernet->active_disable_times, 0);
d866ae9a 104 pernet->close_timeout = TCP_TIMEWAIT_LEN;
fc3c82ee 105 pernet->checksum_enabled = 0;
d2f77960 106 pernet->allow_join_initial_addr_port = 1;
ff5a0b42 107 pernet->stale_loss_cnt = 4;
6bb63ccc 108 pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
5eae7a82 109 strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));
595c26d1 110 strscpy(pernet->path_manager, "kernel", sizeof(pernet->path_manager));
804c72ee
MB
111}
112
113#ifdef CONFIG_SYSCTL
d38e26e3 114static int mptcp_set_scheduler(char *scheduler, const char *name)
6963c508 115{
6963c508
GD
116 struct mptcp_sched_ops *sched;
117 int ret = 0;
118
119 rcu_read_lock();
120 sched = mptcp_sched_find(name);
121 if (sched)
d38e26e3 122 strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
6963c508
GD
123 else
124 ret = -ENOENT;
125 rcu_read_unlock();
126
127 return ret;
128}
129
78eb4ea2 130static int proc_scheduler(const struct ctl_table *ctl, int write,
6963c508
GD
131 void *buffer, size_t *lenp, loff_t *ppos)
132{
d38e26e3 133 char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
6963c508
GD
134 char val[MPTCP_SCHED_NAME_MAX];
135 struct ctl_table tbl = {
136 .data = val,
137 .maxlen = MPTCP_SCHED_NAME_MAX,
138 };
139 int ret;
140
d38e26e3 141 strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
6963c508
GD
142
143 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
144 if (write && ret == 0)
d38e26e3 145 ret = mptcp_set_scheduler(*scheduler, val);
6963c508
GD
146
147 return ret;
148}
149
78eb4ea2 150static int proc_available_schedulers(const struct ctl_table *ctl,
73c900aa
GD
151 int write, void *buffer,
152 size_t *lenp, loff_t *ppos)
153{
154 struct ctl_table tbl = { .maxlen = MPTCP_SCHED_BUF_MAX, };
155 int ret;
156
157 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
158 if (!tbl.data)
159 return -ENOMEM;
160
161 mptcp_get_available_schedulers(tbl.data, MPTCP_SCHED_BUF_MAX);
162 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
163 kfree(tbl.data);
164
165 return ret;
166}
167
27069e7c
MBN
168static int proc_blackhole_detect_timeout(const struct ctl_table *table,
169 int write, void *buffer, size_t *lenp,
170 loff_t *ppos)
171{
92cf7a51
MBN
172 struct mptcp_pernet *pernet = container_of(table->data,
173 struct mptcp_pernet,
174 blackhole_timeout);
27069e7c
MBN
175 int ret;
176
177 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
178 if (write && ret == 0)
179 atomic_set(&pernet->active_disable_times, 0);
180
181 return ret;
182}
183
595c26d1
GT
184static int mptcp_set_path_manager(char *path_manager, const char *name)
185{
186 struct mptcp_pm_ops *pm_ops;
187 int ret = 0;
188
189 rcu_read_lock();
190 pm_ops = mptcp_pm_find(name);
191 if (pm_ops)
192 strscpy(path_manager, name, MPTCP_PM_NAME_MAX);
193 else
194 ret = -ENOENT;
195 rcu_read_unlock();
196
197 return ret;
198}
199
200static int proc_path_manager(const struct ctl_table *ctl, int write,
201 void *buffer, size_t *lenp, loff_t *ppos)
202{
573b6534
GT
203 struct mptcp_pernet *pernet = container_of(ctl->data,
204 struct mptcp_pernet,
205 path_manager);
595c26d1
GT
206 char (*path_manager)[MPTCP_PM_NAME_MAX] = ctl->data;
207 char pm_name[MPTCP_PM_NAME_MAX];
208 const struct ctl_table tbl = {
209 .data = pm_name,
210 .maxlen = MPTCP_PM_NAME_MAX,
211 };
212 int ret;
213
214 strscpy(pm_name, *path_manager, MPTCP_PM_NAME_MAX);
215
216 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
573b6534 217 if (write && ret == 0) {
595c26d1 218 ret = mptcp_set_path_manager(*path_manager, pm_name);
573b6534
GT
219 if (ret == 0) {
220 u8 pm_type = __MPTCP_PM_TYPE_NR;
221
222 if (strncmp(pm_name, "kernel", MPTCP_PM_NAME_MAX) == 0)
223 pm_type = MPTCP_PM_TYPE_KERNEL;
224 else if (strncmp(pm_name, "userspace", MPTCP_PM_NAME_MAX) == 0)
225 pm_type = MPTCP_PM_TYPE_USERSPACE;
226 pernet->pm_type = pm_type;
227 }
228 }
595c26d1
GT
229
230 return ret;
231}
232
7982ed0e
GT
233static int proc_pm_type(const struct ctl_table *ctl, int write,
234 void *buffer, size_t *lenp, loff_t *ppos)
235{
236 struct mptcp_pernet *pernet = container_of(ctl->data,
237 struct mptcp_pernet,
238 pm_type);
239 int ret;
240
241 ret = proc_dou8vec_minmax(ctl, write, buffer, lenp, ppos);
242 if (write && ret == 0) {
243 u8 pm_type = READ_ONCE(*(u8 *)ctl->data);
244 char *pm_name = "";
245
246 if (pm_type == MPTCP_PM_TYPE_KERNEL)
247 pm_name = "kernel";
248 else if (pm_type == MPTCP_PM_TYPE_USERSPACE)
249 pm_name = "userspace";
250 mptcp_set_path_manager(pernet->path_manager, pm_name);
251 }
252
253 return ret;
254}
255
fa3ee9dd
GT
256static int proc_available_path_managers(const struct ctl_table *ctl,
257 int write, void *buffer,
258 size_t *lenp, loff_t *ppos)
259{
260 struct ctl_table tbl = { .maxlen = MPTCP_PM_BUF_MAX, };
261 int ret;
262
263 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
264 if (!tbl.data)
265 return -ENOMEM;
266
267 mptcp_pm_get_available(tbl.data, MPTCP_PM_BUF_MAX);
268 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
269 kfree(tbl.data);
270
271 return ret;
272}
273
784325e9
MB
/* Template for the per-netns net.mptcp.* sysctl table.
 *
 * NOTE: entry order matters — mptcp_pernet_new_table() wires up .data by
 * index (table[0] ... table[12]); keep both in sync when adding entries.
 */
static struct ctl_table mptcp_sysctl_table[] = {
	{ /* table[0] */
		.procname = "enabled",
		.maxlen = sizeof(u8),
		.mode = 0644,
		/* users with CAP_NET_ADMIN or root (not and) can change this
		 * value, same as other sysctl or the 'net' tree.
		 */
		.proc_handler = proc_dou8vec_minmax,
		.extra1       = SYSCTL_ZERO,
		.extra2       = SYSCTL_ONE
	},
	{ /* table[1] */
		.procname = "add_addr_timeout",
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ /* table[2] */
		.procname = "checksum_enabled",
		.maxlen = sizeof(u8),
		.mode = 0644,
		.proc_handler = proc_dou8vec_minmax,
		.extra1       = SYSCTL_ZERO,
		.extra2       = SYSCTL_ONE
	},
	{ /* table[3] */
		.procname = "allow_join_initial_addr_port",
		.maxlen = sizeof(u8),
		.mode = 0644,
		.proc_handler = proc_dou8vec_minmax,
		.extra1       = SYSCTL_ZERO,
		.extra2       = SYSCTL_ONE
	},
	{ /* table[4] */
		.procname = "stale_loss_cnt",
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
	},
	{ /* table[5] */
		.procname = "pm_type",
		.maxlen = sizeof(u8),
		.mode = 0644,
		.proc_handler = proc_pm_type,
		.extra1       = SYSCTL_ZERO,
		.extra2       = &mptcp_pm_type_max
	},
	{ /* table[6] */
		.procname = "scheduler",
		.maxlen	= MPTCP_SCHED_NAME_MAX,
		.mode = 0644,
		.proc_handler = proc_scheduler,
	},
	{ /* table[7] — read-only, no per-netns .data */
		.procname = "available_schedulers",
		.maxlen	= MPTCP_SCHED_BUF_MAX,
		.mode = 0444,
		.proc_handler = proc_available_schedulers,
	},
	{ /* table[8] */
		.procname = "close_timeout",
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ /* table[9] */
		.procname = "blackhole_timeout",
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_blackhole_detect_timeout,
		.extra1 = SYSCTL_ZERO,
	},
	{ /* table[10] */
		.procname = "syn_retrans_before_tcp_fallback",
		.maxlen = sizeof(u8),
		.mode = 0644,
		.proc_handler = proc_dou8vec_minmax,
	},
	{ /* table[11] */
		.procname = "path_manager",
		.maxlen	= MPTCP_PM_NAME_MAX,
		.mode = 0644,
		.proc_handler = proc_path_manager,
	},
	{ /* table[12] — read-only, no per-netns .data */
		.procname = "available_path_managers",
		.maxlen	= MPTCP_PM_BUF_MAX,
		.mode = 0444,
		.proc_handler = proc_available_path_managers,
	},
};
366
784325e9
MB
/* Register the net.mptcp.* sysctl table for @net.
 *
 * The initial netns uses the static template directly; every other netns
 * gets a private kmemdup() copy so each can point .data at its own
 * mptcp_pernet fields. Returns 0 on success, -ENOMEM on allocation or
 * registration failure.
 */
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
{
	struct ctl_table_header *hdr;
	struct ctl_table *table;

	table = mptcp_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(mptcp_sysctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;
	}

	/* Indices must match the entry order in mptcp_sysctl_table. */
	table[0].data = &pernet->mptcp_enabled;
	table[1].data = &pernet->add_addr_timeout;
	table[2].data = &pernet->checksum_enabled;
	table[3].data = &pernet->allow_join_initial_addr_port;
	table[4].data = &pernet->stale_loss_cnt;
	table[5].data = &pernet->pm_type;
	table[6].data = &pernet->scheduler;
	/* table[7] is for available_schedulers which is read-only info */
	table[8].data = &pernet->close_timeout;
	table[9].data = &pernet->blackhole_timeout;
	table[10].data = &pernet->syn_retrans_before_tcp_fallback;
	table[11].data = &pernet->path_manager;
	/* table[12] is for available_path_managers which is read-only info */

	hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
				     ARRAY_SIZE(mptcp_sysctl_table));
	if (!hdr)
		goto err_reg;

	pernet->ctl_table_hdr = hdr;

	return 0;

err_reg:
	/* the init_net table is static: only free duplicated copies */
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
408
409static void mptcp_pernet_del_table(struct mptcp_pernet *pernet)
410{
bfa858f2 411 const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
784325e9
MB
412
413 unregister_net_sysctl_table(pernet->ctl_table_hdr);
414
415 kfree(table);
416}
417
804c72ee
MB
#else

/* Sysctl support compiled out: table (un)registration are no-ops. */
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
{
	return 0;
}

static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}

#endif /* CONFIG_SYSCTL */
428
27069e7c
MBN
/* The following code block is to deal with middle box issues with MPTCP,
 * similar to what is done with TFO.
 * The proposed solution is to disable active MPTCP globally when SYN+MPC are
 * dropped, while SYN without MPC aren't. In this case, active side MPTCP is
 * disabled globally for 1hr at first. Then if it happens again, it is disabled
 * for 2h, then 4h, 8h, ...
 * The timeout is reset back to 1hr when a successful active MPTCP connection is
 * fully established.
 */

/* Disable active MPTCP and record current jiffies and active_disable_times */
void mptcp_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mptcp_pernet *pernet;

	pernet = mptcp_get_pernet(net);

	/* blackhole_timeout == 0 means detection is administratively off */
	if (!READ_ONCE(pernet->blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in mptcp_active_should_disable() */
	WRITE_ONCE(pernet->active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in mptcp_active_should_disable().
	 * We want pernet->active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&pernet->active_disable_times);

	MPTCP_INC_STATS(net, MPTCP_MIB_BLACKHOLE);
}
461
/* Calculate timeout for MPTCP active disable
 * Return true if we are still in the active MPTCP disable period
 * Return false if timeout already expired and we should use active MPTCP
 */
bool mptcp_active_should_disable(struct sock *ssk)
{
	struct net *net = sock_net(ssk);
	unsigned int blackhole_timeout;
	struct mptcp_pernet *pernet;
	unsigned long timeout;
	int disable_times;
	int multiplier;

	pernet = mptcp_get_pernet(net);
	blackhole_timeout = READ_ONCE(pernet->blackhole_timeout);

	/* detection administratively disabled: never veto active MPTCP */
	if (!blackhole_timeout)
		return false;

	disable_times = atomic_read(&pernet->active_disable_times);
	if (!disable_times)
		return false;

	/* Paired with smp_mb__before_atomic() in mptcp_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(disable_times - 1, 6);

	/* Paired with the WRITE_ONCE() in mptcp_active_disable(). */
	timeout = READ_ONCE(pernet->active_disable_stamp) +
		  multiplier * blackhole_timeout * HZ;

	return time_before(jiffies, timeout);
}
497
498/* Enable active MPTCP and reset active_disable_times if needed */
499void mptcp_active_enable(struct sock *sk)
500{
501 struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
502
503 if (atomic_read(&pernet->active_disable_times)) {
504 struct dst_entry *dst = sk_dst_get(sk);
505
506 if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
507 atomic_set(&pernet->active_disable_times, 0);
508 }
509}
510
6982826f
MBN
/* Check the number of retransmissions, and fallback to TCP if needed */
void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
{
	struct mptcp_subflow_context *subflow;
	u8 timeouts, to_max;
	struct net *net;

	/* Only check MPTCP SYN ... */
	if (likely(!sk_is_mptcp(ssk) || ssk->sk_state != TCP_SYN_SENT))
		return;

	subflow = mptcp_subflow_ctx(ssk);

	/* ... + MP_CAPABLE */
	if (!subflow->request_mptcp) {
		/* Mark as blackhole iif the 1st non-MPTCP SYN is accepted */
		subflow->mpc_drop = 0;
		return;
	}

	net = sock_net(ssk);
	timeouts = inet_csk(ssk)->icsk_retransmits;
	to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback;

	/* Fall back when the configured SYN+MPC retransmission budget is
	 * reached, or when the timer expired before exhausting it.
	 */
	if (timeouts == to_max || (timeouts < to_max && expired)) {
		MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
		subflow->mpc_drop = 1;
		mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
	}
}
541
784325e9
MB
542static int __net_init mptcp_net_init(struct net *net)
543{
544 struct mptcp_pernet *pernet = mptcp_get_pernet(net);
545
546 mptcp_pernet_set_defaults(pernet);
547
548 return mptcp_pernet_new_table(net, pernet);
549}
550
551/* Note: the callback will only be called per extra netns */
552static void __net_exit mptcp_net_exit(struct net *net)
553{
554 struct mptcp_pernet *pernet = mptcp_get_pernet(net);
555
556 mptcp_pernet_del_table(pernet);
557}
558
/* Pernet registration: .id/.size make the core allocate one
 * struct mptcp_pernet per namespace, reachable via mptcp_pernet_id.
 */
static struct pernet_operations mptcp_pernet_ops = {
	.init = mptcp_net_init,
	.exit = mptcp_net_exit,
	.id = &mptcp_pernet_id,
	.size = sizeof(struct mptcp_pernet),
};
565
/* Boot-time MPTCP initialization: join cookies, protocol registration,
 * then the pernet subsystem that creates per-namespace state.
 */
void __init mptcp_init(void)
{
	mptcp_join_cookie_init();
	mptcp_proto_init();

	/* MPTCP cannot operate without its per-netns state */
	if (register_pernet_subsys(&mptcp_pernet_ops) < 0)
		panic("Failed to register MPTCP pernet subsystem.\n");
}
574
575#if IS_ENABLED(CONFIG_MPTCP_IPV6)
576int __init mptcpv6_init(void)
577{
578 int err;
579
580 err = mptcp_proto_v6_init();
581
582 return err;
583}
584#endif