]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - net/mptcp/sched.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
[thirdparty/kernel/linux.git] / net / mptcp / sched.c
CommitLineData
740ebe35
GT
1// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2022, SUSE.
5 */
6
7#define pr_fmt(fmt) "MPTCP: " fmt
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/rculist.h>
13#include <linux/spinlock.h>
14#include "protocol.h"
15
16static DEFINE_SPINLOCK(mptcp_sched_list_lock);
17static LIST_HEAD(mptcp_sched_list);
18
ed1ad86b
GT
19static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
20 struct mptcp_sched_data *data)
21{
22 struct sock *ssk;
23
24 ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
25 mptcp_subflow_get_send(msk);
26 if (!ssk)
27 return -EINVAL;
28
29 mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
30 return 0;
31}
32
/* Fallback scheduler, registered at init time and used whenever no
 * other scheduler has been selected for a socket. It cannot be
 * unregistered (see mptcp_unregister_scheduler()).
 */
static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow = mptcp_sched_default_get_subflow,
	.name = "default",
	.owner = THIS_MODULE,
};
38
740ebe35
GT
39/* Must be called with rcu read lock held */
40struct mptcp_sched_ops *mptcp_sched_find(const char *name)
41{
42 struct mptcp_sched_ops *sched, *ret = NULL;
43
44 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
45 if (!strcmp(sched->name, name)) {
46 ret = sched;
47 break;
48 }
49 }
50
51 return ret;
52}
53
73c900aa
GD
/* Build string with list of available scheduler values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_get_available_schedulers(char *buf, size_t maxlen)
{
	struct mptcp_sched_ops *sched;
	size_t offs = 0;

	rcu_read_lock();
	spin_lock(&mptcp_sched_list_lock);
	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		/* Space-separate entries; no leading separator for the first
		 * one. snprintf() returns the would-be length, so offs can
		 * logically pass maxlen - the check below catches that.
		 */
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", sched->name);

		/* Stop before the next iteration would write past the end
		 * of buf (offs >= maxlen means output was truncated).
		 */
		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	spin_unlock(&mptcp_sched_list_lock);
	rcu_read_unlock();
}
75
740ebe35
GT
76int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
77{
78 if (!sched->get_subflow)
79 return -EINVAL;
80
81 spin_lock(&mptcp_sched_list_lock);
82 if (mptcp_sched_find(sched->name)) {
83 spin_unlock(&mptcp_sched_list_lock);
84 return -EEXIST;
85 }
86 list_add_tail_rcu(&sched->list, &mptcp_sched_list);
87 spin_unlock(&mptcp_sched_list_lock);
88
89 pr_debug("%s registered", sched->name);
90 return 0;
91}
92
93void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
94{
ed1ad86b
GT
95 if (sched == &mptcp_sched_default)
96 return;
97
740ebe35
GT
98 spin_lock(&mptcp_sched_list_lock);
99 list_del_rcu(&sched->list);
100 spin_unlock(&mptcp_sched_list_lock);
101}
1730b2b2 102
ed1ad86b
GT
/* Register the built-in default scheduler at boot/module init time. */
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}
107
1730b2b2
GT
108int mptcp_init_sched(struct mptcp_sock *msk,
109 struct mptcp_sched_ops *sched)
110{
111 if (!sched)
ed1ad86b 112 sched = &mptcp_sched_default;
1730b2b2
GT
113
114 if (!bpf_try_module_get(sched, sched->owner))
115 return -EBUSY;
116
117 msk->sched = sched;
118 if (msk->sched->init)
119 msk->sched->init(msk);
120
121 pr_debug("sched=%s", msk->sched->name);
122
1730b2b2
GT
123 return 0;
124}
125
/* Detach and release the scheduler attached to @msk, if any: invoke
 * its optional ->release hook and drop the module reference taken by
 * mptcp_init_sched().
 */
void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	/* Clear the pointer before calling back into the scheduler, so
	 * msk no longer references it while it is being torn down.
	 */
	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}
fce68b03
GT
139
/* Mark/unmark @subflow as selected by the scheduler. WRITE_ONCE pairs
 * with the READ_ONCE(subflow->scheduled) lockless readers.
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
07336a87
GT
145
/* Ask the scheduler to pick subflow(s) for a new transmission.
 * Returns 0 when at least one subflow is (already) scheduled,
 * -EINVAL when nothing can be sent right now.
 */
int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_send */
	if (__mptcp_check_fallback(msk)) {
		/* on fallback only the first subflow is usable */
		if (msk->first &&
		    __tcp_can_send(msk->first) &&
		    sk_stream_memory_free(msk->first)) {
			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
			return 0;
		}
		return -EINVAL;
	}

	/* a subflow already picked by a previous call is still pending */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = false;
	/* call the default scheduler directly when none (or the default
	 * one) is attached, avoiding the indirect call
	 */
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}
174
175int mptcp_sched_get_retrans(struct mptcp_sock *msk)
176{
177 struct mptcp_subflow_context *subflow;
178 struct mptcp_sched_data data;
179
ee2708ae
GT
180 msk_owned_by_me(msk);
181
182 /* the following check is moved out of mptcp_subflow_get_retrans */
183 if (__mptcp_check_fallback(msk))
184 return -EINVAL;
185
07336a87
GT
186 mptcp_for_each_subflow(msk, subflow) {
187 if (READ_ONCE(subflow->scheduled))
188 return 0;
189 }
190
07336a87 191 data.reinject = true;
ed1ad86b
GT
192 if (msk->sched == &mptcp_sched_default || !msk->sched)
193 return mptcp_sched_default_get_subflow(msk, &data);
07336a87
GT
194 return msk->sched->get_subflow(msk, &data);
195}