// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_police.h>

/* Each policer is serialized by its individual spinlock */

static unsigned int police_net_id;
static struct tc_action_ops act_police_ops;

static int tcf_police_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
};

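/* Parse the TCA_POLICE_* attributes, create or update the action and
 * publish a new tcf_police_params block to the datapath via RCU.
 */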
static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, police_net_id);
	struct tcf_police_params *new;
	bool exists = false;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, NULL, a,
				     &act_police_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    NULL, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

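	/* Publish the new parameters: tcf_lock serializes control-path
	 * updates, tcfp_lock protects the token bucket state read by the
	 * datapath, and rcu_swap_protected() lets in-flight readers keep
	 * using the old params until kfree_rcu() reclaims them.
	 */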
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(police->params,
			   new,
			   lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

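/* Datapath: decide the fate of one skb. Conforming packets get the
 * configured TCA_POLICE_RESULT (tcfp_result); packets over the MTU,
 * EWMA, rate or peak-rate limit get the action's own verdict
 * (police->tcf_action). Runs in BH context under RCU.
 */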
static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	s64 now, toks, ptoks = 0;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
		if (!p->rate_present) {
			ret = p->tcfp_result;
			goto end;
		}

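		/* Token bucket in nanoseconds: credit the time elapsed
		 * since the last update (capped at the burst), then charge
		 * this packet's transmission time at the configured rate
		 * (and peak rate, if any). A non-negative balance means
		 * the packet conforms.
		 */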
		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		toks += police->tcfp_toks;
		if (toks > p->tcfp_burst)
			toks = p->tcfp_burst;
		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}

static void tcf_police_cleanup(struct tc_action *a)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;

	p = rcu_dereference_protected(police->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}

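/* Account bytes/packets reported through the stats_update callback (hw
 * counters also go to cpu_bstats_hw) and advance the lastuse timestamp.
 */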
static void tcf_police_stats_update(struct tc_action *a,
				    u64 bytes, u32 packets,
				    u64 lastuse, bool hw)
{
	struct tcf_police *police = to_police(a);
	struct tcf_t *tm = &police->tcf_tm;

	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
	if (hw)
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
				   bytes, packets);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

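/* Dump the current configuration back to user space. Reads params under
 * tcf_lock so the TBF parameters, result and avrate stay consistent.
 */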
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present)
		psched_ratecfg_getrate(&opt.rate, &p->rate);
	if (p->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
	t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_idr_search(tn, a, index);
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		= "police",
	.id		= TCA_ID_POLICE,
	.owner		= THIS_MODULE,
	.stats_update	= tcf_police_stats_update,
	.act		= tcf_police_act,
	.dump		= tcf_police_dump,
	.init		= tcf_police_init,
	.walk		= tcf_police_walker,
	.lookup		= tcf_police_search,
	.cleanup	= tcf_police_cleanup,
	.size		= sizeof(struct tcf_police),
};

static __net_init int police_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tc_action_net_init(tn, &act_police_ops);
}

static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, police_net_id);
}

static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id = &police_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}

static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);