/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define ACT_BPF_NAME_LEN        256

struct tcf_bpf_cfg {
        struct bpf_prog *filter;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        u16 bpf_num_ops;
        bool is_ebpf;
};

static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;

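/* Packet path: run the attached (e)BPF program on the skb under RCU and
 * translate its return code into a TC verdict. At ingress the MAC header
 * is pushed back before the run so the program sees the full frame.
 */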
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
                       struct tcf_result *res)
{
        bool at_ingress = skb_at_tc_ingress(skb);
        struct tcf_bpf *prog = to_bpf(act);
        struct bpf_prog *filter;
        int action, filter_res;

        tcf_lastuse_update(&prog->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

        rcu_read_lock();
        filter = rcu_dereference(prog->filter);
        if (at_ingress) {
                __skb_push(skb, skb->mac_len);
                bpf_compute_data_pointers(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
                __skb_pull(skb, skb->mac_len);
        } else {
                bpf_compute_data_pointers(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
        }
        rcu_read_unlock();

        /* A BPF program may overwrite the default action opcode.
         * Similarly as in cls_bpf, if filter_res == -1 we use the
         * default action specified from tc.
         *
         * In case a different well-known TC_ACT opcode has been
         * returned, it will overwrite the default one.
         *
         * For everything else that is unknown, TC_ACT_UNSPEC is
         * returned.
         */
        switch (filter_res) {
        case TC_ACT_PIPE:
        case TC_ACT_RECLASSIFY:
        case TC_ACT_OK:
        case TC_ACT_REDIRECT:
                action = filter_res;
                break;
        case TC_ACT_SHOT:
                action = filter_res;
                qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
                break;
        case TC_ACT_UNSPEC:
                action = prog->tcf_action;
                break;
        default:
                action = TC_ACT_UNSPEC;
                break;
        }

        return action;
}

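/* Classic BPF actions keep a copy of their sock_filter ops; eBPF actions
 * only hold a bpf_prog reference, so a NULL bpf_ops marks an eBPF program.
 */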
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
        return !prog->bpf_ops;
}

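/* Dump the classic BPF program: instruction count plus the raw
 * sock_filter array.
 */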
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

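/* Dump the eBPF program: optional name, program id and tag. */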
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

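/* Netlink dump: emit the action parameters, the program description
 * (classic or eBPF) and the timestamps, all under the action lock.
 */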
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
                        int bind, int ref)
{
        unsigned char *tp = skb_tail_pointer(skb);
        struct tcf_bpf *prog = to_bpf(act);
        struct tc_act_bpf opt = {
                .index   = prog->tcf_index,
                .refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
        };
        struct tcf_t tm;
        int ret;

        spin_lock_bh(&prog->tcf_lock);
        opt.action = prog->tcf_action;
        if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        if (tcf_bpf_is_ebpf(prog))
                ret = tcf_bpf_dump_ebpf_info(prog, skb);
        else
                ret = tcf_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        tcf_tm_dump(&tm, &prog->tcf_tm);
        if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
                          TCA_ACT_BPF_PAD))
                goto nla_put_failure;

        spin_unlock_bh(&prog->tcf_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&prog->tcf_lock);
        nlmsg_trim(skb, tp);
        return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
        [TCA_ACT_BPF_PARMS]   = { .len = sizeof(struct tc_act_bpf) },
        [TCA_ACT_BPF_FD]      = { .type = NLA_U32 },
        [TCA_ACT_BPF_NAME]    = { .type = NLA_NUL_STRING,
                                  .len = ACT_BPF_NAME_LEN },
        [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 },
        [TCA_ACT_BPF_OPS]     = { .type = NLA_BINARY,
                                  .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

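/* Build a classic BPF program from TCA_ACT_BPF_OPS{,_LEN}: validate the
 * instruction count, copy the ops and create the kernel filter via
 * bpf_prog_create().
 */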
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        cfg->bpf_ops = bpf_ops;
        cfg->bpf_num_ops = bpf_num_ops;
        cfg->filter = fp;
        cfg->is_ebpf = false;

        return 0;
}

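/* Take a reference on an already loaded eBPF program via its fd
 * (TCA_ACT_BPF_FD) and remember the user-supplied name, if any.
 */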
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_ACT_BPF_NAME]) {
                name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        cfg->bpf_name = name;
        cfg->filter = fp;
        cfg->is_ebpf = true;

        return 0;
}

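/* Release whatever a tcf_bpf_cfg holds: drop the eBPF reference or destroy
 * the classic filter, then free the copied ops and name.
 */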
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
        struct bpf_prog *filter = cfg->filter;

        if (filter) {
                if (cfg->is_ebpf)
                        bpf_prog_put(filter);
                else
                        bpf_prog_destroy(filter);
        }

        kfree(cfg->bpf_ops);
        kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
                                  struct tcf_bpf_cfg *cfg)
{
        cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
        /* updates to prog->filter are prevented, since it's called either
         * with tcf lock or during final cleanup in rcu callback
         */
        cfg->filter = rcu_dereference_protected(prog->filter, 1);

        cfg->bpf_ops = prog->bpf_ops;
        cfg->bpf_name = prog->bpf_name;
}

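/* Netlink init/replace path: parse the attributes, create or look up the
 * action index, load the classic or eBPF program, then publish it under
 * the action lock. On replace, the old program is freed only after
 * synchronize_rcu() so in-flight packets finish first.
 */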
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
                        struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
        struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
        bool is_bpf, is_ebpf;
        int ret, res = 0;

        if (!nla)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy, NULL);
        if (ret < 0)
                return ret;

        if (!tb[TCA_ACT_BPF_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

        ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
        if (!ret) {
                ret = tcf_idr_create(tn, parm->index, est, act,
                                     &act_bpf_ops, bind, true);
                if (ret < 0) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
                }

                res = ACT_P_CREATED;
        } else if (ret > 0) {
                /* Don't override defaults. */
                if (bind)
                        return 0;

                if (!replace) {
                        tcf_idr_release(*act, bind);
                        return -EEXIST;
                }
        } else {
                return ret;
        }

        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
        is_ebpf = tb[TCA_ACT_BPF_FD];

        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
                goto out;
        }

        memset(&cfg, 0, sizeof(cfg));

        ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                       tcf_bpf_init_from_efd(tb, &cfg);
        if (ret < 0)
                goto out;

        prog = to_bpf(*act);

        spin_lock_bh(&prog->tcf_lock);
        if (res != ACT_P_CREATED)
                tcf_bpf_prog_fill_cfg(prog, &old);

        prog->bpf_ops = cfg.bpf_ops;
        prog->bpf_name = cfg.bpf_name;

        if (cfg.bpf_num_ops)
                prog->bpf_num_ops = cfg.bpf_num_ops;

        prog->tcf_action = parm->action;
        rcu_assign_pointer(prog->filter, cfg.filter);
        spin_unlock_bh(&prog->tcf_lock);

        if (res == ACT_P_CREATED) {
                tcf_idr_insert(tn, *act);
        } else {
                /* make sure the program being replaced is no longer executing */
                synchronize_rcu();
                tcf_bpf_cfg_cleanup(&old);
        }

        return res;
out:
        tcf_idr_release(*act, bind);

        return ret;
}

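/* Final teardown of an action instance: collect its program, ops and name
 * into a temporary cfg and release them.
 */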
static void tcf_bpf_cleanup(struct tc_action *act)
{
        struct tcf_bpf_cfg tmp;

        tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
        tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
        .kind    = "bpf",
        .id      = TCA_ID_BPF,
        .owner   = THIS_MODULE,
        .act     = tcf_bpf_act,
        .dump    = tcf_bpf_dump,
        .cleanup = tcf_bpf_cleanup,
        .init    = tcf_bpf_init,
        .walk    = tcf_bpf_walker,
        .lookup  = tcf_bpf_search,
        .size    = sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tc_action_net_init(tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, bpf_net_id);
}

static struct pernet_operations bpf_net_ops = {
        .init       = bpf_init_net,
        .exit_batch = bpf_exit_net,
        .id         = &bpf_net_id,
        .size       = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
        return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
        tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");