/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/percpu.h>
17 #include <net/sch_generic.h>
18 #include <net/pkt_cls.h>
20 struct cls_mall_head
{
22 struct tcf_result res
;
25 unsigned int in_hw_count
;
26 struct tc_matchall_pcnt __percpu
*pf
;
27 struct rcu_work rwork
;
30 static int mall_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
31 struct tcf_result
*res
)
33 struct cls_mall_head
*head
= rcu_dereference_bh(tp
->root
);
38 if (tc_skip_sw(head
->flags
))
42 __this_cpu_inc(head
->pf
->rhit
);
43 return tcf_exts_exec(skb
, &head
->exts
, res
);
/* Nothing to set up until a filter is actually added via ->change(). */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
51 static void __mall_destroy(struct cls_mall_head
*head
)
53 tcf_exts_destroy(&head
->exts
);
54 tcf_exts_put_net(&head
->exts
);
55 free_percpu(head
->pf
);
59 static void mall_destroy_work(struct work_struct
*work
)
61 struct cls_mall_head
*head
= container_of(to_rcu_work(work
),
69 static void mall_destroy_hw_filter(struct tcf_proto
*tp
,
70 struct cls_mall_head
*head
,
72 struct netlink_ext_ack
*extack
)
74 struct tc_cls_matchall_offload cls_mall
= {};
75 struct tcf_block
*block
= tp
->chain
->block
;
77 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
78 cls_mall
.command
= TC_CLSMATCHALL_DESTROY
;
79 cls_mall
.cookie
= cookie
;
81 tc_setup_cb_call(block
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false);
82 tcf_block_offload_dec(block
, &head
->flags
);
85 static int mall_replace_hw_filter(struct tcf_proto
*tp
,
86 struct cls_mall_head
*head
,
88 struct netlink_ext_ack
*extack
)
90 struct tc_cls_matchall_offload cls_mall
= {};
91 struct tcf_block
*block
= tp
->chain
->block
;
92 bool skip_sw
= tc_skip_sw(head
->flags
);
95 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
99 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
100 cls_mall
.command
= TC_CLSMATCHALL_REPLACE
;
101 cls_mall
.cookie
= cookie
;
103 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
);
105 kfree(cls_mall
.rule
);
106 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
108 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
115 err
= tc_setup_cb_call(block
, TC_SETUP_CLSMATCHALL
, &cls_mall
, skip_sw
);
116 kfree(cls_mall
.rule
);
119 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
121 } else if (err
> 0) {
122 head
->in_hw_count
= err
;
123 tcf_block_offload_inc(block
, &head
->flags
);
126 if (skip_sw
&& !(head
->flags
& TCA_CLS_FLAGS_IN_HW
))
132 static void mall_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
133 struct netlink_ext_ack
*extack
)
135 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
140 tcf_unbind_filter(tp
, &head
->res
);
142 if (!tc_skip_hw(head
->flags
))
143 mall_destroy_hw_filter(tp
, head
, (unsigned long) head
, extack
);
145 if (tcf_exts_get_net(&head
->exts
))
146 tcf_queue_work(&head
->rwork
, mall_destroy_work
);
148 __mall_destroy(head
);
151 static void *mall_get(struct tcf_proto
*tp
, u32 handle
)
153 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
155 if (head
&& head
->handle
== handle
)
161 static const struct nla_policy mall_policy
[TCA_MATCHALL_MAX
+ 1] = {
162 [TCA_MATCHALL_UNSPEC
] = { .type
= NLA_UNSPEC
},
163 [TCA_MATCHALL_CLASSID
] = { .type
= NLA_U32
},
166 static int mall_set_parms(struct net
*net
, struct tcf_proto
*tp
,
167 struct cls_mall_head
*head
,
168 unsigned long base
, struct nlattr
**tb
,
169 struct nlattr
*est
, bool ovr
,
170 struct netlink_ext_ack
*extack
)
174 err
= tcf_exts_validate(net
, tp
, tb
, est
, &head
->exts
, ovr
, true,
179 if (tb
[TCA_MATCHALL_CLASSID
]) {
180 head
->res
.classid
= nla_get_u32(tb
[TCA_MATCHALL_CLASSID
]);
181 tcf_bind_filter(tp
, &head
->res
, base
);
186 static int mall_change(struct net
*net
, struct sk_buff
*in_skb
,
187 struct tcf_proto
*tp
, unsigned long base
,
188 u32 handle
, struct nlattr
**tca
,
189 void **arg
, bool ovr
, bool rtnl_held
,
190 struct netlink_ext_ack
*extack
)
192 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
193 struct nlattr
*tb
[TCA_MATCHALL_MAX
+ 1];
194 struct cls_mall_head
*new;
198 if (!tca
[TCA_OPTIONS
])
204 err
= nla_parse_nested_deprecated(tb
, TCA_MATCHALL_MAX
,
205 tca
[TCA_OPTIONS
], mall_policy
, NULL
);
209 if (tb
[TCA_MATCHALL_FLAGS
]) {
210 flags
= nla_get_u32(tb
[TCA_MATCHALL_FLAGS
]);
211 if (!tc_flags_valid(flags
))
215 new = kzalloc(sizeof(*new), GFP_KERNEL
);
219 err
= tcf_exts_init(&new->exts
, net
, TCA_MATCHALL_ACT
, 0);
225 new->handle
= handle
;
227 new->pf
= alloc_percpu(struct tc_matchall_pcnt
);
230 goto err_alloc_percpu
;
233 err
= mall_set_parms(net
, tp
, new, base
, tb
, tca
[TCA_RATE
], ovr
,
238 if (!tc_skip_hw(new->flags
)) {
239 err
= mall_replace_hw_filter(tp
, new, (unsigned long)new,
242 goto err_replace_hw_filter
;
245 if (!tc_in_hw(new->flags
))
246 new->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
249 rcu_assign_pointer(tp
->root
, new);
252 err_replace_hw_filter
:
254 free_percpu(new->pf
);
256 tcf_exts_destroy(&new->exts
);
262 static int mall_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
263 bool rtnl_held
, struct netlink_ext_ack
*extack
)
268 static void mall_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
271 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
273 if (arg
->count
< arg
->skip
)
278 if (arg
->fn(tp
, head
, arg
) < 0)
284 static int mall_reoffload(struct tcf_proto
*tp
, bool add
, tc_setup_cb_t
*cb
,
285 void *cb_priv
, struct netlink_ext_ack
*extack
)
287 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
288 struct tc_cls_matchall_offload cls_mall
= {};
289 struct tcf_block
*block
= tp
->chain
->block
;
292 if (tc_skip_hw(head
->flags
))
295 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
299 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
300 cls_mall
.command
= add
?
301 TC_CLSMATCHALL_REPLACE
: TC_CLSMATCHALL_DESTROY
;
302 cls_mall
.cookie
= (unsigned long)head
;
304 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
);
306 kfree(cls_mall
.rule
);
307 if (add
&& tc_skip_sw(head
->flags
)) {
308 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
314 err
= cb(TC_SETUP_CLSMATCHALL
, &cls_mall
, cb_priv
);
315 kfree(cls_mall
.rule
);
318 if (add
&& tc_skip_sw(head
->flags
))
323 tc_cls_offload_cnt_update(block
, &head
->in_hw_count
, &head
->flags
, add
);
328 static void mall_stats_hw_filter(struct tcf_proto
*tp
,
329 struct cls_mall_head
*head
,
330 unsigned long cookie
)
332 struct tc_cls_matchall_offload cls_mall
= {};
333 struct tcf_block
*block
= tp
->chain
->block
;
335 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, NULL
);
336 cls_mall
.command
= TC_CLSMATCHALL_STATS
;
337 cls_mall
.cookie
= cookie
;
339 tc_setup_cb_call(block
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false);
341 tcf_exts_stats_update(&head
->exts
, cls_mall
.stats
.bytes
,
342 cls_mall
.stats
.pkts
, cls_mall
.stats
.lastused
);
345 static int mall_dump(struct net
*net
, struct tcf_proto
*tp
, void *fh
,
346 struct sk_buff
*skb
, struct tcmsg
*t
, bool rtnl_held
)
348 struct tc_matchall_pcnt gpf
= {};
349 struct cls_mall_head
*head
= fh
;
356 if (!tc_skip_hw(head
->flags
))
357 mall_stats_hw_filter(tp
, head
, (unsigned long)head
);
359 t
->tcm_handle
= head
->handle
;
361 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
363 goto nla_put_failure
;
365 if (head
->res
.classid
&&
366 nla_put_u32(skb
, TCA_MATCHALL_CLASSID
, head
->res
.classid
))
367 goto nla_put_failure
;
369 if (head
->flags
&& nla_put_u32(skb
, TCA_MATCHALL_FLAGS
, head
->flags
))
370 goto nla_put_failure
;
372 for_each_possible_cpu(cpu
) {
373 struct tc_matchall_pcnt
*pf
= per_cpu_ptr(head
->pf
, cpu
);
375 gpf
.rhit
+= pf
->rhit
;
378 if (nla_put_64bit(skb
, TCA_MATCHALL_PCNT
,
379 sizeof(struct tc_matchall_pcnt
),
380 &gpf
, TCA_MATCHALL_PAD
))
381 goto nla_put_failure
;
383 if (tcf_exts_dump(skb
, &head
->exts
))
384 goto nla_put_failure
;
386 nla_nest_end(skb
, nest
);
388 if (tcf_exts_dump_stats(skb
, &head
->exts
) < 0)
389 goto nla_put_failure
;
394 nla_nest_cancel(skb
, nest
);
398 static void mall_bind_class(void *fh
, u32 classid
, unsigned long cl
)
400 struct cls_mall_head
*head
= fh
;
402 if (head
&& head
->res
.classid
== classid
)
403 head
->res
.class = cl
;
406 static struct tcf_proto_ops cls_mall_ops __read_mostly
= {
408 .classify
= mall_classify
,
410 .destroy
= mall_destroy
,
412 .change
= mall_change
,
413 .delete = mall_delete
,
415 .reoffload
= mall_reoffload
,
417 .bind_class
= mall_bind_class
,
418 .owner
= THIS_MODULE
,
421 static int __init
cls_mall_init(void)
423 return register_tcf_proto_ops(&cls_mall_ops
);
426 static void __exit
cls_mall_exit(void)
428 unregister_tcf_proto_ops(&cls_mall_ops
);
/* Module registration boilerplate. */
module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");