/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

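/**
 * struct mqprio_sched - private state for the mqprio root qdisc
 * @qdiscs:     per-tx-queue child qdiscs, allocated in mqprio_init()
 *              and handed over to the device queues in mqprio_attach()
 * @hw_offload: offload mode the driver accepted via ndo_setup_tc()
 */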
struct mqprio_sched {
	struct Qdisc **qdiscs;
	int hw_offload;
};

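/* Tear down the qdisc: destroy any child qdiscs still held in
 * priv->qdiscs, then either hand an empty tc_mqprio_qopt to the driver
 * (when the mapping was offloaded) or clear the software traffic-class
 * state with netdev_set_num_tc().
 */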
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt mqprio = {};

		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &mqprio);
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

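/* Validate the user-supplied tc_mqprio_qopt: number of traffic classes,
 * priority-to-tc map, and (for the software path) that each tc's queue
 * range fits within real_num_tx_queues without overlapping the others.
 * When hardware offload is requested, queue-count validation is left to
 * the driver's ndo_setup_tc().
 */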
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value. Drivers
	 * have the option of overriding this later if they don't support
	 * a given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts. If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx range; a range
		 * ending exactly at real_num_tx_queues indicates the last
		 * queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

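/* Set up the root qdisc: parse and validate the netlink options,
 * pre-allocate a default child qdisc for every tx queue, and program
 * the tc-to-queue mapping either in hardware (ndo_setup_tc) or in
 * software (netdev_set_num_tc()/netdev_set_tc_queue()).
 *
 * For illustration only (an iproute2 invocation, not part of this
 * file), a configuration with three traffic classes might look like:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 0 1 1 2 2 2 2 2 0 0 0 0 0 0 0 0 \
 *      queues 2@0 2@2 4@4 hw 0
 */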
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping then run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt mqprio = *qopt;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

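/* Called once init succeeds: graft each pre-allocated child qdisc onto
 * its tx queue, destroying whatever was attached before, then release
 * the priv->qdiscs array since the queues now hold the references.
 */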
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

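/* Map a class id to its netdev tx queue. Class ids 1..num_tc name the
 * virtual traffic-class nodes and yield NULL here (the unsigned
 * subtraction wraps past num_tx_queues); larger ids index the tx
 * queues directly.
 */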
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

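/* Replace the child qdisc of a single tx queue, quiescing the device
 * around the graft when it is up.
 */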
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

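/* Dump configuration and statistics: fold every child qdisc's counters
 * into the root's bstats/qstats under the per-queue lock, then emit the
 * tc_mqprio_qopt currently programmed on the device.
 */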
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

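/* Return the qdisc attached to a queue class, or NULL for the virtual
 * traffic-class nodes.
 */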
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

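/* Class ids are dense: one per traffic class followed by one per tx
 * queue, so any id within that range simply maps to itself.
 */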
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

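/* Fill in the tcmsg for one class: traffic-class nodes hang off the
 * root, while queue nodes report the traffic class containing them as
 * their parent and their child qdisc's handle as tcm_info.
 */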
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

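/* Per-class statistics: for a traffic-class node, sum the counters of
 * every queue in its range; for a queue node, copy the attached child
 * qdisc's counters directly.
 */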
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		__u32 qlen = 0;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);

			qdisc = rtnl_dereference(q->qdisc);
			spin_lock_bh(qdisc_lock(qdisc));
			qlen += qdisc->q.qlen;
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
					  d, NULL, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL,
					  &sch->qstats, sch->q.qlen) < 0)
			return -1;
	}
	return 0;
}

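/* Walk all classes: the traffic-class nodes first, then one class per
 * tx queue, invoking the callback on each until it asks to stop.
 */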
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");