// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by an internal or external classifier) into flows.
 * This is a stochastic model (since we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

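/* Example configuration (hypothetical values, for illustration only;
 * each keyword maps onto one of the TCA_FQ_CODEL_* netlink attributes
 * parsed in fq_codel_change() below):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *           target 5ms interval 100ms quantum 1514 ecn
 */
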
struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
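
/* Size sketch (an estimate, assuming a 64-bit build and the u32
 * codel_time_t from include/net/codel.h): two pointers (16 bytes) +
 * list_head (16) + int (4, plus alignment) + codel_vars (~24)
 * comes to ~64 bytes, i.e. a single cache line on most systems.
 */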

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
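
/* reciprocal_scale(x, n) maps the full 32-bit hash range uniformly onto
 * [0, n) without a modulo; it is equivalent to computing
 *
 *      (u32)(((u64)x * n) >> 32)
 *
 * so flow selection costs one multiply instead of a division.
 */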

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, NULL, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}
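
/* Note on the priority shortcut above: a local sender (or an earlier
 * tc action) can pin a packet to flow N directly by setting
 * skb->priority to TC_H_MAKE(sch->handle, N), with 1 <= N <= flows_cnt;
 * the external classifier and the hash are then bypassed entirely.
 */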

/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb_mark_not_on_list(skb);
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet enqueue/dequeue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        /* Tell codel to increase its signal strength also */
        flow->cvars.count += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}
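
/* Cost sketch for the linear scan above: with the default 1024 flows it
 * reads 1024 * sizeof(u32) = 4KB of q->backlogs[], i.e. 64 cache lines
 * of 64 bytes; dropping up to drop_batch_size packets (64 by default)
 * amortizes that to roughly one cache line per dropped packet.
 */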

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int ret;
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog,
         * capped at 64 packets, to avoid too large a cpu spike here.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let the upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}
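
/* Note: codel_dequeue() only sees the codel_vars, so container_of()
 * recovers the enclosing flow, and the pointer difference
 * (flow - q->flows) recovers the flow index into q->backlogs[].
 */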

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}
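
/* DRR walk-through (illustrative numbers): with quantum = 1514, a new
 * flow starts with deficit 1514. Dequeuing a 1000-byte packet leaves
 * 514; a second one leaves -486. On the next visit the deficit is
 * <= 0, so the flow gets another quantum (-486 + 1514 = 1028) and is
 * rotated to the tail of old_flows before it may transmit again.
 */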

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]       = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]        = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL]     = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]          = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]        = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
        [TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};
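
/* Attribute units (as converted below): TARGET, INTERVAL and
 * CE_THRESHOLD are in microseconds; QUANTUM and MEMORY_LIMIT in bytes;
 * LIMIT and DROP_BATCH_SIZE in packets; FLOWS is a plain count.
 */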

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        u32 quantum = 0;
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
                                          fq_codel_policy, NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        if (tb[TCA_FQ_CODEL_QUANTUM]) {
                quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
                if (quantum > FQ_CODEL_QUANTUM_MAX) {
                        NL_SET_ERR_MSG(extack, "Invalid quantum");
                        return -EINVAL;
                }
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }
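
        /* Unit-conversion sketch: codel_time_t counts in units of
         * 2^CODEL_SHIFT ns (CODEL_SHIFT is 10 in include/net/codel.h),
         * so e.g. a 5000us target becomes 5000 * 1000 >> 10, roughly
         * 4882 codel time units.
         */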

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
                q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
        if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
                q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (quantum)
                q->quantum = quantum;

        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
                        goto init_failure;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                goto init_failure;

        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
                if (!q->flows) {
                        err = -ENOMEM;
                        goto init_failure;
                }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                if (!q->backlogs) {
                        err = -ENOMEM;
                        goto alloc_failure;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;

alloc_failure:
        kvfree(q->flows);
        q->flows = NULL;
init_failure:
        q->flows_cnt = 0;
        return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
                if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                                codel_time_to_us(q->cparams.ce_threshold)))
                        goto nla_put_failure;
                if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
                        goto nla_put_failure;
                if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
                        goto nla_put_failure;
        }

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
                                            struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = 0;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           = fq_codel_leaf,
        .find           = fq_codel_find,
        .tcf_block      = fq_codel_tcf_block,
        .bind_tcf       = fq_codel_bind,
        .unbind_tcf     = fq_codel_unbind,
        .dump           = fq_codel_dump_class,
        .dump_stats     = fq_codel_dump_class_stats,
        .walk           = fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         = &fq_codel_class_ops,
        .id             = "fq_codel",
        .priv_size      = sizeof(struct fq_codel_sched_data),
        .enqueue        = fq_codel_enqueue,
        .dequeue        = fq_codel_dequeue,
        .peek           = qdisc_peek_dequeued,
        .init           = fq_codel_init,
        .reset          = fq_codel_reset,
        .destroy        = fq_codel_destroy,
        .change         = fq_codel_change,
        .dump           = fq_codel_dump,
        .dump_stats     = fq_codel_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");