// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/time.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TAPRIO_STAT_NOT_SET	(~0ULL)

#include "sch_mqprio_lib.h"

static LIST_HEAD(taprio_list);
static struct static_key_false taprio_have_broken_mqprio;
static struct static_key_false taprio_have_working_mqprio;

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_SUPPORTED_FLAGS \
	(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	/* Durations between this GCL entry and the GCL entry where the
	 * respective traffic class gate closes
	 */
	u64 gate_duration[TC_MAX_QUEUE];
	atomic_t budget[TC_MAX_QUEUE];
	/* The qdisc makes some effort so that no packet leaves
	 * after this time
	 */
	ktime_t gate_close_time[TC_MAX_QUEUE];
	struct list_head list;
	/* Used to calculate when to advance the schedule */
	ktime_t end_time;
	ktime_t next_txtime;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	/* Longest non-zero contiguous gate durations per traffic class,
	 * or 0 if a traffic class gate never opens during the schedule.
	 */
	u64 max_open_gate_duration[TC_MAX_QUEUE];
	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
	u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_end_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	bool detected_mqprio;
	bool broken_mqprio;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	int cur_txq[TC_MAX_QUEUE];
	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
	u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static void taprio_calculate_gate_durations(struct taprio_sched *q,
					    struct sched_gate_list *sched)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *cur;
	int tc;

	list_for_each_entry(entry, &sched->entries, list) {
		u32 gates_still_open = entry->gate_mask;

		/* For each traffic class, calculate each open gate duration,
		 * starting at this schedule entry and ending at the schedule
		 * entry containing a gate close event for that TC.
		 */
		cur = entry;

		do {
			if (!gates_still_open)
				break;

			for (tc = 0; tc < num_tc; tc++) {
				if (!(gates_still_open & BIT(tc)))
					continue;

				if (cur->gate_mask & BIT(tc))
					entry->gate_duration[tc] += cur->interval;
				else
					gates_still_open &= ~BIT(tc);
			}

			cur = list_next_entry_circular(cur, &sched->entries, list);
		} while (cur != entry);

		/* Keep track of the maximum gate duration for each traffic
		 * class, taking care to not confuse a traffic class which is
		 * temporarily closed with one that is always closed.
		 */
		for (tc = 0; tc < num_tc; tc++)
			if (entry->gate_duration[tc] &&
			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
	}
}

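/* Worked example (illustrative numbers, not from any standard): with a
 * three-entry schedule of 10/20/30 us intervals where TC0's gate is open
 * in entries 0 and 1 but closed in entry 2, entry 0 gets
 * gate_duration[0] = 30 us (10 + 20), entry 1 gets 20 us, entry 2 stays
 * at 0, and max_open_gate_duration[0] ends up as 30 us.
 */
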
static bool taprio_entry_allows_tx(ktime_t skb_end_time,
				   struct sched_entry *entry, int tc)
{
	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
}

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

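/* For example (illustrative numbers): with base_time = 0 and
 * cycle_time = 1000000 ns, a query at time = 2300000 ns yields
 * 2300000 % 1000000 = 300000 ns elapsed in the current cycle.
 */
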
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}

static int duration_to_length(struct taprio_sched *q, u64 duration)
{
	return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
}

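/* Worked example (illustrative numbers): at 1 Gb/s, picos_per_byte is
 * 8000, so a 1500 byte frame maps to 1500 * 8000 / 1000 = 12000 ns of
 * transmission time, and 12000 ns maps back to
 * 12000 * 1000 / 8000 = 1500 bytes.
 */
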
/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
 * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
 * the maximum open gate durations at the given link speed.
 */
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
					struct sched_gate_list *sched,
					struct qdisc_size_table *stab)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	u32 max_sdu_from_user;
	u32 max_sdu_dynamic;
	u32 max_sdu;
	int tc;

	for (tc = 0; tc < num_tc; tc++) {
		max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;

		/* TC gate never closes => keep the queueMaxSDU
		 * selected by the user
		 */
		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
			max_sdu_dynamic = U32_MAX;
		} else {
			u32 max_frm_len;

			max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
			/* Compensate for L1 overhead from size table,
			 * but don't let the frame size go negative
			 */
			if (stab) {
				max_frm_len -= stab->szopts.overhead;
				max_frm_len = max_t(int, max_frm_len,
						    dev->hard_header_len + 1);
			}
			max_sdu_dynamic = max_frm_len - dev->hard_header_len;
			if (max_sdu_dynamic > dev->max_mtu)
				max_sdu_dynamic = U32_MAX;
		}

		max_sdu = min(max_sdu_dynamic, max_sdu_from_user);

		if (max_sdu != U32_MAX) {
			sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
			sched->max_sdu[tc] = max_sdu;
		} else {
			sched->max_frm_len[tc] = U32_MAX; /* never oversized */
			sched->max_sdu[tc] = 0;
		}
	}
}

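/* Worked example (illustrative numbers, no size table): if TC0's longest
 * open gate is 12336 ns at 1 Gb/s (8000 ps/byte), max_frm_len is
 * 12336 * 1000 / 8000 = 1542 bytes; with hard_header_len = 14,
 * max_sdu_dynamic = 1528 bytes, which is then clamped further by the
 * user-requested queueMaxSDU, if any.
 */
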
/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

/* Devices with full offload are expected to honor this in hardware */
static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *sched;
	int prio = skb->priority;
	bool exceeds = false;
	u8 tc;

	tc = netdev_get_prio_tc_map(dev, prio);

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	if (sched && skb->len > sched->max_frm_len[tc])
		exceeds = true;
	rcu_read_unlock();

	return exceeds;
}

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
				    struct sk_buff **to_free)
{
	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
	netdev_features_t features = netif_skb_features(skb);
	struct sk_buff *segs, *nskb;
	int ret;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		slen += segs->len;

		/* FIXME: we should be segmenting to a smaller size
		 * rather than dropping these
		 */
		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
			ret = qdisc_drop(segs, sch, to_free);
		else
			ret = taprio_enqueue_one(segs, sch, child, to_free);

		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			numsegs++;
		}
	}

	if (numsegs > 1)
		qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
	consume_skb(skb);

	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

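/* Note on the backlog adjustment above (illustrative): one GSO skb that
 * was accounted as 1 packet / len bytes becomes numsegs packets / slen
 * bytes, so the parent tree accounting grows by (numsegs - 1) packets
 * and (slen - len) bytes, expressed here as a reduction by the negated
 * amounts (1 - numsegs) and (len - slen).
 */
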
/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
		/* Large packets might not be transmitted when the transmission
		 * duration exceeds any configured interval. Therefore, segment
		 * the skb into smaller chunks. Drivers with full offload are
		 * expected to handle this in hardware.
		 */
		if (skb_is_gso(skb))
			return taprio_enqueue_segmented(skb, sch, child,
							to_free);

		return qdisc_drop(skb, sch, to_free);
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
	return NULL;
}

static void taprio_set_budgets(struct taprio_sched *q,
			       struct sched_gate_list *sched,
			       struct sched_entry *entry)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	int tc, budget;

	for (tc = 0; tc < num_tc; tc++) {
		/* Traffic classes which never close have infinite budget */
		if (entry->gate_duration[tc] == sched->cycle_time)
			budget = INT_MAX;
		else
			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
					   atomic64_read(&q->picos_per_byte));

		atomic_set(&entry->budget[tc], budget);
	}
}

/* When an skb is sent, it consumes from the budget of all traffic classes */
static int taprio_update_budgets(struct sched_entry *entry, size_t len,
				 int tc_consumed, int num_tc)
{
	int tc, budget, new_budget = 0;

	for (tc = 0; tc < num_tc; tc++) {
		budget = atomic_read(&entry->budget[tc]);
		/* Don't consume from infinite budget */
		if (budget == INT_MAX) {
			if (tc == tc_consumed)
				new_budget = budget;
			continue;
		}

		if (tc == tc_consumed)
			new_budget = atomic_sub_return(len, &entry->budget[tc]);
		else
			atomic_sub(len, &entry->budget[tc]);
	}

	return new_budget;
}

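/* Worked example (illustrative numbers): a 100000 ns gate duration at
 * 1 Gb/s (8000 ps/byte) gives a budget of 100000 * 1000 / 8000 = 12500
 * bytes that the traffic class may transmit within this entry.
 */
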
static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
					       struct sched_entry *entry,
					       u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *child = q->qdiscs[txq];
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	ktime_t guard;
	int prio;
	int len;
	u8 tc;

	if (unlikely(!child))
		return NULL;

	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
		goto skip_peek_checks;

	skb = child->ops->peek(child);
	if (!skb)
		return NULL;

	prio = skb->priority;
	tc = netdev_get_prio_tc_map(dev, prio);

	if (!(gate_mask & BIT(tc)))
		return NULL;

	len = qdisc_pkt_len(skb);
	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));

	/* In the case that there's no gate entry, there's no
	 * guard band ...
	 */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    !taprio_entry_allows_tx(guard, entry, tc))
		return NULL;

	/* ... and no budget. */
	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
		return NULL;

skip_peek_checks:
	skb = child->ops->dequeue(child);
	if (unlikely(!skb))
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
{
	int offset = dev->tc_to_txq[tc].offset;
	int count = dev->tc_to_txq[tc].count;

	(*txq)++;
	if (*txq == offset + count)
		*txq = offset;
}

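/* For example (illustrative numbers): with tc_to_txq[tc] = { .offset = 2,
 * .count = 3 }, successive calls advance *txq through 2 -> 3 -> 4 -> 2,
 * wrapping back to the TC's first queue.
 */
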
/* Prioritize higher traffic classes, and select among TXQs belonging to the
 * same TC using round robin
 */
static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
						  struct sched_entry *entry,
						  u32 gate_mask)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int num_tc = netdev_get_num_tc(dev);
	struct sk_buff *skb;
	int tc;

	for (tc = num_tc - 1; tc >= 0; tc--) {
		int first_txq = q->cur_txq[tc];

		if (!(gate_mask & BIT(tc)))
			continue;

		do {
			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
						      entry, gate_mask);

			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);

			if (q->cur_txq[tc] >= dev->num_tx_queues)
				q->cur_txq[tc] = first_txq;

			if (skb)
				return skb;
		} while (q->cur_txq[tc] != first_txq);
	}

	return NULL;
}

/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
 * class other than to determine whether the gate is open or not
 */
static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
						   struct sched_entry *entry,
						   u32 gate_mask)
{
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
		if (skb)
			return skb;
	}

	return NULL;
}

/* Will not be called in the full offload case, since the TX queues are
 * attached to the Qdisc created using qdisc_create_dflt()
 */
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	if (!gate_mask)
		goto done;

	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
	    !static_branch_likely(&taprio_have_working_mqprio)) {
		/* Single NIC kind which is broken */
		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
		/* Single NIC kind which prioritizes properly */
		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	} else {
		/* Mixed NIC kinds present in system, need dynamic testing */
		if (q->broken_mqprio)
			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
		else
			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
	}

done:
	rcu_read_unlock();

	return skb;
}

static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t end_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the end_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, end_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the end_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct net_device *dev = qdisc_dev(q->root);
	struct sched_gate_list *oper, *admin;
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t end_time;
	int tc;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during schedule
	 * initialization.
	 */
	if (unlikely(!entry || entry->end_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		end_time = next->end_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
						    oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	end_time = ktime_add_ns(entry->end_time, next->interval);
	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);

	for (tc = 0; tc < num_tc; tc++) {
		if (next->gate_duration[tc] == oper->cycle_time)
			next->gate_close_time[tc] = KTIME_MAX;
		else
			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
								 next->gate_duration[tc]);
	}

	if (should_change_schedules(admin, oper, end_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		end_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->end_time = end_time;
	taprio_set_budgets(q, oper, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, end_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = NLA_POLICY_MAX(NLA_U32,
							    TC_QOPT_MAX_QUEUE),
	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_TC_ENTRY_FP]	   = NLA_POLICY_RANGE(NLA_U32,
							      TC_FP_EXPRESS,
							      TC_FP_PREEMPTIBLE),
};

static const struct netlink_range_validation_signed taprio_cycle_time_range = {
	.min = 0,
	.max = INT_MAX,
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           =
		NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      =
		NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TC_ENTRY]                   = { .type = NLA_NESTED },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		if (cycle < 0 || cycle > INT_MAX) {
			NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	taprio_calculate_gate_durations(q, new);

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* For some reason, in txtime-assist mode, we allow TXQ ranges for
	 * different TCs to overlap, and just validate the TXQ ranges.
	 */
	return mqprio_validate_qopt(dev, qopt, true, allow_overlapping_txqs,
				    extack);
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

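/* For example (illustrative numbers): with base = 0, cycle = 1000000 ns
 * and now = 2300000 ns, n = 2 and the schedule is started at the next
 * cycle boundary, 3000000 ns.
 */
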
static void setup_first_end_time(struct taprio_sched *q,
				 struct sched_gate_list *sched, ktime_t base)
{
	struct net_device *dev = qdisc_dev(q->root);
	int num_tc = netdev_get_num_tc(dev);
	struct sched_entry *first;
	ktime_t cycle;
	int tc;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_end_time = ktime_add_ns(base, cycle);

	first->end_time = ktime_add_ns(base, first->interval);
	taprio_set_budgets(q, sched, first);

	for (tc = 0; tc < num_tc; tc++) {
		if (first->gate_duration[tc] == sched->cycle_time)
			first->gate_close_time[tc] = KTIME_MAX;
		else
			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
	}

	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

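/* Worked example: speed is in Mb/s, so at 1000 (1 Gb/s) the result is
 * (USEC_PER_SEC * 8) / 1000 = 8000 ps/byte (8 ns), and at 10000
 * (10 Gb/s) it is 800 ps/byte -- the sub-nanosecond resolution noted in
 * the struct taprio_sched comment.
 */
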
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct sched_gate_list *oper, *admin;
	struct qdisc_size_table *stab;
	struct taprio_sched *q;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	list_for_each_entry(q, &taprio_list, taprio_list) {
		if (dev != qdisc_dev(q->root))
			continue;

		taprio_set_picos_per_byte(dev, q);

		stab = rtnl_dereference(q->root->stab);

		oper = rtnl_dereference(q->oper_sched);
		if (oper)
			taprio_update_queue_max_sdu(q, oper, stab);

		admin = rtnl_dereference(q->admin_sched);
		if (admin)
			taprio_update_queue_max_sdu(q, admin, stab);

		break;
	}

	return NOTIFY_DONE;
}

static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u64 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

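/* For example (illustrative numbers): with base = B and entry intervals
 * of 10000 and 20000 ns, next_txtime becomes B for entry 0 and B + 10000
 * for entry 1, i.e. each entry's txtime is the running sum of the
 * preceding intervals.
 */
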
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	oper = rtnl_dereference(q->oper_sched);
	admin = rtnl_dereference(q->admin_sched);

	switch_schedules(q, &admin, &oper);
}

static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}

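/* For example (illustrative numbers): with TC0 on queues 0-1 and TC1 on
 * queues 2-3, tc_mask 0x2 (TC1 only) maps to queue_mask 0xc, i.e.
 * GENMASK(3, 2).
 */
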
1443 | static void taprio_sched_to_offload(struct net_device *dev, | |
9c66d156 | 1444 | struct sched_gate_list *sched, |
522d15ea VO |
1445 | struct tc_taprio_qopt_offload *offload, |
1446 | const struct tc_taprio_caps *caps) | |
9c66d156 VCG |
1447 | { |
1448 | struct sched_entry *entry; | |
1449 | int i = 0; | |
1450 | ||
1451 | offload->base_time = sched->base_time; | |
1452 | offload->cycle_time = sched->cycle_time; | |
1453 | offload->cycle_time_extension = sched->cycle_time_extension; | |
1454 | ||
1455 | list_for_each_entry(entry, &sched->entries, list) { | |
1456 | struct tc_taprio_sched_entry *e = &offload->entries[i]; | |
1457 | ||
1458 | e->command = entry->command; | |
1459 | e->interval = entry->interval; | |
522d15ea VO |
1460 | if (caps->gate_mask_per_txq) |
1461 | e->gate_mask = tc_map_to_queue_mask(dev, | |
1462 | entry->gate_mask); | |
1463 | else | |
1464 | e->gate_mask = entry->gate_mask; | |
09e31cf0 | 1465 | |
9c66d156 VCG |
1466 | i++; |
1467 | } | |
1468 | ||
1469 | offload->num_entries = i; | |
1470 | } | |
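/* Continuing the example above: for a device that advertises
 * gate_mask_per_txq, a schedule entry whose gate_mask opens only tc 0
 * (0x1) is handed to the driver as 0x3, covering TX queues 0-1.
 */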
1471 | ||
2f530df7 VO |
1472 | static void taprio_detect_broken_mqprio(struct taprio_sched *q) |
1473 | { | |
1474 | struct net_device *dev = qdisc_dev(q->root); | |
1475 | struct tc_taprio_caps caps; | |
1476 | ||
1477 | qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, | |
1478 | &caps, sizeof(caps)); | |
1479 | ||
1480 | q->broken_mqprio = caps.broken_mqprio; | |
1481 | if (q->broken_mqprio) | |
1482 | static_branch_inc(&taprio_have_broken_mqprio); | |
1483 | else | |
1484 | static_branch_inc(&taprio_have_working_mqprio); | |
1485 | ||
1486 | q->detected_mqprio = true; | |
1487 | } | |
1488 | ||
1489 | static void taprio_cleanup_broken_mqprio(struct taprio_sched *q) | |
1490 | { | |
1491 | if (!q->detected_mqprio) | |
1492 | return; | |
1493 | ||
1494 | if (q->broken_mqprio) | |
1495 | static_branch_dec(&taprio_have_broken_mqprio); | |
1496 | else | |
1497 | static_branch_dec(&taprio_have_working_mqprio); | |
1498 | } | |
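/* The two static branches above are reference counted: each taprio
 * instance increments exactly one of them in taprio_detect_broken_mqprio()
 * and decrements it here, so that other code paths can cheaply test
 * whether any live instance sits on a broken (or working) mqprio device.
 */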
1499 | ||
9c66d156 | 1500 | static int taprio_enable_offload(struct net_device *dev, |
9c66d156 VCG |
1501 | struct taprio_sched *q, |
1502 | struct sched_gate_list *sched, | |
1503 | struct netlink_ext_ack *extack) | |
1504 | { | |
1505 | const struct net_device_ops *ops = dev->netdev_ops; | |
1506 | struct tc_taprio_qopt_offload *offload; | |
a54fc09e VO |
1507 | struct tc_taprio_caps caps; |
1508 | int tc, err = 0; | |
9c66d156 VCG |
1509 | |
1510 | if (!ops->ndo_setup_tc) { | |
1511 | NL_SET_ERR_MSG(extack, | |
1512 | "Device does not support taprio offload"); | |
1513 | return -EOPNOTSUPP; | |
1514 | } | |
1515 | ||
a54fc09e VO |
1516 | qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO, |
1517 | &caps, sizeof(caps)); | |
1518 | ||
1519 | if (!caps.supports_queue_max_sdu) { | |
1520 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) { | |
1521 | if (q->max_sdu[tc]) { | |
1522 | NL_SET_ERR_MSG_MOD(extack, | |
1523 | "Device does not handle queueMaxSDU"); | |
1524 | return -EOPNOTSUPP; | |
1525 | } | |
1526 | } | |
1527 | } | |
1528 | ||
9c66d156 VCG |
1529 | offload = taprio_offload_alloc(sched->num_entries); |
1530 | if (!offload) { | |
1531 | NL_SET_ERR_MSG(extack, | |
1532 | "Not enough memory for enabling offload mode"); | |
1533 | return -ENOMEM; | |
1534 | } | |
2d800bc5 | 1535 | offload->cmd = TAPRIO_CMD_REPLACE; |
c54876cd | 1536 | offload->extack = extack; |
09c794c0 | 1537 | mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt); |
c54876cd | 1538 | offload->mqprio.extack = extack; |
522d15ea | 1539 | taprio_sched_to_offload(dev, sched, offload, &caps); |
a721c3e5 | 1540 | mqprio_fp_to_offload(q->fp, &offload->mqprio); |
9c66d156 | 1541 | |
a54fc09e VO |
1542 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) |
1543 | offload->max_sdu[tc] = q->max_sdu[tc]; | |
1544 | ||
9c66d156 VCG |
1545 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); |
1546 | if (err < 0) { | |
c54876cd VO |
1547 | NL_SET_ERR_MSG_WEAK(extack, |
1548 | "Device failed to setup taprio offload"); | |
9c66d156 VCG |
1549 | goto done; |
1550 | } | |
1551 | ||
db46e3a8 VO |
1552 | q->offloaded = true; |
1553 | ||
9c66d156 | 1554 | done: |
c54876cd VO |
1555 | /* The offload structure may linger around via a reference taken by the |
1556 | * device driver, so clear the netlink extack pointer so that the | |
1557 | * driver isn't tempted to dereference data which has stopped being valid. | |
1558 | */ | |
1559 | offload->extack = NULL; | |
1560 | offload->mqprio.extack = NULL; | |
9c66d156 VCG |
1561 | taprio_offload_free(offload); |
1562 | ||
1563 | return err; | |
1564 | } | |
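/* On the driver side, the request built above arrives through
 * ndo_setup_tc(). A minimal sketch of a handler, for a hypothetical
 * driver (foo_program_schedule()/foo_wipe_schedule() are placeholders):
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		struct tc_taprio_qopt_offload *offload = type_data;
 *
 *		if (type != TC_SETUP_QDISC_TAPRIO)
 *			return -EOPNOTSUPP;
 *
 *		switch (offload->cmd) {
 *		case TAPRIO_CMD_REPLACE:
 *			return foo_program_schedule(dev, offload);
 *		case TAPRIO_CMD_DESTROY:
 *			return foo_wipe_schedule(dev);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */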
1565 | ||
1566 | static int taprio_disable_offload(struct net_device *dev, | |
1567 | struct taprio_sched *q, | |
1568 | struct netlink_ext_ack *extack) | |
1569 | { | |
1570 | const struct net_device_ops *ops = dev->netdev_ops; | |
1571 | struct tc_taprio_qopt_offload *offload; | |
1572 | int err; | |
1573 | ||
db46e3a8 | 1574 | if (!q->offloaded) |
9c66d156 VCG |
1575 | return 0; |
1576 | ||
9c66d156 VCG |
1577 | offload = taprio_offload_alloc(0); |
1578 | if (!offload) { | |
1579 | NL_SET_ERR_MSG(extack, | |
1580 | "Not enough memory to disable offload mode"); | |
1581 | return -ENOMEM; | |
1582 | } | |
2d800bc5 | 1583 | offload->cmd = TAPRIO_CMD_DESTROY; |
9c66d156 VCG |
1584 | |
1585 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); | |
1586 | if (err < 0) { | |
1587 | NL_SET_ERR_MSG(extack, | |
1588 | "Device failed to disable offload"); | |
1589 | goto out; | |
1590 | } | |
1591 | ||
db46e3a8 VO |
1592 | q->offloaded = false; |
1593 | ||
9c66d156 VCG |
1594 | out: |
1595 | taprio_offload_free(offload); | |
1596 | ||
1597 | return err; | |
1598 | } | |
1599 | ||
1600 | /* If full offload is enabled, the only possible clockid is the net device's | |
1601 | * PHC. For that reason, specifying a clockid through netlink is incorrect. | |
1602 | * For txtime-assist, it is implicitly assumed that the device's PHC is kept | |
1603 | * in sync with the specified clockid via a user space daemon such as phc2sys. | |
1604 | * For both software taprio and txtime-assist, the clockid is used for the | |
1605 | * hrtimer that advances the schedule, and is hence mandatory. | |
1606 | */ | |
1607 | static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb, | |
1608 | struct netlink_ext_ack *extack) | |
1609 | { | |
1610 | struct taprio_sched *q = qdisc_priv(sch); | |
1611 | struct net_device *dev = qdisc_dev(sch); | |
1612 | int err = -EINVAL; | |
1613 | ||
1614 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { | |
1615 | const struct ethtool_ops *ops = dev->ethtool_ops; | |
1616 | struct ethtool_ts_info info = { | |
1617 | .cmd = ETHTOOL_GET_TS_INFO, | |
1618 | .phc_index = -1, | |
1619 | }; | |
1620 | ||
1621 | if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { | |
1622 | NL_SET_ERR_MSG(extack, | |
1623 | "The 'clockid' cannot be specified for full offload"); | |
1624 | goto out; | |
1625 | } | |
1626 | ||
1627 | if (ops && ops->get_ts_info) | |
1628 | err = ops->get_ts_info(dev, &info); | |
1629 | ||
1630 | if (err || info.phc_index < 0) { | |
1631 | NL_SET_ERR_MSG(extack, | |
1632 | "Device does not have a PTP clock"); | |
1633 | err = -ENOTSUPP; | |
1634 | goto out; | |
1635 | } | |
1636 | } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { | |
1637 | int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); | |
6dc25401 | 1638 | enum tk_offsets tk_offset; |
9c66d156 VCG |
1639 | |
1640 | /* We only support static clockids, and we don't allow the clockid | |
1641 | * to be modified after the first init. | |
1642 | */ | |
1643 | if (clockid < 0 || | |
1644 | (q->clockid != -1 && q->clockid != clockid)) { | |
1645 | NL_SET_ERR_MSG(extack, | |
1646 | "Changing the 'clockid' of a running schedule is not supported"); | |
1647 | err = -ENOTSUPP; | |
1648 | goto out; | |
1649 | } | |
1650 | ||
1651 | switch (clockid) { | |
1652 | case CLOCK_REALTIME: | |
6dc25401 | 1653 | tk_offset = TK_OFFS_REAL; |
9c66d156 VCG |
1654 | break; |
1655 | case CLOCK_MONOTONIC: | |
6dc25401 | 1656 | tk_offset = TK_OFFS_MAX; |
9c66d156 VCG |
1657 | break; |
1658 | case CLOCK_BOOTTIME: | |
6dc25401 | 1659 | tk_offset = TK_OFFS_BOOT; |
9c66d156 VCG |
1660 | break; |
1661 | case CLOCK_TAI: | |
6dc25401 | 1662 | tk_offset = TK_OFFS_TAI; |
9c66d156 VCG |
1663 | break; |
1664 | default: | |
1665 | NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); | |
1666 | err = -EINVAL; | |
1667 | goto out; | |
1668 | } | |
6dc25401 ED |
1669 | /* This pairs with READ_ONCE() in taprio_mono_to_any */ |
1670 | WRITE_ONCE(q->tk_offset, tk_offset); | |
9c66d156 VCG |
1671 | |
1672 | q->clockid = clockid; | |
1673 | } else { | |
1674 | NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory"); | |
1675 | goto out; | |
1676 | } | |
a954380a VCG |
1677 | |
1678 | /* Everything went ok, return success. */ | |
1679 | err = 0; | |
1680 | ||
9c66d156 VCG |
1681 | out: |
1682 | return err; | |
1683 | } | |
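/* For illustration, a software-mode schedule that exercises this parser
 * could be installed with iproute2 along the lines of (interface name
 * and numbers are made up):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1000000000 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		clockid CLOCK_TAI
 *
 * With flags 0x2 (full offload), the clockid argument must be omitted,
 * as enforced above.
 */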
1684 | ||
a54fc09e VO |
1685 | static int taprio_parse_tc_entry(struct Qdisc *sch, |
1686 | struct nlattr *opt, | |
1687 | u32 max_sdu[TC_QOPT_MAX_QUEUE], | |
a721c3e5 | 1688 | u32 fp[TC_QOPT_MAX_QUEUE], |
a54fc09e VO |
1689 | unsigned long *seen_tcs, |
1690 | struct netlink_ext_ack *extack) | |
1691 | { | |
1692 | struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { }; | |
1693 | struct net_device *dev = qdisc_dev(sch); | |
a54fc09e | 1694 | int err, tc; |
a721c3e5 | 1695 | u32 val; |
a54fc09e VO |
1696 | |
1697 | err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt, | |
1698 | taprio_tc_policy, extack); | |
1699 | if (err < 0) | |
1700 | return err; | |
1701 | ||
1702 | if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) { | |
1703 | NL_SET_ERR_MSG_MOD(extack, "TC entry index missing"); | |
1704 | return -EINVAL; | |
1705 | } | |
1706 | ||
1707 | tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]); | |
1708 | if (tc >= TC_QOPT_MAX_QUEUE) { | |
1709 | NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range"); | |
1710 | return -ERANGE; | |
1711 | } | |
1712 | ||
1713 | if (*seen_tcs & BIT(tc)) { | |
1714 | NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry"); | |
1715 | return -EINVAL; | |
1716 | } | |
1717 | ||
1718 | *seen_tcs |= BIT(tc); | |
1719 | ||
a721c3e5 | 1720 | if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) { |
a54fc09e | 1721 | val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]); |
a721c3e5 VO |
1722 | if (val > dev->max_mtu) { |
1723 | NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU"); | |
1724 | return -ERANGE; | |
1725 | } | |
a54fc09e | 1726 | |
a721c3e5 | 1727 | max_sdu[tc] = val; |
a54fc09e VO |
1728 | } |
1729 | ||
a721c3e5 VO |
1730 | if (tb[TCA_TAPRIO_TC_ENTRY_FP]) |
1731 | fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]); | |
a54fc09e VO |
1732 | |
1733 | return 0; | |
1734 | } | |
1735 | ||
1736 | static int taprio_parse_tc_entries(struct Qdisc *sch, | |
1737 | struct nlattr *opt, | |
1738 | struct netlink_ext_ack *extack) | |
1739 | { | |
1740 | struct taprio_sched *q = qdisc_priv(sch); | |
a721c3e5 | 1741 | struct net_device *dev = qdisc_dev(sch); |
a54fc09e | 1742 | u32 max_sdu[TC_QOPT_MAX_QUEUE]; |
a721c3e5 | 1743 | bool have_preemption = false; |
a54fc09e | 1744 | unsigned long seen_tcs = 0; |
a721c3e5 | 1745 | u32 fp[TC_QOPT_MAX_QUEUE]; |
a54fc09e VO |
1746 | struct nlattr *n; |
1747 | int tc, rem; | |
1748 | int err = 0; | |
1749 | ||
a721c3e5 | 1750 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { |
a54fc09e | 1751 | max_sdu[tc] = q->max_sdu[tc]; |
a721c3e5 VO |
1752 | fp[tc] = q->fp[tc]; |
1753 | } | |
a54fc09e | 1754 | |
e8058a49 | 1755 | nla_for_each_nested_type(n, TCA_TAPRIO_ATTR_TC_ENTRY, opt, rem) { |
a721c3e5 | 1756 | err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs, |
fed87cc6 | 1757 | extack); |
a54fc09e | 1758 | if (err) |
a721c3e5 | 1759 | return err; |
a54fc09e VO |
1760 | } |
1761 | ||
a721c3e5 | 1762 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { |
a54fc09e | 1763 | q->max_sdu[tc] = max_sdu[tc]; |
a721c3e5 VO |
1764 | q->fp[tc] = fp[tc]; |
1765 | if (fp[tc] != TC_FP_EXPRESS) | |
1766 | have_preemption = true; | |
1767 | } | |
1768 | ||
1769 | if (have_preemption) { | |
1770 | if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) { | |
1771 | NL_SET_ERR_MSG(extack, | |
1772 | "Preemption only supported with full offload"); | |
1773 | return -EOPNOTSUPP; | |
1774 | } | |
1775 | ||
1776 | if (!ethtool_dev_mm_supported(dev)) { | |
1777 | NL_SET_ERR_MSG(extack, | |
1778 | "Device does not support preemption"); | |
1779 | return -EOPNOTSUPP; | |
1780 | } | |
1781 | } | |
a54fc09e | 1782 | |
a54fc09e VO |
1783 | return err; |
1784 | } | |
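/* The per-tc attributes parsed above correspond to the iproute2
 * "max-sdu" and "fp" options, roughly (illustrative syntax):
 *
 *	tc qdisc replace dev eth0 parent root taprio ... flags 0x2 \
 *		max-sdu 0 0 1500 \
 *		fp E E P
 *
 * where "P" requests a preemptible traffic class (fp[tc] != TC_FP_EXPRESS),
 * which, as checked above, additionally requires full offload and an
 * ethtool MAC merge capable device.
 */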
1785 | ||
b5a0faa3 IK |
1786 | static int taprio_mqprio_cmp(const struct net_device *dev, |
1787 | const struct tc_mqprio_qopt *mqprio) | |
1788 | { | |
1789 | int i; | |
1790 | ||
1791 | if (!mqprio || mqprio->num_tc != dev->num_tc) | |
1792 | return -1; | |
1793 | ||
1794 | for (i = 0; i < mqprio->num_tc; i++) | |
1795 | if (dev->tc_to_txq[i].count != mqprio->count[i] || | |
1796 | dev->tc_to_txq[i].offset != mqprio->offset[i]) | |
1797 | return -1; | |
1798 | ||
1799 | for (i = 0; i <= TC_BITMASK; i++) | |
1800 | if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) | |
1801 | return -1; | |
1802 | ||
1803 | return 0; | |
1804 | } | |
1805 | ||
5a781ccb VCG |
1806 | static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
1807 | struct netlink_ext_ack *extack) | |
1808 | { | |
a3d91b2c | 1809 | struct qdisc_size_table *stab = rtnl_dereference(sch->stab); |
5a781ccb | 1810 | struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { }; |
a3d43c0d | 1811 | struct sched_gate_list *oper, *admin, *new_admin; |
5a781ccb VCG |
1812 | struct taprio_sched *q = qdisc_priv(sch); |
1813 | struct net_device *dev = qdisc_dev(sch); | |
1814 | struct tc_mqprio_qopt *mqprio = NULL; | |
a3d43c0d | 1815 | unsigned long flags; |
0efc7e54 | 1816 | u32 taprio_flags; |
5a781ccb | 1817 | ktime_t start; |
9c66d156 | 1818 | int i, err; |
5a781ccb | 1819 | |
8cb08174 JB |
1820 | err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, |
1821 | taprio_policy, extack); | |
5a781ccb VCG |
1822 | if (err < 0) |
1823 | return err; | |
1824 | ||
5a781ccb VCG |
1825 | if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) |
1826 | mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); | |
1827 | ||
0efc7e54 AM |
1828 | /* The semantics of the 'flags' argument in relation to 'change()' |
1829 | * requests are interpreted following two rules (which are applied in | |
1830 | * this order): (1) an omitted 'flags' argument is interpreted as | |
1831 | * zero; (2) the 'flags' of a "running" taprio instance cannot be | |
1832 | * changed. | |
1833 | */ | |
1834 | taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0; | |
4cfd5779 | 1835 | |
0efc7e54 AM |
1836 | /* txtime-assist and full offload are mutually exclusive */ |
1837 | if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) && | |
1838 | (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) { | |
1839 | NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS], | |
1840 | "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive"); | |
1841 | return -EINVAL; | |
1842 | } | |
1843 | ||
1844 | if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) { | |
1845 | NL_SET_ERR_MSG_MOD(extack, | |
1846 | "Changing 'flags' of a running schedule is not supported"); | |
1847 | return -EOPNOTSUPP; | |
1848 | } | |
1849 | q->flags = taprio_flags; | |
4cfd5779 | 1850 | |
a9d62274 | 1851 | err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); |
5a781ccb VCG |
1852 | if (err < 0) |
1853 | return err; | |
1854 | ||
a54fc09e VO |
1855 | err = taprio_parse_tc_entries(sch, opt, extack); |
1856 | if (err) | |
1857 | return err; | |
1858 | ||
a3d43c0d VCG |
1859 | new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL); |
1860 | if (!new_admin) { | |
1861 | NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule"); | |
1862 | return -ENOMEM; | |
1863 | } | |
1864 | INIT_LIST_HEAD(&new_admin->entries); | |
5a781ccb | 1865 | |
18cdd2f0 VO |
1866 | oper = rtnl_dereference(q->oper_sched); |
1867 | admin = rtnl_dereference(q->admin_sched); | |
5a781ccb | 1868 | |
b5a0faa3 IK |
1869 | /* no changes - no new mqprio settings */ |
1870 | if (!taprio_mqprio_cmp(dev, mqprio)) | |
1871 | mqprio = NULL; | |
1872 | ||
a3d43c0d VCG |
1873 | if (mqprio && (oper || admin)) { |
1874 | NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); | |
1875 | err = -ENOTSUPP; | |
1876 | goto free_sched; | |
5a781ccb VCG |
1877 | } |
1878 | ||
5652e63d | 1879 | if (mqprio) { |
efe487fc HZ |
1880 | err = netdev_set_num_tc(dev, mqprio->num_tc); |
1881 | if (err) | |
1882 | goto free_sched; | |
2f530df7 | 1883 | for (i = 0; i < mqprio->num_tc; i++) { |
5652e63d VCG |
1884 | netdev_set_tc_queue(dev, i, |
1885 | mqprio->count[i], | |
1886 | mqprio->offset[i]); | |
2f530df7 VO |
1887 | q->cur_txq[i] = mqprio->offset[i]; |
1888 | } | |
5652e63d VCG |
1889 | |
1890 | /* Always use supplied priority mappings */ | |
1891 | for (i = 0; i <= TC_BITMASK; i++) | |
1892 | netdev_set_prio_tc_map(dev, i, | |
1893 | mqprio->prio_tc_map[i]); | |
1894 | } | |
1895 | ||
09dbdf28 VO |
1896 | err = parse_taprio_schedule(q, tb, new_admin, extack); |
1897 | if (err < 0) | |
1898 | goto free_sched; | |
1899 | ||
1900 | if (new_admin->num_entries == 0) { | |
1901 | NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule"); | |
1902 | err = -EINVAL; | |
1903 | goto free_sched; | |
1904 | } | |
1905 | ||
1906 | err = taprio_parse_clockid(sch, tb, extack); | |
1907 | if (err < 0) | |
1908 | goto free_sched; | |
1909 | ||
1910 | taprio_set_picos_per_byte(dev, q); | |
1911 | taprio_update_queue_max_sdu(q, new_admin, stab); | |
1912 | ||
a9d62274 | 1913 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) |
09e31cf0 | 1914 | err = taprio_enable_offload(dev, q, new_admin, extack); |
9c66d156 VCG |
1915 | else |
1916 | err = taprio_disable_offload(dev, q, extack); | |
1917 | if (err) | |
1918 | goto free_sched; | |
1919 | ||
a3d43c0d VCG |
1920 | /* Protects against enqueue()/dequeue() */ |
1921 | spin_lock_bh(qdisc_lock(sch)); | |
1922 | ||
4cfd5779 VP |
1923 | if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) { |
1924 | if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { | |
1925 | NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled"); | |
1926 | err = -EINVAL; | |
1927 | goto unlock; | |
1928 | } | |
1929 | ||
a5b64700 | 1930 | q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); |
4cfd5779 VP |
1931 | } |
1932 | ||
a9d62274 VCG |
1933 | if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && |
1934 | !FULL_OFFLOAD_IS_ENABLED(q->flags) && | |
4cfd5779 | 1935 | !hrtimer_active(&q->advance_timer)) { |
a3d43c0d VCG |
1936 | hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); |
1937 | q->advance_timer.function = advance_sched; | |
5a781ccb VCG |
1938 | } |
1939 | ||
a3d43c0d | 1940 | err = taprio_get_start_time(sch, new_admin, &start); |
8599099f AG |
1941 | if (err < 0) { |
1942 | NL_SET_ERR_MSG(extack, "Internal error: failed get start time"); | |
a3d43c0d | 1943 | goto unlock; |
8599099f | 1944 | } |
5a781ccb | 1945 | |
bfabd41d | 1946 | setup_txtime(q, new_admin, start); |
5a781ccb | 1947 | |
bfabd41d | 1948 | if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { |
4cfd5779 VP |
1949 | if (!oper) { |
1950 | rcu_assign_pointer(q->oper_sched, new_admin); | |
1951 | err = 0; | |
1952 | new_admin = NULL; | |
1953 | goto unlock; | |
1954 | } | |
a3d43c0d | 1955 | |
4cfd5779 VP |
1956 | rcu_assign_pointer(q->admin_sched, new_admin); |
1957 | if (admin) | |
1958 | call_rcu(&admin->rcu, taprio_free_sched_cb); | |
1959 | } else { | |
e5517551 | 1960 | setup_first_end_time(q, new_admin, start); |
a3d43c0d | 1961 | |
4cfd5779 VP |
1962 | /* Protects against advance_sched() */ |
1963 | spin_lock_irqsave(&q->current_entry_lock, flags); | |
1964 | ||
1965 | taprio_start_sched(sch, start, new_admin); | |
a3d43c0d | 1966 | |
4cfd5779 VP |
1967 | rcu_assign_pointer(q->admin_sched, new_admin); |
1968 | if (admin) | |
1969 | call_rcu(&admin->rcu, taprio_free_sched_cb); | |
a3d43c0d | 1970 | |
4cfd5779 | 1971 | spin_unlock_irqrestore(&q->current_entry_lock, flags); |
0763b3e8 | 1972 | |
a9d62274 | 1973 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) |
0763b3e8 | 1974 | taprio_offload_config_changed(q); |
4cfd5779 VP |
1975 | } |
1976 | ||
1977 | new_admin = NULL; | |
a3d43c0d VCG |
1978 | err = 0; |
1979 | ||
a3d91b2c VO |
1980 | if (!stab) |
1981 | NL_SET_ERR_MSG_MOD(extack, | |
1982 | "Size table not specified, frame length estimations may be inaccurate"); | |
1983 | ||
a3d43c0d VCG |
1984 | unlock: |
1985 | spin_unlock_bh(qdisc_lock(sch)); | |
1986 | ||
1987 | free_sched: | |
51650d33 IK |
1988 | if (new_admin) |
1989 | call_rcu(&new_admin->rcu, taprio_free_sched_cb); | |
a3d43c0d VCG |
1990 | |
1991 | return err; | |
5a781ccb VCG |
1992 | } |
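/* To summarize taprio_change(): the new schedule is always built as
 * "new_admin". In txtime-assist mode it becomes oper directly if no oper
 * schedule exists yet, otherwise it is installed as admin; in the other
 * modes it is installed as admin and later promoted to oper, either by
 * advance_sched() (software) or by the device itself at base_time (full
 * offload).
 */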
1993 | ||
44d4775c DC |
1994 | static void taprio_reset(struct Qdisc *sch) |
1995 | { | |
1996 | struct taprio_sched *q = qdisc_priv(sch); | |
1997 | struct net_device *dev = qdisc_dev(sch); | |
1998 | int i; | |
1999 | ||
2000 | hrtimer_cancel(&q->advance_timer); | |
3a415d59 | 2001 | |
44d4775c | 2002 | if (q->qdiscs) { |
698285da DC |
2003 | for (i = 0; i < dev->num_tx_queues; i++) |
2004 | if (q->qdiscs[i]) | |
2005 | qdisc_reset(q->qdiscs[i]); | |
44d4775c | 2006 | } |
44d4775c DC |
2007 | } |
2008 | ||
5a781ccb VCG |
2009 | static void taprio_destroy(struct Qdisc *sch) |
2010 | { | |
2011 | struct taprio_sched *q = qdisc_priv(sch); | |
2012 | struct net_device *dev = qdisc_dev(sch); | |
9af23657 | 2013 | struct sched_gate_list *oper, *admin; |
5a781ccb VCG |
2014 | unsigned int i; |
2015 | ||
7b9eba7b | 2016 | list_del(&q->taprio_list); |
7b9eba7b | 2017 | |
a56d447f ED |
2018 | /* Note that taprio_reset() might not be called if an error |
2019 | * happens in qdisc_create(), after taprio_init() has been called. | |
2020 | */ | |
2021 | hrtimer_cancel(&q->advance_timer); | |
3a415d59 | 2022 | qdisc_synchronize(sch); |
5a781ccb | 2023 | |
9c66d156 VCG |
2024 | taprio_disable_offload(dev, q, NULL); |
2025 | ||
5a781ccb | 2026 | if (q->qdiscs) { |
698285da | 2027 | for (i = 0; i < dev->num_tx_queues; i++) |
5a781ccb VCG |
2028 | qdisc_put(q->qdiscs[i]); |
2029 | ||
2030 | kfree(q->qdiscs); | |
2031 | } | |
2032 | q->qdiscs = NULL; | |
2033 | ||
7c16680a | 2034 | netdev_reset_tc(dev); |
5a781ccb | 2035 | |
9af23657 VO |
2036 | oper = rtnl_dereference(q->oper_sched); |
2037 | admin = rtnl_dereference(q->admin_sched); | |
2038 | ||
2039 | if (oper) | |
2040 | call_rcu(&oper->rcu, taprio_free_sched_cb); | |
a3d43c0d | 2041 | |
9af23657 VO |
2042 | if (admin) |
2043 | call_rcu(&admin->rcu, taprio_free_sched_cb); | |
2f530df7 VO |
2044 | |
2045 | taprio_cleanup_broken_mqprio(q); | |
5a781ccb VCG |
2046 | } |
2047 | ||
2048 | static int taprio_init(struct Qdisc *sch, struct nlattr *opt, | |
2049 | struct netlink_ext_ack *extack) | |
2050 | { | |
2051 | struct taprio_sched *q = qdisc_priv(sch); | |
2052 | struct net_device *dev = qdisc_dev(sch); | |
a721c3e5 | 2053 | int i, tc; |
5a781ccb | 2054 | |
5a781ccb VCG |
2055 | spin_lock_init(&q->current_entry_lock); |
2056 | ||
5a781ccb | 2057 | hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); |
a3d43c0d | 2058 | q->advance_timer.function = advance_sched; |
5a781ccb VCG |
2059 | |
2060 | q->root = sch; | |
2061 | ||
2062 | /* We only support static clockids. Use an invalid value as default | |
2063 | * and get the valid one on taprio_change(). | |
2064 | */ | |
2065 | q->clockid = -1; | |
a9d62274 | 2066 | q->flags = TAPRIO_FLAGS_INVALID; |
5a781ccb | 2067 | |
efb55222 | 2068 | list_add(&q->taprio_list, &taprio_list); |
efb55222 | 2069 | |
026de64d VO |
2070 | if (sch->parent != TC_H_ROOT) { |
2071 | NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc"); | |
5a781ccb | 2072 | return -EOPNOTSUPP; |
026de64d | 2073 | } |
5a781ccb | 2074 | |
026de64d VO |
2075 | if (!netif_is_multiqueue(dev)) { |
2076 | NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required"); | |
5a781ccb | 2077 | return -EOPNOTSUPP; |
026de64d | 2078 | } |
5a781ccb | 2079 | |
6e0ec800 | 2080 | q->qdiscs = kcalloc(dev->num_tx_queues, sizeof(q->qdiscs[0]), |
5a781ccb | 2081 | GFP_KERNEL); |
5a781ccb VCG |
2082 | if (!q->qdiscs) |
2083 | return -ENOMEM; | |
2084 | ||
2085 | if (!opt) | |
2086 | return -EINVAL; | |
2087 | ||
a3d43c0d VCG |
2088 | for (i = 0; i < dev->num_tx_queues; i++) { |
2089 | struct netdev_queue *dev_queue; | |
2090 | struct Qdisc *qdisc; | |
2091 | ||
2092 | dev_queue = netdev_get_tx_queue(dev, i); | |
2093 | qdisc = qdisc_create_dflt(dev_queue, | |
2094 | &pfifo_qdisc_ops, | |
2095 | TC_H_MAKE(TC_H_MAJ(sch->handle), | |
2096 | TC_H_MIN(i + 1)), | |
2097 | extack); | |
2098 | if (!qdisc) | |
2099 | return -ENOMEM; | |
2100 | ||
2101 | if (i < dev->real_num_tx_queues) | |
2102 | qdisc_hash_add(qdisc, false); | |
2103 | ||
2104 | q->qdiscs[i] = qdisc; | |
2105 | } | |
2106 | ||
a721c3e5 VO |
2107 | for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) |
2108 | q->fp[tc] = TC_FP_EXPRESS; | |
2109 | ||
2f530df7 VO |
2110 | taprio_detect_broken_mqprio(q); |
2111 | ||
5a781ccb VCG |
2112 | return taprio_change(sch, opt, extack); |
2113 | } | |
2114 | ||
13511704 YV |
2115 | static void taprio_attach(struct Qdisc *sch) |
2116 | { | |
2117 | struct taprio_sched *q = qdisc_priv(sch); | |
2118 | struct net_device *dev = qdisc_dev(sch); | |
2119 | unsigned int ntx; | |
2120 | ||
2121 | /* Attach underlying qdisc */ | |
2122 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | |
09e0c3bb | 2123 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx); |
25b0d4e4 | 2124 | struct Qdisc *old, *dev_queue_qdisc; |
13511704 YV |
2125 | |
2126 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { | |
09e0c3bb VO |
2127 | struct Qdisc *qdisc = q->qdiscs[ntx]; |
2128 | ||
25b0d4e4 VO |
2129 | /* In offload mode, the root taprio qdisc is bypassed |
2130 | * and the netdev TX queues see the children directly | |
2131 | */ | |
13511704 | 2132 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
25b0d4e4 | 2133 | dev_queue_qdisc = qdisc; |
13511704 | 2134 | } else { |
09e0c3bb VO |
2135 | /* In software mode, attach the root taprio qdisc |
2136 | * to all netdev TX queues, so that dev_qdisc_enqueue() | |
2137 | * goes through taprio_enqueue(). | |
2138 | */ | |
25b0d4e4 | 2139 | dev_queue_qdisc = sch; |
13511704 | 2140 | } |
25b0d4e4 VO |
2141 | old = dev_graft_qdisc(dev_queue, dev_queue_qdisc); |
2142 | /* The qdisc's refcount needs to be elevated once | |
2143 | * for each netdev TX queue it is grafted onto. | |
2144 | */ | |
2145 | qdisc_refcount_inc(dev_queue_qdisc); | |
13511704 YV |
2146 | if (old) |
2147 | qdisc_put(old); | |
2148 | } | |
13511704 YV |
2149 | } |
2150 | ||
5a781ccb VCG |
2151 | static struct netdev_queue *taprio_queue_get(struct Qdisc *sch, |
2152 | unsigned long cl) | |
2153 | { | |
2154 | struct net_device *dev = qdisc_dev(sch); | |
2155 | unsigned long ntx = cl - 1; | |
2156 | ||
2157 | if (ntx >= dev->num_tx_queues) | |
2158 | return NULL; | |
2159 | ||
2160 | return netdev_get_tx_queue(dev, ntx); | |
2161 | } | |
2162 | ||
2163 | static int taprio_graft(struct Qdisc *sch, unsigned long cl, | |
2164 | struct Qdisc *new, struct Qdisc **old, | |
2165 | struct netlink_ext_ack *extack) | |
2166 | { | |
2167 | struct taprio_sched *q = qdisc_priv(sch); | |
2168 | struct net_device *dev = qdisc_dev(sch); | |
2169 | struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); | |
2170 | ||
2171 | if (!dev_queue) | |
2172 | return -EINVAL; | |
2173 | ||
2174 | if (dev->flags & IFF_UP) | |
2175 | dev_deactivate(dev); | |
2176 | ||
25b0d4e4 VO |
2177 | /* In offload mode, the child Qdisc is directly attached to the netdev |
2178 | * TX queue, and thus, we need to keep its refcount elevated in order | |
2179 | * to counteract qdisc_graft()'s call to qdisc_put() once per TX queue. | |
2180 | * However, save the reference to the new qdisc in the private array in | |
2181 | * both software and offload cases, to have an up-to-date reference to | |
2182 | * our children. | |
2183 | */ | |
2184 | *old = q->qdiscs[cl - 1]; | |
13511704 | 2185 | if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { |
25b0d4e4 VO |
2186 | WARN_ON_ONCE(dev_graft_qdisc(dev_queue, new) != *old); |
2187 | if (new) | |
2188 | qdisc_refcount_inc(new); | |
2189 | if (*old) | |
2190 | qdisc_put(*old); | |
13511704 | 2191 | } |
5a781ccb | 2192 | |
25b0d4e4 | 2193 | q->qdiscs[cl - 1] = new; |
5a781ccb VCG |
2194 | if (new) |
2195 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; | |
2196 | ||
2197 | if (dev->flags & IFF_UP) | |
2198 | dev_activate(dev); | |
2199 | ||
2200 | return 0; | |
2201 | } | |
2202 | ||
2203 | static int dump_entry(struct sk_buff *msg, | |
2204 | const struct sched_entry *entry) | |
2205 | { | |
2206 | struct nlattr *item; | |
2207 | ||
ae0be8de | 2208 | item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY); |
5a781ccb VCG |
2209 | if (!item) |
2210 | return -ENOSPC; | |
2211 | ||
2212 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index)) | |
2213 | goto nla_put_failure; | |
2214 | ||
2215 | if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command)) | |
2216 | goto nla_put_failure; | |
2217 | ||
2218 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, | |
2219 | entry->gate_mask)) | |
2220 | goto nla_put_failure; | |
2221 | ||
2222 | if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL, | |
2223 | entry->interval)) | |
2224 | goto nla_put_failure; | |
2225 | ||
2226 | return nla_nest_end(msg, item); | |
2227 | ||
2228 | nla_put_failure: | |
2229 | nla_nest_cancel(msg, item); | |
2230 | return -1; | |
2231 | } | |
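/* The resulting netlink layout for a single entry mirrors the calls
 * above:
 *
 *	TCA_TAPRIO_SCHED_ENTRY (nest)
 *		TCA_TAPRIO_SCHED_ENTRY_INDEX (u32)
 *		TCA_TAPRIO_SCHED_ENTRY_CMD (u8)
 *		TCA_TAPRIO_SCHED_ENTRY_GATE_MASK (u32)
 *		TCA_TAPRIO_SCHED_ENTRY_INTERVAL (u32)
 */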
2232 | ||
a3d43c0d VCG |
2233 | static int dump_schedule(struct sk_buff *msg, |
2234 | const struct sched_gate_list *root) | |
2235 | { | |
2236 | struct nlattr *entry_list; | |
2237 | struct sched_entry *entry; | |
2238 | ||
2239 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME, | |
2240 | root->base_time, TCA_TAPRIO_PAD)) | |
2241 | return -1; | |
2242 | ||
6ca6a665 VCG |
2243 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, |
2244 | root->cycle_time, TCA_TAPRIO_PAD)) | |
2245 | return -1; | |
2246 | ||
c25031e9 VCG |
2247 | if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, |
2248 | root->cycle_time_extension, TCA_TAPRIO_PAD)) | |
2249 | return -1; | |
2250 | ||
a3d43c0d VCG |
2251 | entry_list = nla_nest_start_noflag(msg, |
2252 | TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST); | |
2253 | if (!entry_list) | |
2254 | goto error_nest; | |
2255 | ||
2256 | list_for_each_entry(entry, &root->entries, list) { | |
2257 | if (dump_entry(msg, entry) < 0) | |
2258 | goto error_nest; | |
2259 | } | |
2260 | ||
2261 | nla_nest_end(msg, entry_list); | |
2262 | return 0; | |
2263 | ||
2264 | error_nest: | |
2265 | nla_nest_cancel(msg, entry_list); | |
2266 | return -1; | |
2267 | } | |
2268 | ||
fed87cc6 | 2269 | static int taprio_dump_tc_entries(struct sk_buff *skb, |
a721c3e5 | 2270 | struct taprio_sched *q, |
fed87cc6 | 2271 | struct sched_gate_list *sched) |
a54fc09e VO |
2272 | { |
2273 | struct nlattr *n; | |
2274 | int tc; | |
2275 | ||
2276 | for (tc = 0; tc < TC_MAX_QUEUE; tc++) { | |
2277 | n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY); | |
2278 | if (!n) | |
2279 | return -EMSGSIZE; | |
2280 | ||
2281 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc)) | |
2282 | goto nla_put_failure; | |
2283 | ||
2284 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU, | |
fed87cc6 | 2285 | sched->max_sdu[tc])) |
a54fc09e VO |
2286 | goto nla_put_failure; |
2287 | ||
a721c3e5 VO |
2288 | if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc])) |
2289 | goto nla_put_failure; | |
2290 | ||
a54fc09e VO |
2291 | nla_nest_end(skb, n); |
2292 | } | |
2293 | ||
2294 | return 0; | |
2295 | ||
2296 | nla_put_failure: | |
2297 | nla_nest_cancel(skb, n); | |
2298 | return -EMSGSIZE; | |
2299 | } | |
2300 | ||
6c1adb65 VO |
2301 | static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) |
2302 | { | |
2303 | if (val == TAPRIO_STAT_NOT_SET) | |
2304 | return 0; | |
2305 | if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD)) | |
2306 | return -EMSGSIZE; | |
2307 | return 0; | |
2308 | } | |
2309 | ||
2310 | static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d, | |
2311 | struct tc_taprio_qopt_offload *offload, | |
2312 | struct tc_taprio_qopt_stats *stats) | |
2313 | { | |
2314 | struct net_device *dev = qdisc_dev(sch); | |
2315 | const struct net_device_ops *ops; | |
2316 | struct sk_buff *skb = d->skb; | |
2317 | struct nlattr *xstats; | |
2318 | int err; | |
2319 | ||
2320 | ops = qdisc_dev(sch)->netdev_ops; | |
2321 | ||
2322 | /* FIXME I could use qdisc_offload_dump_helper(), but that messes | |
2323 | * with sch->flags depending on whether the device reports taprio | |
2324 | * stats, and I'm not sure whether that's a good idea, considering | |
2325 | * that stats are optional to the offload itself | |
2326 | */ | |
2327 | if (!ops->ndo_setup_tc) | |
2328 | return 0; | |
2329 | ||
2330 | memset(stats, 0xff, sizeof(*stats)); | |
2331 | ||
2332 | err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); | |
2333 | if (err == -EOPNOTSUPP) | |
2334 | return 0; | |
2335 | if (err) | |
2336 | return err; | |
2337 | ||
2338 | xstats = nla_nest_start(skb, TCA_STATS_APP); | |
2339 | if (!xstats) | |
2340 | goto err; | |
2341 | ||
2342 | if (taprio_put_stat(skb, stats->window_drops, | |
2343 | TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) || | |
2344 | taprio_put_stat(skb, stats->tx_overruns, | |
2345 | TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS)) | |
2346 | goto err_cancel; | |
2347 | ||
2348 | nla_nest_end(skb, xstats); | |
2349 | ||
2350 | return 0; | |
2351 | ||
2352 | err_cancel: | |
2353 | nla_nest_cancel(skb, xstats); | |
2354 | err: | |
2355 | return -EMSGSIZE; | |
2356 | } | |
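/* The memset() to 0xff above pre-fills every counter with
 * TAPRIO_STAT_NOT_SET (~0ULL), so a driver only needs to write the
 * counters it actually maintains; taprio_put_stat() then silently skips
 * the untouched ones when dumping.
 */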
2357 | ||
2358 | static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d) | |
2359 | { | |
2360 | struct tc_taprio_qopt_offload offload = { | |
2361 | .cmd = TAPRIO_CMD_STATS, | |
2362 | }; | |
2363 | ||
2364 | return taprio_dump_xstats(sch, d, &offload, &offload.stats); | |
2365 | } | |
2366 | ||
5a781ccb VCG |
2367 | static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) |
2368 | { | |
2369 | struct taprio_sched *q = qdisc_priv(sch); | |
2370 | struct net_device *dev = qdisc_dev(sch); | |
a3d43c0d | 2371 | struct sched_gate_list *oper, *admin; |
5a781ccb | 2372 | struct tc_mqprio_qopt opt = { 0 }; |
a3d43c0d | 2373 | struct nlattr *nest, *sched_nest; |
5a781ccb | 2374 | |
18cdd2f0 VO |
2375 | oper = rtnl_dereference(q->oper_sched); |
2376 | admin = rtnl_dereference(q->admin_sched); | |
a3d43c0d | 2377 | |
9dd6ad67 | 2378 | mqprio_qopt_reconstruct(dev, &opt); |
5a781ccb | 2379 | |
ae0be8de | 2380 | nest = nla_nest_start_noflag(skb, TCA_OPTIONS); |
5a781ccb | 2381 | if (!nest) |
a3d43c0d | 2382 | goto start_error; |
5a781ccb VCG |
2383 | |
2384 | if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) | |
2385 | goto options_error; | |
2386 | ||
9c66d156 VCG |
2387 | if (!FULL_OFFLOAD_IS_ENABLED(q->flags) && |
2388 | nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) | |
5a781ccb VCG |
2389 | goto options_error; |
2390 | ||
4cfd5779 VP |
2391 | if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) |
2392 | goto options_error; | |
2393 | ||
2394 | if (q->txtime_delay && | |
a5b64700 | 2395 | nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) |
4cfd5779 VP |
2396 | goto options_error; |
2397 | ||
a721c3e5 | 2398 | if (oper && taprio_dump_tc_entries(skb, q, oper)) |
a54fc09e VO |
2399 | goto options_error; |
2400 | ||
a3d43c0d | 2401 | if (oper && dump_schedule(skb, oper)) |
5a781ccb VCG |
2402 | goto options_error; |
2403 | ||
a3d43c0d VCG |
2404 | if (!admin) |
2405 | goto done; | |
2406 | ||
2407 | sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED); | |
e4acf427 CIK |
2408 | if (!sched_nest) |
2409 | goto options_error; | |
5a781ccb | 2410 | |
a3d43c0d VCG |
2411 | if (dump_schedule(skb, admin)) |
2412 | goto admin_error; | |
2413 | ||
2414 | nla_nest_end(skb, sched_nest); | |
2415 | ||
2416 | done: | |
5a781ccb VCG |
2417 | return nla_nest_end(skb, nest); |
2418 | ||
a3d43c0d VCG |
2419 | admin_error: |
2420 | nla_nest_cancel(skb, sched_nest); | |
2421 | ||
5a781ccb VCG |
2422 | options_error: |
2423 | nla_nest_cancel(skb, nest); | |
a3d43c0d VCG |
2424 | |
2425 | start_error: | |
a3d43c0d | 2426 | return -ENOSPC; |
5a781ccb VCG |
2427 | } |
2428 | ||
2429 | static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) | |
2430 | { | |
98766add VO |
2431 | struct taprio_sched *q = qdisc_priv(sch); |
2432 | struct net_device *dev = qdisc_dev(sch); | |
2433 | unsigned int ntx = cl - 1; | |
5a781ccb | 2434 | |
98766add | 2435 | if (ntx >= dev->num_tx_queues) |
5a781ccb VCG |
2436 | return NULL; |
2437 | ||
98766add | 2438 | return q->qdiscs[ntx]; |
5a781ccb VCG |
2439 | } |
2440 | ||
2441 | static unsigned long taprio_find(struct Qdisc *sch, u32 classid) | |
2442 | { | |
2443 | unsigned int ntx = TC_H_MIN(classid); | |
2444 | ||
2445 | if (!taprio_queue_get(sch, ntx)) | |
2446 | return 0; | |
2447 | return ntx; | |
2448 | } | |
2449 | ||
2450 | static int taprio_dump_class(struct Qdisc *sch, unsigned long cl, | |
2451 | struct sk_buff *skb, struct tcmsg *tcm) | |
2452 | { | |
665338b2 | 2453 | struct Qdisc *child = taprio_leaf(sch, cl); |
5a781ccb VCG |
2454 | |
2455 | tcm->tcm_parent = TC_H_ROOT; | |
2456 | tcm->tcm_handle |= TC_H_MIN(cl); | |
665338b2 | 2457 | tcm->tcm_info = child->handle; |
5a781ccb VCG |
2458 | |
2459 | return 0; | |
2460 | } | |
2461 | ||
2462 | static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |
2463 | struct gnet_dump *d) | |
2464 | __releases(d->lock) | |
2465 | __acquires(d->lock) | |
2466 | { | |
665338b2 | 2467 | struct Qdisc *child = taprio_leaf(sch, cl); |
6c1adb65 | 2468 | struct tc_taprio_qopt_offload offload = { |
2b84960f VO |
2469 | .cmd = TAPRIO_CMD_QUEUE_STATS, |
2470 | .queue_stats = { | |
2471 | .queue = cl - 1, | |
6c1adb65 VO |
2472 | }, |
2473 | }; | |
5a781ccb | 2474 | |
dced11ef VO |
2475 | if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 || |
2476 | qdisc_qstats_copy(d, child) < 0) | |
5a781ccb | 2477 | return -1; |
6c1adb65 | 2478 | |
2b84960f | 2479 | return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats); |
5a781ccb VCG |
2480 | } |
2481 | ||
2482 | static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |
2483 | { | |
2484 | struct net_device *dev = qdisc_dev(sch); | |
2485 | unsigned long ntx; | |
2486 | ||
2487 | if (arg->stop) | |
2488 | return; | |
2489 | ||
2490 | arg->count = arg->skip; | |
2491 | for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { | |
e046fa89 | 2492 | if (!tc_qdisc_stats_dump(sch, ntx + 1, arg)) |
5a781ccb | 2493 | break; |
5a781ccb VCG |
2494 | } |
2495 | } | |
2496 | ||
2497 | static struct netdev_queue *taprio_select_queue(struct Qdisc *sch, | |
2498 | struct tcmsg *tcm) | |
2499 | { | |
2500 | return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent)); | |
2501 | } | |
2502 | ||
2503 | static const struct Qdisc_class_ops taprio_class_ops = { | |
2504 | .graft = taprio_graft, | |
2505 | .leaf = taprio_leaf, | |
2506 | .find = taprio_find, | |
2507 | .walk = taprio_walk, | |
2508 | .dump = taprio_dump_class, | |
2509 | .dump_stats = taprio_dump_class_stats, | |
2510 | .select_queue = taprio_select_queue, | |
2511 | }; | |
2512 | ||
2513 | static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { | |
2514 | .cl_ops = &taprio_class_ops, | |
2515 | .id = "taprio", | |
2516 | .priv_size = sizeof(struct taprio_sched), | |
2517 | .init = taprio_init, | |
a3d43c0d | 2518 | .change = taprio_change, |
5a781ccb | 2519 | .destroy = taprio_destroy, |
44d4775c | 2520 | .reset = taprio_reset, |
13511704 | 2521 | .attach = taprio_attach, |
5a781ccb VCG |
2522 | .peek = taprio_peek, |
2523 | .dequeue = taprio_dequeue, | |
2524 | .enqueue = taprio_enqueue, | |
2525 | .dump = taprio_dump, | |
6c1adb65 | 2526 | .dump_stats = taprio_dump_stats, |
5a781ccb VCG |
2527 | .owner = THIS_MODULE, |
2528 | }; | |
241a94ab | 2529 | MODULE_ALIAS_NET_SCH("taprio"); |
5a781ccb | 2530 | |
7b9eba7b LD |
2531 | static struct notifier_block taprio_device_notifier = { |
2532 | .notifier_call = taprio_dev_notifier, | |
2533 | }; | |
2534 | ||
5a781ccb VCG |
2535 | static int __init taprio_module_init(void) |
2536 | { | |
7b9eba7b LD |
2537 | int err = register_netdevice_notifier(&taprio_device_notifier); |
2538 | ||
2539 | if (err) | |
2540 | return err; | |
2541 | ||
5a781ccb VCG |
2542 | return register_qdisc(&taprio_qdisc_ops); |
2543 | } | |
2544 | ||
2545 | static void __exit taprio_module_exit(void) | |
2546 | { | |
2547 | unregister_qdisc(&taprio_qdisc_ops); | |
7b9eba7b | 2548 | unregister_netdevice_notifier(&taprio_device_notifier); |
5a781ccb VCG |
2549 | } |
2550 | ||
2551 | module_init(taprio_module_init); | |
2552 | module_exit(taprio_module_exit); | |
2553 | MODULE_LICENSE("GPL"); | |
f96118c5 | 2554 | MODULE_DESCRIPTION("Time Aware Priority qdisc"); |