// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
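/*
 * Design note: each cw1200_queue owns a preallocated pool of
 * cw1200_queue_item slots threaded onto three lists (free_pool, queue,
 * pending), so enqueue/dequeue/release are O(1) list moves with no
 * per-packet allocation.  Per-link counters (link_map_cache) are
 * mirrored into the shared cw1200_queue_stats so that waiters on
 * wait_link_id_empty can be woken when a link drains.
 */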
/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};
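/* tx_locked_cnt is a nesting counter: the mac80211 queue is stopped on
 * the first lock and woken again only when the final unlock brings the
 * counter back to zero.
 */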
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}
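/* A packet ID packs four 8-bit fields into a u32:
 *
 *   31..24  queue generation (bumped by cw1200_queue_clear())
 *   23..16  queue ID
 *   15..8   item generation (bumped on every requeue/remove)
 *    7..0   item index within queue->pool
 *
 * Stale IDs are detected by comparing the generation fields against
 * the current values before a slot is touched.
 */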
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}
static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}
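/* Garbage collection runs in two phases: expired items are copied onto
 * a private gc_list while the queue spinlock is held, and the skb
 * destructors are invoked by cw1200_queue_post_gc() only after the
 * lock has been dropped.
 */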
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmalloc(sizeof(struct cw1200_queue_item),
			  GFP_ATOMIC);
	BUG_ON(!gc_item);
	memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
	list_add_tail(&gc_item->head, gc_list);
}
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}
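/* Timer callback: reap frames that have outlived queue->ttl under the
 * queue lock, then destruct them outside of it.
 */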
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}
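/* Drop every queued and pending frame.  Bumping queue->generation
 * invalidates all packet IDs still in flight, so late confirmations
 * for flushed frames are rejected with -ENOENT.
 */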
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
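/* Enqueue an skb: claim a slot from the free pool, stamp it with a
 * fresh packet ID and update per-link accounting.  When the queue is
 * nearly full it is marked overfull, TX is stopped and the GC timer is
 * kicked so that expired frames free up slots.
 */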
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
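/* Fetch the first queued frame matching link_id_map and move it to the
 * pending list.  The packet ID is written into the frame's wsm_tx
 * header so the device's TX confirmation can be matched back to this
 * slot.
 */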
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}
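/* Return a pending frame to the TX queue (e.g. when a transmission
 * attempt must be retried).  The item generation is bumped so the
 * previous packet ID can no longer address this slot.
 */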
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}
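/* Complete a pending frame: release its slot back to the free pool and
 * run the skb destructor after the queue lock has been dropped.
 */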
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Use list_move rather than list_move_tail: the freed
		 * slot goes to the head of free_pool, so the next
		 * allocation reuses it while its cache line is still
		 * hot.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}
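/* Look up the skb and txpriv for a pending packet ID without changing
 * queue state; the usual generation checks reject stale IDs.
 */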
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
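/* Report whether any frames are queued for the links in link_id_map;
 * a map of ~0 means "any link".
 */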
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}