/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
19 | static struct mt76_txwi_cache * | |
20 | mt76_alloc_txwi(struct mt76_dev *dev) | |
21 | { | |
22 | struct mt76_txwi_cache *t; | |
23 | dma_addr_t addr; | |
24 | int size; | |
25 | ||
26 | size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1); | |
27 | t = devm_kzalloc(dev->dev, size, GFP_ATOMIC); | |
28 | if (!t) | |
29 | return NULL; | |
30 | ||
31 | addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi), | |
32 | DMA_TO_DEVICE); | |
33 | t->dma_addr = addr; | |
34 | ||
35 | return t; | |
36 | } | |
37 | ||
38 | static struct mt76_txwi_cache * | |
39 | __mt76_get_txwi(struct mt76_dev *dev) | |
40 | { | |
41 | struct mt76_txwi_cache *t = NULL; | |
42 | ||
43 | spin_lock_bh(&dev->lock); | |
44 | if (!list_empty(&dev->txwi_cache)) { | |
45 | t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, | |
46 | list); | |
47 | list_del(&t->list); | |
48 | } | |
49 | spin_unlock_bh(&dev->lock); | |
50 | ||
51 | return t; | |
52 | } | |
53 | ||
/* Get a txwi cache entry: reuse a cached one if possible, otherwise
 * allocate a fresh one
 */
struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (!t)
		t = mt76_alloc_txwi(dev);

	return t;
}
65 | void | |
66 | mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) | |
67 | { | |
68 | if (!t) | |
69 | return; | |
70 | ||
71 | spin_lock_bh(&dev->lock); | |
72 | list_add(&t->list, &dev->txwi_cache); | |
73 | spin_unlock_bh(&dev->lock); | |
74 | } | |
75 | ||
76 | void mt76_tx_free(struct mt76_dev *dev) | |
77 | { | |
78 | struct mt76_txwi_cache *t; | |
79 | ||
80 | while ((t = __mt76_get_txwi(dev)) != NULL) | |
81 | dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi), | |
82 | DMA_TO_DEVICE); | |
83 | } | |
84 | ||
85 | static int | |
86 | mt76_txq_get_qid(struct ieee80211_txq *txq) | |
87 | { | |
88 | if (!txq->sta) | |
89 | return MT_TXQ_BE; | |
90 | ||
91 | return txq->ac; | |
92 | } | |
93 | ||
49f45fa1 FF |
94 | static void |
95 | mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) | |
96 | { | |
97 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | |
98 | ||
5155938d FF |
99 | if (!ieee80211_is_data_qos(hdr->frame_control) || |
100 | !ieee80211_is_data_present(hdr->frame_control)) | |
49f45fa1 FF |
101 | return; |
102 | ||
103 | mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; | |
104 | } | |
105 | ||
79d1c94c FF |
106 | void |
107 | mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) | |
108 | __acquires(&dev->status_list.lock) | |
109 | { | |
110 | __skb_queue_head_init(list); | |
111 | spin_lock_bh(&dev->status_list.lock); | |
112 | __acquire(&dev->status_list.lock); | |
113 | } | |
114 | EXPORT_SYMBOL_GPL(mt76_tx_status_lock); | |
115 | ||
116 | void | |
117 | mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) | |
118 | __releases(&dev->status_list.unlock) | |
119 | { | |
120 | struct sk_buff *skb; | |
121 | ||
122 | spin_unlock_bh(&dev->status_list.lock); | |
123 | __release(&dev->status_list.unlock); | |
124 | ||
125 | while ((skb = __skb_dequeue(list)) != NULL) | |
126 | ieee80211_tx_status(dev->hw, skb); | |
127 | } | |
128 | EXPORT_SYMBOL_GPL(mt76_tx_status_unlock); | |
129 | ||
88046b2c | 130 | static void |
79d1c94c FF |
131 | __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, |
132 | struct sk_buff_head *list) | |
88046b2c FF |
133 | { |
134 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
135 | struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); | |
136 | u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE; | |
137 | ||
138 | flags |= cb->flags; | |
139 | cb->flags = flags; | |
140 | ||
141 | if ((flags & done) != done) | |
142 | return; | |
143 | ||
144 | __skb_unlink(skb, &dev->status_list); | |
145 | ||
146 | /* Tx status can be unreliable. if it fails, mark the frame as ACKed */ | |
147 | if (flags & MT_TX_CB_TXS_FAILED) { | |
148 | ieee80211_tx_info_clear_status(info); | |
149 | info->status.rates[0].idx = -1; | |
150 | info->flags |= IEEE80211_TX_STAT_ACK; | |
151 | } | |
152 | ||
79d1c94c | 153 | __skb_queue_tail(list, skb); |
88046b2c FF |
154 | } |
155 | ||
156 | void | |
79d1c94c FF |
157 | mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, |
158 | struct sk_buff_head *list) | |
88046b2c | 159 | { |
79d1c94c | 160 | __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list); |
88046b2c FF |
161 | } |
162 | EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done); | |
163 | ||
164 | int | |
165 | mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, | |
166 | struct sk_buff *skb) | |
167 | { | |
168 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
169 | struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); | |
170 | int pid; | |
171 | ||
172 | if (!wcid) | |
173 | return 0; | |
174 | ||
175 | if (info->flags & IEEE80211_TX_CTL_NO_ACK) | |
176 | return MT_PACKET_ID_NO_ACK; | |
177 | ||
178 | if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | | |
179 | IEEE80211_TX_CTL_RATE_CTRL_PROBE))) | |
180 | return 0; | |
181 | ||
182 | spin_lock_bh(&dev->status_list.lock); | |
183 | ||
184 | memset(cb, 0, sizeof(*cb)); | |
185 | wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK; | |
186 | if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK) | |
187 | wcid->packet_id = 1; | |
188 | ||
189 | pid = wcid->packet_id; | |
190 | cb->wcid = wcid->idx; | |
191 | cb->pktid = pid; | |
192 | cb->jiffies = jiffies; | |
193 | ||
194 | __skb_queue_tail(&dev->status_list, skb); | |
195 | spin_unlock_bh(&dev->status_list.lock); | |
196 | ||
197 | return pid; | |
198 | } | |
199 | EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add); | |
200 | ||
/* Look up the tracked frame matching @pktid (optionally restricted to
 * @wcid). While walking the list, frames that have expired are completed
 * as failed onto @list. Must be called with the status list lock held
 * (mt76_tx_status_lock()).
 */
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	/* NO_ACK frames are never added to the status list */
	if (pktid == MT_PACKET_ID_NO_ACK)
		return NULL;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		/* With a station given, skip other stations' frames */
		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		/* pktid == 0 is a timeout scan: keep frames still within
		 * MT_TX_STATUS_SKB_TIMEOUT. Any other unmatched pktid
		 * (e.g. -1 on flush) expires every remaining frame.
		 */
		if (!pktid &&
		    !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
					  MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
79d1c94c FF |
231 | void |
232 | mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush) | |
233 | { | |
234 | struct sk_buff_head list; | |
235 | ||
236 | mt76_tx_status_lock(dev, &list); | |
237 | mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list); | |
238 | mt76_tx_status_unlock(dev, &list); | |
239 | } | |
240 | EXPORT_SYMBOL_GPL(mt76_tx_status_check); | |
241 | ||
88046b2c FF |
242 | void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb) |
243 | { | |
79d1c94c FF |
244 | struct sk_buff_head list; |
245 | ||
88046b2c FF |
246 | if (!skb->prev) { |
247 | ieee80211_free_txskb(dev->hw, skb); | |
248 | return; | |
249 | } | |
250 | ||
79d1c94c FF |
251 | mt76_tx_status_lock(dev, &list); |
252 | __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list); | |
253 | mt76_tx_status_unlock(dev, &list); | |
88046b2c FF |
254 | } |
255 | EXPORT_SYMBOL_GPL(mt76_tx_complete_skb); | |
256 | ||
17f1de56 FF |
257 | void |
258 | mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, | |
259 | struct mt76_wcid *wcid, struct sk_buff *skb) | |
260 | { | |
261 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
49f45fa1 | 262 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
17f1de56 FF |
263 | struct mt76_queue *q; |
264 | int qid = skb_get_queue_mapping(skb); | |
265 | ||
266 | if (WARN_ON(qid >= MT_TXQ_PSD)) { | |
267 | qid = MT_TXQ_BE; | |
268 | skb_set_queue_mapping(skb, qid); | |
269 | } | |
270 | ||
271 | if (!wcid->tx_rate_set) | |
272 | ieee80211_get_tx_rates(info->control.vif, sta, skb, | |
273 | info->control.rates, 1); | |
274 | ||
49f45fa1 FF |
275 | if (sta && ieee80211_is_data_qos(hdr->frame_control)) { |
276 | struct ieee80211_txq *txq; | |
277 | struct mt76_txq *mtxq; | |
278 | u8 tid; | |
279 | ||
280 | tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; | |
281 | txq = sta->txq[tid]; | |
282 | mtxq = (struct mt76_txq *) txq->drv_priv; | |
283 | ||
284 | if (mtxq->aggr) | |
285 | mt76_check_agg_ssn(mtxq, skb); | |
286 | } | |
287 | ||
17f1de56 FF |
288 | q = &dev->q_tx[qid]; |
289 | ||
290 | spin_lock_bh(&q->lock); | |
469d4818 | 291 | dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); |
17f1de56 FF |
292 | dev->queue_ops->kick(dev, q); |
293 | ||
294 | if (q->queued > q->ndesc - 8) | |
295 | ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); | |
296 | spin_unlock_bh(&q->lock); | |
297 | } | |
298 | EXPORT_SYMBOL_GPL(mt76_tx); | |
299 | ||
300 | static struct sk_buff * | |
301 | mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps) | |
302 | { | |
303 | struct ieee80211_txq *txq = mtxq_to_txq(mtxq); | |
304 | struct sk_buff *skb; | |
305 | ||
306 | skb = skb_dequeue(&mtxq->retry_q); | |
307 | if (skb) { | |
308 | u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; | |
309 | ||
310 | if (ps && skb_queue_empty(&mtxq->retry_q)) | |
311 | ieee80211_sta_set_buffered(txq->sta, tid, false); | |
312 | ||
313 | return skb; | |
314 | } | |
315 | ||
316 | skb = ieee80211_tx_dequeue(dev->hw, txq); | |
317 | if (!skb) | |
318 | return NULL; | |
319 | ||
320 | return skb; | |
321 | } | |
322 | ||
17f1de56 FF |
323 | static void |
324 | mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, | |
325 | struct sk_buff *skb, bool last) | |
326 | { | |
327 | struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv; | |
328 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
329 | struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD]; | |
330 | ||
331 | info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; | |
332 | if (last) | |
333 | info->flags |= IEEE80211_TX_STATUS_EOSP; | |
334 | ||
335 | mt76_skb_set_moredata(skb, !last); | |
469d4818 | 336 | dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta); |
17f1de56 FF |
337 | } |
338 | ||
/* mac80211 callback: release up to @nframes buffered frames for the TIDs
 * set in @tids during a powersave service period
 */
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	/* Walk the TID bitmap until it is exhausted or the budget is used */
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			/* Queue each frame one step behind, so the final one
			 * can be flagged with EOSP before it is queued
			 */
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
/* Send a burst of frames from one software txq to the hardware queue.
 * Returns the number of frames queued, 0 with *empty set when the txq
 * ran dry, or a negative error. Called with hwq->lock held.
 */
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	/* Every frame in the burst reuses the first frame's rate */
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	/* Longer bursts only with aggregation enabled */
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		/* Probe frames go out alone so their status is unambiguous */
		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		/* End the burst on an aggregation-state change or a probe;
		 * push the frame back so it starts the next burst
		 */
		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	/* Non-probe bursts are accounted against the scheduler budget and
	 * marked for rescheduling on completion of the last frame
	 */
	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
/* Run one scheduling pass over the software queues attached to @hwq.
 * Returns the total number of frames queued, or a negative error.
 * Called with hwq->lock held (temporarily dropped to send a BAR).
 */
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	/* Remember the current tail so this pass stops after servicing
	 * every queue that was pending when it started
	 */
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			/* Sending the BAR requires dropping the queue lock;
			 * the list may change meanwhile, so restart the pass
			 */
			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		/* Re-queue at the tail if the txq still has frames pending */
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}
518 | void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq) | |
519 | { | |
520 | int len; | |
521 | ||
1d868b70 | 522 | rcu_read_lock(); |
17f1de56 FF |
523 | do { |
524 | if (hwq->swq_queued >= 4 || list_empty(&hwq->swq)) | |
525 | break; | |
526 | ||
527 | len = mt76_txq_schedule_list(dev, hwq); | |
528 | } while (len > 0); | |
1d868b70 | 529 | rcu_read_unlock(); |
17f1de56 FF |
530 | } |
531 | EXPORT_SYMBOL_GPL(mt76_txq_schedule); | |
532 | ||
533 | void mt76_txq_schedule_all(struct mt76_dev *dev) | |
534 | { | |
535 | int i; | |
536 | ||
537 | for (i = 0; i <= MT_TXQ_BK; i++) { | |
538 | struct mt76_queue *q = &dev->q_tx[i]; | |
539 | ||
540 | spin_lock_bh(&q->lock); | |
541 | mt76_txq_schedule(dev, q); | |
542 | spin_unlock_bh(&q->lock); | |
543 | } | |
544 | } | |
545 | EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); | |
546 | ||
547 | void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, | |
548 | bool send_bar) | |
549 | { | |
550 | int i; | |
551 | ||
552 | for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { | |
553 | struct ieee80211_txq *txq = sta->txq[i]; | |
554 | struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; | |
555 | ||
556 | spin_lock_bh(&mtxq->hwq->lock); | |
557 | mtxq->send_bar = mtxq->aggr && send_bar; | |
558 | if (!list_empty(&mtxq->list)) | |
559 | list_del_init(&mtxq->list); | |
560 | spin_unlock_bh(&mtxq->hwq->lock); | |
561 | } | |
562 | } | |
563 | EXPORT_SYMBOL_GPL(mt76_stop_tx_queues); | |
564 | ||
565 | void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) | |
566 | { | |
567 | struct mt76_dev *dev = hw->priv; | |
568 | struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; | |
569 | struct mt76_queue *hwq = mtxq->hwq; | |
570 | ||
571 | spin_lock_bh(&hwq->lock); | |
572 | if (list_empty(&mtxq->list)) | |
573 | list_add_tail(&mtxq->list, &hwq->swq); | |
574 | mt76_txq_schedule(dev, hwq); | |
575 | spin_unlock_bh(&hwq->lock); | |
576 | } | |
577 | EXPORT_SYMBOL_GPL(mt76_wake_tx_queue); | |
578 | ||
579 | void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq) | |
580 | { | |
581 | struct mt76_txq *mtxq; | |
582 | struct mt76_queue *hwq; | |
583 | struct sk_buff *skb; | |
584 | ||
585 | if (!txq) | |
586 | return; | |
587 | ||
588 | mtxq = (struct mt76_txq *) txq->drv_priv; | |
589 | hwq = mtxq->hwq; | |
590 | ||
591 | spin_lock_bh(&hwq->lock); | |
592 | if (!list_empty(&mtxq->list)) | |
593 | list_del(&mtxq->list); | |
594 | spin_unlock_bh(&hwq->lock); | |
595 | ||
596 | while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) | |
597 | ieee80211_free_txskb(dev->hw, skb); | |
598 | } | |
599 | EXPORT_SYMBOL_GPL(mt76_txq_remove); | |
600 | ||
601 | void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) | |
602 | { | |
603 | struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; | |
604 | ||
605 | INIT_LIST_HEAD(&mtxq->list); | |
606 | skb_queue_head_init(&mtxq->retry_q); | |
607 | ||
608 | mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)]; | |
609 | } | |
610 | EXPORT_SYMBOL_GPL(mt76_txq_init); | |
1d0496c6 SG |
611 | |
612 | u8 mt76_ac_to_hwq(u8 ac) | |
613 | { | |
614 | static const u8 wmm_queue_map[] = { | |
615 | [IEEE80211_AC_BE] = 0, | |
616 | [IEEE80211_AC_BK] = 1, | |
617 | [IEEE80211_AC_VI] = 2, | |
618 | [IEEE80211_AC_VO] = 3, | |
619 | }; | |
620 | ||
621 | if (WARN_ON(ac >= IEEE80211_NUM_ACS)) | |
622 | return 0; | |
623 | ||
624 | return wmm_queue_map[ac]; | |
625 | } | |
626 | EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); |