/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_trace.h"

struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

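/*
 * State gathered on each pre-TBTT interrupt: the iterator callbacks below
 * refresh every interface's beacon and collect buffered broadcast/multicast
 * frames; tail[] tracks the last frame queued per interface so its
 * "more data" bit can be cleared before transmission.
 */
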
static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}

static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}

static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * Beacon timer drifts by 1us every tick, the timer is configured
	 * in 1/16 TU (64us) units.
	 */
	if (dev->tbtt_count < 62)
		return;

	if (dev->tbtt_count >= 64) {
		dev->tbtt_count = 0;
		return;
	}

	/*
	 * The updated beacon interval takes effect after two TBTT, because
	 * at this point the original interval has already been loaded into
	 * the next TBTT_TIMER value.
	 */
	if (dev->tbtt_count == 62)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);
}

static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	mt76x02_resync_beacon_timer(dev);

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	/*
	 * Keep iterating until no interface adds a new frame or the
	 * queue is full.
	 */
	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q) &&
		 skb_queue_len(&data.q) < 8);

	if (!skb_queue_len(&data.q))
		return;

	/* Clear the "more data" bit on each interface's last queued frame. */
	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
				      NULL);
	}
	spin_unlock_bh(&q->lock);
}

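/*
 * Ring setup: each TX/RX ring has a register block at a fixed MT_RING_SIZE
 * stride from MT_TX_RING_BASE / MT_RX_RING_BASE. The two helpers below
 * point a queue at its register block, allocate the descriptor ring and
 * unmask the matching completion interrupt.
 */
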
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

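/*
 * Drain the TX status reports that the IRQ path queued via
 * mt76x02_mac_poll_tx_status() and feed them back to mac80211 from
 * tasklet context.
 */
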
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	/* Walk from the MCU queue down through PSD and the AC queues. */
	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

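/*
 * Called by the mt76 core when a NAPI poll cycle has drained the ring:
 * re-arm the RX done interrupt that the IRQ handler masked before
 * scheduling NAPI.
 */
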
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

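/*
 * Hard-irq top half: latch and acknowledge the interrupt sources, then
 * defer the real work. TX done and RX done are masked here and only
 * re-enabled once the tx tasklet / NAPI poll has serviced the rings,
 * keeping the handler itself short.
 */
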
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT) {
		mt76x02_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
	dev->mt76.mmio.irqmask &= ~clear;
	dev->mt76.mmio.irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);

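/*
 * Note: the mt76x02_irq_enable()/mt76x02_irq_disable() helpers used
 * throughout this file are thin inline wrappers around
 * mt76x02_set_irq_mask() (defined in mt76x02.h), passing the mask as
 * 'set' or 'clear' respectively.
 */
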
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

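/*
 * TX hang watchdog: mt76x02_tx_hang() samples the hardware DMA index of
 * the four data queues and reports a stall when a non-empty queue has
 * made no progress since the previous pass; MT_TX_HANG_TH consecutive
 * stalled checks trigger the full device reset below.
 */
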
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->pre_tbtt_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (dev->beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->beacon_mask)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	ieee80211_wake_queues(dev->mt76.hw);

	mt76_txq_schedule_all(&dev->mt76);
}

static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check < MT_TX_HANG_TH)
			return;

		mt76x02_watchdog_reset(dev);

		dev->tx_hang_reset++;
		dev->tx_hang_check = 0;
		memset(dev->mt76.tx_dma_idx, 0xff,
		       sizeof(dev->mt76.tx_dma_idx));
	} else {
		dev->tx_hang_check = 0;
	}
}

void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}
EXPORT_SYMBOL_GPL(mt76x02_wdt_work);