1 /* SPDX-License-Identifier: ISC */
3 #include <linux/etherdevice.h>
4 #include <linux/timekeeping.h>
8 #define MT_PSE_PAGE_SIZE 128
11 mt7603_ac_queue_mask0(u32 mask
)
15 ret
|= GENMASK(3, 0) * !!(mask
& BIT(0));
16 ret
|= GENMASK(8, 5) * !!(mask
& BIT(1));
17 ret
|= GENMASK(13, 10) * !!(mask
& BIT(2));
18 ret
|= GENMASK(19, 16) * !!(mask
& BIT(3));
23 mt76_stop_tx_ac(struct mt7603_dev
*dev
, u32 mask
)
25 mt76_set(dev
, MT_WF_ARB_TX_STOP_0
, mt7603_ac_queue_mask0(mask
));
29 mt76_start_tx_ac(struct mt7603_dev
*dev
, u32 mask
)
31 mt76_set(dev
, MT_WF_ARB_TX_START_0
, mt7603_ac_queue_mask0(mask
));
34 void mt7603_mac_set_timing(struct mt7603_dev
*dev
)
36 u32 cck
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, 231) |
37 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, 48);
38 u32 ofdm
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, 60) |
39 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, 24);
40 int offset
= 3 * dev
->coverage_class
;
41 u32 reg_offset
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, offset
) |
42 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, offset
);
46 if (dev
->mt76
.chandef
.chan
->band
== NL80211_BAND_5GHZ
)
51 mt76_set(dev
, MT_ARB_SCR
,
52 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
55 mt76_wr(dev
, MT_TIMEOUT_CCK
, cck
+ reg_offset
);
56 mt76_wr(dev
, MT_TIMEOUT_OFDM
, ofdm
+ reg_offset
);
58 FIELD_PREP(MT_IFS_EIFS
, 360) |
59 FIELD_PREP(MT_IFS_RIFS
, 2) |
60 FIELD_PREP(MT_IFS_SIFS
, sifs
) |
61 FIELD_PREP(MT_IFS_SLOT
, dev
->slottime
));
63 if (dev
->slottime
< 20)
64 val
= MT7603_CFEND_RATE_DEFAULT
;
66 val
= MT7603_CFEND_RATE_11B
;
68 mt76_rmw_field(dev
, MT_AGG_CONTROL
, MT_AGG_CONTROL_CFEND_RATE
, val
);
70 mt76_clear(dev
, MT_ARB_SCR
,
71 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
75 mt7603_wtbl_update(struct mt7603_dev
*dev
, int idx
, u32 mask
)
77 mt76_rmw(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_WLAN_IDX
,
78 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX
, idx
) | mask
);
80 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
84 mt7603_wtbl1_addr(int idx
)
86 return MT_WTBL1_BASE
+ idx
* MT_WTBL1_SIZE
;
90 mt7603_wtbl2_addr(int idx
)
93 return MT_PCIE_REMAP_BASE_1
+ idx
* MT_WTBL2_SIZE
;
97 mt7603_wtbl3_addr(int idx
)
99 u32 base
= mt7603_wtbl2_addr(MT7603_WTBL_SIZE
);
101 return base
+ idx
* MT_WTBL3_SIZE
;
105 mt7603_wtbl4_addr(int idx
)
107 u32 base
= mt7603_wtbl3_addr(MT7603_WTBL_SIZE
);
109 return base
+ idx
* MT_WTBL4_SIZE
;
112 void mt7603_wtbl_init(struct mt7603_dev
*dev
, int idx
, int vif
,
115 const void *_mac
= mac_addr
;
116 u32 addr
= mt7603_wtbl1_addr(idx
);
121 w0
= FIELD_PREP(MT_WTBL1_W0_ADDR_HI
,
122 get_unaligned_le16(_mac
+ 4));
123 w1
= FIELD_PREP(MT_WTBL1_W1_ADDR_LO
,
124 get_unaligned_le32(_mac
));
130 w0
|= MT_WTBL1_W0_RX_CHECK_A1
;
131 w0
|= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX
, vif
);
133 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
135 mt76_set(dev
, addr
+ 0 * 4, w0
);
136 mt76_set(dev
, addr
+ 1 * 4, w1
);
137 mt76_set(dev
, addr
+ 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL
);
139 mt76_stop_tx_ac(dev
, GENMASK(3, 0));
140 addr
= mt7603_wtbl2_addr(idx
);
141 for (i
= 0; i
< MT_WTBL2_SIZE
; i
+= 4)
142 mt76_wr(dev
, addr
+ i
, 0);
143 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_WTBL2
);
144 mt76_start_tx_ac(dev
, GENMASK(3, 0));
146 addr
= mt7603_wtbl3_addr(idx
);
147 for (i
= 0; i
< MT_WTBL3_SIZE
; i
+= 4)
148 mt76_wr(dev
, addr
+ i
, 0);
150 addr
= mt7603_wtbl4_addr(idx
);
151 for (i
= 0; i
< MT_WTBL4_SIZE
; i
+= 4)
152 mt76_wr(dev
, addr
+ i
, 0);
156 mt7603_wtbl_set_skip_tx(struct mt7603_dev
*dev
, int idx
, bool enabled
)
158 u32 addr
= mt7603_wtbl1_addr(idx
);
159 u32 val
= mt76_rr(dev
, addr
+ 3 * 4);
161 val
&= ~MT_WTBL1_W3_SKIP_TX
;
162 val
|= enabled
* MT_WTBL1_W3_SKIP_TX
;
164 mt76_wr(dev
, addr
+ 3 * 4, val
);
167 void mt7603_filter_tx(struct mt7603_dev
*dev
, int idx
, bool abort
)
173 queue
= 8; /* free queue */
176 queue
= 1; /* MCU queue */
179 mt7603_wtbl_set_skip_tx(dev
, idx
, true);
181 mt76_wr(dev
, MT_TX_ABORT
, MT_TX_ABORT_EN
|
182 FIELD_PREP(MT_TX_ABORT_WCID
, idx
));
184 for (i
= 0; i
< 4; i
++) {
185 mt76_wr(dev
, MT_DMA_FQCR0
, MT_DMA_FQCR0_BUSY
|
186 FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID
, idx
) |
187 FIELD_PREP(MT_DMA_FQCR0_TARGET_QID
, i
) |
188 FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID
, port
) |
189 FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID
, queue
));
191 WARN_ON_ONCE(!mt76_poll(dev
, MT_DMA_FQCR0
, MT_DMA_FQCR0_BUSY
,
195 mt76_wr(dev
, MT_TX_ABORT
, 0);
197 mt7603_wtbl_set_skip_tx(dev
, idx
, false);
200 void mt7603_wtbl_set_smps(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
203 u32 addr
= mt7603_wtbl1_addr(sta
->wcid
.idx
);
205 if (sta
->smps
== enabled
)
208 mt76_rmw_field(dev
, addr
+ 2 * 4, MT_WTBL1_W2_SMPS
, enabled
);
212 void mt7603_wtbl_set_ps(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
215 int idx
= sta
->wcid
.idx
;
218 spin_lock_bh(&dev
->ps_lock
);
220 if (sta
->ps
== enabled
)
223 mt76_wr(dev
, MT_PSE_RTA
,
224 FIELD_PREP(MT_PSE_RTA_TAG_ID
, idx
) |
225 FIELD_PREP(MT_PSE_RTA_PORT_ID
, 0) |
226 FIELD_PREP(MT_PSE_RTA_QUEUE_ID
, 1) |
227 FIELD_PREP(MT_PSE_RTA_REDIRECT_EN
, enabled
) |
228 MT_PSE_RTA_WRITE
| MT_PSE_RTA_BUSY
);
230 mt76_poll(dev
, MT_PSE_RTA
, MT_PSE_RTA_BUSY
, 0, 5000);
233 mt7603_filter_tx(dev
, idx
, false);
235 addr
= mt7603_wtbl1_addr(idx
);
236 mt76_set(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
237 mt76_rmw(dev
, addr
+ 3 * 4, MT_WTBL1_W3_POWER_SAVE
,
238 enabled
* MT_WTBL1_W3_POWER_SAVE
);
239 mt76_clear(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
243 spin_unlock_bh(&dev
->ps_lock
);
246 void mt7603_wtbl_clear(struct mt7603_dev
*dev
, int idx
)
248 int wtbl2_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL2_SIZE
;
249 int wtbl2_frame
= idx
/ wtbl2_frame_size
;
250 int wtbl2_entry
= idx
% wtbl2_frame_size
;
252 int wtbl3_base_frame
= MT_WTBL3_OFFSET
/ MT_PSE_PAGE_SIZE
;
253 int wtbl3_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL3_SIZE
;
254 int wtbl3_frame
= wtbl3_base_frame
+ idx
/ wtbl3_frame_size
;
255 int wtbl3_entry
= (idx
% wtbl3_frame_size
) * 2;
257 int wtbl4_base_frame
= MT_WTBL4_OFFSET
/ MT_PSE_PAGE_SIZE
;
258 int wtbl4_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL4_SIZE
;
259 int wtbl4_frame
= wtbl4_base_frame
+ idx
/ wtbl4_frame_size
;
260 int wtbl4_entry
= idx
% wtbl4_frame_size
;
262 u32 addr
= MT_WTBL1_BASE
+ idx
* MT_WTBL1_SIZE
;
265 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
267 mt76_wr(dev
, addr
+ 0 * 4,
268 MT_WTBL1_W0_RX_CHECK_A1
|
269 MT_WTBL1_W0_RX_CHECK_A2
|
270 MT_WTBL1_W0_RX_VALID
);
271 mt76_wr(dev
, addr
+ 1 * 4, 0);
272 mt76_wr(dev
, addr
+ 2 * 4, 0);
274 mt76_set(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
276 mt76_wr(dev
, addr
+ 3 * 4,
277 FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID
, wtbl2_frame
) |
278 FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID
, wtbl2_entry
) |
279 FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID
, wtbl4_frame
) |
280 MT_WTBL1_W3_I_PSM
| MT_WTBL1_W3_KEEP_I_PSM
);
281 mt76_wr(dev
, addr
+ 4 * 4,
282 FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID
, wtbl3_frame
) |
283 FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID
, wtbl3_entry
) |
284 FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID
, wtbl4_entry
));
286 mt76_clear(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
288 addr
= mt7603_wtbl2_addr(idx
);
290 /* Clear BA information */
291 mt76_wr(dev
, addr
+ (15 * 4), 0);
293 mt76_stop_tx_ac(dev
, GENMASK(3, 0));
294 for (i
= 2; i
<= 4; i
++)
295 mt76_wr(dev
, addr
+ (i
* 4), 0);
296 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_WTBL2
);
297 mt76_start_tx_ac(dev
, GENMASK(3, 0));
299 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_RX_COUNT_CLEAR
);
300 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_TX_COUNT_CLEAR
);
301 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_ADM_COUNT_CLEAR
);
304 void mt7603_wtbl_update_cap(struct mt7603_dev
*dev
, struct ieee80211_sta
*sta
)
306 struct mt7603_sta
*msta
= (struct mt7603_sta
*)sta
->drv_priv
;
307 int idx
= msta
->wcid
.idx
;
311 addr
= mt7603_wtbl1_addr(idx
);
313 val
= mt76_rr(dev
, addr
+ 2 * 4);
314 val
&= MT_WTBL1_W2_KEY_TYPE
| MT_WTBL1_W2_ADMISSION_CONTROL
;
315 val
|= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR
, sta
->ht_cap
.ampdu_factor
) |
316 FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY
, sta
->ht_cap
.ampdu_density
) |
317 MT_WTBL1_W2_TXS_BAF_REPORT
;
320 val
|= MT_WTBL1_W2_HT
;
321 if (sta
->vht_cap
.cap
)
322 val
|= MT_WTBL1_W2_VHT
;
324 mt76_wr(dev
, addr
+ 2 * 4, val
);
326 addr
= mt7603_wtbl2_addr(idx
);
327 val
= mt76_rr(dev
, addr
+ 9 * 4);
328 val
&= ~(MT_WTBL2_W9_SHORT_GI_20
| MT_WTBL2_W9_SHORT_GI_40
|
329 MT_WTBL2_W9_SHORT_GI_80
);
330 if (sta
->ht_cap
.cap
& IEEE80211_HT_CAP_SGI_20
)
331 val
|= MT_WTBL2_W9_SHORT_GI_20
;
332 if (sta
->ht_cap
.cap
& IEEE80211_HT_CAP_SGI_40
)
333 val
|= MT_WTBL2_W9_SHORT_GI_40
;
334 mt76_wr(dev
, addr
+ 9 * 4, val
);
337 void mt7603_mac_rx_ba_reset(struct mt7603_dev
*dev
, void *addr
, u8 tid
)
339 mt76_wr(dev
, MT_BA_CONTROL_0
, get_unaligned_le32(addr
));
340 mt76_wr(dev
, MT_BA_CONTROL_1
,
341 (get_unaligned_le16(addr
+ 4) |
342 FIELD_PREP(MT_BA_CONTROL_1_TID
, tid
) |
343 MT_BA_CONTROL_1_RESET
));
346 void mt7603_mac_tx_ba_reset(struct mt7603_dev
*dev
, int wcid
, int tid
,
349 u32 addr
= mt7603_wtbl2_addr(wcid
);
350 u32 tid_mask
= FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS
, BIT(tid
)) |
351 (MT_WTBL2_W15_BA_WIN_SIZE
<<
352 (tid
* MT_WTBL2_W15_BA_WIN_SIZE_SHIFT
));
358 mt76_clear(dev
, addr
+ (15 * 4), tid_mask
);
362 for (i
= 7; i
> 0; i
--) {
363 if (ba_size
>= MT_AGG_SIZE_LIMIT(i
))
367 tid_val
= FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS
, BIT(tid
)) |
368 i
<< (tid
* MT_WTBL2_W15_BA_WIN_SIZE_SHIFT
);
370 mt76_rmw(dev
, addr
+ (15 * 4), tid_mask
, tid_val
);
374 mt7603_get_rate(struct mt7603_dev
*dev
, struct ieee80211_supported_band
*sband
,
378 int len
= sband
->n_bitrates
;
382 if (sband
== &dev
->mt76
.sband_5g
.sband
)
385 idx
&= ~BIT(2); /* short preamble */
386 } else if (sband
== &dev
->mt76
.sband_2g
.sband
) {
390 for (i
= offset
; i
< len
; i
++) {
391 if ((sband
->bitrates
[i
].hw_value
& GENMASK(7, 0)) == idx
)
398 static struct mt76_wcid
*
399 mt7603_rx_get_wcid(struct mt7603_dev
*dev
, u8 idx
, bool unicast
)
401 struct mt7603_sta
*sta
;
402 struct mt76_wcid
*wcid
;
404 if (idx
>= ARRAY_SIZE(dev
->mt76
.wcid
))
407 wcid
= rcu_dereference(dev
->mt76
.wcid
[idx
]);
408 if (unicast
|| !wcid
)
414 sta
= container_of(wcid
, struct mt7603_sta
, wcid
);
418 return &sta
->vif
->sta
.wcid
;
422 mt7603_insert_ccmp_hdr(struct sk_buff
*skb
, u8 key_id
)
424 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
425 int hdr_len
= ieee80211_get_hdrlen_from_skb(skb
);
430 memmove(skb
->data
, skb
->data
+ 8, hdr_len
);
431 hdr
= skb
->data
+ hdr_len
;
436 hdr
[3] = 0x20 | (key_id
<< 6);
442 status
->flag
&= ~RX_FLAG_IV_STRIPPED
;
446 mt7603_mac_fill_rx(struct mt7603_dev
*dev
, struct sk_buff
*skb
)
448 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
449 struct ieee80211_supported_band
*sband
;
450 struct ieee80211_hdr
*hdr
;
451 __le32
*rxd
= (__le32
*)skb
->data
;
452 u32 rxd0
= le32_to_cpu(rxd
[0]);
453 u32 rxd1
= le32_to_cpu(rxd
[1]);
454 u32 rxd2
= le32_to_cpu(rxd
[2]);
455 bool unicast
= rxd1
& MT_RXD1_NORMAL_U2M
;
456 bool insert_ccmp_hdr
= false;
461 memset(status
, 0, sizeof(*status
));
463 i
= FIELD_GET(MT_RXD1_NORMAL_CH_FREQ
, rxd1
);
464 sband
= (i
& 1) ? &dev
->mt76
.sband_5g
.sband
: &dev
->mt76
.sband_2g
.sband
;
467 idx
= FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX
, rxd2
);
468 status
->wcid
= mt7603_rx_get_wcid(dev
, idx
, unicast
);
470 status
->band
= sband
->band
;
471 if (i
< sband
->n_channels
)
472 status
->freq
= sband
->channels
[i
].center_freq
;
474 if (rxd2
& MT_RXD2_NORMAL_FCS_ERR
)
475 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
477 if (rxd2
& MT_RXD2_NORMAL_TKIP_MIC_ERR
)
478 status
->flag
|= RX_FLAG_MMIC_ERROR
;
480 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE
, rxd2
) != 0 &&
481 !(rxd2
& (MT_RXD2_NORMAL_CLM
| MT_RXD2_NORMAL_CM
))) {
482 status
->flag
|= RX_FLAG_DECRYPTED
;
483 status
->flag
|= RX_FLAG_IV_STRIPPED
;
484 status
->flag
|= RX_FLAG_MMIC_STRIPPED
| RX_FLAG_MIC_STRIPPED
;
487 remove_pad
= rxd1
& MT_RXD1_NORMAL_HDR_OFFSET
;
489 if (rxd2
& MT_RXD2_NORMAL_MAX_LEN_ERROR
)
492 if (!sband
->channels
)
496 if (rxd0
& MT_RXD0_NORMAL_GROUP_4
) {
498 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
501 if (rxd0
& MT_RXD0_NORMAL_GROUP_1
) {
502 u8
*data
= (u8
*)rxd
;
504 if (status
->flag
& RX_FLAG_DECRYPTED
) {
505 status
->iv
[0] = data
[5];
506 status
->iv
[1] = data
[4];
507 status
->iv
[2] = data
[3];
508 status
->iv
[3] = data
[2];
509 status
->iv
[4] = data
[1];
510 status
->iv
[5] = data
[0];
512 insert_ccmp_hdr
= FIELD_GET(MT_RXD2_NORMAL_FRAG
, rxd2
);
516 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
519 if (rxd0
& MT_RXD0_NORMAL_GROUP_2
) {
521 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
524 if (rxd0
& MT_RXD0_NORMAL_GROUP_3
) {
525 u32 rxdg0
= le32_to_cpu(rxd
[0]);
526 u32 rxdg3
= le32_to_cpu(rxd
[3]);
529 i
= FIELD_GET(MT_RXV1_TX_RATE
, rxdg0
);
530 switch (FIELD_GET(MT_RXV1_TX_MODE
, rxdg0
)) {
531 case MT_PHY_TYPE_CCK
:
534 case MT_PHY_TYPE_OFDM
:
535 i
= mt7603_get_rate(dev
, sband
, i
, cck
);
537 case MT_PHY_TYPE_HT_GF
:
539 status
->encoding
= RX_ENC_HT
;
547 if (rxdg0
& MT_RXV1_HT_SHORT_GI
)
548 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
549 if (rxdg0
& MT_RXV1_HT_AD_CODE
)
550 status
->enc_flags
|= RX_ENC_FLAG_LDPC
;
552 status
->enc_flags
|= RX_ENC_FLAG_STBC_MASK
*
553 FIELD_GET(MT_RXV1_HT_STBC
, rxdg0
);
555 status
->rate_idx
= i
;
557 status
->chains
= dev
->mt76
.antenna_mask
;
558 status
->chain_signal
[0] = FIELD_GET(MT_RXV4_IB_RSSI0
, rxdg3
) +
560 status
->chain_signal
[1] = FIELD_GET(MT_RXV4_IB_RSSI1
, rxdg3
) +
563 status
->signal
= status
->chain_signal
[0];
564 if (status
->chains
& BIT(1))
565 status
->signal
= max(status
->signal
,
566 status
->chain_signal
[1]);
568 if (FIELD_GET(MT_RXV1_FRAME_MODE
, rxdg0
) == 1)
569 status
->bw
= RATE_INFO_BW_40
;
572 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
578 skb_pull(skb
, (u8
*)rxd
- skb
->data
+ 2 * remove_pad
);
580 if (insert_ccmp_hdr
) {
581 u8 key_id
= FIELD_GET(MT_RXD1_NORMAL_KEY_ID
, rxd1
);
583 mt7603_insert_ccmp_hdr(skb
, key_id
);
586 hdr
= (struct ieee80211_hdr
*)skb
->data
;
587 if (!status
->wcid
|| !ieee80211_is_data_qos(hdr
->frame_control
))
590 status
->aggr
= unicast
&&
591 !ieee80211_is_qos_nullfunc(hdr
->frame_control
);
592 status
->tid
= *ieee80211_get_qos_ctl(hdr
) & IEEE80211_QOS_CTL_TID_MASK
;
593 status
->seqno
= hdr
->seq_ctrl
>> 4;
599 mt7603_mac_tx_rate_val(struct mt7603_dev
*dev
,
600 const struct ieee80211_tx_rate
*rate
, bool stbc
, u8
*bw
)
602 u8 phy
, nss
, rate_idx
;
606 if (rate
->flags
& IEEE80211_TX_RC_MCS
) {
607 rate_idx
= rate
->idx
;
608 nss
= 1 + (rate
->idx
>> 3);
609 phy
= MT_PHY_TYPE_HT
;
610 if (rate
->flags
& IEEE80211_TX_RC_GREEN_FIELD
)
611 phy
= MT_PHY_TYPE_HT_GF
;
612 if (rate
->flags
& IEEE80211_TX_RC_40_MHZ_WIDTH
)
615 const struct ieee80211_rate
*r
;
616 int band
= dev
->mt76
.chandef
.chan
->band
;
620 r
= &mt76_hw(dev
)->wiphy
->bands
[band
]->bitrates
[rate
->idx
];
621 if (rate
->flags
& IEEE80211_TX_RC_USE_SHORT_PREAMBLE
)
622 val
= r
->hw_value_short
;
627 rate_idx
= val
& 0xff;
630 rateval
= (FIELD_PREP(MT_TX_RATE_IDX
, rate_idx
) |
631 FIELD_PREP(MT_TX_RATE_MODE
, phy
));
633 if (stbc
&& nss
== 1)
634 rateval
|= MT_TX_RATE_STBC
;
639 void mt7603_wtbl_set_rates(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
640 struct ieee80211_tx_rate
*probe_rate
,
641 struct ieee80211_tx_rate
*rates
)
643 int wcid
= sta
->wcid
.idx
;
644 u32 addr
= mt7603_wtbl2_addr(wcid
);
646 int n_rates
= sta
->n_rates
;
647 u8 bw
, bw_prev
, bw_idx
= 0;
650 u32 w9
= mt76_rr(dev
, addr
+ 9 * 4);
653 if (!mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000))
656 for (i
= n_rates
; i
< 4; i
++)
657 rates
[i
] = rates
[n_rates
- 1];
659 w9
&= MT_WTBL2_W9_SHORT_GI_20
| MT_WTBL2_W9_SHORT_GI_40
|
660 MT_WTBL2_W9_SHORT_GI_80
;
662 val
[0] = mt7603_mac_tx_rate_val(dev
, &rates
[0], stbc
, &bw
);
666 probe_val
= mt7603_mac_tx_rate_val(dev
, probe_rate
, stbc
, &bw
);
675 w9
|= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL
, bw
);
676 w9
|= FIELD_PREP(MT_WTBL2_W9_BW_CAP
, bw
);
678 val
[1] = mt7603_mac_tx_rate_val(dev
, &rates
[1], stbc
, &bw
);
684 val
[2] = mt7603_mac_tx_rate_val(dev
, &rates
[2], stbc
, &bw
);
690 val
[3] = mt7603_mac_tx_rate_val(dev
, &rates
[3], stbc
, &bw
);
694 w9
|= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE
,
695 bw_idx
? bw_idx
- 1 : 7);
697 mt76_wr(dev
, MT_WTBL_RIUCR0
, w9
);
699 mt76_wr(dev
, MT_WTBL_RIUCR1
,
700 FIELD_PREP(MT_WTBL_RIUCR1_RATE0
, probe_val
) |
701 FIELD_PREP(MT_WTBL_RIUCR1_RATE1
, val
[0]) |
702 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO
, val
[0]));
704 mt76_wr(dev
, MT_WTBL_RIUCR2
,
705 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI
, val
[0] >> 8) |
706 FIELD_PREP(MT_WTBL_RIUCR2_RATE3
, val
[1]) |
707 FIELD_PREP(MT_WTBL_RIUCR2_RATE4
, val
[1]) |
708 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO
, val
[2]));
710 mt76_wr(dev
, MT_WTBL_RIUCR3
,
711 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI
, val
[2] >> 4) |
712 FIELD_PREP(MT_WTBL_RIUCR3_RATE6
, val
[2]) |
713 FIELD_PREP(MT_WTBL_RIUCR3_RATE7
, val
[3]));
715 mt76_wr(dev
, MT_WTBL_UPDATE
,
716 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX
, wcid
) |
717 MT_WTBL_UPDATE_RATE_UPDATE
|
718 MT_WTBL_UPDATE_TX_COUNT_CLEAR
);
720 if (!sta
->wcid
.tx_rate_set
)
721 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
723 sta
->rate_count
= 2 * MT7603_RATE_RETRY
* n_rates
;
724 sta
->wcid
.tx_rate_set
= true;
727 static enum mt7603_cipher_type
728 mt7603_mac_get_key_info(struct ieee80211_key_conf
*key
, u8
*key_data
)
730 memset(key_data
, 0, 32);
732 return MT_CIPHER_NONE
;
734 if (key
->keylen
> 32)
735 return MT_CIPHER_NONE
;
737 memcpy(key_data
, key
->key
, key
->keylen
);
739 switch (key
->cipher
) {
740 case WLAN_CIPHER_SUITE_WEP40
:
741 return MT_CIPHER_WEP40
;
742 case WLAN_CIPHER_SUITE_WEP104
:
743 return MT_CIPHER_WEP104
;
744 case WLAN_CIPHER_SUITE_TKIP
:
745 /* Rx/Tx MIC keys are swapped */
746 memcpy(key_data
+ 16, key
->key
+ 24, 8);
747 memcpy(key_data
+ 24, key
->key
+ 16, 8);
748 return MT_CIPHER_TKIP
;
749 case WLAN_CIPHER_SUITE_CCMP
:
750 return MT_CIPHER_AES_CCMP
;
752 return MT_CIPHER_NONE
;
756 int mt7603_wtbl_set_key(struct mt7603_dev
*dev
, int wcid
,
757 struct ieee80211_key_conf
*key
)
759 enum mt7603_cipher_type cipher
;
760 u32 addr
= mt7603_wtbl3_addr(wcid
);
762 int key_len
= sizeof(key_data
);
764 cipher
= mt7603_mac_get_key_info(key
, key_data
);
765 if (cipher
== MT_CIPHER_NONE
&& key
)
768 if (key
&& (cipher
== MT_CIPHER_WEP40
|| cipher
== MT_CIPHER_WEP104
)) {
769 addr
+= key
->keyidx
* 16;
773 mt76_wr_copy(dev
, addr
, key_data
, key_len
);
775 addr
= mt7603_wtbl1_addr(wcid
);
776 mt76_rmw_field(dev
, addr
+ 2 * 4, MT_WTBL1_W2_KEY_TYPE
, cipher
);
778 mt76_rmw_field(dev
, addr
, MT_WTBL1_W0_KEY_IDX
, key
->keyidx
);
779 mt76_rmw_field(dev
, addr
, MT_WTBL1_W0_RX_KEY_VALID
, !!key
);
785 mt7603_mac_write_txwi(struct mt7603_dev
*dev
, __le32
*txwi
,
786 struct sk_buff
*skb
, struct mt76_queue
*q
,
787 struct mt76_wcid
*wcid
, struct ieee80211_sta
*sta
,
788 int pid
, struct ieee80211_key_conf
*key
)
790 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
791 struct ieee80211_tx_rate
*rate
= &info
->control
.rates
[0];
792 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
793 struct ieee80211_bar
*bar
= (struct ieee80211_bar
*)skb
->data
;
794 struct ieee80211_vif
*vif
= info
->control
.vif
;
795 struct mt7603_vif
*mvif
;
797 int hdr_len
= ieee80211_get_hdrlen_from_skb(skb
);
799 u8 frame_type
, frame_subtype
;
800 u16 fc
= le16_to_cpu(hdr
->frame_control
);
807 mvif
= (struct mt7603_vif
*)vif
->drv_priv
;
809 if (vif_idx
&& q
>= &dev
->mt76
.q_tx
[MT_TXQ_BEACON
])
814 struct mt7603_sta
*msta
= (struct mt7603_sta
*)sta
->drv_priv
;
816 tx_count
= msta
->rate_count
;
820 wlan_idx
= wcid
->idx
;
822 wlan_idx
= MT7603_WTBL_RESERVED
;
824 frame_type
= (fc
& IEEE80211_FCTL_FTYPE
) >> 2;
825 frame_subtype
= (fc
& IEEE80211_FCTL_STYPE
) >> 4;
827 val
= FIELD_PREP(MT_TXD0_TX_BYTES
, skb
->len
+ MT_TXD_SIZE
) |
828 FIELD_PREP(MT_TXD0_Q_IDX
, q
->hw_idx
);
829 txwi
[0] = cpu_to_le32(val
);
831 val
= MT_TXD1_LONG_FORMAT
|
832 FIELD_PREP(MT_TXD1_OWN_MAC
, vif_idx
) |
833 FIELD_PREP(MT_TXD1_TID
,
834 skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
) |
835 FIELD_PREP(MT_TXD1_HDR_FORMAT
, MT_HDR_FORMAT_802_11
) |
836 FIELD_PREP(MT_TXD1_HDR_INFO
, hdr_len
/ 2) |
837 FIELD_PREP(MT_TXD1_WLAN_IDX
, wlan_idx
) |
838 FIELD_PREP(MT_TXD1_PROTECTED
, !!key
);
839 txwi
[1] = cpu_to_le32(val
);
841 if (info
->flags
& IEEE80211_TX_CTL_NO_ACK
)
842 txwi
[1] |= cpu_to_le32(MT_TXD1_NO_ACK
);
844 val
= FIELD_PREP(MT_TXD2_FRAME_TYPE
, frame_type
) |
845 FIELD_PREP(MT_TXD2_SUB_TYPE
, frame_subtype
) |
846 FIELD_PREP(MT_TXD2_MULTICAST
,
847 is_multicast_ether_addr(hdr
->addr1
));
848 txwi
[2] = cpu_to_le32(val
);
850 if (!(info
->flags
& IEEE80211_TX_CTL_AMPDU
))
851 txwi
[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE
);
855 val
= MT_TXD5_TX_STATUS_HOST
| MT_TXD5_SW_POWER_MGMT
|
856 FIELD_PREP(MT_TXD5_PID
, pid
);
857 txwi
[5] = cpu_to_le32(val
);
861 if (rate
->idx
>= 0 && rate
->count
&&
862 !(info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
)) {
863 bool stbc
= info
->flags
& IEEE80211_TX_CTL_STBC
;
864 u16 rateval
= mt7603_mac_tx_rate_val(dev
, rate
, stbc
, &bw
);
866 txwi
[2] |= cpu_to_le32(MT_TXD2_FIX_RATE
);
868 val
= MT_TXD6_FIXED_BW
|
869 FIELD_PREP(MT_TXD6_BW
, bw
) |
870 FIELD_PREP(MT_TXD6_TX_RATE
, rateval
);
871 txwi
[6] |= cpu_to_le32(val
);
873 if (rate
->flags
& IEEE80211_TX_RC_SHORT_GI
)
874 txwi
[6] |= cpu_to_le32(MT_TXD6_SGI
);
876 if (!(rate
->flags
& IEEE80211_TX_RC_MCS
))
877 txwi
[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE
);
879 tx_count
= rate
->count
;
882 /* use maximum tx count for beacons and buffered multicast */
883 if (q
>= &dev
->mt76
.q_tx
[MT_TXQ_BEACON
])
886 val
= FIELD_PREP(MT_TXD3_REM_TX_COUNT
, tx_count
) |
889 if (ieee80211_is_data_qos(hdr
->frame_control
))
890 seqno
= le16_to_cpu(hdr
->seq_ctrl
);
891 else if (ieee80211_is_back_req(hdr
->frame_control
))
892 seqno
= le16_to_cpu(bar
->start_seq_num
);
894 val
&= ~MT_TXD3_SN_VALID
;
896 val
|= FIELD_PREP(MT_TXD3_SEQ
, seqno
>> 4);
898 txwi
[3] = cpu_to_le32(val
);
901 u64 pn
= atomic64_inc_return(&key
->tx_pn
);
903 txwi
[3] |= cpu_to_le32(MT_TXD3_PN_VALID
);
904 txwi
[4] = cpu_to_le32(pn
& GENMASK(31, 0));
905 txwi
[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH
, pn
>> 32));
913 int mt7603_tx_prepare_skb(struct mt76_dev
*mdev
, void *txwi_ptr
,
914 struct sk_buff
*skb
, struct mt76_queue
*q
,
915 struct mt76_wcid
*wcid
, struct ieee80211_sta
*sta
,
918 struct mt7603_dev
*dev
= container_of(mdev
, struct mt7603_dev
, mt76
);
919 struct mt7603_sta
*msta
= container_of(wcid
, struct mt7603_sta
, wcid
);
920 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
921 struct ieee80211_key_conf
*key
= info
->control
.hw_key
;
925 wcid
= &dev
->global_sta
.wcid
;
928 msta
= (struct mt7603_sta
*)sta
->drv_priv
;
930 if ((info
->flags
& (IEEE80211_TX_CTL_NO_PS_BUFFER
|
931 IEEE80211_TX_CTL_CLEAR_PS_FILT
)) ||
932 (info
->control
.flags
& IEEE80211_TX_CTRL_PS_RESPONSE
))
933 mt7603_wtbl_set_ps(dev
, msta
, false);
936 pid
= mt76_tx_status_skb_add(mdev
, wcid
, skb
);
938 if (info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
) {
939 spin_lock_bh(&dev
->mt76
.lock
);
940 msta
->rate_probe
= true;
941 mt7603_wtbl_set_rates(dev
, msta
, &info
->control
.rates
[0],
943 spin_unlock_bh(&dev
->mt76
.lock
);
946 mt7603_mac_write_txwi(dev
, txwi_ptr
, skb
, q
, wcid
, sta
, pid
, key
);
952 mt7603_fill_txs(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
953 struct ieee80211_tx_info
*info
, __le32
*txs_data
)
955 struct ieee80211_supported_band
*sband
;
958 u32 final_rate_flags
;
971 fixed_rate
= info
->status
.rates
[0].count
;
972 probe
= !!(info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
);
974 txs
= le32_to_cpu(txs_data
[4]);
975 final_mpdu
= txs
& MT_TXS4_ACKED_MPDU
;
976 ampdu
= !fixed_rate
&& (txs
& MT_TXS4_AMPDU
);
977 pid
= FIELD_GET(MT_TXS4_PID
, txs
);
978 count
= FIELD_GET(MT_TXS4_TX_COUNT
, txs
);
980 txs
= le32_to_cpu(txs_data
[0]);
981 final_rate
= FIELD_GET(MT_TXS0_TX_RATE
, txs
);
982 ack_timeout
= txs
& MT_TXS0_ACK_TIMEOUT
;
984 if (!ampdu
&& (txs
& MT_TXS0_RTS_TIMEOUT
))
987 if (txs
& MT_TXS0_QUEUE_TIMEOUT
)
991 info
->flags
|= IEEE80211_TX_STAT_ACK
;
993 info
->status
.ampdu_len
= 1;
994 info
->status
.ampdu_ack_len
= !!(info
->flags
&
995 IEEE80211_TX_STAT_ACK
);
997 if (ampdu
|| (info
->flags
& IEEE80211_TX_CTL_AMPDU
))
998 info
->flags
|= IEEE80211_TX_STAT_AMPDU
| IEEE80211_TX_CTL_AMPDU
;
1000 if (fixed_rate
&& !probe
) {
1001 info
->status
.rates
[0].count
= count
;
1005 for (i
= 0, idx
= 0; i
< ARRAY_SIZE(info
->status
.rates
); i
++) {
1006 int cur_count
= min_t(int, count
, 2 * MT7603_RATE_RETRY
);
1011 info
->status
.rates
[i
] = sta
->rates
[idx
];
1015 if (i
&& info
->status
.rates
[i
].idx
< 0) {
1016 info
->status
.rates
[i
- 1].count
+= count
;
1021 info
->status
.rates
[i
].idx
= -1;
1025 info
->status
.rates
[i
].count
= cur_count
;
1031 final_rate_flags
= info
->status
.rates
[final_idx
].flags
;
1033 switch (FIELD_GET(MT_TX_RATE_MODE
, final_rate
)) {
1034 case MT_PHY_TYPE_CCK
:
1037 case MT_PHY_TYPE_OFDM
:
1038 if (dev
->mt76
.chandef
.chan
->band
== NL80211_BAND_5GHZ
)
1039 sband
= &dev
->mt76
.sband_5g
.sband
;
1041 sband
= &dev
->mt76
.sband_2g
.sband
;
1042 final_rate
&= GENMASK(5, 0);
1043 final_rate
= mt7603_get_rate(dev
, sband
, final_rate
, cck
);
1044 final_rate_flags
= 0;
1046 case MT_PHY_TYPE_HT_GF
:
1047 case MT_PHY_TYPE_HT
:
1048 final_rate_flags
|= IEEE80211_TX_RC_MCS
;
1049 final_rate
&= GENMASK(5, 0);
1050 if (final_rate
> 15)
1057 info
->status
.rates
[final_idx
].idx
= final_rate
;
1058 info
->status
.rates
[final_idx
].flags
= final_rate_flags
;
1064 mt7603_mac_add_txs_skb(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
, int pid
,
1067 struct mt76_dev
*mdev
= &dev
->mt76
;
1068 struct sk_buff_head list
;
1069 struct sk_buff
*skb
;
1071 if (pid
< MT_PACKET_ID_FIRST
)
1074 mt76_tx_status_lock(mdev
, &list
);
1075 skb
= mt76_tx_status_skb_get(mdev
, &sta
->wcid
, pid
, &list
);
1077 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1079 if (info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
) {
1080 spin_lock_bh(&dev
->mt76
.lock
);
1081 if (sta
->rate_probe
) {
1082 mt7603_wtbl_set_rates(dev
, sta
, NULL
,
1084 sta
->rate_probe
= false;
1086 spin_unlock_bh(&dev
->mt76
.lock
);
1089 if (!mt7603_fill_txs(dev
, sta
, info
, txs_data
)) {
1090 ieee80211_tx_info_clear_status(info
);
1091 info
->status
.rates
[0].idx
= -1;
1094 mt76_tx_status_skb_done(mdev
, skb
, &list
);
1096 mt76_tx_status_unlock(mdev
, &list
);
1101 void mt7603_mac_add_txs(struct mt7603_dev
*dev
, void *data
)
1103 struct ieee80211_tx_info info
= {};
1104 struct ieee80211_sta
*sta
= NULL
;
1105 struct mt7603_sta
*msta
= NULL
;
1106 struct mt76_wcid
*wcid
;
1107 __le32
*txs_data
= data
;
1112 txs
= le32_to_cpu(txs_data
[4]);
1113 pid
= FIELD_GET(MT_TXS4_PID
, txs
);
1114 txs
= le32_to_cpu(txs_data
[3]);
1115 wcidx
= FIELD_GET(MT_TXS3_WCID
, txs
);
1117 if (pid
== MT_PACKET_ID_NO_ACK
)
1120 if (wcidx
>= ARRAY_SIZE(dev
->mt76
.wcid
))
1125 wcid
= rcu_dereference(dev
->mt76
.wcid
[wcidx
]);
1129 msta
= container_of(wcid
, struct mt7603_sta
, wcid
);
1130 sta
= wcid_to_sta(wcid
);
1132 if (mt7603_mac_add_txs_skb(dev
, msta
, pid
, txs_data
))
1135 if (wcidx
>= MT7603_WTBL_STA
|| !sta
)
1138 if (mt7603_fill_txs(dev
, msta
, &info
, txs_data
))
1139 ieee80211_tx_status_noskb(mt76_hw(dev
), sta
, &info
);
1145 void mt7603_tx_complete_skb(struct mt76_dev
*mdev
, struct mt76_queue
*q
,
1146 struct mt76_queue_entry
*e
, bool flush
)
1148 struct mt7603_dev
*dev
= container_of(mdev
, struct mt7603_dev
, mt76
);
1149 struct sk_buff
*skb
= e
->skb
;
1152 dev_kfree_skb_any(skb
);
1156 if (q
- dev
->mt76
.q_tx
< 4)
1157 dev
->tx_hang_check
= 0;
1159 mt76_tx_complete_skb(mdev
, skb
);
1163 wait_for_wpdma(struct mt7603_dev
*dev
)
1165 return mt76_poll(dev
, MT_WPDMA_GLO_CFG
,
1166 MT_WPDMA_GLO_CFG_TX_DMA_BUSY
|
1167 MT_WPDMA_GLO_CFG_RX_DMA_BUSY
,
1171 static void mt7603_pse_reset(struct mt7603_dev
*dev
)
1173 /* Clear previous reset result */
1174 if (!dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
])
1175 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE_S
);
1178 mt76_set(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE
);
1180 if (!mt76_poll_msec(dev
, MT_MCU_DEBUG_RESET
,
1181 MT_MCU_DEBUG_RESET_PSE_S
,
1182 MT_MCU_DEBUG_RESET_PSE_S
, 500)) {
1183 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
]++;
1184 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE
);
1186 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] = 0;
1187 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_QUEUES
);
1190 if (dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] >= 3)
1191 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] = 0;
1194 void mt7603_mac_dma_start(struct mt7603_dev
*dev
)
1196 mt7603_mac_start(dev
);
1198 wait_for_wpdma(dev
);
1199 usleep_range(50, 100);
1201 mt76_set(dev
, MT_WPDMA_GLO_CFG
,
1202 (MT_WPDMA_GLO_CFG_TX_DMA_EN
|
1203 MT_WPDMA_GLO_CFG_RX_DMA_EN
|
1204 FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE
, 3) |
1205 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE
));
1207 mt7603_irq_enable(dev
, MT_INT_RX_DONE_ALL
| MT_INT_TX_DONE_ALL
);
1210 void mt7603_mac_start(struct mt7603_dev
*dev
)
1212 mt76_clear(dev
, MT_ARB_SCR
,
1213 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
1214 mt76_wr(dev
, MT_WF_ARB_TX_START_0
, ~0);
1215 mt76_set(dev
, MT_WF_ARB_RQCR
, MT_WF_ARB_RQCR_RX_START
);
1218 void mt7603_mac_stop(struct mt7603_dev
*dev
)
1220 mt76_set(dev
, MT_ARB_SCR
,
1221 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
1222 mt76_wr(dev
, MT_WF_ARB_TX_START_0
, 0);
1223 mt76_clear(dev
, MT_WF_ARB_RQCR
, MT_WF_ARB_RQCR_RX_START
);
1226 void mt7603_pse_client_reset(struct mt7603_dev
*dev
)
1230 addr
= mt7603_reg_map(dev
, MT_CLIENT_BASE_PHYS_ADDR
+
1231 MT_CLIENT_RESET_TX
);
1233 /* Clear previous reset state */
1234 mt76_clear(dev
, addr
,
1235 MT_CLIENT_RESET_TX_R_E_1
|
1236 MT_CLIENT_RESET_TX_R_E_2
|
1237 MT_CLIENT_RESET_TX_R_E_1_S
|
1238 MT_CLIENT_RESET_TX_R_E_2_S
);
1240 /* Start PSE client TX abort */
1241 mt76_set(dev
, addr
, MT_CLIENT_RESET_TX_R_E_1
);
1242 mt76_poll_msec(dev
, addr
, MT_CLIENT_RESET_TX_R_E_1_S
,
1243 MT_CLIENT_RESET_TX_R_E_1_S
, 500);
1245 mt76_set(dev
, addr
, MT_CLIENT_RESET_TX_R_E_2
);
1246 mt76_set(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_SW_RESET
);
1248 /* Wait for PSE client to clear TX FIFO */
1249 mt76_poll_msec(dev
, addr
, MT_CLIENT_RESET_TX_R_E_2_S
,
1250 MT_CLIENT_RESET_TX_R_E_2_S
, 500);
1252 /* Clear PSE client TX abort state */
1253 mt76_clear(dev
, addr
,
1254 MT_CLIENT_RESET_TX_R_E_1
|
1255 MT_CLIENT_RESET_TX_R_E_2
);
1258 static void mt7603_dma_sched_reset(struct mt7603_dev
*dev
)
1260 if (!is_mt7628(dev
))
1263 mt76_set(dev
, MT_SCH_4
, MT_SCH_4_RESET
);
1264 mt76_clear(dev
, MT_SCH_4
, MT_SCH_4_RESET
);
/* Full watchdog recovery path: quiesce mac80211 and all driver bottom
 * halves, optionally reset the packet storage engine, reinitialize the
 * DMA queues and scheduler, then bring everything back up again.
 * Invoked from mt7603_mac_work() after one of the hang detectors fired
 * (dev->cur_reset_cause identifies which one).
 */
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->beacon_int;	/* saved so beaconing can be restored below */
	u32 mask = dev->mt76.mmio.irqmask;	/* saved IRQ mask, restored after DMA reset */
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mt76);

	/* Stop all deferred work that could touch the hardware mid-reset */
	tasklet_disable(&dev->tx_tasklet);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);

	mutex_lock(&dev->mt76.mutex);

	/* Disable the beacon timer while the MAC is being reset */
	mt7603_beacon_set_timer(dev, -1, 0);

	/* A PSE reset is only performed for causes that point at stuck
	 * packet-storage-engine state, or when a previous reset attempt
	 * already failed.
	 */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	/* If a previous reset failed, skip touching the DMA engine again */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	/* Stop both DMA directions before cleaning up the rings */
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	/* Force completion of pending TX descriptors */
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	/* Flush all TX queues and reset all RX queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mt76.state);
	mutex_unlock(&dev->mt76.mutex);

	/* Re-enable deferred work and kick it once to drain backlog */
	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);
	/* Restore beaconing with the interval saved on entry */
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mt76);
}
1342 static u32
mt7603_dma_debug(struct mt7603_dev
*dev
, u8 index
)
1346 mt76_wr(dev
, MT_WPDMA_DEBUG
,
1347 FIELD_PREP(MT_WPDMA_DEBUG_IDX
, index
) |
1348 MT_WPDMA_DEBUG_SEL
);
1350 val
= mt76_rr(dev
, MT_WPDMA_DEBUG
);
1351 return FIELD_GET(MT_WPDMA_DEBUG_VALUE
, val
);
/* Check the RX FIFO busy indication via the DMA debug probe.
 * The probe index and bit differ per chip variant:
 * MT7628 uses debug index 9 / BIT(9), others index 2 / BIT(8).
 * NOTE(review): bit meanings are vendor magic — not documented here.
 */
static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}
1362 static bool mt7603_rx_dma_busy(struct mt7603_dev
*dev
)
1364 if (!(mt76_rr(dev
, MT_WPDMA_GLO_CFG
) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY
))
1367 return mt7603_rx_fifo_busy(dev
);
/* Check whether the TX DMA engine appears stuck: the global config must
 * report TX busy, and debug probe 9 must show BIT(8) set while the low
 * nibble is not 0xf.
 * NOTE(review): the probe-9 bit semantics are undocumented vendor magic;
 * presumably BIT(8) = TX in progress, low nibble = FIFO state.
 */
static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}
/* Detect a TX hang on the four AC queues: a queue counts as hung when it
 * has frames queued, its hardware DMA index has not moved since the last
 * check, and the DMA index still lags the CPU index (descriptors pending).
 * Side effect: caches the current DMA index per queue in dev->tx_dma_idx
 * for the next invocation.
 */
static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		/* Empty queues cannot be hung */
		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		/* No progress since last check and work still pending */
		if (dma_idx == prev_dma_idx &&
		    dma_idx != ioread32(&q->regs->cpu_idx))
			break;
	}

	/* Loop broke early => some queue is hung */
	return i < 4;
}
/* Check whether the RX packet storage engine is stuck.
 * Returns true immediately if the MCU already flagged a queue reset; a
 * busy RX FIFO means packets are still flowing, so not stuck. Otherwise
 * a PSE client status word is sampled through an indirect register map.
 * NOTE(review): the 0x4001/0x8001/0xe001 masks are undocumented vendor
 * status patterns — do not change without hardware reference.
 */
static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	/* Select PSE client status and read the upper half-word */
	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	/* MT7628 reports an additional benign pattern */
	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return false;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}
/* Generic hang-detector helper used by mt7603_mac_work().
 * @counter: per-cause consecutive-hit counter (persisted in dev)
 * @cause:   reset cause recorded if the watchdog trips
 * @check:   optional probe; NULL means the counter is advanced elsewhere
 *           (e.g. beacon/MCU counters incremented by other code paths)
 * Returns true when a reset should be triggered; also trips immediately
 * when dev->reset_test requests this cause for debugging.
 */
static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	/* debugfs-injected test trigger for this specific cause */
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		/* Condition cleared before timeout: reset the streak */
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;

trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	*counter = 0;
	return true;
}
/* Accumulate channel busy/active time into the survey state of the
 * current channel. Called periodically from mt7603_mac_work() and via
 * the mt76 update_channel hook.
 */
void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;
	ktime_t cur_time;
	u32 busy;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
	/* MIB counter — presumably primary/secondary CCA busy time; verify
	 * against the register sheet before relying on units.
	 */
	busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);

	/* cc_lock serializes against survey readers */
	spin_lock_bh(&dev->mt76.cc_lock);
	cur_time = ktime_get_boottime();
	state->cc_busy += busy;
	state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
	dev->survey_time = cur_time;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
/* Switch the ED/CCA (energy-detect clear channel assessment) thresholds
 * between strict and relaxed mode. In strict mode (or with the monitor
 * disabled) the ACI threshold is raised so that ED/CCA triggers less.
 * No-op if the requested mode is already active.
 */
static void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;	/* base value for the RXTD(6) register */

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	/* Relaxed mode uses a lower ACI threshold */
	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	/* ACI threshold checking only while monitoring in relaxed mode */
	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}
/* Periodic ED/CCA adaptation: track strong-signal presence and the
 * fraction of time the medium reports energy-detect busy, then move
 * between strict and relaxed ED/CCA modes via a hysteresis counter
 * (dev->ed_trigger, clamped to +/- MT7603_EDCCA_BLOCK_TH).
 */
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	/* RSSI fields are 8-bit two's complement; sign-extend by hand */
	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	/* Saturating counter of consecutive strong-signal observations */
	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	/* >90% ED-busy time counts toward strict mode; otherwise away.
	 * The counter restarts at zero whenever the direction flips.
	 */
	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	/* Clamp the hysteresis counter */
	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}
/* Reset the PHY CCA statistics counters: pulse the status-reset bit,
 * then re-enable statistics collection.
 */
void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}
/* Program the AGC registers for the current dev->sensitivity value.
 * Sensitivity is a dBm-like threshold maintained by
 * mt7603_false_cca_check(); zero (or out-of-range) selects the default
 * AGC values stored in dev->agc0/dev->agc3. Otherwise an adjustment
 * nibble derived from the threshold is merged into vendor-provided AGC
 * base values, with different bases per sensitivity band.
 * NOTE(review): the AGC base constants are vendor magic — verify against
 * the reference driver before modifying.
 */
static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		/* Out of usable range: fall back to calibrated defaults */
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;
		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;
		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x8181d5e3;
	} else {
		/* Clamp: never go less sensitive than -54 */
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;
		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x818181e3;
	}

	/* Mirror the values into both AGC register banks */
	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
/* Adaptive CCA sensitivity control, run periodically from
 * mt7603_mac_work(). Derives false-CCA counts (PD events without a
 * matching MDRDY) from PHY counters, then nudges dev->sensitivity up
 * when false CCAs are frequent and back down when they are rare, capped
 * by the minimum average RSSI of connected stations. Finally programs
 * the result via mt7603_adjust_sensitivity().
 */
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	/* Preamble-detect counters per modulation */
	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	/* Modem-ready counters per modulation */
	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	/* False CCA = detections that never produced a decodable frame */
	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
	if (!min_signal) {
		/* No stations with signal data: disable the adjustment */
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	/* Keep headroom below the weakest peer */
	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600) {
		/* Too many false detections: reduce sensitivity */
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		/* Quiet medium (or stale adjustment): slowly recover */
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	/* Never exceed the weakest connected station's signal */
	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}
/* Periodic MAC maintenance worker (delayed work, period
 * MT7603_WATCHDOG_TIME). Flushes TX status, updates survey and ED/CCA
 * state, runs the false-CCA check every 10th invocation, and polls all
 * hang detectors. If any detector trips, every watchdog counter and the
 * cached DMA indices are cleared and a full watchdog reset is performed
 * outside the device mutex. Always re-queues itself.
 */
void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mac_work.work);
	bool reset = false;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt7603_update_channel(&dev->mt76);
	mt7603_edcca_check(dev);

	/* Run the (more expensive) false-CCA adaptation every 10 ticks */
	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	/* Beacon-stuck and MCU-hang counters are advanced elsewhere, so
	 * those checks pass a NULL probe.
	 */
	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		/* Start all detectors from a clean slate after the reset */
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		/* Invalidate cached DMA indices used by mt7603_tx_hang() */
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	/* The reset path takes the mutex itself — run it unlocked */
	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}