Source: drivers/net/wireless/mediatek/mt76/mt7603/mac.c
From the thirdparty/linux.git mirror at git.ipfire.org
(blob at merge of branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/...)
1 /* SPDX-License-Identifier: ISC */
2
3 #include <linux/etherdevice.h>
4 #include <linux/timekeeping.h>
5 #include "mt7603.h"
6 #include "mac.h"
7
8 #define MT_PSE_PAGE_SIZE 128
9
10 static u32
11 mt7603_ac_queue_mask0(u32 mask)
12 {
13 u32 ret = 0;
14
15 ret |= GENMASK(3, 0) * !!(mask & BIT(0));
16 ret |= GENMASK(8, 5) * !!(mask & BIT(1));
17 ret |= GENMASK(13, 10) * !!(mask & BIT(2));
18 ret |= GENMASK(19, 16) * !!(mask & BIT(3));
19 return ret;
20 }
21
22 static void
23 mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
24 {
25 mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
26 }
27
28 static void
29 mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
30 {
31 mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
32 }
33
/*
 * Program MAC timing parameters: CCK/OFDM timeout values (stretched by
 * 3 * coverage_class, presumably in microseconds — TODO confirm units),
 * EIFS/RIFS/SIFS/slot time, and the CF-End rate.  TX/RX is disabled in
 * the arbiter while the registers are rewritten.
 */
void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	/* 5 GHz uses a 16us SIFS, 2.4 GHz uses 10us */
	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	/* quiesce the arbiter while the timing registers change */
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	/* NOTE(review): short slot time appears to select the default
	 * (non-11B) CF-End rate — confirm against the datasheet */
	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
73
74 static void
75 mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
76 {
77 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
78 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
79
80 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
81 }
82
83 static u32
84 mt7603_wtbl1_addr(int idx)
85 {
86 return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
87 }
88
89 static u32
90 mt7603_wtbl2_addr(int idx)
91 {
92 /* Mapped to WTBL2 */
93 return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
94 }
95
96 static u32
97 mt7603_wtbl3_addr(int idx)
98 {
99 u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);
100
101 return base + idx * MT_WTBL3_SIZE;
102 }
103
104 static u32
105 mt7603_wtbl4_addr(int idx)
106 {
107 u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);
108
109 return base + idx * MT_WTBL4_SIZE;
110 }
111
/*
 * Initialize WTBL entry @idx: program the peer MAC address and owning
 * interface index into WTBL1, then zero the WTBL2/3/4 sections.
 * A negative @vif means the entry is not bound to an interface, so the
 * A1 address check is left disabled.
 */
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		/* address is split across W0 (high 16 bits) and W1 */
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	/* wait for any pending WTBL update before touching the entry */
	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	/* stop all ACs while WTBL2 is cleared and re-synced */
	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
}
154
155 static void
156 mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
157 {
158 u32 addr = mt7603_wtbl1_addr(idx);
159 u32 val = mt76_rr(dev, addr + 3 * 4);
160
161 val &= ~MT_WTBL1_W3_SKIP_TX;
162 val |= enabled * MT_WTBL1_W3_SKIP_TX;
163
164 mt76_wr(dev, addr + 3 * 4, val);
165 }
166
/*
 * Flush all pending TX frames queued for WTBL entry @idx.  With @abort
 * the frames are routed to the PSE free queue (dropped); otherwise they
 * are redirected to the HIF MCU queue.
 */
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	/* keep the entry from transmitting while its queues are flushed */
	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
			FIELD_PREP(MT_TX_ABORT_WCID, idx));

	/* flush each of the four AC queues and wait for completion */
	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}
199
200 void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
201 bool enabled)
202 {
203 u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);
204
205 if (sta->smps == enabled)
206 return;
207
208 mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
209 sta->smps = enabled;
210 }
211
/*
 * Toggle the hardware powersave state of @sta.  When entering PS, the
 * station's pending frames are first redirected out of the PSE via
 * mt7603_filter_tx().  Serialized by dev->ps_lock.
 */
void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	/* program the PSE redirect for this station and wait for it */
	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	/* the POWER_SAVE bit must be written via the PSM-write window */
	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}
245
/*
 * Reset WTBL entry @idx to its default state: restore the WTBL2/3/4
 * frame/entry linkage fields in WTBL1, clear BA information and reset
 * the RX/TX/admission counters.
 */
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	/* locate this entry's WTBL2/3/4 blocks within the PSE pages */
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	/* wait for any pending WTBL update before touching the entry */
	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	/* W3/W4 carry PSM state: write them via the PSM-write window */
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	/* stop all ACs while WTBL2 words 2..4 are cleared and re-synced */
	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}
303
304 void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
305 {
306 struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
307 int idx = msta->wcid.idx;
308 u32 addr;
309 u32 val;
310
311 addr = mt7603_wtbl1_addr(idx);
312
313 val = mt76_rr(dev, addr + 2 * 4);
314 val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
315 val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
316 FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
317 MT_WTBL1_W2_TXS_BAF_REPORT;
318
319 if (sta->ht_cap.cap)
320 val |= MT_WTBL1_W2_HT;
321 if (sta->vht_cap.cap)
322 val |= MT_WTBL1_W2_VHT;
323
324 mt76_wr(dev, addr + 2 * 4, val);
325
326 addr = mt7603_wtbl2_addr(idx);
327 val = mt76_rr(dev, addr + 9 * 4);
328 val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
329 MT_WTBL2_W9_SHORT_GI_80);
330 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
331 val |= MT_WTBL2_W9_SHORT_GI_20;
332 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
333 val |= MT_WTBL2_W9_SHORT_GI_40;
334 mt76_wr(dev, addr + 9 * 4, val);
335 }
336
337 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
338 {
339 mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
340 mt76_wr(dev, MT_BA_CONTROL_1,
341 (get_unaligned_le16(addr + 4) |
342 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
343 MT_BA_CONTROL_1_RESET));
344 }
345
/*
 * Enable the TX block-ack session for @tid of WTBL entry @wcid, encoding
 * the BA window as the largest MT_AGG_SIZE_LIMIT() step not exceeding
 * @ba_size.  A negative @ba_size disables the session instead.
 */
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	/* per-TID enable bit plus this TID's window-size field in W15 */
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	/* pick the largest window-size index that still fits ba_size */
	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
372
373 static int
374 mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
375 int idx, bool cck)
376 {
377 int offset = 0;
378 int len = sband->n_bitrates;
379 int i;
380
381 if (cck) {
382 if (sband == &dev->mt76.sband_5g.sband)
383 return 0;
384
385 idx &= ~BIT(2); /* short preamble */
386 } else if (sband == &dev->mt76.sband_2g.sband) {
387 offset = 4;
388 }
389
390 for (i = offset; i < len; i++) {
391 if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
392 return i;
393 }
394
395 return 0;
396 }
397
/*
 * Map a hardware wlan_idx from the RX descriptor to its mt76_wcid.
 * Unicast frames map directly; group-addressed frames received via a
 * known station are accounted to that station's per-interface wcid
 * instead.  Must be called under rcu_read_lock().
 */
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}
420
/*
 * Re-insert the 8-byte CCMP header that the hardware stripped on RX,
 * rebuilding it from the PN saved in status->iv, and clear
 * RX_FLAG_IV_STRIPPED accordingly.
 */
static void
mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *pn = status->iv;
	u8 *hdr;

	/* open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* Ext IV bit + key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
444
445 int
446 mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
447 {
448 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
449 struct ieee80211_supported_band *sband;
450 struct ieee80211_hdr *hdr;
451 __le32 *rxd = (__le32 *)skb->data;
452 u32 rxd0 = le32_to_cpu(rxd[0]);
453 u32 rxd1 = le32_to_cpu(rxd[1]);
454 u32 rxd2 = le32_to_cpu(rxd[2]);
455 bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
456 bool insert_ccmp_hdr = false;
457 bool remove_pad;
458 int idx;
459 int i;
460
461 memset(status, 0, sizeof(*status));
462
463 i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
464 sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
465 i >>= 1;
466
467 idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
468 status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);
469
470 status->band = sband->band;
471 if (i < sband->n_channels)
472 status->freq = sband->channels[i].center_freq;
473
474 if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
475 status->flag |= RX_FLAG_FAILED_FCS_CRC;
476
477 if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
478 status->flag |= RX_FLAG_MMIC_ERROR;
479
480 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
481 !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
482 status->flag |= RX_FLAG_DECRYPTED;
483 status->flag |= RX_FLAG_IV_STRIPPED;
484 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
485 }
486
487 remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
488
489 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
490 return -EINVAL;
491
492 if (!sband->channels)
493 return -EINVAL;
494
495 rxd += 4;
496 if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
497 rxd += 4;
498 if ((u8 *)rxd - skb->data >= skb->len)
499 return -EINVAL;
500 }
501 if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
502 u8 *data = (u8 *)rxd;
503
504 if (status->flag & RX_FLAG_DECRYPTED) {
505 status->iv[0] = data[5];
506 status->iv[1] = data[4];
507 status->iv[2] = data[3];
508 status->iv[3] = data[2];
509 status->iv[4] = data[1];
510 status->iv[5] = data[0];
511
512 insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
513 }
514
515 rxd += 4;
516 if ((u8 *)rxd - skb->data >= skb->len)
517 return -EINVAL;
518 }
519 if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
520 rxd += 2;
521 if ((u8 *)rxd - skb->data >= skb->len)
522 return -EINVAL;
523 }
524 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
525 u32 rxdg0 = le32_to_cpu(rxd[0]);
526 u32 rxdg3 = le32_to_cpu(rxd[3]);
527 bool cck = false;
528
529 i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
530 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
531 case MT_PHY_TYPE_CCK:
532 cck = true;
533 /* fall through */
534 case MT_PHY_TYPE_OFDM:
535 i = mt7603_get_rate(dev, sband, i, cck);
536 break;
537 case MT_PHY_TYPE_HT_GF:
538 case MT_PHY_TYPE_HT:
539 status->encoding = RX_ENC_HT;
540 if (i > 15)
541 return -EINVAL;
542 break;
543 default:
544 return -EINVAL;
545 }
546
547 if (rxdg0 & MT_RXV1_HT_SHORT_GI)
548 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
549 if (rxdg0 & MT_RXV1_HT_AD_CODE)
550 status->enc_flags |= RX_ENC_FLAG_LDPC;
551
552 status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
553 FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
554
555 status->rate_idx = i;
556
557 status->chains = dev->mt76.antenna_mask;
558 status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
559 dev->rssi_offset[0];
560 status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
561 dev->rssi_offset[1];
562
563 status->signal = status->chain_signal[0];
564 if (status->chains & BIT(1))
565 status->signal = max(status->signal,
566 status->chain_signal[1]);
567
568 if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
569 status->bw = RATE_INFO_BW_40;
570
571 rxd += 6;
572 if ((u8 *)rxd - skb->data >= skb->len)
573 return -EINVAL;
574 } else {
575 return -EINVAL;
576 }
577
578 skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
579
580 if (insert_ccmp_hdr) {
581 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
582
583 mt7603_insert_ccmp_hdr(skb, key_id);
584 }
585
586 hdr = (struct ieee80211_hdr *)skb->data;
587 if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
588 return 0;
589
590 status->aggr = unicast &&
591 !ieee80211_is_qos_nullfunc(hdr->frame_control);
592 status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
593 status->seqno = hdr->seq_ctrl >> 4;
594
595 return 0;
596 }
597
598 static u16
599 mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
600 const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
601 {
602 u8 phy, nss, rate_idx;
603 u16 rateval;
604
605 *bw = 0;
606 if (rate->flags & IEEE80211_TX_RC_MCS) {
607 rate_idx = rate->idx;
608 nss = 1 + (rate->idx >> 3);
609 phy = MT_PHY_TYPE_HT;
610 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
611 phy = MT_PHY_TYPE_HT_GF;
612 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
613 *bw = 1;
614 } else {
615 const struct ieee80211_rate *r;
616 int band = dev->mt76.chandef.chan->band;
617 u16 val;
618
619 nss = 1;
620 r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
621 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
622 val = r->hw_value_short;
623 else
624 val = r->hw_value;
625
626 phy = val >> 8;
627 rate_idx = val & 0xff;
628 }
629
630 rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
631 FIELD_PREP(MT_TX_RATE_MODE, phy));
632
633 if (stbc && nss == 1)
634 rateval |= MT_TX_RATE_STBC;
635
636 return rateval;
637 }
638
/*
 * Program the hardware rate table (RIUCR registers) for @sta from the
 * mac80211 rate set, optionally placing @probe_rate in the first slot.
 * Also updates the bandwidth-selection fields of WTBL2 word 9 and
 * triggers a WTBL rate update.
 * Note: pads @rates up to 4 entries in place.
 */
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	int i;

	/* bail out if the WTBL update unit stays busy */
	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	/* pad the table by repeating the last valid rate */
	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	/* keep only the short-GI capability bits from the old word */
	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	/* track at which retry slot the bandwidth first changes */
	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	/* the rate entries straddle register boundaries: RATE2 and RATE5
	 * are split into LO/HI halves across RIUCR1..RIUCR3 */
	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	/* on the first rate programming for this sta, wait for it to land */
	if (!sta->wcid.tx_rate_set)
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_rate_set = true;
}
726
727 static enum mt7603_cipher_type
728 mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
729 {
730 memset(key_data, 0, 32);
731 if (!key)
732 return MT_CIPHER_NONE;
733
734 if (key->keylen > 32)
735 return MT_CIPHER_NONE;
736
737 memcpy(key_data, key->key, key->keylen);
738
739 switch (key->cipher) {
740 case WLAN_CIPHER_SUITE_WEP40:
741 return MT_CIPHER_WEP40;
742 case WLAN_CIPHER_SUITE_WEP104:
743 return MT_CIPHER_WEP104;
744 case WLAN_CIPHER_SUITE_TKIP:
745 /* Rx/Tx MIC keys are swapped */
746 memcpy(key_data + 16, key->key + 24, 8);
747 memcpy(key_data + 24, key->key + 16, 8);
748 return MT_CIPHER_TKIP;
749 case WLAN_CIPHER_SUITE_CCMP:
750 return MT_CIPHER_AES_CCMP;
751 default:
752 return MT_CIPHER_NONE;
753 }
754 }
755
/*
 * Write the key material for WTBL entry @wcid into WTBL3 and update the
 * cipher type / key index / key-valid fields in WTBL1.  A NULL @key
 * clears the key.  Returns 0 or -EOPNOTSUPP for unsupported ciphers.
 */
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	/* WEP keys are stored per key index in 16-byte slots */
	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}
783
/*
 * Fill the 8-word TXWI hardware descriptor for @skb: byte count and
 * queue index, owner MAC/WTBL index, frame type, fixed-rate overrides,
 * sequence number and (for protected frames) the PN from @key.
 * Always returns 0.
 */
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_queue *q,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		/* NOTE(review): beacon/mcast queues appear to use an
		 * offset OMAC index range (+0x10) — confirm */
		if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	/* mac80211 selected an explicit rate for this frame */
	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	/* pass the frame's own SN to the hardware where one exists */
	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}
912
/*
 * mt76 .tx_prepare_skb hook: allocate a TX-status packet id, clear the
 * hardware powersave state when appropriate, start rate probing if
 * requested, and fill the TXWI descriptor.  Always returns 0.
 */
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  struct sk_buff *skb, struct mt76_queue *q,
			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			  u32 *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	/* NOTE(review): when wcid is NULL this initial msta is not a valid
	 * pointer; it appears to only be dereferenced on paths where sta
	 * is set (which reassigns it) — confirm the probe path cannot be
	 * reached with a NULL wcid */
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		/* frames that answer PS-poll / clear the PS filter also
		 * take the station out of hardware powersave */
		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, skb);

	/* temporarily put the probe rate into the hardware rate table;
	 * it is reverted when the TX status report arrives */
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		msta->rate_probe = true;
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);

	return 0;
}
950
/*
 * Translate a hardware TX status report into ieee80211_tx_info: ACK
 * state, per-rate retry counts, and the final rate actually used.
 * Returns false when the report should be discarded (RTS/queue timeout
 * or an inconsistent final rate value).
 */
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	int final_idx = 0;
	u32 final_rate;
	u32 final_rate_flags;
	bool final_mpdu;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	u8 pid;
	int idx;
	int i;

	/* a non-zero count in slot 0 marks a driver-fixed rate */
	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	final_mpdu = txs & MT_TXS4_ACKED_MPDU;
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	/* distribute the total transmit count over the rate table slots,
	 * at most 2 * MT7603_RATE_RETRY attempts per slot */
	for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);

		if (!i && probe) {
			cur_count = 1;
		} else {
			info->status.rates[i] = sta->rates[idx];
			idx++;
		}

		if (i && info->status.rates[i].idx < 0) {
			/* table exhausted: charge the rest to the last slot */
			info->status.rates[i - 1].count += count;
			break;
		}

		if (!count) {
			info->status.rates[i].idx = -1;
			break;
		}

		info->status.rates[i].count = cur_count;
		final_idx = i;
		count -= cur_count;
	}

out:
	final_rate_flags = info->status.rates[final_idx].flags;

	/* decode the hardware rate value of the final attempt */
	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[final_idx].idx = final_rate;
	info->status.rates[final_idx].flags = final_rate_flags;

	return true;
}
1062
/*
 * Match a TX status report against a queued status skb (by packet id
 * @pid) and complete it.  Also reverts a pending rate probe once its
 * status has arrived.  Returns true when an skb was found.
 */
static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* ids below MT_PACKET_ID_FIRST never have a status skb */
	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			spin_lock_bh(&dev->mt76.lock);
			if (sta->rate_probe) {
				/* probe done: restore the normal rate set */
				mt7603_wtbl_set_rates(dev, sta, NULL,
						      sta->rates);
				sta->rate_probe = false;
			}
			spin_unlock_bh(&dev->mt76.lock);
		}

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			/* unusable report: complete with "no rate info" */
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1100
/*
 * Entry point for TX status events from the hardware: look up the
 * reporting station, try to match a queued status skb by packet id, and
 * otherwise fall back to an skb-less status report for rate control.
 */
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	/* no-ack frames carry no useful status */
	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	/* skb-less reporting only for real station entries */
	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}
1144
1145 void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
1146 struct mt76_queue_entry *e, bool flush)
1147 {
1148 struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
1149 struct sk_buff *skb = e->skb;
1150
1151 if (!e->txwi) {
1152 dev_kfree_skb_any(skb);
1153 return;
1154 }
1155
1156 if (q - dev->mt76.q_tx < 4)
1157 dev->tx_hang_check = 0;
1158
1159 mt76_tx_complete_skb(mdev, skb);
1160 }
1161
1162 static bool
1163 wait_for_wpdma(struct mt7603_dev *dev)
1164 {
1165 return mt76_poll(dev, MT_WPDMA_GLO_CFG,
1166 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
1167 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
1168 0, 1000);
1169 }
1170
/* Reset the PSE (packet buffer) hardware block.
 *
 * Consecutive failures are tracked in
 * dev->reset_cause[RESET_CAUSE_RESET_FAILED] so the watchdog can skip
 * the DMA reset path while the PSE is still in a bad state.
 */
static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	/* Wait up to 500ms for the reset-done status bit */
	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		/* Failed: remember it and back out the reset request */
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	/* After three consecutive failures, clear the counter so the
	 * full reset path is attempted again from scratch
	 */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}
1193
/* Start the MAC, then enable the WPDMA TX/RX engines and DMA done
 * interrupts.
 */
void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	/* Let any residual DMA activity settle before enabling */
	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}
1209
/* Enable MAC TX/RX: clear the arbiter disable bits, start all TX ACs
 * and enable RX requests.
 */
void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}
1217
/* Disable MAC TX/RX: mirror image of mt7603_mac_start() */
void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}
1225
/* Abort the PSE client TX path and drain its FIFO.
 *
 * Used by the watchdog DMA reset so that no stale frames survive the
 * reset. The two-stage abort sequence and its ordering follow the
 * vendor reset procedure.
 */
void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	/* The reset control register is reached via an indirect mapping */
	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
			      MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	/* Stage two, combined with a WPDMA software reset */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}
1257
1258 static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
1259 {
1260 if (!is_mt7628(dev))
1261 return;
1262
1263 mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
1264 mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
1265 }
1266
/* Full MAC/DMA recovery, invoked from mt7603_mac_work() once one of
 * the watchdog checks fired. Quiesces all traffic, optionally resets
 * the PSE, tears down and restarts the DMA queues, then resumes
 * operation. Statement ordering here is deliberate.
 */
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mt76);

	/* Stop all software contexts that might touch the hardware */
	tasklet_disable(&dev->tx_tasklet);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);

	mutex_lock(&dev->mt76.mutex);

	/* Suspend beaconing for the duration of the reset */
	mt7603_beacon_set_timer(dev, -1, 0);

	/* A PSE reset is needed for causes involving the packet buffer,
	 * or when a previous reset attempt failed
	 */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	/* If the PSE reset failed, don't touch DMA - retry next round */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	/* Force-terminate any in-flight TX DMA transfer */
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	/* Flush all pending tx entries and reset the rx rings */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mt76.state);
	mutex_unlock(&dev->mt76.mutex);

	/* Resume tx scheduling, beacons, napi polling and mac80211 queues */
	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mt76);
}
1341
1342 static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
1343 {
1344 u32 val;
1345
1346 mt76_wr(dev, MT_WPDMA_DEBUG,
1347 FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
1348 MT_WPDMA_DEBUG_SEL);
1349
1350 val = mt76_rr(dev, MT_WPDMA_DEBUG);
1351 return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
1352 }
1353
1354 static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
1355 {
1356 if (is_mt7628(dev))
1357 return mt7603_dma_debug(dev, 9) & BIT(9);
1358
1359 return mt7603_dma_debug(dev, 2) & BIT(8);
1360 }
1361
1362 static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
1363 {
1364 if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
1365 return false;
1366
1367 return mt7603_rx_fifo_busy(dev);
1368 }
1369
1370 static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
1371 {
1372 u32 val;
1373
1374 if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
1375 return false;
1376
1377 val = mt7603_dma_debug(dev, 9);
1378 return (val & BIT(8)) && (val & 0xf) != 0xf;
1379 }
1380
1381 static bool mt7603_tx_hang(struct mt7603_dev *dev)
1382 {
1383 struct mt76_queue *q;
1384 u32 dma_idx, prev_dma_idx;
1385 int i;
1386
1387 for (i = 0; i < 4; i++) {
1388 q = &dev->mt76.q_tx[i];
1389
1390 if (!q->queued)
1391 continue;
1392
1393 prev_dma_idx = dev->tx_dma_idx[i];
1394 dma_idx = ioread32(&q->regs->dma_idx);
1395 dev->tx_dma_idx[i] = dma_idx;
1396
1397 if (dma_idx == prev_dma_idx &&
1398 dma_idx != ioread32(&q->regs->cpu_idx))
1399 break;
1400 }
1401
1402 return i < 4;
1403 }
1404
/* Check whether the RX side of the PSE (packet buffer) has stalled.
 * Returns true when the watchdog should count towards a
 * RESET_CAUSE_RX_PSE_BUSY reset.
 */
static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	/* A previous reset already flagged the queues as stuck */
	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	/* RX FIFO still moving data means the PSE is not stalled */
	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	/* NOTE(review): the status bit patterns below are undocumented
	 * hardware magic - confirm against the vendor reference before
	 * changing them
	 */
	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}
1424
1425 static bool
1426 mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
1427 enum mt7603_reset_cause cause,
1428 bool (*check)(struct mt7603_dev *dev))
1429 {
1430 if (dev->reset_test == cause + 1) {
1431 dev->reset_test = 0;
1432 goto trigger;
1433 }
1434
1435 if (check) {
1436 if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
1437 *counter = 0;
1438 return false;
1439 }
1440
1441 (*counter)++;
1442 }
1443
1444 if (*counter < MT7603_WATCHDOG_TIMEOUT)
1445 return false;
1446 trigger:
1447 dev->cur_reset_cause = cause;
1448 dev->reset_cause[cause]++;
1449 return true;
1450 }
1451
/* Accumulate per-channel busy/active time for survey reporting.
 * Called periodically from mt7603_mac_work().
 */
void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;
	ktime_t cur_time;
	u32 busy;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
	/* Hardware CCA-busy time counter since the last read */
	busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);

	/* cc_lock serializes survey accounting with the mt76 core */
	spin_lock_bh(&dev->mt76.cc_lock);
	cur_time = ktime_get_boottime();
	state->cc_busy += busy;
	state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
	dev->survey_time = cur_time;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
1472
1473 void
1474 mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
1475 {
1476 u32 rxtd_6 = 0xd7c80000;
1477
1478 if (val == dev->ed_strict_mode)
1479 return;
1480
1481 dev->ed_strict_mode = val;
1482
1483 /* Ensure that ED/CCA does not trigger if disabled */
1484 if (!dev->ed_monitor)
1485 rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
1486 else
1487 rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);
1488
1489 if (dev->ed_monitor && !dev->ed_strict_mode)
1490 rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
1491 else
1492 rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);
1493
1494 mt76_wr(dev, MT_RXTD(6), rxtd_6);
1495
1496 mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
1497 dev->ed_monitor && !dev->ed_strict_mode);
1498 }
1499
/* Periodic ED/CCA monitoring: measure the fraction of time the
 * energy-detect circuit was busy and switch between strict and relaxed
 * ED/CCA mode with hysteresis (ed_trigger / ed_strong_signal counters).
 */
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	/* Sign-extend the 8-bit per-chain RSSI readings.
	 * NOTE(review): the comparison is > 128, so a raw value of
	 * exactly 128 stays positive - confirm this is intended.
	 */
	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	/* Track for how long a strong (>= -40 dBm) signal is present */
	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	/* ed_trigger counts consecutive intervals above (positive) or
	 * below (negative) 90% ED busy time; crossing the threshold in
	 * either direction resets the count
	 */
	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	/* Enter strict mode on sustained ED busy or lack of a strong
	 * signal; leave it after a sustained quiet period
	 */
	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	/* Clamp the trigger counter to avoid wind-up */
	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}
1557
/* Pulse the PHY status counter reset bit and re-enable counting, so
 * the PD/MDRDY counters restart from zero.
 */
void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}
1564
/* Program the AGC registers for the sensitivity threshold selected by
 * mt7603_false_cca_check(). dev->sensitivity is a (negative) dBm
 * value; 0 or out-of-range values restore the default AGC words
 * (dev->agc0/agc3).
 *
 * NOTE(review): the AGC base words, adjustment formulas and shift
 * positions below are undocumented vendor values - verify against the
 * reference code before changing.
 */
static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		/* Out of range: keep the saved default AGC values */
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		/* Clamp: never desensitize beyond -54 dBm */
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	/* Apply to both RX chains */
	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
1610
/* Estimate the false-CCA rate (PD events that never produced MDRDY)
 * and adapt RX sensitivity: desensitize in 2 dB steps when false CCA
 * is high, slowly restore sensitivity when it is low or after a long
 * quiet period. The result is applied via mt7603_adjust_sensitivity().
 */
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	/* Read preamble-detect and modem-ready counters for both PHYs */
	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	/* A PD without a matching MDRDY counts as false CCA */
	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	/* Restart the counters for the next measurement window */
	mt7603_cca_stats_reset(dev);

	/* Without station RSSI data, leave sensitivity at default */
	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	/* Keep a 15 dB margin below the weakest associated station */
	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600) {
		/* Too many false detections: reduce sensitivity */
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		/* Quiet again (or stale adjustment): step back towards
		 * full sensitivity
		 */
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	/* Never desensitize past the weakest station's margin */
	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}
1665
/* Periodic maintenance work, rescheduled every MT7603_WATCHDOG_TIME:
 * tx-status housekeeping, channel survey and EDCCA updates, false-CCA
 * tuning (every 10th run) and hang detection. When any watchdog check
 * fires, all counters are cleared and a full MAC reset is performed
 * outside the device mutex.
 */
void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mac_work.work);
	bool reset = false;

	/* Expire stale tx status entries before taking the mutex */
	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt7603_update_channel(&dev->mt76);
	mt7603_edcca_check(dev);

	/* Run the (more expensive) false-CCA tuning every 10th pass */
	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	/* Evaluate all hang conditions; any one of them (or a pending
	 * failed reset) schedules a full watchdog reset
	 */
	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		/* Start all watchdog state from scratch after the reset */
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	/* The reset itself takes the mutex again, so run it unlocked */
	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}