1 /*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16 #include <linux/kernel.h>
17 #include <net/mac80211.h>
18
19 #include <bcmdefs.h>
20 #include <bcmutils.h>
21 #include <siutils.h>
22 #include <wlioctl.h>
23 #include <sbhnddma.h>
24 #include <hnddma.h>
25 #include <d11.h>
26
27 #include "wlc_types.h"
28 #include "wlc_cfg.h"
29 #include "wlc_rate.h"
30 #include "wlc_scb.h"
31 #include "wlc_pub.h"
32 #include "wlc_key.h"
33 #include "phy/wlc_phy_hal.h"
34 #include "wlc_antsel.h"
35 #include "wl_export.h"
36 #include "wl_dbg.h"
37 #include "wlc_channel.h"
38 #include "wlc_main.h"
39 #include "wlc_ampdu.h"
40
41 /*
42 * Disable AMPDU statistics counters for now
43 */
44 #define WLCNTINCR(a)
45 #define WLCNTADD(a, b)
46
47 #define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
48 #define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy */
49 #define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
50 #define AMPDU_TX_BA_DEF_WSIZE 64 /* default Tx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE	64	/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE	64	/* max Rx ba window size (in pdu) */
53 #define AMPDU_MAX_DUR 5 /* max dur of tx ampdu (in msec) */
54 #define AMPDU_DEF_RETRY_LIMIT 5 /* default tx retry limit */
55 #define AMPDU_DEF_RR_RETRY_LIMIT 2 /* default tx retry limit at reg rate */
56 #define AMPDU_DEF_TXPKT_WEIGHT 2 /* default weight of ampdu in txfifo */
57 #define AMPDU_DEF_FFPLD_RSVD 2048 /* default ffpld reserved bytes */
58 #define AMPDU_INI_FREE 10 /* # of inis to be freed on detach */
59 #define AMPDU_SCB_MAX_RELEASE 20 /* max # of mpdus released at a time */
60
61 #define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */
62 #define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
63 * without underflows
64 */
65 #define FFPLD_MPDU_SIZE 1800 /* estimate of maximum mpdu size */
66 #define FFPLD_MAX_MCS 23 /* we don't deal with mcs 32 */
67 #define FFPLD_PLD_INCR 1000 /* increments in bytes */
68 #define FFPLD_MAX_AMPDU_CNT 5000 /* maximum number of ampdu we
69 * accumulate between resets.
70 */
71
72 #define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)
73
74 /* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
75 #define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
76 AMPDU_DELIMITER_LEN + 3\
77 + DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)
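/*
 * Illustrative only, assuming the usual 802.11/driver constants (FCS_LEN 4,
 * DOT11_ICV_AES_LEN 8, AMPDU_DELIMITER_LEN 4, DOT11_A4_HDR_LEN 30,
 * DOT11_QOS_LEN 2, DOT11_IV_MAX_LEN 8): the per-mpdu overhead works out to
 * 4 + 8 + 4 + 3 + 30 + 2 + 8 = 59 bytes.
 */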
78
79 #ifdef BCMDBG
80 u32 wl_ampdu_dbg =
81 WL_AMPDU_UPDN_VAL |
82 WL_AMPDU_ERR_VAL |
83 WL_AMPDU_TX_VAL |
84 WL_AMPDU_RX_VAL |
85 WL_AMPDU_CTL_VAL |
86 WL_AMPDU_HW_VAL | WL_AMPDU_HWTXS_VAL | WL_AMPDU_HWDBG_VAL;
87 #endif
88
/* structure to hold tx fifo information and pre-loading state
 * counters specific to tx underflows of ampdus.
 * Some counters might be redundant with the ones in wlc or ampdu structures;
 * keeping them here allows this state to be maintained independently of
 * how often and/or when the wlc counters are updated.
 */
95 typedef struct wlc_fifo_info {
96 u16 ampdu_pld_size; /* number of bytes to be pre-loaded */
97 u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; /* per-mcs max # of mpdus in an ampdu */
98 u16 prev_txfunfl; /* num of underflows last read from the HW macstats counter */
99 u32 accum_txfunfl; /* num of underflows since we modified pld params */
100 u32 accum_txampdu; /* num of tx ampdu since we modified pld params */
101 u32 prev_txampdu; /* previous reading of tx ampdu */
102 u32 dmaxferrate; /* estimated dma avg xfer rate in kbits/sec */
103 } wlc_fifo_info_t;
104
105 /* AMPDU module specific state */
106 struct ampdu_info {
107 struct wlc_info *wlc; /* pointer to main wlc structure */
108 int scb_handle; /* scb cubby handle to retrieve data from scb */
109 u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
110 u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
111 u8 ba_rx_wsize; /* Rx ba window size (in pdu) */
112 u8 retry_limit; /* mpdu transmit retry limit */
113 u8 rr_retry_limit; /* mpdu transmit retry limit at regular rate */
114 u8 retry_limit_tid[AMPDU_MAX_SCB_TID]; /* per-tid mpdu transmit retry limit */
115 /* per-tid mpdu transmit retry limit at regular rate */
116 u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
117 u8 mpdu_density; /* min mpdu spacing (0-7) ==> 2^(x-1)/8 usec */
118 s8 max_pdu; /* max pdus allowed in ampdu */
119 u8 dur; /* max duration of an ampdu (in msec) */
120 u8 txpkt_weight; /* weight of ampdu in txfifo; reduces rate lag */
121 u8 rx_factor; /* maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes */
122 u32 ffpld_rsvd; /* number of bytes to reserve for preload */
123 u32 max_txlen[MCS_TABLE_SIZE][2][2]; /* max size of ampdu per mcs, bw and sgi */
124 void *ini_free[AMPDU_INI_FREE]; /* array of ini's to be freed on detach */
125 bool mfbr; /* enable multiple fallback rate */
	u32 tx_max_funl;	/* underflows should be kept such that
				 * (tx_max_funl * underflows) < tx frames
				 */
129 wlc_fifo_info_t fifo_tb[NUM_FFPLD_FIFO]; /* table of fifo infos */
130
131 };
132
133 #define AMPDU_CLEANUPFLAG_RX (0x1)
134 #define AMPDU_CLEANUPFLAG_TX (0x2)
135
136 #define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
137 #define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
138
139 static void wlc_ffpld_init(struct ampdu_info *ampdu);
140 static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int f);
141 static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
142
143 static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
144 scb_ampdu_t *scb_ampdu,
145 u8 tid, bool override);
146 static void ampdu_cleanup_tid_ini(struct ampdu_info *ampdu,
147 scb_ampdu_t *scb_ampdu,
148 u8 tid, bool force);
149 static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur);
150 static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb);
151 static void scb_ampdu_update_config_all(struct ampdu_info *ampdu);
152
153 #define wlc_ampdu_txflowcontrol(a, b, c) do {} while (0)
154
155 static void wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
156 struct scb *scb,
157 struct sk_buff *p, tx_status_t *txs,
158 u32 frmtxstatus, u32 frmtxstatus2);
159 static bool wlc_ampdu_cap(struct ampdu_info *ampdu);
160 static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on);
161
162 struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
163 {
164 struct ampdu_info *ampdu;
165 int i;
166
167 /* some code depends on packed structures */
168 ASSERT(DOT11_MAXNUMFRAGS == NBITS(u16));
169 ASSERT(ISPOWEROF2(AMPDU_TX_BA_MAX_WSIZE));
170 ASSERT(ISPOWEROF2(AMPDU_RX_BA_MAX_WSIZE));
171 ASSERT(wlc->pub->tunables->ampdunummpdu <= AMPDU_MAX_MPDU);
172 ASSERT(wlc->pub->tunables->ampdunummpdu > 0);
173
174 ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
175 if (!ampdu) {
176 WL_ERROR("wl%d: wlc_ampdu_attach: out of mem\n",
177 wlc->pub->unit);
178 return NULL;
179 }
180 ampdu->wlc = wlc;
181
182 for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
183 ampdu->ini_enable[i] = true;
184 /* Disable ampdu for VO by default */
185 ampdu->ini_enable[PRIO_8021D_VO] = false;
186 ampdu->ini_enable[PRIO_8021D_NC] = false;
187
188 /* Disable ampdu for BK by default since not enough fifo space */
189 ampdu->ini_enable[PRIO_8021D_NONE] = false;
190 ampdu->ini_enable[PRIO_8021D_BK] = false;
191
192 ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
193 ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
194 ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
195 ampdu->max_pdu = AUTO;
196 ampdu->dur = AMPDU_MAX_DUR;
197 ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
198
199 ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
200 /* bump max ampdu rcv size to 64k for all 11n devices except 4321A0 and 4321A1 */
201 if (WLCISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
202 ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
203 else
204 ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
205 ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
206 ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
207
208 for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
209 ampdu->retry_limit_tid[i] = ampdu->retry_limit;
210 ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
211 }
212
213 ampdu_update_max_txlen(ampdu, ampdu->dur);
214 ampdu->mfbr = false;
215 /* try to set ampdu to the default value */
216 wlc_ampdu_set(ampdu, wlc->pub->_ampdu);
217
218 ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
219 wlc_ffpld_init(ampdu);
220
221 return ampdu;
222 }
223
224 void wlc_ampdu_detach(struct ampdu_info *ampdu)
225 {
226 int i;
227
228 if (!ampdu)
229 return;
230
	/* free all ini's that were to be freed by callbacks that were never called */
232 for (i = 0; i < AMPDU_INI_FREE; i++) {
233 kfree(ampdu->ini_free[i]);
234 }
235
236 wlc_module_unregister(ampdu->wlc->pub, "ampdu", ampdu);
237 kfree(ampdu);
238 }
239
240 void scb_ampdu_cleanup(struct ampdu_info *ampdu, struct scb *scb)
241 {
242 scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
243 u8 tid;
244
245 WL_AMPDU_UPDN("scb_ampdu_cleanup: enter\n");
246 ASSERT(scb_ampdu);
247
248 for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++) {
249 ampdu_cleanup_tid_ini(ampdu, scb_ampdu, tid, false);
250 }
251 }
252
253 /* reset the ampdu state machine so that it can gracefully handle packets that were
254 * freed from the dma and tx queues during reinit
255 */
256 void wlc_ampdu_reset(struct ampdu_info *ampdu)
257 {
258 WL_NONE("%s: Entering\n", __func__);
259 }
260
261 static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb)
262 {
263 scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
264 int i;
265
266 scb_ampdu->max_pdu = (u8) ampdu->wlc->pub->tunables->ampdunummpdu;
267
268 /* go back to legacy size if some preloading is occurring */
269 for (i = 0; i < NUM_FFPLD_FIFO; i++) {
270 if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
271 scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
272 }
273
274 /* apply user override */
275 if (ampdu->max_pdu != AUTO)
276 scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
277
278 scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE);
279
280 if (scb_ampdu->max_rxlen)
281 scb_ampdu->release =
282 min_t(u8, scb_ampdu->release, scb_ampdu->max_rxlen / 1600);
283
284 scb_ampdu->release = min(scb_ampdu->release,
285 ampdu->fifo_tb[TX_AC_BE_FIFO].
286 mcs2ampdu_table[FFPLD_MAX_MCS]);
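	/*
	 * Illustrative only: with e.g. max_pdu 16, AMPDU_SCB_MAX_RELEASE 20,
	 * max_rxlen 65535 (65535 / 1600 = 40) and mcs2ampdu_table[] still at
	 * its 255 default, release ends up as min(16, 20, 40, 255) = 16.
	 */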
287
288 ASSERT(scb_ampdu->release);
289 }
290
291 void scb_ampdu_update_config_all(struct ampdu_info *ampdu)
292 {
293 scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
294 }
295
296 static void wlc_ffpld_init(struct ampdu_info *ampdu)
297 {
298 int i, j;
299 wlc_fifo_info_t *fifo;
300
301 for (j = 0; j < NUM_FFPLD_FIFO; j++) {
302 fifo = (ampdu->fifo_tb + j);
303 fifo->ampdu_pld_size = 0;
304 for (i = 0; i <= FFPLD_MAX_MCS; i++)
305 fifo->mcs2ampdu_table[i] = 255;
306 fifo->dmaxferrate = 0;
307 fifo->accum_txampdu = 0;
308 fifo->prev_txfunfl = 0;
309 fifo->accum_txfunfl = 0;
310
311 }
312 }
313
/* Evaluate the dma transfer rate using the tx underflows as feedback.
 * If necessary, increase tx fifo preloading. If that is not enough,
 * decrease the maximum ampdu size for each mcs till underflows stop.
 * Return 1 if pre-loading is not active, -1 if not an underflow event,
 * 0 if the pre-loading module took care of the event.
 */
320 static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
321 {
322 struct ampdu_info *ampdu = wlc->ampdu;
323 u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
324 u32 txunfl_ratio;
325 u8 max_mpdu;
326 u32 current_ampdu_cnt = 0;
327 u16 max_pld_size;
328 u32 new_txunfl;
329 wlc_fifo_info_t *fifo = (ampdu->fifo_tb + fid);
330 uint xmtfifo_sz;
331 u16 cur_txunfl;
332
333 /* return if we got here for a different reason than underflows */
334 cur_txunfl =
335 wlc_read_shm(wlc,
336 M_UCODE_MACSTAT + offsetof(macstat_t, txfunfl[fid]));
337 new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
338 if (new_txunfl == 0) {
339 WL_FFPLD("check_txunfl : TX status FRAG set but no tx underflows\n");
340 return -1;
341 }
342 fifo->prev_txfunfl = cur_txunfl;
343
344 if (!ampdu->tx_max_funl)
345 return 1;
346
347 /* check if fifo is big enough */
348 if (wlc_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz)) {
349 WL_FFPLD("check_txunfl : get xmtfifo_sz failed\n");
350 return -1;
351 }
352
353 if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
354 return 1;
355
356 max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
357 fifo->accum_txfunfl += new_txunfl;
358
359 /* we need to wait for at least 10 underflows */
360 if (fifo->accum_txfunfl < 10)
361 return 0;
362
363 WL_FFPLD("ampdu_count %d tx_underflows %d\n",
364 current_ampdu_cnt, fifo->accum_txfunfl);
365
366 /*
367 compute the current ratio of tx unfl per ampdu.
368 When the current ampdu count becomes too
369 big while the ratio remains small, we reset
370 the current count in order to not
371 introduce too big of a latency in detecting a
372 large amount of tx underflows later.
373 */
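	/*
	 * e.g. (illustrative): 5000 tx ampdus with 50 accumulated underflows
	 * gives a ratio of 100; with tx_max_funl at its FFPLD_TX_MAX_UNFL
	 * default of 200 this is below the threshold, so the pre-load /
	 * ampdu-size tuning below is applied.
	 */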
374
375 txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;
376
377 if (txunfl_ratio > ampdu->tx_max_funl) {
378 if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT) {
379 fifo->accum_txfunfl = 0;
380 }
381 return 0;
382 }
383 max_mpdu =
384 min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
385
	/* If the largest ampdu (max_mpdu mpdus of FFPLD_MPDU_SIZE bytes each)
	 * already fits within the current pre-load size, there is nothing
	 * more we can do.
	 */
389
390 if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
		WL_FFPLD("tx fifo pld : max ampdu fits in fifo\n");
392 fifo->accum_txfunfl = 0;
393 return 0;
394 }
395
396 if (fifo->ampdu_pld_size < max_pld_size) {
397
		/* increment by FFPLD_PLD_INCR bytes */
399 fifo->ampdu_pld_size += FFPLD_PLD_INCR;
400 if (fifo->ampdu_pld_size > max_pld_size)
401 fifo->ampdu_pld_size = max_pld_size;
402
403 /* update scb release size */
404 scb_ampdu_update_config_all(ampdu);
405
		/*
		 * compute a new dma xfer rate for max_mpdu @ max mcs.
		 * This is the minimum dma rate that can achieve no
		 * underflow condition for the current mpdu size.
		 */
		/* note : we divide/multiply by 100 to avoid integer overflows */
412 fifo->dmaxferrate =
413 (((phy_rate / 100) *
414 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
415 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
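		/*
		 * Illustrative only (assumed rate): MCS 23 at 40 MHz, long GI
		 * is roughly 405000 kbps; with max_mpdu 16, FFPLD_MPDU_SIZE
		 * 1800 and ampdu_pld_size 1000 this gives
		 * (4050 * (28800 - 1000) / 28800) * 100 = 390900 kbits/sec.
		 */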
416
417 WL_FFPLD("DMA estimated transfer rate %d; pre-load size %d\n",
418 fifo->dmaxferrate, fifo->ampdu_pld_size);
419 } else {
420
421 /* decrease ampdu size */
422 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
423 if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
424 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
425 AMPDU_NUM_MPDU_LEGACY - 1;
426 else
427 fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;
428
429 /* recompute the table */
430 wlc_ffpld_calc_mcs2ampdu_table(ampdu, fid);
431
432 /* update scb release size */
433 scb_ampdu_update_config_all(ampdu);
434 }
435 }
436 fifo->accum_txfunfl = 0;
437 return 0;
438 }
439
440 static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
441 {
442 int i;
443 u32 phy_rate, dma_rate, tmp;
444 u8 max_mpdu;
445 wlc_fifo_info_t *fifo = (ampdu->fifo_tb + f);
446
447 /* recompute the dma rate */
448 /* note : we divide/multiply by 100 to avoid integer overflows */
449 max_mpdu =
450 min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
451 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
452 dma_rate =
453 (((phy_rate / 100) *
454 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
455 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
456 fifo->dmaxferrate = dma_rate;
457
458 /* fill up the mcs2ampdu table; do not recalc the last mcs */
459 dma_rate = dma_rate >> 7;
460 for (i = 0; i < FFPLD_MAX_MCS; i++) {
461 /* shifting to keep it within integer range */
462 phy_rate = MCS_RATE(i, true, false) >> 7;
463 if (phy_rate > dma_rate) {
464 tmp = ((fifo->ampdu_pld_size * phy_rate) /
465 ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
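			/*
			 * Illustrative only (assumed numbers): for MCS 15 at
			 * 40 MHz, long GI, phy_rate >> 7 is 270000 >> 7 = 2109;
			 * with dma_rate 200000 >> 7 = 1562 and ampdu_pld_size
			 * 1000, tmp = 1000 * 2109 / ((2109 - 1562) * 1800) + 1
			 * = 3, i.e. at most 3 mpdus per ampdu at that mcs.
			 */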
466 tmp = min_t(u32, tmp, 255);
467 fifo->mcs2ampdu_table[i] = (u8) tmp;
468 }
469 }
470 }
471
472 static void BCMFASTPATH
473 wlc_ampdu_agg(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p,
474 uint prec)
475 {
476 scb_ampdu_t *scb_ampdu;
477 scb_ampdu_tid_ini_t *ini;
478 u8 tid = (u8) (p->priority);
479
480 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
481
482 /* initialize initiator on first packet; sends addba req */
483 ini = SCB_AMPDU_INI(scb_ampdu, tid);
484 if (ini->magic != INI_MAGIC) {
485 ini = wlc_ampdu_init_tid_ini(ampdu, scb_ampdu, tid, false);
486 }
487 return;
488 }
489
490 int BCMFASTPATH
491 wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
492 struct sk_buff **pdu, int prec)
493 {
494 struct wlc_info *wlc;
495 struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
496 u8 tid, ndelim;
497 int err = 0;
498 u8 preamble_type = WLC_GF_PREAMBLE;
499 u8 fbr_preamble_type = WLC_GF_PREAMBLE;
500 u8 rts_preamble_type = WLC_LONG_PREAMBLE;
501 u8 rts_fbr_preamble_type = WLC_LONG_PREAMBLE;
502
503 bool rr = true, fbr = false;
504 uint i, count = 0, fifo, seg_cnt = 0;
505 u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
506 u32 ampdu_len, maxlen = 0;
507 d11txh_t *txh = NULL;
508 u8 *plcp;
509 struct ieee80211_hdr *h;
510 struct scb *scb;
511 scb_ampdu_t *scb_ampdu;
512 scb_ampdu_tid_ini_t *ini;
513 u8 mcs = 0;
514 bool use_rts = false, use_cts = false;
515 ratespec_t rspec = 0, rspec_fallback = 0;
516 ratespec_t rts_rspec = 0, rts_rspec_fallback = 0;
517 u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
518 struct ieee80211_rts *rts;
519 u8 rr_retry_limit;
520 wlc_fifo_info_t *f;
521 bool fbr_iscck;
522 struct ieee80211_tx_info *tx_info;
523 u16 qlen;
524
525 wlc = ampdu->wlc;
526 p = *pdu;
527
528 ASSERT(p);
529
530 tid = (u8) (p->priority);
531 ASSERT(tid < AMPDU_MAX_SCB_TID);
532
533 f = ampdu->fifo_tb + prio2fifo[tid];
534
535 scb = wlc->pub->global_scb;
536 ASSERT(scb->magic == SCB_MAGIC);
537
538 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
539 ASSERT(scb_ampdu);
540 ini = &scb_ampdu->ini[tid];
541
542 /* Let pressure continue to build ... */
543 qlen = pktq_plen(&qi->q, prec);
544 if (ini->tx_in_transit > 0 && qlen < scb_ampdu->max_pdu) {
545 return BCME_BUSY;
546 }
547
548 wlc_ampdu_agg(ampdu, scb, p, tid);
549
550 if (wlc->block_datafifo) {
551 WL_ERROR("%s: Fifo blocked\n", __func__);
552 return BCME_BUSY;
553 }
554 rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
555 ampdu_len = 0;
556 dma_len = 0;
557 while (p) {
558 struct ieee80211_tx_rate *txrate;
559
560 tx_info = IEEE80211_SKB_CB(p);
561 txrate = tx_info->status.rates;
562
563 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
564 err = wlc_prep_pdu(wlc, p, &fifo);
565 } else {
566 WL_ERROR("%s: AMPDU flag is off!\n", __func__);
567 *pdu = NULL;
568 err = 0;
569 break;
570 }
571
572 if (err) {
573 if (err == BCME_BUSY) {
574 WL_ERROR("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n",
575 wlc->pub->unit, seq);
576 WLCNTINCR(ampdu->cnt->sduretry);
577 *pdu = p;
578 break;
579 }
580
581 /* error in the packet; reject it */
582 WL_AMPDU_ERR("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n",
583 wlc->pub->unit, seq);
584 WLCNTINCR(ampdu->cnt->sdurejected);
585
586 *pdu = NULL;
587 break;
588 }
589
590 /* pkt is good to be aggregated */
591 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
592 txh = (d11txh_t *) p->data;
593 plcp = (u8 *) (txh + 1);
594 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
595 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
596 index = TX_SEQ_TO_INDEX(seq);
597
598 /* check mcl fields and test whether it can be agg'd */
599 mcl = le16_to_cpu(txh->MacTxControlLow);
600 mcl &= ~TXC_AMPDU_MASK;
601 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3);
602 ASSERT(!fbr_iscck);
603 txh->PreloadSize = 0; /* always default to 0 */
604
605 /* Handle retry limits */
606 if (txrate[0].count <= rr_retry_limit) {
607 txrate[0].count++;
608 rr = true;
609 fbr = false;
610 ASSERT(!fbr);
611 } else {
612 fbr = true;
613 rr = false;
614 txrate[1].count++;
615 }
616
617 /* extract the length info */
618 len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
619 : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
620
621 /* retrieve null delimiter count */
622 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
623 seg_cnt += 1;
624
625 WL_AMPDU_TX("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
626 wlc->pub->unit, count, len);
627
		/*
		 * aggregatable mpdu. For ucode/hw agg,
		 * test whether we need to break or change the epoch
		 */
632 if (count == 0) {
633 mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
634 /* refill the bits since might be a retx mpdu */
635 mcl |= TXC_STARTMSDU;
636 rts = (struct ieee80211_rts *)&txh->rts_frame;
637
638 if (ieee80211_is_rts(rts->frame_control)) {
639 mcl |= TXC_SENDRTS;
640 use_rts = true;
641 }
642 if (ieee80211_is_cts(rts->frame_control)) {
643 mcl |= TXC_SENDCTS;
644 use_cts = true;
645 }
646 } else {
647 mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
648 mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
649 }
650
651 len = roundup(len, 4);
652 ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
653
654 dma_len += (u16) pkttotlen(p);
655
656 WL_AMPDU_TX("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n",
657 wlc->pub->unit, ampdu_len, seg_cnt, ndelim);
658
659 txh->MacTxControlLow = cpu_to_le16(mcl);
660
661 /* this packet is added */
662 pkt[count++] = p;
663
664 /* patch the first MPDU */
665 if (count == 1) {
666 u8 plcp0, plcp3, is40, sgi;
667 struct ieee80211_sta *sta;
668
669 sta = tx_info->control.sta;
670
671 if (rr) {
672 plcp0 = plcp[0];
673 plcp3 = plcp[3];
674 } else {
675 plcp0 = txh->FragPLCPFallback[0];
676 plcp3 = txh->FragPLCPFallback[3];
677
678 }
679 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
680 sgi = PLCP3_ISSGI(plcp3) ? 1 : 0;
681 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
682 ASSERT(mcs < MCS_TABLE_SIZE);
683 maxlen =
684 min(scb_ampdu->max_rxlen,
685 ampdu->max_txlen[mcs][is40][sgi]);
686
687 WL_NONE("sendampdu: sgi %d, is40 %d, mcs %d\n",
688 sgi, is40, mcs);
689
690 maxlen = 64 * 1024; /* XXX Fix me to honor real max_rxlen */
691
692 if (is40)
693 mimo_ctlchbw =
694 CHSPEC_SB_UPPER(WLC_BAND_PI_RADIO_CHANSPEC)
695 ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
696
697 /* rebuild the rspec and rspec_fallback */
698 rspec = RSPEC_MIMORATE;
699 rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
700 if (plcp[0] & MIMO_PLCP_40MHZ)
701 rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
702
703 if (fbr_iscck) /* CCK */
704 rspec_fallback =
705 CCK_RSPEC(CCK_PHY2MAC_RATE
706 (txh->FragPLCPFallback[0]));
707 else { /* MIMO */
708 rspec_fallback = RSPEC_MIMORATE;
709 rspec_fallback |=
710 txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
711 if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
712 rspec_fallback |=
713 (PHY_TXC1_BW_40MHZ <<
714 RSPEC_BW_SHIFT);
715 }
716
717 if (use_rts || use_cts) {
718 rts_rspec =
719 wlc_rspec_to_rts_rspec(wlc, rspec, false,
720 mimo_ctlchbw);
721 rts_rspec_fallback =
722 wlc_rspec_to_rts_rspec(wlc, rspec_fallback,
723 false, mimo_ctlchbw);
724 }
725 }
726
727 /* if (first mpdu for host agg) */
728 /* test whether to add more */
729 if ((MCS_RATE(mcs, true, false) >= f->dmaxferrate) &&
730 (count == f->mcs2ampdu_table[mcs])) {
731 WL_AMPDU_ERR("wl%d: PR 37644: stopping ampdu at %d for mcs %d\n",
732 wlc->pub->unit, count, mcs);
733 break;
734 }
735
736 if (count == scb_ampdu->max_pdu) {
737 WL_NONE("Stop taking from q, reached %d deep\n",
738 scb_ampdu->max_pdu);
739 break;
740 }
741
742 /* check to see if the next pkt is a candidate for aggregation */
		p = pktq_ppeek(&qi->q, prec);

		if (p) {
			/* tx_info must be checked with current p */
			tx_info = IEEE80211_SKB_CB(p);
			if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
748 ((u8) (p->priority) == tid)) {
749
750 plen =
751 pkttotlen(p) + AMPDU_MAX_MPDU_OVERHEAD;
752 plen = max(scb_ampdu->min_len, plen);
753
754 if ((plen + ampdu_len) > maxlen) {
755 p = NULL;
756 WL_ERROR("%s: Bogus plen #1\n",
757 __func__);
758 ASSERT(3 == 4);
759 continue;
760 }
761
762 /* check if there are enough descriptors available */
763 if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
764 WL_ERROR("%s: No fifo space !!!!!!\n",
765 __func__);
766 p = NULL;
767 continue;
768 }
769 p = pktq_pdeq(&qi->q, prec);
770 ASSERT(p);
771 } else {
772 p = NULL;
773 }
774 }
775 } /* end while(p) */
776
777 ini->tx_in_transit += count;
778
779 if (count) {
780 WLCNTADD(ampdu->cnt->txmpdu, count);
781
782 /* patch up the last txh */
783 txh = (d11txh_t *) pkt[count - 1]->data;
784 mcl = le16_to_cpu(txh->MacTxControlLow);
785 mcl &= ~TXC_AMPDU_MASK;
786 mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
787 txh->MacTxControlLow = cpu_to_le16(mcl);
788
789 /* remove the null delimiter after last mpdu */
790 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
791 txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
792 ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
793
794 /* remove the pad len from last mpdu */
795 fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
796 len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
797 : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
798 ampdu_len -= roundup(len, 4) - len;
799
800 /* patch up the first txh & plcp */
801 txh = (d11txh_t *) pkt[0]->data;
802 plcp = (u8 *) (txh + 1);
803
804 WLC_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
805 /* mark plcp to indicate ampdu */
806 WLC_SET_MIMO_PLCP_AMPDU(plcp);
807
808 /* reset the mixed mode header durations */
809 if (txh->MModeLen) {
810 u16 mmodelen =
811 wlc_calc_lsig_len(wlc, rspec, ampdu_len);
812 txh->MModeLen = cpu_to_le16(mmodelen);
813 preamble_type = WLC_MM_PREAMBLE;
814 }
815 if (txh->MModeFbrLen) {
816 u16 mmfbrlen =
817 wlc_calc_lsig_len(wlc, rspec_fallback, ampdu_len);
818 txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
819 fbr_preamble_type = WLC_MM_PREAMBLE;
820 }
821
822 /* set the preload length */
823 if (MCS_RATE(mcs, true, false) >= f->dmaxferrate) {
824 dma_len = min(dma_len, f->ampdu_pld_size);
825 txh->PreloadSize = cpu_to_le16(dma_len);
826 } else
827 txh->PreloadSize = 0;
828
829 mch = le16_to_cpu(txh->MacTxControlHigh);
830
831 /* update RTS dur fields */
832 if (use_rts || use_cts) {
833 u16 durid;
834 rts = (struct ieee80211_rts *)&txh->rts_frame;
835 if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
836 TXC_PREAMBLE_RTS_MAIN_SHORT)
837 rts_preamble_type = WLC_SHORT_PREAMBLE;
838
839 if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
840 TXC_PREAMBLE_RTS_FB_SHORT)
841 rts_fbr_preamble_type = WLC_SHORT_PREAMBLE;
842
843 durid =
844 wlc_compute_rtscts_dur(wlc, use_cts, rts_rspec,
845 rspec, rts_preamble_type,
846 preamble_type, ampdu_len,
847 true);
848 rts->duration = cpu_to_le16(durid);
849 durid = wlc_compute_rtscts_dur(wlc, use_cts,
850 rts_rspec_fallback,
851 rspec_fallback,
852 rts_fbr_preamble_type,
853 fbr_preamble_type,
854 ampdu_len, true);
855 txh->RTSDurFallback = cpu_to_le16(durid);
856 /* set TxFesTimeNormal */
857 txh->TxFesTimeNormal = rts->duration;
858 /* set fallback rate version of TxFesTimeNormal */
859 txh->TxFesTimeFallback = txh->RTSDurFallback;
860 }
861
862 /* set flag and plcp for fallback rate */
863 if (fbr) {
864 WLCNTADD(ampdu->cnt->txfbr_mpdu, count);
865 WLCNTINCR(ampdu->cnt->txfbr_ampdu);
866 mch |= TXC_AMPDU_FBR;
867 txh->MacTxControlHigh = cpu_to_le16(mch);
868 WLC_SET_MIMO_PLCP_AMPDU(plcp);
869 WLC_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
870 }
871
872 WL_AMPDU_TX("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
873 wlc->pub->unit, count, ampdu_len);
874
		/* inform rate_sel if this is a rate probe pkt */
876 frameid = le16_to_cpu(txh->TxFrameID);
877 if (frameid & TXFID_RATE_PROBE_MASK) {
878 WL_ERROR("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n",
879 __func__);
880 }
881 for (i = 0; i < count; i++)
882 wlc_txfifo(wlc, fifo, pkt[i], i == (count - 1),
883 ampdu->txpkt_weight);
884
885 }
886 /* endif (count) */
887 return err;
888 }
889
890 void BCMFASTPATH
891 wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
892 struct sk_buff *p, tx_status_t *txs)
893 {
894 scb_ampdu_t *scb_ampdu;
895 struct wlc_info *wlc = ampdu->wlc;
896 scb_ampdu_tid_ini_t *ini;
897 u32 s1 = 0, s2 = 0;
898 struct ieee80211_tx_info *tx_info;
899
900 tx_info = IEEE80211_SKB_CB(p);
901 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
902 ASSERT(txs->status & TX_STATUS_AMPDU);
903
	/* BMAC_NOTE: For the split driver, the second level txstatus comes
	 * later. So if the ACK was received, wait for the second level;
	 * otherwise just call the first one.
	 */
908 if (txs->status & TX_STATUS_ACK_RCV) {
909 u8 status_delay = 0;
910
		/* wait till the next 8 bytes of txstatus are available */
912 while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
913 udelay(1);
914 status_delay++;
915 if (status_delay > 10) {
916 ASSERT(status_delay <= 10);
917 return;
918 }
919 }
920
921 ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
922 ASSERT(s1 & TX_STATUS_AMPDU);
923 s2 = R_REG(&wlc->regs->frmtxstatus2);
924 }
925
926 if (likely(scb)) {
927 ASSERT(scb->magic == SCB_MAGIC);
928 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
929 ASSERT(scb_ampdu);
930 ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
931 ASSERT(ini->scb == scb);
932 wlc_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
933 } else {
934 /* loop through all pkts and free */
935 u8 queue = txs->frameid & TXFID_QUEUE_MASK;
936 d11txh_t *txh;
937 u16 mcl;
938 while (p) {
939 tx_info = IEEE80211_SKB_CB(p);
940 txh = (d11txh_t *) p->data;
941 mcl = le16_to_cpu(txh->MacTxControlLow);
942 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
943 pkt_buf_free_skb(p);
944 /* break out if last packet of ampdu */
945 if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
946 TXC_AMPDU_LAST)
947 break;
948 p = GETNEXTTXP(wlc, queue);
949 ASSERT(p != NULL);
950 }
951 wlc_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
952 }
953 wlc_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
954 }
955
956 void
957 rate_status(struct wlc_info *wlc, struct ieee80211_tx_info *tx_info,
958 tx_status_t *txs, u8 mcs)
959 {
960 struct ieee80211_tx_rate *txrate = tx_info->status.rates;
961 int i;
962
963 /* clear the rest of the rates */
964 for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
965 txrate[i].idx = -1;
966 txrate[i].count = 0;
967 }
968 }
969
970 #define SHORTNAME "AMPDU status"
971
972 static void BCMFASTPATH
973 wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
974 struct sk_buff *p, tx_status_t *txs,
975 u32 s1, u32 s2)
976 {
977 scb_ampdu_t *scb_ampdu;
978 struct wlc_info *wlc = ampdu->wlc;
979 scb_ampdu_tid_ini_t *ini;
980 u8 bitmap[8], queue, tid;
981 d11txh_t *txh;
982 u8 *plcp;
983 struct ieee80211_hdr *h;
984 u16 seq, start_seq = 0, bindex, index, mcl;
985 u8 mcs = 0;
986 bool ba_recd = false, ack_recd = false;
987 u8 suc_mpdu = 0, tot_mpdu = 0;
988 uint supr_status;
989 bool update_rate = true, retry = true, tx_error = false;
990 u16 mimoantsel = 0;
991 u8 antselid = 0;
992 u8 retry_limit, rr_retry_limit;
993 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
994
995 #ifdef BCMDBG
996 u8 hole[AMPDU_MAX_MPDU];
997 memset(hole, 0, sizeof(hole));
998 #endif
999
1000 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
1001 ASSERT(txs->status & TX_STATUS_AMPDU);
1002
1003 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
1004 ASSERT(scb_ampdu);
1005
1006 tid = (u8) (p->priority);
1007
1008 ini = SCB_AMPDU_INI(scb_ampdu, tid);
1009 retry_limit = ampdu->retry_limit_tid[tid];
1010 rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
1011
1012 ASSERT(ini->scb == scb);
1013
1014 memset(bitmap, 0, sizeof(bitmap));
1015 queue = txs->frameid & TXFID_QUEUE_MASK;
1016 ASSERT(queue < AC_COUNT);
1017
1018 supr_status = txs->status & TX_STATUS_SUPR_MASK;
1019
1020 if (txs->status & TX_STATUS_ACK_RCV) {
1021 if (TX_STATUS_SUPR_UF == supr_status) {
1022 update_rate = false;
1023 }
1024
1025 ASSERT(txs->status & TX_STATUS_INTERMEDIATE);
1026 start_seq = txs->sequence >> SEQNUM_SHIFT;
1027 bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
1028 TX_STATUS_BA_BMAP03_SHIFT;
1029
1030 ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
1031 ASSERT(s1 & TX_STATUS_AMPDU);
1032
1033 bitmap[0] |=
1034 (s1 & TX_STATUS_BA_BMAP47_MASK) <<
1035 TX_STATUS_BA_BMAP47_SHIFT;
1036 bitmap[1] = (s1 >> 8) & 0xff;
1037 bitmap[2] = (s1 >> 16) & 0xff;
1038 bitmap[3] = (s1 >> 24) & 0xff;
1039
1040 bitmap[4] = s2 & 0xff;
1041 bitmap[5] = (s2 >> 8) & 0xff;
1042 bitmap[6] = (s2 >> 16) & 0xff;
1043 bitmap[7] = (s2 >> 24) & 0xff;
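		/*
		 * The 64-bit block ack bitmap is assembled from the status
		 * words: bits 0-3 come from the first (intermediate) txstatus,
		 * bits 4-31 from s1 and bits 32-63 from s2; isset(bitmap,
		 * bindex) below then tells whether the mpdu at offset bindex
		 * from start_seq was acked.
		 */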
1044
1045 ba_recd = true;
1046 } else {
1047 WLCNTINCR(ampdu->cnt->noba);
1048 if (supr_status) {
1049 update_rate = false;
1050 if (supr_status == TX_STATUS_SUPR_BADCH) {
1051 WL_ERROR("%s: Pkt tx suppressed, illegal channel possibly %d\n",
1052 __func__,
1053 CHSPEC_CHANNEL(wlc->default_bss->chanspec));
1054 } else {
1055 if (supr_status == TX_STATUS_SUPR_FRAG)
1056 WL_NONE("%s: AMPDU frag err\n",
1057 __func__);
1058 else
1059 WL_ERROR("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n",
1060 __func__, supr_status);
1061 }
1062 /* no need to retry for badch; will fail again */
1063 if (supr_status == TX_STATUS_SUPR_BADCH ||
1064 supr_status == TX_STATUS_SUPR_EXPTIME) {
1065 retry = false;
1066 wlc->pub->_cnt->txchanrej++;
1067 } else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
1068
1069 wlc->pub->_cnt->txexptime++;
1070
1071 /* TX underflow : try tuning pre-loading or ampdu size */
1072 } else if (supr_status == TX_STATUS_SUPR_FRAG) {
1073 /* if there were underflows, but pre-loading is not active,
1074 notify rate adaptation.
1075 */
1076 if (wlc_ffpld_check_txfunfl(wlc, prio2fifo[tid])
1077 > 0) {
1078 tx_error = true;
1079 }
1080 }
1081 } else if (txs->phyerr) {
1082 update_rate = false;
1083 wlc->pub->_cnt->txphyerr++;
1084 WL_ERROR("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n",
1085 wlc->pub->unit, txs->phyerr);
1086
1087 if (WL_ERROR_ON()) {
1088 prpkt("txpkt (AMPDU)", p);
1089 wlc_print_txdesc((d11txh_t *) p->data);
1090 }
1091 wlc_print_txstatus(txs);
1092 }
1093 }
1094
1095 /* loop through all pkts and retry if not acked */
1096 while (p) {
1097 tx_info = IEEE80211_SKB_CB(p);
1098 ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
1099 txh = (d11txh_t *) p->data;
1100 mcl = le16_to_cpu(txh->MacTxControlLow);
1101 plcp = (u8 *) (txh + 1);
1102 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
1103 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
1104
1105 if (tot_mpdu == 0) {
1106 mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
1107 mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
1108 }
1109
1110 index = TX_SEQ_TO_INDEX(seq);
1111 ack_recd = false;
1112 if (ba_recd) {
1113 bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
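			/* e.g. (illustrative): start_seq 100, seq 103 ==> bindex 3 */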
1114
1115 WL_AMPDU_TX("%s: tid %d seq is %d, start_seq is %d, bindex is %d set %d, index %d\n",
1116 __func__, tid, seq, start_seq, bindex,
1117 isset(bitmap, bindex), index);
1118
1119 /* if acked then clear bit and free packet */
1120 if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
1121 && isset(bitmap, bindex)) {
1122 ini->tx_in_transit--;
1123 ini->txretry[index] = 0;
1124
1125 /* ampdu_ack_len: number of acked aggregated frames */
1126 /* ampdu_ack_map: block ack bit map for the aggregation */
1127 /* ampdu_len: number of aggregated frames */
1128 rate_status(wlc, tx_info, txs, mcs);
1129 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1130 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1131
1132 /* XXX TODO: Make these accurate. */
1133 tx_info->status.ampdu_ack_len =
1134 (txs->
1135 status & TX_STATUS_FRM_RTX_MASK) >>
1136 TX_STATUS_FRM_RTX_SHIFT;
1137 tx_info->status.ampdu_len =
1138 (txs->
1139 status & TX_STATUS_FRM_RTX_MASK) >>
1140 TX_STATUS_FRM_RTX_SHIFT;
1141
1142 skb_pull(p, D11_PHY_HDR_LEN);
1143 skb_pull(p, D11_TXH_LEN);
1144
1145 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
1146 p);
1147 ack_recd = true;
1148 suc_mpdu++;
1149 }
1150 }
1151 /* either retransmit or send bar if ack not recd */
1152 if (!ack_recd) {
1153 struct ieee80211_tx_rate *txrate =
1154 tx_info->status.rates;
1155 if (retry && (txrate[0].count < (int)retry_limit)) {
1156 ini->txretry[index]++;
1157 ini->tx_in_transit--;
				/* Use high precedence for retransmit to give some punch */
1159 /* wlc_txq_enq(wlc, scb, p, WLC_PRIO_TO_PREC(tid)); */
1160 wlc_txq_enq(wlc, scb, p,
1161 WLC_PRIO_TO_HI_PREC(tid));
1162 } else {
1163 /* Retry timeout */
1164 ini->tx_in_transit--;
1165 ieee80211_tx_info_clear_status(tx_info);
1166 tx_info->flags |=
1167 IEEE80211_TX_STAT_AMPDU_NO_BACK;
1168 skb_pull(p, D11_PHY_HDR_LEN);
1169 skb_pull(p, D11_TXH_LEN);
1170 WL_ERROR("%s: BA Timeout, seq %d, in_transit %d\n",
1171 SHORTNAME, seq, ini->tx_in_transit);
1172 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
1173 p);
1174 }
1175 }
1176 tot_mpdu++;
1177
1178 /* break out if last packet of ampdu */
1179 if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
1180 TXC_AMPDU_LAST)
1181 break;
1182
1183 p = GETNEXTTXP(wlc, queue);
1184 if (p == NULL) {
1185 ASSERT(p);
1186 break;
1187 }
1188 }
1189 wlc_send_q(wlc, wlc->active_queue);
1190
1191 /* update rate state */
1192 antselid = wlc_antsel_antsel2id(wlc->asi, mimoantsel);
1193
1194 wlc_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
1195 }
1196
1197 static void
1198 ampdu_cleanup_tid_ini(struct ampdu_info *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
1199 bool force)
1200 {
1201 scb_ampdu_tid_ini_t *ini;
1202 ini = SCB_AMPDU_INI(scb_ampdu, tid);
1203 if (!ini)
1204 return;
1205
1206 WL_AMPDU_CTL("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
1207 ampdu->wlc->pub->unit, tid);
1208
1209 if (ini->tx_in_transit && !force)
1210 return;
1211
1212 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, ini->scb);
1213 ASSERT(ini == &scb_ampdu->ini[ini->tid]);
1214
1215 /* free all buffered tx packets */
1216 pktq_pflush(&scb_ampdu->txq, ini->tid, true, NULL, 0);
1217 }
1218
1219 /* initialize the initiator code for tid */
1220 static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
1221 scb_ampdu_t *scb_ampdu,
1222 u8 tid, bool override)
1223 {
1224 scb_ampdu_tid_ini_t *ini;
1225
1226 ASSERT(scb_ampdu);
1227 ASSERT(scb_ampdu->scb);
1228 ASSERT(SCB_AMPDU(scb_ampdu->scb));
1229 ASSERT(tid < AMPDU_MAX_SCB_TID);
1230
1231 /* check for per-tid control of ampdu */
1232 if (!ampdu->ini_enable[tid]) {
1233 WL_ERROR("%s: Rejecting tid %d\n", __func__, tid);
1234 return NULL;
1235 }
1236
1237 ini = SCB_AMPDU_INI(scb_ampdu, tid);
1238 ini->tid = tid;
1239 ini->scb = scb_ampdu->scb;
1240 ini->magic = INI_MAGIC;
1241 WLCNTINCR(ampdu->cnt->txaddbareq);
1242
1243 return ini;
1244 }
1245
1246 static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
1247 {
1248 struct wlc_info *wlc = ampdu->wlc;
1249
1250 wlc->pub->_ampdu = false;
1251
1252 if (on) {
1253 if (!N_ENAB(wlc->pub)) {
1254 WL_AMPDU_ERR("wl%d: driver not nmode enabled\n",
1255 wlc->pub->unit);
1256 return BCME_UNSUPPORTED;
1257 }
1258 if (!wlc_ampdu_cap(ampdu)) {
1259 WL_AMPDU_ERR("wl%d: device not ampdu capable\n",
1260 wlc->pub->unit);
1261 return BCME_UNSUPPORTED;
1262 }
1263 wlc->pub->_ampdu = on;
1264 }
1265
1266 return 0;
1267 }
1268
1269 static bool wlc_ampdu_cap(struct ampdu_info *ampdu)
1270 {
1271 if (WLC_PHY_11N_CAP(ampdu->wlc->band))
1272 return true;
1273 else
1274 return false;
1275 }
1276
1277 static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
1278 {
1279 u32 rate, mcs;
1280
1281 for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
1282 /* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
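		/* e.g. MCS 7 at 20 MHz, long GI is 65000 kbps; with the default
		 * dur of 5 ms this gives (65000 * 5) >> 3 = 40625 bytes.
		 */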
1283 /* 20MHz, No SGI */
1284 rate = MCS_RATE(mcs, false, false);
1285 ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
1286 /* 40 MHz, No SGI */
1287 rate = MCS_RATE(mcs, true, false);
1288 ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
1289 /* 20MHz, SGI */
1290 rate = MCS_RATE(mcs, false, true);
1291 ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
1292 /* 40 MHz, SGI */
1293 rate = MCS_RATE(mcs, true, true);
1294 ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
1295 }
1296 }
1297
1298 u8 BCMFASTPATH
1299 wlc_ampdu_null_delim_cnt(struct ampdu_info *ampdu, struct scb *scb,
1300 ratespec_t rspec, int phylen)
1301 {
1302 scb_ampdu_t *scb_ampdu;
1303 int bytes, cnt, tmp;
1304 u8 tx_density;
1305
1306 ASSERT(scb);
1307 ASSERT(SCB_AMPDU(scb));
1308
1309 scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
1310 ASSERT(scb_ampdu);
1311
1312 if (scb_ampdu->mpdu_density == 0)
1313 return 0;
1314
	/* RSPEC2RATE is in kbps units ==> ~RSPEC2RATE/2^13 is in bytes/usec
	 * density x is in 2^(x-4) usec
	 * ==> # of bytes needed for req density = rate/2^(17-x)
	 * ==> # of null delimiters = ceil((ceil(rate/2^(17-x)) - phylen) / 4)
	 */
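	/*
	 * Illustrative only (assumed numbers): rate 270000 kbps and density
	 * x = 7 ==> tmp = 1 << (17 - 7) = 1024, bytes = CEIL(270000, 1024) =
	 * 264; with phylen 100 this yields CEIL(264 - 100, 4) = 41 null
	 * delimiters.
	 */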
1320
1321 tx_density = scb_ampdu->mpdu_density;
1322
1323 ASSERT(tx_density <= AMPDU_MAX_MPDU_DENSITY);
1324 tmp = 1 << (17 - tx_density);
1325 bytes = CEIL(RSPEC2RATE(rspec), tmp);
1326
1327 if (bytes > phylen) {
1328 cnt = CEIL(bytes - phylen, AMPDU_DELIMITER_LEN);
1329 ASSERT(cnt <= 255);
1330 return (u8) cnt;
1331 } else
1332 return 0;
1333 }
1334
1335 void wlc_ampdu_macaddr_upd(struct wlc_info *wlc)
1336 {
1337 char template[T_RAM_ACCESS_SZ * 2];
1338
1339 /* driver needs to write the ta in the template; ta is at offset 16 */
1340 memset(template, 0, sizeof(template));
1341 memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
1342 wlc_write_template_ram(wlc, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2),
1343 template);
1344 }
1345
1346 bool wlc_aggregatable(struct wlc_info *wlc, u8 tid)
1347 {
1348 return wlc->ampdu->ini_enable[tid];
1349 }
1350
1351 void wlc_ampdu_shm_upd(struct ampdu_info *ampdu)
1352 {
1353 struct wlc_info *wlc = ampdu->wlc;
1354
1355 /* Extend ucode internal watchdog timer to match larger received frames */
1356 if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
1357 IEEE80211_HT_MAX_AMPDU_64K) {
1358 wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
1359 wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
1360 } else {
1361 wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
1362 wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
1363 }
1364 }
1365
1366 struct cb_del_ampdu_pars {
1367 struct ieee80211_sta *sta;
1368 u16 tid;
1369 };
1370
1371 /*
 * callback function that helps flush ampdu packets from a priority queue
1373 */
1374 static bool cb_del_ampdu_pkt(void *p, int arg_a)
1375 {
1376 struct sk_buff *mpdu = (struct sk_buff *)p;
1377 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
1378 struct cb_del_ampdu_pars *ampdu_pars =
1379 (struct cb_del_ampdu_pars *)arg_a;
1380 bool rc;
1381
1382 rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
1383 rc = rc && (tx_info->control.sta == NULL || ampdu_pars->sta == NULL ||
1384 tx_info->control.sta == ampdu_pars->sta);
1385 rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
1386 return rc;
1387 }
1388
1389 /*
 * callback function that helps invalidate ampdu packets in a DMA queue
1391 */
1392 static void dma_cb_fn_ampdu(void *txi, void *arg_a)
1393 {
1394 struct ieee80211_sta *sta = arg_a;
1395 struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;
1396
1397 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
1398 (tx_info->control.sta == sta || sta == NULL))
1399 tx_info->control.sta = NULL;
1400 }
1401
1402 /*
1403 * When a remote party is no longer available for ampdu communication, any
1404 * pending tx ampdu packets in the driver have to be flushed.
1405 */
1406 void wlc_ampdu_flush(struct wlc_info *wlc,
1407 struct ieee80211_sta *sta, u16 tid)
1408 {
1409 struct wlc_txq_info *qi = wlc->active_queue;
1410 struct pktq *pq = &qi->q;
1411 int prec;
1412 struct cb_del_ampdu_pars ampdu_pars;
1413
1414 ampdu_pars.sta = sta;
1415 ampdu_pars.tid = tid;
1416 for (prec = 0; prec < pq->num_prec; prec++) {
1417 pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
1418 (int)&ampdu_pars);
1419 }
1420 wlc_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
1421 }