/*
        Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
        Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
        Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
        Module: rt2x00lib
        Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
        struct data_queue *queue = entry->queue;
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        struct sk_buff *skb;
        struct skb_frame_desc *skbdesc;
        unsigned int frame_size;
        unsigned int head_size = 0;
        unsigned int tail_size = 0;

        /*
         * The frame size includes the descriptor size, because the
         * hardware directly receives the frame into the skbuffer.
         */
        frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

        /*
         * The payload should be aligned to a 4-byte boundary,
         * which means we need at least 3 bytes for moving the frame
         * into the correct offset.
         */
        head_size = 4;

        /*
         * For IV/EIV/ICV assembly we must make sure there are
         * at least 8 bytes available in the headroom for IV/EIV
         * and 8 bytes for ICV data as tailroom.
         */
        if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
                head_size += 8;
                tail_size += 8;
        }
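
        /*
         * The resulting buffer, built from the sizes computed above,
         * is laid out as:
         *
         *   | head_size: 4 (+8 with HW crypto) | frame_size | tail_size: 0 or 8 |
         *     alignment + IV/EIV headroom        data + desc   ICV tailroom
         */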

        /*
         * Allocate skbuffer.
         */
        skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
        if (!skb)
                return NULL;

        /*
         * Reserve the headroom and set the frame length, so that the
         * requested number of bytes is available in the head and tail.
         */
        skb_reserve(skb, head_size);
        skb_put(skb, frame_size);

        /*
         * Populate skbdesc.
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = entry;

        if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
                dma_addr_t skb_dma;

                skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
                                         DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
                        dev_kfree_skb_any(skb);
                        return NULL;
                }

                skbdesc->skb_dma = skb_dma;
                skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
        }

        return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        skbdesc->skb_dma =
            dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
                return -ENOMEM;

        skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
        struct device *dev = entry->queue->rt2x00dev->dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

        if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_FROM_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
        } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
                dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
                                 DMA_TO_DEVICE);
                skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
        if (!entry->skb)
                return;

        rt2x00queue_unmap_skb(entry);
        dev_kfree_skb_any(entry->skb);
        entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
        unsigned int frame_length = skb->len;
        unsigned int align = ALIGN_SIZE(skb, 0);

        if (!align)
                return;

        skb_push(skb, align);
        memmove(skb->data, skb->data + align, frame_length);
        skb_trim(skb, frame_length);
}

/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
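/*
 * A worked example, assuming the L2PAD_SIZE() definition from rt2x00.h
 * (-(__hdrlen) & 3): a 26 byte QoS data header gives L2PAD_SIZE(26) = 2,
 * so the header is moved 2 bytes towards the head of the skb, leaving a
 * 2 byte pad between header and payload which keeps the payload on a
 * 4-byte boundary.
 */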
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
        unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

        if (!l2pad)
                return;

        skb_push(skb, l2pad);
        memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
        unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

        if (!l2pad)
                return;

        memmove(skb->data + l2pad, skb->data, hdr_len);
        skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
                                                 struct sk_buff *skb,
                                                 struct txentry_desc *txdesc)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
        u16 seqno;

        if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                return;

        __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
                /*
                 * rt2800 has a H/W (or F/W) bug: the device incorrectly
                 * increases the seqno on retransmitted data (non-QoS)
                 * frames. To work around the problem, generate the seqno
                 * in software when QoS is disabled.
                 */
                if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
                        __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                else
                        /* H/W will generate sequence number */
                        return;
        }

        /*
         * The hardware is not able to insert a sequence number. Assign a
         * software generated one here.
         *
         * This is wrong because beacons are not getting sequence
         * numbers assigned properly.
         *
         * A secondary problem exists for drivers that cannot toggle
         * sequence counting per-frame, since those will override the
         * sequence counter given by mac80211.
         */
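        /*
         * The 12-bit sequence number occupies bits 4-15 of the seq_ctrl
         * field (the low 4 bits hold the fragment number), so stepping
         * the counter by one means adding 0x10.
         */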
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                seqno = atomic_add_return(0x10, &intf->seqno);
        else
                seqno = atomic_read(&intf->seqno);

        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
                                                  struct sk_buff *skb,
                                                  struct txentry_desc *txdesc,
                                                  const struct rt2x00_rate *hwrate)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        unsigned int data_length;
        unsigned int duration;
        unsigned int residual;

        /*
         * Determine with what IFS priority this frame should be sent.
         * Set ifs to IFS_SIFS when this is not the first fragment,
         * or when this fragment came after RTS/CTS.
         */
        if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
                txdesc->u.plcp.ifs = IFS_BACKOFF;
        else
                txdesc->u.plcp.ifs = IFS_SIFS;

        /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
        data_length = skb->len + 4;
        data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

        /*
         * PLCP setup
         * Length calculation depends on OFDM/CCK rate.
         */
        txdesc->u.plcp.signal = hwrate->plcp;
        txdesc->u.plcp.service = 0x04;

        if (hwrate->flags & DEV_RATE_OFDM) {
                txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
                txdesc->u.plcp.length_low = data_length & 0x3f;
        } else {
                /*
                 * Convert length to microseconds.
                 */
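                /*
                 * A worked example, assuming the rt2x00.h helpers
                 * GET_DURATION(size, rate) = (size * 8 * 10) / rate and
                 * GET_DURATION_RES() as the matching remainder, with the
                 * bitrate in 100kbps units: 1500 bytes at 11 Mbps
                 * (bitrate == 110) gives 120000 / 110 = 1090 us with a
                 * residual of 100, so the duration is rounded up to
                 * 1091 us and, since the residual is above 30, no Length
                 * Extension is needed.
                 */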
                residual = GET_DURATION_RES(data_length, hwrate->bitrate);
                duration = GET_DURATION(data_length, hwrate->bitrate);

                if (residual != 0) {
                        duration++;

                        /*
                         * Check if we need to set the Length Extension.
                         */
                        if (hwrate->bitrate == 110 && residual <= 30)
                                txdesc->u.plcp.service |= 0x80;
                }

                txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
                txdesc->u.plcp.length_low = duration & 0xff;

                /*
                 * When short preamble is enabled we should set the
                 * preamble bit for the signal.
                 */
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->u.plcp.signal |= 0x08;
        }
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                                                struct sk_buff *skb,
                                                struct txentry_desc *txdesc,
                                                struct ieee80211_sta *sta,
                                                const struct rt2x00_rate *hwrate)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;
        u8 density = 0;

        if (sta) {
                sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
                density = sta->ht_cap.ampdu_density;
        }

        /*
         * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains
         * the MCS rate to be used.
         */
        if (txrate->flags & IEEE80211_TX_RC_MCS) {
                txdesc->u.ht.mcs = txrate->idx;

                /*
                 * MIMO PS should be set to 1 for STAs using dynamic SM PS
                 * when using more than one tx stream (>MCS7).
                 */
                if (sta && txdesc->u.ht.mcs > 7 &&
                    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
                        __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
        } else {
                txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
                if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        txdesc->u.ht.mcs |= 0x08;
        }

        if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
                if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
                        txdesc->u.ht.txop = TXOP_SIFS;
                else
                        txdesc->u.ht.txop = TXOP_BACKOFF;

                /* Leave all other settings zeroed. */
                return;
        }

        /*
         * Only one STBC stream is supported for now.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_STBC)
                txdesc->u.ht.stbc = 1;

        /*
         * This frame is eligible for an AMPDU, however, don't aggregate
         * frames that are intended to probe a specific tx rate.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
            !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
                txdesc->u.ht.mpdu_density = density;
                txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
        }

        /*
         * Set 40 MHz mode if necessary (for legacy rates this will
         * duplicate the frame to both channels).
         */
        if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
            txrate->flags & IEEE80211_TX_RC_DUP_DATA)
                __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
        if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
                __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

        /*
         * Determine IFS values
         * - Use TXOP_BACKOFF for management frames except beacons
         * - Use TXOP_SIFS for fragment bursts
         * - Use TXOP_HTTXOP for everything else
         *
         * Note: rt2800 devices won't use CTS protection (if used)
         * for frames not transmitted with TXOP_HTTXOP.
         */
        if (ieee80211_is_mgmt(hdr->frame_control) &&
            !ieee80211_is_beacon(hdr->frame_control))
                txdesc->u.ht.txop = TXOP_BACKOFF;
        else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
                txdesc->u.ht.txop = TXOP_SIFS;
        else
                txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
                                             struct sk_buff *skb,
                                             struct txentry_desc *txdesc,
                                             struct ieee80211_sta *sta)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_rate *rate;
        const struct rt2x00_rate *hwrate = NULL;

        memset(txdesc, 0, sizeof(*txdesc));

        /*
         * Header and frame information.
         */
        txdesc->length = skb->len;
        txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

        /*
         * Check whether this frame is to be acked.
         */
        if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
                __set_bit(ENTRY_TXD_ACK, &txdesc->flags);

        /*
         * Check if this is an RTS/CTS frame.
         */
        if (ieee80211_is_rts(hdr->frame_control) ||
            ieee80211_is_cts(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                if (ieee80211_is_rts(hdr->frame_control))
                        __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
                else
                        __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
                if (tx_info->control.rts_cts_rate_idx >= 0)
                        rate = ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
        }

        /*
         * Determine retry information.
         */
        txdesc->retry_limit = tx_info->control.rates[0].count - 1;
        if (txdesc->retry_limit >= rt2x00dev->long_retry)
                __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

        /*
         * Check if more fragments are pending.
         */
        if (ieee80211_has_morefrags(hdr->frame_control)) {
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
                __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
        }

        /*
         * Check if more frames (!= fragments) are pending.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
                __set_bit(ENTRY_TXD_BURST, &txdesc->flags);

        /*
         * Beacons and probe responses require the tsf timestamp
         * to be inserted into the frame.
         */
        if (ieee80211_is_beacon(hdr->frame_control) ||
            ieee80211_is_probe_resp(hdr->frame_control))
                __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

        if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
            !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
                __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

        /*
         * Determine rate modulation.
         */
        if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
                txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
        else if (txrate->flags & IEEE80211_TX_RC_MCS)
                txdesc->rate_mode = RATE_MODE_HT_MIX;
        else {
                rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
                hwrate = rt2x00_get_rate(rate->hw_value);
                if (hwrate->flags & DEV_RATE_OFDM)
                        txdesc->rate_mode = RATE_MODE_OFDM;
                else
                        txdesc->rate_mode = RATE_MODE_CCK;
        }

        /*
         * Apply TX descriptor handling by components.
         */
        rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
        rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

        if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
                rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
                                                    sta, hwrate);
        else
                rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
                                                      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
                                     struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

        /*
         * This should not happen, we already checked the entry
         * was ours. When the hardware disagrees there has been
         * a queue corruption!
         */
        if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
                     rt2x00dev->ops->lib->get_entry_state(entry))) {
                rt2x00_err(rt2x00dev,
                           "Corrupt queue %d, accessing entry which is not ours\n"
                           "Please file bug report to %s\n",
                           entry->queue->qid, DRV_PROJECT);
                return -EINVAL;
        }

        /*
         * Add the requested extra tx headroom in front of the skb.
         */
        skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
        memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

        /*
         * Call the driver's write_tx_data function, if it exists.
         */
        if (rt2x00dev->ops->lib->write_tx_data)
                rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

        /*
         * Map the skb to DMA.
         */
        if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
            rt2x00queue_map_txskb(entry))
                return -ENOMEM;

        return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
                                            struct txentry_desc *txdesc)
{
        struct data_queue *queue = entry->queue;

        queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

        /*
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
        rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
                                      struct txentry_desc *txdesc)
{
        /*
         * Check if we need to kick the queue. There are however a few rules:
         * 1) Don't kick unless this is the last frame in a burst.
         *    When the burst flag is set, this frame is always followed
         *    by another frame which is in some way related to it.
         *    This is true for fragments, RTS or CTS-to-self frames.
         * 2) Rule 1 can be broken when the number of available entries
         *    in the queue drops below a certain threshold.
         */
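        /*
         * Note: queue->threshold is initialized to roughly a tenth of
         * the queue size in rt2x00queue_init() below.
         */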
        if (rt2x00queue_threshold(queue) ||
            !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
                queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_bar *bar = (void *) (entry->skb->data +
                                    rt2x00dev->extra_tx_headroom);
        struct rt2x00_bar_list_entry *bar_entry;

        if (likely(!ieee80211_is_back_req(bar->frame_control)))
                return;

        bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

        /*
         * If the alloc fails we still send the BAR out, but just don't
         * track it in our bar list. As a result we will report it back
         * to mac80211 as failed.
         */
        if (!bar_entry)
                return;

        bar_entry->entry = entry;
        bar_entry->block_acked = 0;

        /*
         * Copy the relevant parts of the 802.11 BAR into our check list
         * such that we can use RCU for less overhead in the RX path, since
         * sending BARs and processing the corresponding BlockAck should be
         * the exception.
         */
        memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
        memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
        bar_entry->control = bar->control;
        bar_entry->start_seq_num = bar->start_seq_num;

        /*
         * Insert BAR into our BAR check list.
         */
        spin_lock_bh(&rt2x00dev->bar_list_lock);
        list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
        spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

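/*
 * Main TX path: build the TX descriptor, claim the driver part of
 * skb->cb, strip or copy the IV for hardware crypto, align the frame
 * (or insert L2 padding), then claim a queue entry, hand the frame to
 * the driver and kick the queue when needed.
 */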
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                               struct ieee80211_sta *sta, bool local)
{
        struct ieee80211_tx_info *tx_info;
        struct queue_entry *entry;
        struct txentry_desc txdesc;
        struct skb_frame_desc *skbdesc;
        u8 rate_idx, rate_flags;
        int ret = 0;

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

        /*
         * All information is retrieved from the skb->cb array,
         * now we should claim ownership of the driver part of that
         * array, preserving the bitrate index and flags.
         */
        tx_info = IEEE80211_SKB_CB(skb);
        rate_idx = tx_info->control.rates[0].idx;
        rate_flags = tx_info->control.rates[0].flags;
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->tx_rate_idx = rate_idx;
        skbdesc->tx_rate_flags = rate_flags;

        if (local)
                skbdesc->flags |= SKBDESC_NOT_MAC80211;

        /*
         * When hardware encryption is supported, and this frame
         * is to be encrypted, we should strip the IV/EIV data from
         * the frame so we can provide it to the driver separately.
         */
        if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
                if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
                        rt2x00crypto_tx_copy_iv(skb, &txdesc);
                else
                        rt2x00crypto_tx_remove_iv(skb, &txdesc);
        }

        /*
         * When DMA allocation is required we should guarantee to the
         * driver that the DMA is aligned to a 4-byte boundary.
         * However some drivers require L2 padding to pad the payload
         * rather than the header. This could be a requirement for
         * PCI and USB devices, while header alignment is only valid
         * for PCI devices.
         */
        if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
                rt2x00queue_insert_l2pad(skb, txdesc.header_length);
        else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
                rt2x00queue_align_frame(skb);

        /*
         * This function must be called with bh disabled.
         */
        spin_lock(&queue->tx_lock);

        if (unlikely(rt2x00queue_full(queue))) {
                rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
                           queue->qid);
                ret = -ENOBUFS;
                goto out;
        }

        entry = rt2x00queue_get_entry(queue, Q_INDEX);

        if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
                                      &entry->flags))) {
                rt2x00_err(queue->rt2x00dev,
                           "Arrived at non-free entry in the non-full queue %d\n"
                           "Please file bug report to %s\n",
                           queue->qid, DRV_PROJECT);
                ret = -EINVAL;
                goto out;
        }

        skbdesc->entry = entry;
        entry->skb = skb;

        /*
         * It could be possible that the queue was corrupted and this
         * call failed. Since we always return NETDEV_TX_OK to mac80211,
         * this frame will simply be dropped.
         */
        if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
                clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                entry->skb = NULL;
                ret = -EIO;
                goto out;
        }

        /*
         * Put BlockAckReqs into our check list for driver BA processing.
         */
        rt2x00queue_bar_check(entry);

        set_bit(ENTRY_DATA_PENDING, &entry->flags);

        rt2x00queue_index_inc(entry, Q_INDEX);
        rt2x00queue_write_tx_descriptor(entry, &txdesc);
        rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
        spin_unlock(&queue->tx_lock);
        return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
                             struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        /*
         * Clear beacon (single bssid devices don't need to clear the beacon
         * since the beacon queue will get stopped anyway).
         */
        if (rt2x00dev->ops->lib->clear_beacon)
                rt2x00dev->ops->lib->clear_beacon(intf->beacon);

        return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
                              struct ieee80211_vif *vif)
{
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
        struct txentry_desc txdesc;

        if (unlikely(!intf->beacon))
                return -ENOBUFS;

        /*
         * Clean up the beacon skb.
         */
        rt2x00queue_free_skb(intf->beacon);

        intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
        if (!intf->beacon->skb)
                return -ENOMEM;

        /*
         * Copy all TX descriptor information into txdesc,
         * after that we are free to use the skb->cb array
         * for our information.
         */
        rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

        /*
         * Fill in the skb descriptor.
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
        skbdesc->entry = intf->beacon;

        /*
         * Send beacon to hardware.
         */
        rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

        return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
                                void *data,
                                bool (*fn)(struct queue_entry *entry,
                                           void *data))
{
        unsigned long irqflags;
        unsigned int index_start;
        unsigned int index_end;
        unsigned int i;

        if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev,
                           "Entry requested from invalid index range (%d - %d)\n",
                           start, end);
                return true;
        }

        /*
         * Only protect the range we are going to loop over.
         * If during our loop an extra entry is set to pending,
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
        spin_lock_irqsave(&queue->index_lock, irqflags);
        index_start = queue->index[start];
        index_end = queue->index[end];
        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        /*
         * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
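        /*
         * The indices describe a ring buffer, so the range may wrap
         * around the end of the entries array; the two branches below
         * handle the straight and the wrapped case respectively.
         */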
        if (index_start < index_end) {
                for (i = index_start; i < index_end; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }
        } else {
                for (i = index_start; i < queue->limit; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }

                for (i = 0; i < index_end; i++) {
                        if (fn(&queue->entries[i], data))
                                return true;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
                                          enum queue_index index)
{
        struct queue_entry *entry;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
                           index);
                return NULL;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        entry = &queue->entries[queue->index[index]];

        spin_unlock_irqrestore(&queue->index_lock, irqflags);

        return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
        struct data_queue *queue = entry->queue;
        unsigned long irqflags;

        if (unlikely(index >= Q_INDEX_MAX)) {
                rt2x00_err(queue->rt2x00dev,
                           "Index change on invalid index type (%d)\n", index);
                return;
        }

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->index[index]++;
        if (queue->index[index] >= queue->limit)
                queue->index[index] = 0;

        entry->last_action = jiffies;

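        /*
         * Q_INDEX advances when a frame is queued, Q_INDEX_DONE when it
         * has been completed: length is the number of frames currently
         * in flight, count a running total of completed frames.
         */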
        if (index == Q_INDEX) {
                queue->length++;
        } else if (index == Q_INDEX_DONE) {
                queue->length--;
                queue->count++;
        }

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to disable the queue
                 * inside mac80211.
                 */
                ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        default:
                break;
        }
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            test_and_set_bit(QUEUE_PAUSED, &queue->flags))
                return;

        rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            !test_bit(QUEUE_STARTED, &queue->flags) ||
            !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
                return;

        switch (queue->qid) {
        case QID_AC_VO:
        case QID_AC_VI:
        case QID_AC_BE:
        case QID_AC_BK:
                /*
                 * For TX queues, we have to enable the queue
                 * inside mac80211.
                 */
                ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
                break;
        case QID_RX:
                /*
                 * For RX we need to kick the queue now in order to
                 * receive frames.
                 */
                queue->rt2x00dev->ops->lib->kick_queue(queue);
                /* Fall through */
        default:
                break;
        }
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
            test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        set_bit(QUEUE_PAUSED, &queue->flags);

        queue->rt2x00dev->ops->lib->start_queue(queue);

        rt2x00queue_unpause_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
        mutex_lock(&queue->status_lock);

        if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
                mutex_unlock(&queue->status_lock);
                return;
        }

        rt2x00queue_pause_queue_nocheck(queue);

        queue->rt2x00dev->ops->lib->stop_queue(queue);

        mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
        bool tx_queue =
                (queue->qid == QID_AC_VO) ||
                (queue->qid == QID_AC_VI) ||
                (queue->qid == QID_AC_BE) ||
                (queue->qid == QID_AC_BK);

        /*
         * If we are not supposed to drop any pending
         * frames, this means we must force a start (=kick)
         * to the queue to make sure the hardware will
         * start transmitting.
         */
        if (!drop && tx_queue)
                queue->rt2x00dev->ops->lib->kick_queue(queue);

        /*
         * Check if the driver supports flushing, if that is the case we can
         * defer the flushing to the driver. Otherwise we must use the
         * alternative which just waits for the queue to become empty.
         */
        if (likely(queue->rt2x00dev->ops->lib->flush_queue))
                queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

        /*
         * The queue flush has failed...
         */
        if (unlikely(!rt2x00queue_empty(queue)))
                rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
                            queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_start_queue will call ieee80211_wake_queue
         * for each queue after it has been properly initialized.
         */
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_start_queue(queue);

        rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        /*
         * rt2x00queue_stop_queue will call ieee80211_stop_queue
         * as well, but we are completely shutting down everything
         * now, so it is much safer to stop all TX queues at once,
         * and use rt2x00queue_stop_queue for cleaning up.
         */
        ieee80211_stop_queues(rt2x00dev->hw);

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_stop_queue(queue);

        rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
        struct data_queue *queue;

        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);

        rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&queue->index_lock, irqflags);

        queue->count = 0;
        queue->length = 0;

        for (i = 0; i < Q_INDEX_MAX; i++)
                queue->index[i] = 0;

        spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        unsigned int i;

        queue_for_each(rt2x00dev, queue) {
                rt2x00queue_reset(queue);

                for (i = 0; i < queue->limit; i++)
                        rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
        struct queue_entry *entries;
        unsigned int entry_size;
        unsigned int i;

        rt2x00queue_reset(queue);

        /*
         * Allocate all queue entries.
         */
        entry_size = sizeof(*entries) + queue->priv_size;
        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
        (((char *)(__base)) + ((__limit) * (__esize)) + \
            ((__index) * (__psize)))
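
        /*
         * The entries array is followed by the per-entry driver private
         * data areas within the same allocation:
         *
         *   | entry 0 ... entry limit-1 | priv 0 ... priv limit-1 |
         */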
        for (i = 0; i < queue->limit; i++) {
                entries[i].flags = 0;
                entries[i].queue = queue;
                entries[i].skb = NULL;
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
                                            sizeof(*entries), queue->priv_size);
        }

#undef QUEUE_ENTRY_PRIV_OFFSET

        queue->entries = entries;

        return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                rt2x00queue_free_skb(&queue->entries[i]);
        }
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
        unsigned int i;
        struct sk_buff *skb;

        for (i = 0; i < queue->limit; i++) {
                skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;
                queue->entries[i].skb = skb;
        }

        return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        status = rt2x00queue_alloc_entries(rt2x00dev->rx);
        if (status)
                goto exit;

        tx_queue_for_each(rt2x00dev, queue) {
                status = rt2x00queue_alloc_entries(queue);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
        if (status)
                goto exit;

        if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
                status = rt2x00queue_alloc_entries(rt2x00dev->atim);
                if (status)
                        goto exit;
        }

        status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
        if (status)
                goto exit;

        return 0;

exit:
        rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

        rt2x00queue_uninitialize(rt2x00dev);

        return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        rt2x00queue_free_skbs(rt2x00dev->rx);

        queue_for_each(rt2x00dev, queue) {
                kfree(queue->entries);
                queue->entries = NULL;
        }
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
                             struct data_queue *queue, enum data_queue_qid qid)
{
        mutex_init(&queue->status_lock);
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->index_lock);

        queue->rt2x00dev = rt2x00dev;
        queue->qid = qid;
        queue->txop = 0;
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;

        rt2x00dev->ops->queue_init(queue);

        queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        enum data_queue_qid qid;
        unsigned int req_atim =
            rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

        /*
         * We need the following queues:
         * RX: 1
         * TX: ops->tx_queues
         * Beacon: 1
         * Atim: 1 (if required)
         */
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                rt2x00_err(rt2x00dev, "Queue allocation failed\n");
                return -ENOMEM;
        }

        /*
         * Initialize pointers.
         */
        rt2x00dev->rx = queue;
        rt2x00dev->tx = &queue[1];
        rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
        rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
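
        /*
         * The resulting layout for N = ops->tx_queues is:
         *   queue[0]      RX
         *   queue[1..N]   TX
         *   queue[N + 1]  beacon
         *   queue[N + 2]  ATIM (only when required)
         */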

        /*
         * Initialize queue parameters.
         * RX: qid = QID_RX
         * TX: qid = QID_AC_VO + index
         * TX: cw_min: 2^5 = 32.
         * TX: cw_max: 2^10 = 1024.
         * BCN: qid = QID_BEACON
         * ATIM: qid = QID_ATIM
         */
        rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

        qid = QID_AC_VO;
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_init(rt2x00dev, queue, qid++);

        rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
        if (req_atim)
                rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

        return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rx);
        rt2x00dev->rx = NULL;
        rt2x00dev->tx = NULL;
        rt2x00dev->bcn = NULL;
}