/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS     300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                                  u8 req_type, u16 val, u16 offset,
                                  void *buf, size_t len)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);
        unsigned int pipe;
        int i, ret;

        pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                                       : usb_sndctrlpipe(udev, 0);
        for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        return -EIO;

                ret = usb_control_msg(udev, pipe, req, req_type, val,
                                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
                if (ret == -ENODEV)
                        set_bit(MT76_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
                usleep_range(5000, 10000);
        }

        dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
                req, offset, ret);
        return ret;
}

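/* grab usb_ctrl_mtx and issue a single vendor control transfer */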
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len)
{
        int ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_vendor_request(dev, req, req_type,
                                     val, offset, buf, len);
        trace_usb_reg_wr(dev, offset, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        u16 offset;
        int ret;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_EEPROM:
                req = MT_VEND_READ_EEPROM;
                break;
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_READ_CFG;
                break;
        default:
                req = MT_VEND_MULTI_READ;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        ret = __mt76u_vendor_request(dev, req,
                                     USB_DIR_IN | USB_TYPE_VENDOR,
                                     0, offset, usb->data, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = get_unaligned_le32(usb->data);
        trace_usb_reg_rr(dev, addr, data);

        return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        u32 ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_rr(dev, addr);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        struct mt76_usb *usb = &dev->usb;
        u16 offset;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_WRITE_CFG;
                break;
        default:
                req = MT_VEND_MULTI_WRITE;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        put_unaligned_le32(val, usb->data);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0,
                               offset, usb->data, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

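/* read-modify-write a register within a single usb_ctrl_mtx critical section */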
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        val |= __mt76u_rr(dev, addr) & ~mask;
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return val;
}

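/* copy a buffer to consecutive registers, one 32-bit word per control transfer */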
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
{
        struct mt76_usb *usb = &dev->usb;
        const u32 *val = data;
        int i, ret;

        mutex_lock(&usb->usb_ctrl_mtx);
        for (i = 0; i < (len / 4); i++) {
                put_unaligned_le32(val[i], usb->data);
                ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                                             USB_DIR_OUT | USB_TYPE_VENDOR,
                                             0, offset + i * 4, usb->data,
                                             sizeof(__le32));
                if (ret < 0)
                        break;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
}

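/* write a 32-bit value as two 16-bit vendor requests: low word first,
 * then high word at offset + 2
 */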
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val & 0xffff, offset, NULL, 0);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val >> 16, offset + 2, NULL, 0);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
                const struct mt76_reg_pair *data, int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                __mt76u_wr(dev, base + data->reg, data->value);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
            const struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
        else
                return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
                int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                data->value = __mt76u_rr(dev, base + data->reg);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
            struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
        else
                return mt76u_req_rd_rp(dev, base, data, n);
}

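/* scatter-gather can be used only if it has not been disabled via the module
 * parameter, the host controller supports it, and either there is no SG
 * constraint or the device runs at wireless-USB speed
 */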
static bool mt76u_check_sg(struct mt76_dev *dev)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);

        return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
                (udev->bus->no_sg_constraint ||
                 udev->speed == USB_SPEED_WIRELESS));
}

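/* map the interface bulk endpoints onto the fixed in_ep/out_ep slots */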
static int
mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
{
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        int i, in_ep = 0, out_ep = 0;

        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc) &&
                    in_ep < __MT_EP_IN_MAX) {
                        usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
                        usb->in_max_packet = usb_endpoint_maxp(ep_desc);
                        in_ep++;
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           out_ep < __MT_EP_OUT_MAX) {
                        usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
                        usb->out_max_packet = usb_endpoint_maxp(ep_desc);
                        out_ep++;
                }
        }

        if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
                return -EINVAL;
        return 0;
}

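/* fill the rx urb scatterlist with freshly allocated page fragments */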
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
                 int nsgs, int len, int sglen)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = buf->urb;
        int i;

        spin_lock_bh(&q->rx_page_lock);
        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;

                data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
                if (!data)
                        break;

                page = virt_to_head_page(data);
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, sglen, offset);
        }
        spin_unlock_bh(&q->rx_page_lock);

        if (i < nsgs) {
                int j;

                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        buf->len = urb->num_sgs * sglen;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
                struct mt76u_buf *buf, int nsgs, gfp_t gfp)
{
        if (dev->usb.sg_en) {
                return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
                                        SKB_WITH_OVERHEAD(q->buf_size));
        } else {
                buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                return buf->buf ? 0 : -ENOMEM;
        }
}

static int
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

        buf->len = SKB_WITH_OVERHEAD(q->buf_size);
        buf->dev = dev;

        buf->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!buf->urb)
                return -ENOMEM;

        if (dev->usb.sg_en) {
                buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
                                            sizeof(*buf->urb->sg),
                                            GFP_KERNEL);
                if (!buf->urb->sg)
                        return -ENOMEM;

                sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
        }

        return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
}

static void mt76u_buf_free(struct mt76u_buf *buf)
{
        struct urb *urb = buf->urb;
        int i;

        for (i = 0; i < urb->num_sgs; i++)
                skb_free_frag(sg_virt(&urb->sg[i]));

        if (buf->buf)
                skb_free_frag(buf->buf);

        usb_free_urb(buf->urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                    struct mt76u_buf *buf, usb_complete_t complete_fn,
                    void *context)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);
        u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

        usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
                          complete_fn, context);
}

static int
mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
                 struct mt76u_buf *buf, gfp_t gfp,
                 usb_complete_t complete_fn, void *context)
{
        mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
                            context);
        trace_submit_urb(dev, buf->urb);

        return usb_submit_urb(buf->urb, gfp);
}

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
        struct mt76u_buf *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                buf = &q->entry[q->head].ubuf;
                q->head = (q->head + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return buf;
}

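/* validate the DMA length field at the start of an rx buffer and return it,
 * or -EINVAL if it is inconsistent with the amount of data received
 */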
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
        u16 dma_len, min_len;

        dma_len = get_unaligned_le16(data);
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;

        if (data_len < min_len || !dma_len ||
            dma_len + MT_DMA_HDR_LEN > data_len ||
            (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
}

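/* build an skb around the first rx fragment, attach any remaining
 * scatter-gather fragments as paged data and hand the frame to the driver
 */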
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = buf->urb;
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
        int data_len, len, nsgs = 1;
        struct sk_buff *skb;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
                return 0;

        len = mt76u_get_rx_entry_len(data, urb->actual_length);
        if (len < 0)
                return 0;

        data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
        data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
        if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
                return 0;

        skb = build_skb(data, q->buf_size);
        if (!skb)
                return 0;

        skb_reserve(skb, MT_DMA_HDR_LEN);
        __skb_put(skb, data_len);
        len -= data_len;

        while (len > 0 && nsgs < urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset,
                                data_len, q->buf_size);
                len -= data_len;
                nsgs++;
        }
        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

        return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = urb->context;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
                goto out;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet);
out:
        spin_unlock_irqrestore(&q->lock, flags);
}

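/* rx bottom half: process completed rx buffers, refill them and resubmit
 * their urbs
 */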
static void mt76u_rx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct mt76u_buf *buf;
        int err, count;

        rcu_read_lock();

        while (true) {
                buf = mt76u_get_next_rx_entry(q);
                if (!buf)
                        break;

                count = mt76u_process_rx_entry(dev, buf);
                if (count > 0) {
                        err = mt76u_refill_rx(dev, q, buf, count,
                                              GFP_ATOMIC);
                        if (err < 0)
                                break;
                }
                mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
                                 buf, GFP_ATOMIC,
                                 mt76u_complete_rx, dev);
        }
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;
        int i, err = 0;

        spin_lock_irqsave(&q->lock, flags);
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
                                       &q->entry[i].ubuf, GFP_ATOMIC,
                                       mt76u_complete_rx, dev);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        spin_unlock_irqrestore(&q->lock, flags);

        return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
        struct mt76_usb *usb = &dev->usb;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err;

        usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
        if (!usb->mcu.data)
                return -ENOMEM;

        spin_lock_init(&q->rx_page_lock);
        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
        q->ndesc = MT_NUM_RX_ENTRIES;
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
                if (err < 0)
                        return err;
        }

        return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct page *page;
        int i;

        for (i = 0; i < q->ndesc; i++)
                mt76u_buf_free(&q->entry[i].ubuf);

        spin_lock_bh(&q->rx_page_lock);
        if (!q->rx_page.va)
                goto out;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
        spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_kill_urb(q->entry[i].ubuf.urb);
}

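/* tx bottom half: reclaim completed tx entries on each AC queue, reschedule
 * pending frames and wake the mac80211 queue when there is room again
 */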
static void mt76u_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue_entry entry;
        struct mt76u_buf *buf;
        struct mt76_queue *q;
        bool wake;
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];

                spin_lock_bh(&q->lock);
                while (true) {
                        buf = &q->entry[q->head].ubuf;
                        if (!buf->done || !q->queued)
                                break;

                        if (q->entry[q->head].schedule) {
                                q->entry[q->head].schedule = false;
                                q->swq_queued--;
                        }

                        entry = q->entry[q->head];
                        q->head = (q->head + 1) % q->ndesc;
                        q->queued--;

                        spin_unlock_bh(&q->lock);
                        dev->drv->tx_complete_skb(dev, q, &entry, false);
                        spin_lock_bh(&q->lock);
                }
                mt76_txq_schedule(dev, q);
                wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
                if (!q->queued)
                        wake_up(&dev->tx_wait);

                spin_unlock_bh(&q->lock);

                if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
                        ieee80211_queue_delayed_work(dev->hw,
                                                     &dev->usb.stat_work,
                                                     msecs_to_jiffies(10));

                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
}

static void mt76u_tx_status_data(struct work_struct *work)
{
        struct mt76_usb *usb;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        usb = container_of(work, struct mt76_usb, stat_work.work);
        dev = container_of(usb, struct mt76_dev, usb);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
                ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
                                             msecs_to_jiffies(10));
        else
                clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76u_buf *buf = urb->context;
        struct mt76_dev *dev = buf->dev;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        buf->done = true;

        tasklet_schedule(&dev->usb.tx_tasklet);
}

static int
mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
                  struct urb *urb)
{
        if (!dev->usb.sg_en)
                return 0;

        sg_init_table(urb->sg, MT_SG_MAX_SIZE);
        urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
        return urb->num_sgs;
}

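/* prepare a tx skb, map it onto the next free queue entry and fill the
 * corresponding bulk-out urb; the urb is submitted later by mt76u_tx_kick()
 */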
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct mt76u_buf *buf;
        u16 idx = q->tail;
        int err;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
        if (err < 0)
                return err;

        buf = &q->entry[idx].ubuf;
        buf->buf = skb->data;
        buf->len = skb->len;
        buf->done = false;

        err = mt76u_tx_build_sg(dev, skb, buf->urb);
        if (err < 0)
                return err;

        mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
                            buf, mt76u_complete_tx, buf);

        q->tail = (q->tail + 1) % q->ndesc;
        q->entry[idx].skb = skb;
        q->queued++;

        return idx;
}

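/* submit all queued tx urbs between q->first and q->tail to the USB stack */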
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct mt76u_buf *buf;
        int err;

        while (q->first != q->tail) {
                buf = &q->entry[q->first].ubuf;

                trace_submit_urb(dev, buf->urb);
                err = usb_submit_urb(buf->urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
        struct mt76u_buf *buf;
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->swq);
                q->hw_idx = mt76_ac_to_hwq(i);

                q->entry = devm_kcalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;

                q->ndesc = MT_NUM_TX_ENTRIES;
                for (j = 0; j < q->ndesc; j++) {
                        buf = &q->entry[j].ubuf;
                        buf->dev = dev;

                        buf->urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (!buf->urb)
                                return -ENOMEM;

                        if (dev->usb.sg_en) {
                                size_t size = MT_SG_MAX_SIZE *
                                              sizeof(struct scatterlist);

                                buf->urb->sg = devm_kzalloc(dev->dev, size,
                                                            GFP_KERNEL);
                                if (!buf->urb->sg)
                                        return -ENOMEM;
                        }
                }
        }
        return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                for (j = 0; j < q->ndesc; j++)
                        usb_free_urb(q->entry[j].ubuf.urb);
        }
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                for (j = 0; j < q->ndesc; j++)
                        usb_kill_urb(q->entry[j].ubuf.urb);
        }
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
        tasklet_disable(&dev->usb.rx_tasklet);
        tasklet_disable(&dev->usb.tx_tasklet);

        mt76u_stop_rx(dev);
        mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
        cancel_delayed_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
        mt76u_stop_queues(dev);

        mt76u_free_rx(dev);
        mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76u_alloc_rx(dev);
        if (err < 0)
                return err;

        return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
        .tx_queue_skb = mt76u_tx_queue_skb,
        .kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf)
{
        static const struct mt76_bus_ops mt76u_ops = {
                .rr = mt76u_rr,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .copy = mt76u_copy,
                .wr_rp = mt76u_wr_rp,
                .rd_rp = mt76u_rd_rp,
                .type = MT76_BUS_USB,
        };
        struct mt76_usb *usb = &dev->usb;

        tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
        tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
        skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

        mutex_init(&usb->mcu.mutex);

        mutex_init(&usb->usb_ctrl_mtx);
        dev->bus = &mt76u_ops;
        dev->queue_ops = &usb_queue_ops;

        usb->sg_en = mt76u_check_sg(dev);

        return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");