/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"
#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300
static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
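
/*
 * Register accessors below multiplex several address spaces (EEPROM,
 * config and plain registers) through a single u32 address: the vendor
 * request opcode is selected from the MT_VEND_TYPE_MASK bits and the
 * remaining bits form the 16-bit transfer offset.
 */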
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
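
/*
 * Register-pair helpers: once the MCU firmware is up
 * (MT76_STATE_MCU_RUNNING), bulk register programming is delegated to the
 * MCU ops; before that, each pair is written/read with individual vendor
 * requests.
 */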
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}
static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}
static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}
static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}
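
/*
 * Scatter-gather is used only when the module parameter allows it and the
 * host controller advertises a usable scatterlist (sg_tablesize > 0)
 * without segment constraints, or the link is Wireless USB.
 */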
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}
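
/*
 * RX buffers come from a per-queue page_frag cache: in scatter-gather
 * mode each URB owns up to MT_SG_MAX_SIZE fragments, otherwise a single
 * linear fragment of q->buf_size bytes is used.
 */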
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct mt76u_buf *buf, int nsgs, gfp_t gfp)
{
	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
					SKB_WITH_OVERHEAD(q->buf_size));
	} else {
		buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		return buf->buf ? 0 : -ENOMEM;
	}
}
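
/* Allocate the URB and, in sg mode, its scatterlist for one rx entry. */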
static int
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	buf->len = SKB_WITH_OVERHEAD(q->buf_size);
	buf->dev = dev;

	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!buf->urb)
		return -ENOMEM;

	if (dev->usb.sg_en) {
		buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
					    sizeof(*buf->urb->sg),
					    GFP_KERNEL);
		if (!buf->urb->sg)
			return -ENOMEM;

		sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
	}

	return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
}
static void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (buf->buf)
		skb_free_frag(buf->buf);

	usb_free_urb(buf->urb);
}
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct mt76u_buf *buf, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
			  complete_fn, context);
}
static int
mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		 struct mt76u_buf *buf, gfp_t gfp,
		 usb_complete_t complete_fn, void *context)
{
	mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
			    context);
	trace_submit_urb(dev, buf->urb);

	return usb_submit_urb(buf->urb, gfp);
}
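
/*
 * The rx queue is a ring: mt76u_complete_rx() advances q->tail as URBs
 * complete, while the rx tasklet consumes entries from q->head and
 * resubmits each URB once its data has been pushed up the rx path.
 */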
static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
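
/*
 * Build an skb around the first fragment and attach any remaining
 * scatterlist fragments as paged rx frags before handing the frame to
 * the driver's rx path.
 */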
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
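
/* rx tasklet: drain completed entries, refill and resubmit their URBs. */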
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct mt76u_buf *buf;
	int err, count;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		count = mt76u_process_rx_entry(dev, buf);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, buf, count,
					      GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}
int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}
static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}
static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}
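
/*
 * tx completion tasklet: reap completed URBs in submission order, report
 * them via tx_complete_skb() with the queue lock dropped, then reschedule
 * the software queues and wake mac80211 queues once there is headroom.
 */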
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			entry = q->entry[q->head];
			q->head = (q->head + 1) % q->ndesc;
			q->queued--;

			spin_unlock_bh(&q->lock);
			dev->drv->tx_complete_skb(dev, q, &entry, false);
			spin_lock_bh(&q->lock);
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}
static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}
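
/* Map the skb (head and paged frags) onto the URB scatterlist. */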
static int
mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
		  struct urb *urb)
{
	if (!dev->usb.sg_en)
		return 0;

	sg_init_table(urb->sg, MT_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	return urb->num_sgs;
}
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->buf = skb->data;
	buf->len = skb->len;
	buf->done = false;

	err = mt76u_tx_build_sg(dev, skb, buf->urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    buf, mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}
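
/*
 * Transmission is two-phase: mt76u_tx_queue_skb() only prepares the URB
 * and advances q->tail; the kick callback below performs the actual
 * usb_submit_urb() for everything between q->first and q->tail.
 */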
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;

		trace_submit_urb(dev, buf->urb);
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = mt76_ac_to_hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			if (dev->usb.sg_en) {
				size_t size = MT_SG_MAX_SIZE *
					      sizeof(struct scatterlist);

				buf->urb->sg = devm_kzalloc(dev->dev, size,
							    GFP_KERNEL);
				if (!buf->urb->sg)
					return -ENOMEM;
			}
		}
	}
	return 0;
}
static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}
static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}
void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);
void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);
	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
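
/*
 * Typical usage from a driver's USB probe path (illustrative sketch only;
 * xxx_probe and mdev are placeholders, not symbols defined here):
 *
 *	static int xxx_probe(struct usb_interface *intf,
 *			     const struct usb_device_id *id)
 *	{
 *		struct mt76_dev *mdev = ...;
 *		int err;
 *
 *		err = mt76u_init(mdev, intf);	// bus ops, endpoints, tasklets
 *		if (err < 0)
 *			return err;
 *		// ... hardware init ...
 *		return mt76u_alloc_queues(mdev);	// rx/tx URBs and buffers
 *	}
 */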
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");