 	do {
 		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
-			return -EBUSY;
+			break;
 
 		if (stop || mt76_txq_stopped(q))
 			break;
 static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
-	struct mt76_queue *q = phy->q_tx[qid];
 	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
+	struct mt76_queue *q;
 	int ret = 0;
 
 	while (1) {
 		int n_frames = 0;
 
-		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
-			return -EBUSY;
-
-		if (dev->queue_ops->tx_cleanup &&
-		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
-			dev->queue_ops->tx_cleanup(dev, q, false);
-		}
-
 		txq = ieee80211_next_txq(phy->hw, qid);
 		if (!txq)
 			break;
 		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
 			continue;
 
+		phy = mt76_dev_phy(dev, wcid->phy_idx);
+		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
+			continue;
+
+		q = phy->q_tx[qid];
+		if (dev->queue_ops->tx_cleanup &&
+		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+			dev->queue_ops->tx_cleanup(dev, q, false);
+		}
+
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
 {
 	int len;
 
-	if (qid >= 4 || phy->offchannel)
+	if (qid >= 4)
 		return;
 
 	local_bh_disable();
 void mt76_txq_schedule_all(struct mt76_phy *phy)
 {
+	struct mt76_phy *main_phy = &phy->dev->phy;
 	int i;
 
 	mt76_txq_schedule_pending(phy);
+
+	if (phy != main_phy && phy->hw == main_phy->hw)
+		return;
+
 	for (i = 0; i <= MT_TXQ_BK; i++)
 		mt76_txq_schedule(phy, i);
 }
 	struct mt76_phy *phy;
 	int i;
 
+	mt76_txq_schedule_all(&dev->phy);
 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
 		phy = dev->phys[i];
 		if (!phy)
 	struct mt76_phy *phy = hw->priv;
 	struct mt76_dev *dev = phy->dev;
 
-	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
-		return;
-
 	mt76_worker_schedule(&dev->tx_worker);
 }
 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);