]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
wifi: ath12k: alloc REO queue per station
authorBaochen Qiang <quic_bqiang@quicinc.com>
Wed, 9 Apr 2025 02:26:40 +0000 (10:26 +0800)
committerJeff Johnson <jeff.johnson@oss.qualcomm.com>
Tue, 15 Apr 2025 14:35:52 +0000 (07:35 -0700)
In MLO case, all link peers share the same REO queue, so the queue
should be allocated only once, currently this is done by checking
primary_link flag in ath12k_dp_rx_peer_tid_setup(). However, the
check not only avoids duplicate allocation, but also bypasses sending
queue configuration to firmware for non-primary links. In an upcoming
patch, changes will be added to make this check a no-op for WCN7850,
as WCN7850 firmware needs to be explicitly notified of each link peer's
queue configuration. As a result, the duplicate allocation would arise
again after that change, hence it needs to be resolved beforehand.

Since all link peers share the same queue, it should be allocated per
MLD peer, not per link peer. So change to do allocation once and save
it in MLD peer, link peers can simply get queue configuration from
there.

Also relocate the ath12k_reoq_buf structure to core.h to avoid a
circular dependency.

Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3
Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.1.c5-00284-QCAHMTSWPL_V1.0_V2.0_SILICONZ-1
Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.3.1-00209-QCAHKSWPL_SILICONZ-1

Signed-off-by: Baochen Qiang <quic_bqiang@quicinc.com>
Link: https://patch.msgid.link/20250409-ath12k-wcn7850-mlo-support-v2-7-3801132ca2c3@quicinc.com
Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
drivers/net/wireless/ath/ath12k/core.h
drivers/net/wireless/ath/ath12k/dp_rx.c
drivers/net/wireless/ath/ath12k/dp_rx.h

index 1bc2a391171dbd8a71f8321c7e4ec600956387d9..0c8bb64ec08f198a7ea49d33db0f384a30421d12 100644 (file)
@@ -535,6 +535,12 @@ struct ath12k_link_sta {
        u8 link_idx;
 };
 
+/* REO queue descriptor buffer, allocated once per MLD peer and TID and
+ * shared by all of that peer's link peers.
+ */
+struct ath12k_reoq_buf {
+       void *vaddr;            /* raw (unaligned) kzalloc address, kept for kfree() */
+       dma_addr_t paddr_aligned; /* DMA address of the aligned descriptor */
+       u32 size;               /* mapped descriptor size in bytes */
+};
+
 struct ath12k_sta {
        struct ath12k_vif *ahvif;
        enum hal_pn_type pn_type;
@@ -547,6 +553,8 @@ struct ath12k_sta {
        u8 num_peer;
 
        enum ieee80211_sta_state state;
+
+       struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
 };
 
 #define ATH12K_HALF_20MHZ_BW   10
index acafaabc4c9d8b780a838546a149c34488466228..a8d603bd91312fdf94d11dab599cabe62f0b8f04 100644 (file)
@@ -929,17 +929,66 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
        return 0;
 }
 
+/* Allocate the REO queue descriptor buffer for @rx_tid->tid once per
+ * MLD peer and record it in @rx_tid.  All link peers of an MLD peer
+ * share the same queue, so the buffer is cached in
+ * @ahsta->reoq_bufs[tid] and reused for subsequent link peers.
+ *
+ * Called with ab->base_lock held (hence the GFP_ATOMIC allocation).
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
+                                   struct ath12k_sta *ahsta,
+                                   struct ath12k_dp_rx_tid *rx_tid,
+                                   u16 ssn, enum hal_pn_type pn_type)
+{
+       u32 ba_win_sz = rx_tid->ba_win_sz;
+       struct ath12k_reoq_buf *buf;
+       void *vaddr, *vaddr_aligned;
+       dma_addr_t paddr_aligned;
+       u8 tid = rx_tid->tid;
+       u32 hw_desc_sz;
+       int ret;
+
+       buf = &ahsta->reoq_bufs[tid];
+       if (!buf->vaddr) {
+               /* TODO: Optimize the memory allocation for qos tid based on
+                * the actual BA window size in REO tid update path.
+                */
+               if (tid == HAL_DESC_REO_NON_QOS_TID)
+                       hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+               else
+                       hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+               /* Over-allocate so the descriptor can be aligned to
+                * HAL_LINK_DESC_ALIGN; the raw pointer is kept in
+                * buf->vaddr so it can be passed to kfree() later.
+                */
+               vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+               if (!vaddr)
+                       return -ENOMEM;
+
+               vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+               ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
+                                          ssn, pn_type);
+
+               /* Only the aligned region is DMA-mapped for hardware use */
+               paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
+                                              DMA_BIDIRECTIONAL);
+               ret = dma_mapping_error(ab->dev, paddr_aligned);
+               if (ret) {
+                       kfree(vaddr);
+                       return ret;
+               }
+
+               buf->vaddr = vaddr;
+               buf->paddr_aligned = paddr_aligned;
+               buf->size = hw_desc_sz;
+       }
+
+       /* Link peers pick up the shared MLD-level buffer by value copy */
+       rx_tid->qbuf = *buf;
+       rx_tid->active = true;
+
+       return 0;
+}
+
 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
                                u8 tid, u32 ba_win_sz, u16 ssn,
                                enum hal_pn_type pn_type)
 {
        struct ath12k_base *ab = ar->ab;
        struct ath12k_dp *dp = &ab->dp;
-       struct hal_rx_reo_queue *addr_aligned;
        struct ath12k_peer *peer;
+       struct ath12k_sta *ahsta;
        struct ath12k_dp_rx_tid *rx_tid;
-       u32 hw_desc_sz;
-       void *vaddr;
        dma_addr_t paddr_aligned;
        int ret;
 
@@ -972,9 +1021,9 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
        }
 
        rx_tid = &peer->rx_tid[tid];
+       paddr_aligned = rx_tid->qbuf.paddr_aligned;
        /* Update the tid queue if it is already setup */
        if (rx_tid->active) {
-               paddr_aligned = rx_tid->qbuf.paddr_aligned;
                ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
                                                    ba_win_sz, ssn, true);
                spin_unlock_bh(&ab->base_lock);
@@ -1002,39 +1051,14 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
 
        rx_tid->ba_win_sz = ba_win_sz;
 
-       /* TODO: Optimize the memory allocation for qos tid based on
-        * the actual BA window size in REO tid update path.
-        */
-       if (tid == HAL_DESC_REO_NON_QOS_TID)
-               hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
-       else
-               hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
-
-       vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
-       if (!vaddr) {
-               spin_unlock_bh(&ab->base_lock);
-               return -ENOMEM;
-       }
-
-       addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
-       ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
-                                  ssn, pn_type);
-
-       paddr_aligned = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
-                                      DMA_BIDIRECTIONAL);
-
-       ret = dma_mapping_error(ab->dev, paddr_aligned);
+       ahsta = ath12k_sta_to_ahsta(peer->sta);
+       ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
        if (ret) {
                spin_unlock_bh(&ab->base_lock);
-               goto err_mem_free;
+               ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
+               return ret;
        }
 
-       rx_tid->qbuf.vaddr = vaddr;
-       rx_tid->qbuf.paddr_aligned = paddr_aligned;
-       rx_tid->qbuf.size = hw_desc_sz;
-       rx_tid->active = true;
-
        if (ab->hw_params->reoq_lut_support) {
                /* Update the REO queue LUT at the corresponding peer id
                 * and tid with qaddr.
@@ -1054,11 +1078,6 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
                                                             ba_win_sz);
        }
 
-       return ret;
-
-err_mem_free:
-       kfree(vaddr);
-
        return ret;
 }
 
index 4d90fc98fd995aaaed0b8ccf4e2bf1a721aaa230..19fb41e1c32b68ac5bba078208c4685eecc8da7a 100644 (file)
 
 #define DP_MAX_NWIFI_HDR_LEN   30
 
-struct ath12k_reoq_buf {
-       void *vaddr;
-       dma_addr_t paddr_aligned;
-       u32 size;
-};
-
 struct ath12k_dp_rx_tid {
        u8 tid;
        u32 ba_win_sz;