return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}
-static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
- u16 ppt_idx, u16 spt_idx)
+static void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_dp *dp,
+ u16 ppt_idx, u16 spt_idx)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
-
return dp->spt_info[ppt_idx].vaddr + spt_idx;
}
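+/* The cookie consumed by the lookup helpers below packs the PPT page index
+ * above ATH12K_CC_PPT_SHIFT and the SPT entry index in the low bits,
+ * matching the composition done above.
+ */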
-struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
u32 cookie)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(ab);
- end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(ab);
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(dp->ab);
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(dp->ab);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
ppt_idx = ppt_idx - dp->rx_ppt_base;
- desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
EXPORT_SYMBOL(ath12k_dp_get_rx_desc);
-struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx +
- (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * ATH12K_HW_MAX_QUEUES);
+ (ATH12K_TX_SPT_PAGES_PER_POOL(dp->ab) * ATH12K_HW_MAX_QUEUES);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
- desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);
return *desc_addr_ptr;
}
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
- rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
*rx_desc_addr = &rx_descs[j];
}
}
/* Update descriptor VA in SPT */
tx_desc_addr =
- ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
*tx_desc_addr = &tx_descs[j];
}
}
struct ath12k_dp;
struct ath12k_vif;
struct ath12k_link_vif;
-struct hal_tcl_status_ring;
struct ath12k_ext_irq_grp;
struct ath12k_dp_rx_tid;
struct ath12k_dp_link_peer *peer, u8 tid);
void (*reo_cache_flush)(struct ath12k_base *ab,
struct ath12k_dp_rx_tid *rx_tid);
- int (*rx_link_desc_return)(struct ath12k_base *ab,
+ int (*rx_link_desc_return)(struct ath12k_dp *dp,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
void (*rx_frags_cleanup)(struct ath12k_dp_rx_tid *rx_tid,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
- return dp->ops->rx_link_desc_return(dp->ab, buf_addr_info, action);
+ return dp->ops->rx_link_desc_return(dp, buf_addr_info, action);
}
static inline
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc);
-struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
u32 cookie);
-struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
u32 desc_id);
#endif
status->flag |= RX_FLAG_RADIOTAP_HE;
}
- ath12k_wifi7_dp_extract_rx_desc_data(ab, &rx_info, rx_desc, rx_desc);
+ ath12k_wifi7_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);
rcu_read_lock();
spin_lock_bh(&dp->dp_lock);
ath12k_hal_rx_msdu_list_get(&ar->ab->hal, ar, msdu_link_desc, &msdu_list,
&num_msdus);
- desc_info = ath12k_dp_get_rx_desc(ar->ab,
+ desc_info = ath12k_dp_get_rx_desc(ar->ab->dp,
msdu_list.sw_cookie[num_msdus - 1]);
tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
}
desc_info =
- ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
+ ath12k_dp_get_rx_desc(ar->ab->dp, msdu_list.sw_cookie[i]);
msdu = desc_info->skb;
if (!msdu) {
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
- ath12k_dp_rx_bufs_replenish(ar->ab,
+ ath12k_dp_rx_bufs_replenish(ar->ab->dp,
&dp->rx_refill_buf_ring,
&rx_desc_used_list,
rx_bufs_used);
return num_buffs_reaped;
}
-int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_mon_process_ring(struct ath12k_dp *dp, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
{
- u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, mac_id);
struct ath12k_pdev_dp *dp_pdev;
struct ath12k *ar;
int num_buffs_reaped = 0;
return 0;
}
- if (ab->hw_params->rxdma1_enable) {
+ if (dp->hw_params->rxdma1_enable) {
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
num_buffs_reaped = ath12k_dp_mon_srng_process(dp_pdev, &budget,
napi);
int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *rx_ring,
int req_entries);
-int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_mon_process_ring(struct ath12k_dp *dp, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void);
}
/* Returns number of Rx buffers replenished */
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
struct dp_rxdma_ring *rx_ring,
struct list_head *used_list,
int req_entries)
{
+ struct ath12k_base *ab = dp->ab;
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
struct sk_buff *skb;
int num_remain;
u32 cookie;
dma_addr_t paddr;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_rx_desc_info *rx_desc;
- enum hal_rx_buf_return_buf_manager mgr = ab->hal.hal_params->rx_buf_rbm;
+ enum hal_rx_buf_return_buf_manager mgr = dp->hal->hal_params->rx_buf_rbm;
req_entries = min(req_entries, rx_ring->bufs_max);
- srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+ srng = &dp->hal->srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
skb->data);
}
- paddr = dma_map_single(ab->dev, skb->data,
+ paddr = dma_map_single(dp->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- if (dma_mapping_error(ab->dev, paddr))
+ if (dma_mapping_error(dp->dev, paddr))
goto fail_free_skb;
rx_desc = list_first_entry_or_null(used_list,
num_remain--;
- ath12k_hal_rx_buf_addr_info_set(&ab->hal, desc, paddr, cookie,
+ ath12k_hal_rx_buf_addr_info_set(dp->hal, desc, paddr, cookie,
mgr);
}
goto out;
fail_dma_unmap:
- dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ dma_unmap_single(dp->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
+ ath12k_dp_rx_bufs_replenish(ath12k_ab_to_dp(ab), rx_ring, &list, 0);
return 0;
}
enum hal_encrypt_type enctype,
struct hal_rx_desc_data *rx_info)
{
- struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_hdr = skb_push(msdu,
ath12k_dp_rx_crypto_param_len(dp_pdev, enctype));
- ath12k_dp_rx_desc_get_crypto_header(ab,
+ ath12k_dp_rx_desc_get_crypto_header(dp_pdev->dp->hal,
rxcb->rx_desc, crypto_hdr,
enctype);
}
{
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
size_t hdr_len, crypto_len;
struct ieee80211_hdr hdr;
__le16 qos_ctl;
u8 *crypto_hdr;
- ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+ ath12k_dp_rx_desc_get_dot11_hdr(hal, rx_desc, &hdr);
hdr_len = ieee80211_hdrlen(hdr.frame_control);
if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
crypto_hdr = skb_push(msdu, crypto_len);
- ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
+ ath12k_dp_rx_desc_get_crypto_header(hal, rx_desc, crypto_hdr, enctype);
}
skb_push(msdu, hdr_len);
struct hal_rx_desc_data *rx_info)
{
struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
struct ieee80211_rx_status *rx_status;
struct ieee80211_sta *pubsta;
struct ath12k_dp_peer *peer;
status->link_id = peer->hw_links[rxcb->hw_link_id];
}
- ath12k_dbg(ab, ATH12K_DBG_DATA,
+ ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
- ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
rx_status = IEEE80211_SKB_RXCB(msdu);
}
EXPORT_SYMBOL(ath12k_dp_rx_h_undecap_frag);
-static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
+static int ath12k_dp_rx_h_cmp_frags(struct ath12k_hal *hal,
struct sk_buff *a, struct sk_buff *b)
{
int frag1, frag2;
- frag1 = ath12k_dp_rx_h_frag_no(ab, a);
- frag2 = ath12k_dp_rx_h_frag_no(ab, b);
+ frag1 = ath12k_dp_rx_h_frag_no(hal, a);
+ frag2 = ath12k_dp_rx_h_frag_no(hal, b);
return frag1 - frag2;
}
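+/* Keep the fragment list ordered by fragment number: walk the list until the
+ * first entry whose fragment number is not smaller than cur_frag's and queue
+ * cur_frag right before it.
+ */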
-void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
+void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
struct sk_buff_head *frag_list,
struct sk_buff *cur_frag)
{
int cmp;
skb_queue_walk(frag_list, skb) {
- cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
+ cmp = ath12k_dp_rx_h_cmp_frags(hal, skb, cur_frag);
if (cmp < 0)
continue;
__skb_queue_before(frag_list, skb, cur_frag);
return ret;
}
-static inline bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
+static inline bool ath12k_dp_rx_h_more_frags(struct ath12k_hal *hal,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + hal->hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
-static inline u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
+static inline u16 ath12k_dp_rx_h_frag_no(struct ath12k_hal *hal,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + hal->hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
return ab->hal.ops->rx_desc_get_l3_pad_bytes(desc);
}
-static inline void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
+static inline void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_hal *hal,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- ab->hal.ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+ hal->ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
-static inline void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
+static inline void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_hal *hal,
struct hal_rx_desc *desc,
u16 len)
{
- ab->hal.ops->rx_desc_set_msdu_len(desc, len);
+ hal->ops->rx_desc_set_msdu_len(desc, len);
}
static inline u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
return ab->hal.ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
-static inline void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
+static inline void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_hal *hal,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
- ab->hal.ops->rx_desc_get_dot11_hdr(desc, hdr);
+ hal->ops->rx_desc_get_dot11_hdr(desc, hdr);
}
-static inline void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
+static inline void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_hal *hal,
struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
- ab->hal.ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ hal->ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
-static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_hal *hal,
struct hal_rx_desc *desc)
{
- return ab->hal.ops->rx_desc_get_msdu_src_link_id(desc);
+ return hal->ops->rx_desc_get_msdu_src_link_id(desc);
}
static inline void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
struct sk_buff *msdu,
struct hal_rx_desc_data *rx_info);
u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb);
-void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
+void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
struct sk_buff_head *frag_list,
struct sk_buff *cur_frag);
void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int pdev_idx);
void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int pdev_idx);
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab);
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
struct dp_rxdma_ring *rx_ring,
struct list_head *used_list,
int req_entries);
}
}
-int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
- struct sk_buff **pskb)
+int ath12k_dp_tx_align_payload(struct ath12k_dp *dp, struct sk_buff **pskb)
{
- u32 iova_mask = ab->hw_params->iova_mask;
+ u32 iova_mask = dp->hw_params->iova_mask;
unsigned long offset, delta1, delta2;
struct sk_buff *skb2, *skb = *pskb;
unsigned int headroom = skb_headroom(skb);
}
EXPORT_SYMBOL(ath12k_dp_tx_align_payload);
-void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
+void ath12k_dp_tx_free_txbuf(struct ath12k_dp *dp,
struct dp_tx_ring *tx_ring,
struct ath12k_tx_desc_params *desc_params)
{
- struct ath12k *ar;
+ struct ath12k_pdev_dp *dp_pdev;
struct sk_buff *msdu = desc_params->skb;
struct ath12k_skb_cb *skb_cb;
- u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params->mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
- ar = ab->pdevs[pdev_id].ar;
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);
+
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
dev_kfree_skb_any(desc_params->skb_ext_desc);
}
- ieee80211_free_txskb(ar->ah->hw, msdu);
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
- if (atomic_dec_and_test(&ar->dp.num_tx_pending))
- wake_up(&ar->dp.tx_empty_waitq);
+ if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
+ wake_up(&dp_pdev->tx_empty_waitq);
}
EXPORT_SYMBOL(ath12k_dp_tx_free_txbuf);
void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb);
u8 ath12k_dp_tx_get_tid(struct sk_buff *skb);
void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len);
-int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
- struct sk_buff **pskb);
+int ath12k_dp_tx_align_payload(struct ath12k_dp *dp, struct sk_buff **pskb);
void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
struct ath12k_tx_desc_info *tx_desc,
u8 pool_id);
struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
u8 pool_id);
-void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
+void ath12k_dp_tx_free_txbuf(struct ath12k_dp *dp,
struct dp_tx_ring *tx_ring,
struct ath12k_tx_desc_params *desc_params);
#endif
struct ath12k_ext_irq_grp *irq_grp,
int budget)
{
- struct ath12k_base *ab = dp->ab;
struct napi_struct *napi = &irq_grp->napi;
int grp_id = irq_grp->grp_id;
int work_done = 0;
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- if (ab->hw_params->ring_mask->tx[grp_id]) {
- i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
- ath12k_wifi7_dp_tx_completion_handler(ab, i);
+ if (dp->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(dp->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_wifi7_dp_tx_completion_handler(dp, i);
}
- if (ab->hw_params->ring_mask->rx_err[grp_id]) {
- work_done = ath12k_wifi7_dp_rx_process_err(ab, napi, budget);
+ if (dp->hw_params->ring_mask->rx_err[grp_id]) {
+ work_done = ath12k_wifi7_dp_rx_process_err(dp, napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
- if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
- work_done = ath12k_wifi7_dp_rx_process_wbm_err(ab, napi, budget);
+ if (dp->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath12k_wifi7_dp_rx_process_wbm_err(dp, napi, budget);
budget -= work_done;
tot_work_done += work_done;
goto done;
}
- if (ab->hw_params->ring_mask->rx[grp_id]) {
- i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
- work_done = ath12k_wifi7_dp_rx_process(ab, i, napi, budget);
+ if (dp->hw_params->ring_mask->rx[grp_id]) {
+ i = fls(dp->hw_params->ring_mask->rx[grp_id]) - 1;
+ work_done = ath12k_wifi7_dp_rx_process(dp, i, napi, budget);
budget -= work_done;
tot_work_done += work_done;
if (budget <= 0)
goto done;
}
- if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
- ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+ if (dp->hw_params->ring_mask->rx_mon_status[grp_id]) {
+ ring_mask = dp->hw_params->ring_mask->rx_mon_status[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ ath12k_dp_mon_process_ring(dp, id, napi, budget,
0);
budget -= work_done;
tot_work_done += work_done;
}
}
- if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
+ if (dp->hw_params->ring_mask->rx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
- ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+ ring_mask = dp->hw_params->ring_mask->rx_mon_dest[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ ath12k_dp_mon_process_ring(dp, id, napi, budget,
monitor_mode);
budget -= work_done;
tot_work_done += work_done;
}
}
- if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
+ if (dp->hw_params->ring_mask->tx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
- ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
- for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+ ring_mask = dp->hw_params->ring_mask->tx_mon_dest[grp_id];
+ for (i = 0; i < dp->ab->num_radios; i++) {
+ for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * dp->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
- ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ ath12k_dp_mon_process_ring(dp, id, napi, budget,
monitor_mode);
budget -= work_done;
tot_work_done += work_done;
}
}
- if (ab->hw_params->ring_mask->reo_status[grp_id])
- ath12k_wifi7_dp_rx_process_reo_status(ab);
+ if (dp->hw_params->ring_mask->reo_status[grp_id])
+ ath12k_wifi7_dp_rx_process_reo_status(dp);
- if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ if (dp->hw_params->ring_mask->host2rxdma[grp_id]) {
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
LIST_HEAD(list);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
+ ath12k_dp_rx_bufs_replenish(dp, rx_ring, &list, 0);
}
/* TODO: Implement handler for other interrupts */
peer->rx_tid_active_bitmask &= ~(1 << tid);
}
-int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_base *ab,
+int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
+ struct ath12k_base *ab = dp->ab;
struct hal_wbm_release_ring *desc;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct hal_srng *srng;
int ret = 0;
- srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+ srng = &dp->hal->srng_list[dp->wbm_desc_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
}
}
-static int ath12k_wifi7_dp_rx_msdu_coalesce(struct ath12k_dp *dp,
+static int ath12k_wifi7_dp_rx_msdu_coalesce(struct ath12k_hal *hal,
struct sk_buff_head *msdu_list,
struct sk_buff *first, struct sk_buff *last,
u8 l3pad_bytes, int msdu_len,
struct hal_rx_desc_data *rx_info)
{
- struct ath12k_base *ab = dp->ab;
struct sk_buff *skb;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
bool is_continuation;
/* As the msdu is spread across multiple rx buffers,
/* When an MSDU spread over multiple buffers MSDU_END
* tlvs are valid only in the last buffer. Copy those tlvs.
*/
- ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+ ath12k_dp_rx_desc_end_tlv_copy(hal, rxcb->rx_desc, ldesc);
space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
if (space_extra > 0 &&
struct hal_rx_desc_data *rx_info)
{
struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *last_buf;
+ struct ath12k_hal *hal = dp->hal;
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
- ath12k_warn(ab,
+ ath12k_warn(dp->ab,
"No valid Rx buffer to access MSDU_END tlv\n");
ret = -EIO;
goto free_out;
rx_desc = (struct hal_rx_desc *)msdu->data;
lrx_desc = (struct hal_rx_desc *)last_buf->data;
- ath12k_wifi7_dp_extract_rx_desc_data(ab, rx_info, rx_desc, lrx_desc);
+ ath12k_wifi7_dp_extract_rx_desc_data(hal, rx_info, rx_desc, lrx_desc);
if (!rx_info->msdu_done) {
- ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
+ ath12k_warn(dp->ab, "msdu_done bit in msdu_end is not set\n");
ret = -EIO;
goto free_out;
}
} else if (!rxcb->is_continuation) {
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
ret = -EINVAL;
- ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
- ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ ath12k_warn(dp->ab, "invalid msdu len %u\n", msdu_len);
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
sizeof(*rx_desc));
goto free_out;
}
skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
} else {
- ret = ath12k_wifi7_dp_rx_msdu_coalesce(dp, msdu_list,
+ ret = ath12k_wifi7_dp_rx_msdu_coalesce(hal, msdu_list,
msdu, last_buf,
l3_pad_bytes, msdu_len,
rx_info);
if (ret) {
- ath12k_warn(ab,
+ ath12k_warn(dp->ab,
"failed to coalesce msdu rx buffer%d\n", ret);
goto free_out;
}
}
static void
-ath12k_wifi7_dp_rx_process_received_packets(struct ath12k_base *ab,
+ath12k_wifi7_dp_rx_process_received_packets(struct ath12k_dp *dp,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
int ring_id)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_hw_group *ag = dp->ag;
struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
struct ieee80211_rx_status rx_status = {};
ret = ath12k_wifi7_dp_rx_process_msdu(dp_pdev, msdu, msdu_list, &rx_info);
if (ret) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
+ ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
rcu_read_unlock();
}
-int ath12k_wifi7_dp_rx_process(struct ath12k_base *ab, int ring_id,
+int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int ring_id,
struct napi_struct *napi, int budget)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_hw_group *ag = dp->ag;
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_link *hw_links = ag->hw_links;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
struct ath12k_dp *partner_dp;
- struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
- srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+ srng = &hal->srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
continue;
}
- partner_ab = partner_dp->ab;
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(partner_dp, cookie);
if (!desc_info) {
- ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ ath12k_warn(partner_dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
cookie);
continue;
}
list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(partner_ab->dev, rxcb->paddr,
+ dma_unmap_single(partner_dp->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped[device_id]++;
- dp->device_stats.reo_rx[ring_id][ab->device_id]++;
+ dp->device_stats.reo_rx[ring_id][dp->device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
continue;
partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
- partner_ab = partner_dp->ab;
rx_ring = &partner_dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
- ath12k_wifi7_dp_rx_process_received_packets(ab, napi, &msdu_list,
+ ath12k_wifi7_dp_rx_process_received_packets(dp, napi, &msdu_list,
ring_id);
exit:
struct sk_buff *defrag_skb)
{
struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
struct hal_reo_entrance_ring *reo_ent_ring;
struct hal_reo_dest_ring *reo_dest_ring;
enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
- hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ hal_rx_desc_sz = hal->hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
- ath12k_wifi7_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
+ ath12k_wifi7_hal_rx_reo_ent_paddr_get(&reo_dest_ring->buf_addr_info,
&link_paddr, &cookie);
desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
/* change msdu len in hal rx desc */
- ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+ ath12k_dp_rxdesc_set_msdu_len(hal, rx_desc, defrag_skb->len - hal_rx_desc_sz);
- buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
+ buf_paddr = dma_map_single(dp->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(ab->dev, buf_paddr))
+ if (dma_mapping_error(dp->dev, buf_paddr))
return -ENOMEM;
spin_lock_bh(&dp->rx_desc_lock);
HAL_RX_BUF_RBM_SW3_BM);
/* Fill mpdu details into reo entrance ring */
- srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
+ srng = &hal->srng_list[dp->reo_reinject_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- if (ab->hw_params->reoq_lut_support) {
+ if (dp->hw_params->reoq_lut_support) {
reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
queue_addr_hi = 0;
} else {
list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
- dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ dma_unmap_single(dp->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
DMA_TO_DEVICE);
return ret;
}
struct hal_rx_desc_data *rx_info)
{
struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ u32 hdr_len, hal_rx_desc_sz = hal->hal_desc_sz;
u8 *key, *data;
u8 key_idx;
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
- ath12k_wifi7_dp_extract_rx_desc_data(ab, rx_info, rx_desc, rx_desc);
+ ath12k_wifi7_dp_extract_rx_desc_data(hal, rx_info, rx_desc, rx_desc);
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
if (rx_tid->dst_ring_desc) {
if (rel_link_desc) {
buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
- ath12k_wifi7_dp_rx_link_desc_return(dp->ab, buf_addr_info, act);
+ ath12k_wifi7_dp_rx_link_desc_return(dp, buf_addr_info, act);
}
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
struct hal_rx_desc_data *rx_info)
{
struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
struct ath12k_base *ab = dp->ab;
struct ath12k_dp_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
bool more_frags;
enum hal_encrypt_type enctype = rx_info->enctype;
- frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
- more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
+ frag_no = ath12k_dp_rx_h_frag_no(hal, msdu);
+ more_frags = ath12k_dp_rx_h_more_frags(hal, msdu);
seqno = rx_info->seq_no;
if (!rx_info->seq_ctl_valid || !rx_info->fc_valid ||
if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap)))
__skb_queue_tail(&rx_tid->rx_frags, msdu);
else
- ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+ ath12k_dp_rx_h_sort_frags(hal, &rx_tid->rx_frags, msdu);
rx_tid->rx_frag_bitmap |= BIT(frag_no);
if (!more_frags)
goto out_unlock;
}
} else {
- ath12k_wifi7_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
+ ath12k_wifi7_dp_rx_link_desc_return(dp, &ring_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
{
struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
struct ath12k_dp *dp = dp_pdev->dp;
- struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct sk_buff *msdu;
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc_data rx_info;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
+ u32 hal_rx_desc_sz = hal->hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
/* retry manual desc retrieval */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ desc_info = ath12k_dp_get_rx_desc(dp, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ ath12k_warn(dp->ab,
+ "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
cookie);
return -EINVAL;
}
}
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
- ath12k_warn(ab, " RX Exception, Check HW CC implementation");
+ ath12k_warn(dp->ab, "RX Exception, Check HW CC implementation");
msdu = desc_info->skb;
desc_info->skb = NULL;
list_add_tail(&desc_info->list, used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
+ dma_unmap_single(dp->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
}
rx_desc = (struct hal_rx_desc *)msdu->data;
- ath12k_wifi7_dp_extract_rx_desc_data(ab, &rx_info, rx_desc, rx_desc);
+ ath12k_wifi7_dp_extract_rx_desc_data(hal, &rx_info, rx_desc, rx_desc);
msdu_len = rx_info.msdu_len;
if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
- ath12k_warn(ab, "invalid msdu leng %u", msdu_len);
- ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ ath12k_warn(dp->ab, "invalid msdu leng %u", msdu_len);
+ ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
sizeof(*rx_desc));
dev_kfree_skb_any(msdu);
goto exit;
if (ath12k_wifi7_dp_rx_frag_h_mpdu(dp_pdev, msdu, desc, &rx_info)) {
dev_kfree_skb_any(msdu);
- ath12k_wifi7_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
+ ath12k_wifi7_dp_rx_link_desc_return(dp, &desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
return 0;
}
-int ath12k_wifi7_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi,
int budget)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct ath12k_hw_group *ag = dp->ag;
struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
struct ath12k_dp *partner_dp;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
struct ath12k_hw_link *hw_links = ag->hw_links;
- struct ath12k_base *partner_ab;
struct ath12k_pdev_dp *dp_pdev;
u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
reo_except = &dp->reo_except_ring;
- srng = &ab->hal.srng_list[reo_except->ring_id];
+ srng = &hal->srng_list[reo_except->ring_id];
spin_lock_bh(&srng->lock);
drop = false;
dp->device_stats.err_ring_pkts++;
- ret = ath12k_wifi7_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
+ ret = ath12k_wifi7_hal_desc_reo_parse_err(dp, reo_desc, &paddr,
&desc_bank);
if (ret) {
ath12k_warn(ab, "failed to parse error reo desc %d\n",
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
device_id = hw_links[hw_link_id].device_id;
partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
- partner_ab = partner_dp->ab;
- pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params,
hw_links[hw_link_id].pdev_idx);
link_desc_banks = partner_dp->link_desc_banks;
msdu_cookies, &rbm);
if (rbm != partner_dp->idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
- rbm != partner_ab->hal.hal_params->rx_buf_rbm) {
+ rbm != partner_dp->hal->hal_params->rx_buf_rbm) {
act = HAL_WBM_REL_BM_ACT_REL_MSDU;
dp->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_wifi7_dp_rx_link_desc_return(partner_ab,
+ ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
&reo_desc->buf_addr_info,
act);
continue;
* partner device buffers.
*/
if (!is_frag || num_msdus > 1 ||
- partner_ab->device_id != ab->device_id) {
+ partner_dp->device_id != dp->device_id) {
drop = true;
act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE;
/* Return the link desc back to wbm idle list */
- ath12k_wifi7_dp_rx_link_desc_return(partner_ab,
+ ath12k_wifi7_dp_rx_link_desc_return(partner_dp,
&reo_desc->buf_addr_info,
act);
}
continue;
partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
- partner_ab = partner_dp->ab;
rx_ring = &partner_dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+ ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
rx_info.addr2_present = false;
rx_info.rx_status = &rxs;
- ath12k_wifi7_dp_extract_rx_desc_data(dp->ab, &rx_info, rx_desc, rx_desc);
+ ath12k_wifi7_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc);
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
cmd->addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
}
-int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_base *ab,
+int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp,
struct napi_struct *napi, int budget)
{
struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct ath12k *ar;
struct ath12k_pdev_dp *dp_pdev;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
struct ath12k_hw_group *ag = dp->ag;
struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;
struct ath12k_dp *partner_dp;
for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
- srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+ srng = &hal->srng_list[dp->rx_rel_ring.ring_id];
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
if (!rx_desc)
break;
- ret = ath12k_wifi7_hal_wbm_desc_parse_err(ab, rx_desc,
+ ret = ath12k_wifi7_hal_wbm_desc_parse_err(dp, rx_desc,
&err_info);
if (ret) {
- ath12k_warn(ab,
- "failed to parse rx error in wbm_rel ring desc %d\n",
+ ath12k_warn(ab, "failed to parse rx error in wbm_rel ring desc %d\n",
ret);
continue;
}
/* retry manual desc retrieval if hw cc is not done */
if (!desc_info) {
- desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
+ desc_info = ath12k_dp_get_rx_desc(dp, err_info.cookie);
if (!desc_info) {
ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
err_info.cookie);
continue;
}
- hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_dp->ab,
+ hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_dp->hal,
msdu_data);
if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
dev_kfree_skb_any(msdu);
partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id);
rx_ring = &partner_dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring,
+ ath12k_dp_rx_bufs_replenish(dp, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
}
if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
- device_id = ar->ab->device_id;
+ device_id = dp_pdev->dp->device_id;
device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
}
return ret;
}
-void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_base *ab)
+void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp)
{
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_hal *hal = dp->hal;
struct hal_tlv_64_hdr *hdr;
struct hal_srng *srng;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
u16 tag;
struct hal_reo_status reo_status;
- srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+ srng = &hal->srng_list[dp->reo_status_ring.ring_id];
memset(&reo_status, 0, sizeof(reo_status));
struct ath12k_hal_reo_cmd;
-int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_base *ab,
+int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp,
struct napi_struct *napi, int budget);
-int ath12k_wifi7_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi,
int budget);
-int ath12k_wifi7_dp_rx_process(struct ath12k_base *ab, int mac_id,
+int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int mac_id,
struct napi_struct *napi,
int budget);
-void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_base *ab);
+void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd,
int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
struct ath12k_dp_rx_tid *rx_tid,
u16 ssn, enum hal_pn_type pn_type);
-int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_base *ab,
+int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp,
struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
u32 ba_win_sz, u16 ssn,
bool update_ssn);
static inline
-void ath12k_wifi7_dp_extract_rx_desc_data(struct ath12k_base *ab,
+void ath12k_wifi7_dp_extract_rx_desc_data(struct ath12k_hal *hal,
struct hal_rx_desc_data *rx_info,
struct hal_rx_desc *rx_desc,
struct hal_rx_desc *ldesc)
{
- ab->hal.ops->extract_rx_desc_data(rx_info, rx_desc, ldesc);
+ hal->ops->extract_rx_desc_data(rx_info, rx_desc, ldesc);
}
#endif
bool is_mcast)
{
struct ath12k_dp *dp = dp_pdev->dp;
+ struct ath12k_hal *hal = dp->hal;
struct ath12k_base *ab = dp->ab;
struct hal_tx_info ti = {};
struct ath12k_tx_desc_info *tx_desc;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
bool add_htt_metadata = false;
- u32 iova_mask = ab->hw_params->iova_mask;
+ u32 iova_mask = dp->hw_params->iova_mask;
bool is_diff_encap = false;
bool is_null_frame = false;
* If all rings are full, we drop the packet.
* TODO: Add throttling logic when all rings are full
*/
- ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
+ ring_selector = dp->hw_params->hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
- ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
+ ti.ring_id = ring_selector % dp->hw_params->max_tx_ring;
ring_map |= BIT(ti.ring_id);
- ti.rbm_id = ab->hal.tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
+ ti.rbm_id = hal->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
tx_ring = &dp->tx_ring[ti.ring_id];
if (iova_mask &&
(unsigned long)skb->data & iova_mask) {
- ret = ath12k_dp_tx_align_payload(ab, &skb);
+ ret = ath12k_dp_tx_align_payload(dp, &skb);
if (ret) {
ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
/* don't bail out, give original buffer
hdr = (void *)skb->data;
}
map:
- ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ab->dev, ti.paddr)) {
+ ti.paddr = dma_map_single(dp->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dp->dev, ti.paddr)) {
atomic_inc(&dp->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
}
}
- ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
+ ti.paddr = dma_map_single(dp->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
- ret = dma_mapping_error(ab->dev, ti.paddr);
+ ret = dma_mapping_error(dp->dev, ti.paddr);
if (ret)
goto fail_free_ext_skb;
}
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
- tcl_ring = &ab->hal.srng_list[hal_ring_id];
+ tcl_ring = &hal->srng_list[hal_ring_id];
spin_lock_bh(&tcl_ring->lock);
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
- ab->hw_params->tcl_ring_retry) {
+ if (ring_map != (BIT(dp->hw_params->max_tx_ring) - 1) &&
+ dp->hw_params->tcl_ring_retry) {
tcl_ring_retry = true;
ring_selector++;
}
fail_unmap_dma_ext:
if (skb_cb->paddr_ext_desc)
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
skb_ext_desc->len,
DMA_TO_DEVICE);
fail_free_ext_skb:
kfree_skb(skb_ext_desc);
fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+ dma_unmap_single(dp->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_tx_buf:
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
}
static void
-ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
+ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_dp *dp,
struct ath12k_tx_desc_params *desc_params,
struct dp_tx_ring *tx_ring,
struct ath12k_dp_htt_wbm_tx_status *ts,
u16 peer_id)
{
+ struct ath12k_base *ab = dp->ab;
struct ieee80211_tx_info *info;
struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
- struct ath12k *ar;
struct sk_buff *msdu = desc_params->skb;
s32 noise_floor;
struct ieee80211_tx_status status = {};
struct ath12k_dp_link_peer *peer;
struct ath12k_pdev_dp *dp_pdev;
+ u8 pdev_id;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
- ar = skb_cb->ar;
- dp_pdev = &ar->dp;
- ab->dp->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params->mac_id);
- if (atomic_dec_and_test(&ar->dp.num_tx_pending))
- wake_up(&ar->dp.tx_empty_waitq);
+ rcu_read_lock();
+ dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_id);
+ if (!dp_pdev) {
+ rcu_read_unlock();
+ return;
+ }
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dp->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
+
+ if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
+ wake_up(&dp_pdev->tx_empty_waitq);
+
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
dev_kfree_skb_any(desc_params->skb_ext_desc);
}
vif = skb_cb->vif;
if (vif) {
ahvif = ath12k_vif_to_ahvif(vif);
- rcu_read_lock();
arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
if (arvif) {
spin_lock_bh(&arvif->link_stats_lock);
arvif->link_stats.tx_completed++;
spin_unlock_bh(&arvif->link_stats_lock);
}
- rcu_read_unlock();
}
memset(&info->status, 0, sizeof(info->status));
if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
ab->wmi_ab.svc_map)) {
+ struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
+
spin_lock_bh(&ar->data_lock);
noise_floor = ath12k_pdev_get_noise_floor(ar);
spin_unlock_bh(&ar->data_lock);
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
- rcu_read_lock();
+
peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, peer_id);
if (!peer || !peer->sta) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"dp_tx: failed to find the peer with peer_id %d\n", peer_id);
- ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
+ ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);
goto exit;
} else {
status.sta = peer->sta;
status.info = info;
status.skb = msdu;
- ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
+ ieee80211_tx_status_ext(ath12k_pdev_dp_to_hw(dp_pdev), &status);
exit:
rcu_read_unlock();
}
static void
-ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_dp *dp, void *desc,
struct dp_tx_ring *tx_ring,
struct ath12k_tx_desc_params *desc_params)
{
struct htt_tx_wbm_completion *status_desc;
struct ath12k_dp_htt_wbm_tx_status ts = {};
enum hal_wbm_htt_tx_comp_status wbm_status;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u16 peer_id;
status_desc = desc;
peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
- ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts, peer_id);
+ ath12k_dp_tx_htt_tx_complete_buf(dp, desc_params, tx_ring, &ts, peer_id);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
- ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
+ ath12k_dp_tx_free_txbuf(dp, tx_ring, desc_params);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
*/
break;
default:
- ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
+ ath12k_warn(dp->ab, "Unknown htt wbm tx status %d\n", wbm_status);
break;
}
}
{
struct ath12k_dp *dp = dp_pdev->dp;
struct ath12k_dp_link_peer *peer;
- struct ath12k_base *ab = dp->ab;
struct ath12k_link_sta *arsta;
struct rate_info txrate = {};
struct ieee80211_sta *sta;
peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, ts->peer_id);
if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ ath12k_dbg(dp->ab, ATH12K_DBG_DP_TX,
"failed to find the peer by id %u\n", ts->peer_id);
return;
}
&rate_idx,
&rate);
if (ret < 0) {
- ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+ ath12k_warn(dp->ab, "Invalid tx legacy rate %d\n", ret);
return;
}
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11N:
if (ts->mcs > ATH12K_HT_MCS_MAX) {
- ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+ ath12k_warn(dp->ab, "Invalid HT mcs index %d\n", ts->mcs);
return;
}
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
if (ts->mcs > ATH12K_VHT_MCS_MAX) {
- ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ ath12k_warn(dp->ab, "Invalid VHT mcs index %d\n", ts->mcs);
return;
}
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
if (ts->mcs > ATH12K_HE_MCS_MAX) {
- ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+ ath12k_warn(dp->ab, "Invalid HE mcs index %d\n", ts->mcs);
return;
}
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
if (ts->mcs > ATH12K_EHT_MCS_MAX) {
- ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ ath12k_warn(dp->ab, "Invalid EHT mcs index %d\n", ts->mcs);
return;
}
txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
break;
default:
- ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ ath12k_warn(dp->ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
return;
}
skb_cb = ATH12K_SKB_CB(msdu);
dp->device_stats.tx_completed[ring]++;
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc) {
- dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
dev_kfree_skb_any(desc_params->skb_ext_desc);
}
}
static void
-ath12k_wifi7_dp_tx_status_parse(struct ath12k_base *ab,
+ath12k_wifi7_dp_tx_status_parse(struct ath12k_dp *dp,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
}
}
-void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
+void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_dp *dp, int ring_id)
{
+ struct ath12k_base *ab = dp->ab;
struct ath12k_pdev_dp *dp_pdev;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
- struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct hal_srng *status_ring = &dp->hal->srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
struct hal_tx_status ts = {};
struct ath12k_tx_desc_params desc_params;
tx_ring->tx_status_tail =
ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
- ath12k_wifi7_dp_tx_status_parse(ab, tx_status, &ts);
+ ath12k_wifi7_dp_tx_status_parse(dp, tx_status, &ts);
if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
/* HW done cookie conversion */
desc_id = le32_get_bits(tx_status->buf_va_hi,
BUFFER_ADDR_INFO1_SW_COOKIE);
- tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
+ tx_desc = ath12k_dp_get_tx_desc(dp, desc_id);
}
if (!tx_desc) {
ath12k_warn(ab, "unable to retrieve tx_desc!");
*/
ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
- ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
+ ath12k_dp_tx_process_htt_tx_complete(dp, (void *)tx_status,
tx_ring, &desc_params);
continue;
}
- pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
+ pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params.mac_id);
rcu_read_lock();
int ath12k_wifi7_dp_tx(struct ath12k_pdev_dp *dp_pdev, struct ath12k_link_vif *arvif,
struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
bool is_mcast);
-void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
+void ath12k_wifi7_dp_tx_completion_handler(struct ath12k_dp *dp, int ring_id);
u32 ath12k_wifi7_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
struct ath12k_link_vif *arvif);
#endif
}
}
-int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_base *ab,
+int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp,
struct hal_reo_dest_ring *desc,
dma_addr_t *paddr, u32 *desc_bank)
{
+ struct ath12k_base *ab = dp->ab;
enum hal_reo_dest_ring_push_reason push_reason;
enum hal_reo_dest_ring_error_code err_code;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
u32 cookie, val;
push_reason = le32_get_bits(desc->info0,
return -EINVAL;
}
- ath12k_wifi7_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr,
+ ath12k_wifi7_hal_rx_reo_ent_paddr_get(&desc->buf_addr_info, paddr,
&cookie);
*desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
return 0;
}
-int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_dp *dp, void *desc,
struct hal_rx_wbm_rel_info *rel_info)
{
struct hal_wbm_release_ring *wbm_desc = desc;
struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc;
- struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
enum hal_wbm_rel_desc_type type;
enum hal_wbm_rel_src_module rel_src;
bool hw_cc_done;
return 0;
}
-void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buff_addr,
+void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie)
{
*paddr = ((u64)(le32_get_bits(buff_addr->info1,
void ath12k_wifi7_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
dma_addr_t *paddr,
u32 *cookie, u8 *rbm);
-int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_base *ab,
+int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp,
struct hal_reo_dest_ring *desc,
dma_addr_t *paddr, u32 *desc_bank);
-int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_dp *dp, void *desc,
struct hal_rx_wbm_rel_info *rel_info);
-void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
- struct ath12k_buffer_addr *buff_addr,
+void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie);
void ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
u32 *sw_cookie,