#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
-#define ATH12K_MAX_SOCS 3
-#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
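The rename makes the sizing explicit: a hardware group spans up to ATH12K_MAX_DEVICES discrete devices, each contributing up to MAX_RADIOS radios, so every per-group array is bounded by their product. A minimal standalone sketch of that arithmetic, assuming MAX_RADIOS is 3 as in the current driver headers:

#include <stdio.h>

#define MAX_RADIOS 3			/* assumption: mirrors the ath12k headers */
#define ATH12K_MAX_DEVICES 3
#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)

int main(void)
{
	/* A full group therefore indexes at most 3 devices and 9 radios. */
	printf("devices=%d radios=%d\n",
	       ATH12K_MAX_DEVICES, ATH12K_GROUP_MAX_RADIO);
	return 0;
}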
size_t len;
};
-struct ath12k_soc_dp_tx_err_stats {

+struct ath12k_device_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
 * idr unavailable etc.
 */
atomic_t misc_fail;
};
-struct ath12k_soc_dp_stats {
+struct ath12k_device_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
- struct ath12k_soc_dp_tx_err_stats tx_err;
+ struct ath12k_device_dp_tx_err_stats tx_err;
};
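The rename does not change the counters' semantics: they stay plain u32 fields bumped lock-free in the RX/TX hot paths, with misc_fail in tx_err as the only atomic. A self-contained userspace sketch of the layout and the increment pattern (the array sizes below are stand-ins, not the real enum values):

#include <stdio.h>

#define DP_REO_DST_RING_MAX 4			/* stand-in size, assumption */
#define HAL_REO_DEST_RING_ERROR_CODE_MAX 16	/* stand-in size, assumption */

struct ath12k_device_dp_stats {
	unsigned int err_ring_pkts;
	unsigned int invalid_rbm;
	unsigned int reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
	unsigned int hal_reo_error[DP_REO_DST_RING_MAX];
};

int main(void)
{
	struct ath12k_device_dp_stats st = { 0 };
	int ring_id = 1;

	st.err_ring_pkts++;		/* one error-ring packet seen */
	st.hal_reo_error[ring_id]++;	/* HAL REO error on this ring */

	printf("err_ring_pkts=%u hal_reo_error[%d]=%u\n",
	       st.err_ring_pkts, ring_id, st.hal_reo_error[ring_id]);
	return 0;
}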
struct ath12k_reg_freq {
u8 num_probed;
u8 num_started;
unsigned long flags;
- struct ath12k_base *ab[ATH12K_MAX_SOCS];
+ struct ath12k_base *ab[ATH12K_MAX_DEVICES];
/* protects access to this struct */
struct mutex mutex;
struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
u8 num_hw;
bool mlo_capable;
- struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct device_node *wsi_node[ATH12K_MAX_DEVICES];
struct ath12k_mlo_memory mlo_mem;
struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
bool hw_link_id_init_done;
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
- struct ath12k_soc_dp_stats soc_stats;
+ struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
return true;
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
WARN_ON_ONCE(1);
return false;
}
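This helper follows a validate, count, warn-once shape: an oversized native-wifi header bumps a device-level counter (the existing invalid_rbm counter is reused here, as the hunk above shows) and trips WARN_ON_ONCE() before the frame is dropped. A userspace sketch of the same shape, with DP_MAX_NWIFI_HDR_LEN and the warn-once flag as stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define DP_MAX_NWIFI_HDR_LEN 30	/* stand-in value, assumption */

static bool nwifi_hdr_len_valid(unsigned int hdr_len, unsigned int *bad)
{
	static bool warned;

	if (hdr_len <= DP_MAX_NWIFI_HDR_LEN)
		return true;

	(*bad)++;			/* device-level error counter */
	if (!warned) {			/* WARN_ON_ONCE() analogue */
		fprintf(stderr, "oversized nwifi header: %u\n", hdr_len);
		warned = true;
	}
	return false;
}

int main(void)
{
	unsigned int bad = 0;

	printf("%d %d bad=%u\n", nwifi_hdr_len_valid(24, &bad),
	       nwifi_hdr_len_valid(64, &bad), bad);
	return 0;
}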
struct napi_struct *napi, int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_link *hw_links = ag->hw_links;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
__skb_queue_head_init(&msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[ring_id]++;
+ ab->device_stats.hal_reo_error[ring_id]++;
continue;
}
if (!total_msdu_reaped)
goto exit;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
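The reap loops in these hunks share one bookkeeping pattern: every RX descriptor records the device_id of the partner device that owns its buffer, used descriptors are binned onto per-device lists, and the replenish pass skips devices that contributed nothing. A self-contained sketch of that binning (the list handling is simplified; the real code uses list_head and per-device refill rings):

#include <stdio.h>

#define ATH12K_MAX_DEVICES 3

struct desc {
	int device_id;
	struct desc *next;
};

int main(void)
{
	struct desc pool[5] = {
		{ .device_id = 0 }, { .device_id = 2 }, { .device_id = 0 },
		{ .device_id = 1 }, { .device_id = 2 },
	};
	struct desc *used[ATH12K_MAX_DEVICES] = { NULL };
	int num_buffs_reaped[ATH12K_MAX_DEVICES] = { 0 };
	int i, device_id;

	/* Reap phase: bin each used descriptor onto its owner's list. */
	for (i = 0; i < 5; i++) {
		device_id = pool[i].device_id;
		pool[i].next = used[device_id];
		used[device_id] = &pool[i];
		num_buffs_reaped[device_id]++;
	}

	/* Replenish phase: skip devices that contributed nothing. */
	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;
		printf("device %d: replenish %d buffers\n",
		       device_id, num_buffs_reaped[device_id]);
	}
	return 0;
}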
int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
tot_n_bufs_reaped = 0;
quota = budget;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
reo_except = &ab->dp.reo_except_ring;
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
drop = false;
- ab->soc_stats.err_ring_pkts++;
+ ab->device_stats.err_ring_pkts++;
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath12k_dp_rx_link_desc_return(partner_ab,
&reo_desc->buf_addr_info,
spin_unlock_bh(&srng->lock);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
- ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+ ar->ab->device_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
bool drop = false;
u32 err_bitmap;
- ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+ ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
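Both error paths above use a count-then-dispatch shape: the MSDU's err_code from the RX control block first indexes a per-device counter, then a switch decides the recovery action. A short sketch, with hypothetical error codes standing in for the HAL enums:

#include <stdio.h>

enum err_code { ECODE_DESC_ADDR_ZERO, ECODE_DECRYPT_ERR, ECODE_MAX };

static unsigned int err_stats[ECODE_MAX];

static int handle_err(enum err_code code)
{
	err_stats[code]++;		/* count first, like device_stats */

	switch (code) {			/* then decide drop vs. recover */
	case ECODE_DESC_ADDR_ZERO:
		return 0;		/* recoverable */
	default:
		return -1;		/* drop */
	}
}

int main(void)
{
	printf("%d %d\n", handle_err(ECODE_DESC_ADDR_ZERO),
	       handle_err(ECODE_DECRYPT_ERR));
	return 0;
}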
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
struct ath12k_hw_link *hw_links = ag->hw_links;
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
if (!total_num_buffs_reaped)
goto done;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;