From: Jeff Johnson
Date: Mon, 15 Dec 2025 16:55:21 +0000 (-0800)
Subject: Merge branch 'ath12k-ng' into ath-next
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=631ee338f04db713ba611883f28e94157ebb68e5;p=thirdparty%2Fkernel%2Flinux.git

Merge branch 'ath12k-ng' into ath-next

As originally proposed in [1], the ath12k driver was re-architected in
the ath12k-ng branch to separate the logic specific to 802.11be (Wi-Fi 7)
from the core logic. This separation will allow ath12k to also support
802.11bn (Wi-Fi 8) in the future. Now merge this into ath-next.

Many thanks to everyone who worked on this re-architecture. Special
thanks to Vasanthakumar Thiagarajan and Baochen Qiang who reviewed every
patch, and to Ripan Deuri for the ath12k-ng => ath-next merge conflict
resolution.

Link: https://lore.kernel.org/all/4a17d730-ede8-463e-98d8-9b0291d0ca45@oss.qualcomm.com/ # [1]
Signed-off-by: Jeff Johnson
---

631ee338f04db713ba611883f28e94157ebb68e5
diff --cc drivers/net/wireless/ath/ath12k/ce.h
index 57f75899ee03d,38f986ea1cd2b..df4f2a4f84809
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@@ -1,7 -1,7 +1,7 @@@
  /* SPDX-License-Identifier: BSD-3-Clause-Clear */
  /*
   * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
-- * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
   */
  
  #ifndef ATH12K_CE_H
diff --cc drivers/net/wireless/ath/ath12k/debugfs.c
index d6a86f075d73b,ea2282a82006c..358031fa14ebb
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@@ -1155,6 -1157,6 +1157,7 @@@ static ssize_t ath12k_debugfs_dump_devi
  	len += scnprintf(buf + len, size - len, "\n");
  
++	rcu_read_lock();
  	for (i = 0; i < ab->num_radios; i++) {
  		ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
  		if (ar) {
@@@ -1163,6 -1165,6 +1166,7 @@@
  				   atomic_read(&ar->dp.num_tx_pending));
  		}
  	}
++	rcu_read_unlock();
  
  	len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
diff --cc drivers/net/wireless/ath/ath12k/dp.c
index 4a54b8c353111,bb3b65dea02f6..9f05eea6695a5
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@@ -1745,12 -1482,22 +1482,24 @@@ static int ath12k_dp_setup(struct ath12
  	INIT_LIST_HEAD(&dp->reo_cmd_list);
  	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ 	INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
  	spin_lock_init(&dp->reo_cmd_lock);
+ 	spin_lock_init(&dp->reo_rxq_flush_lock);
+ 	spin_lock_init(&dp->dp_lock);
+ 	INIT_LIST_HEAD(&dp->peers);
+ 
+ 	mutex_init(&dp->link_peer_rhash_tbl_lock);
+ 
  	dp->reo_cmd_cache_flush_count = 0;
- 	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
+ 	dp->idle_link_rbm =
+ 		ath12k_hal_get_idle_link_rbm(&ab->hal, ab->device_id);
+ 
+ 	ret = ath12k_dp_link_peer_rhash_tbl_init(dp);
+ 	if (ret) {
+ 		ath12k_warn(ab, "failed to init link_peer rhash table: %d\n", ret);
+ 		return ret;
+ 	}
  
  	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
  	if (ret) {
diff --cc drivers/net/wireless/ath/ath12k/dp.h
index 4ffec6ad7d8d9,4bb6457b0d819..f8cfc7bb29dd7
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@@ -18,8 -19,8 +19,9 @@@ struct ath12k_dp_link_peer
  struct ath12k_dp;
  struct ath12k_vif;
  struct ath12k_link_vif;
- struct hal_tcl_status_ring;
  struct ath12k_ext_irq_grp;
+ struct ath12k_dp_rx_tid;
++struct ath12k_dp_rx_tid_rxq;
  
  #define DP_MON_PURGE_TIMEOUT_MS		100
  #define DP_MON_SERVICE_BUDGET		128
@@@ -360,9 -377,73 +378,77 @@@ struct ath12k_link_stats
  	u32 tx_completed;
  	u32 tx_bcast_mcast;
  	u32 tx_dropped;
- 	u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
- 	u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
- 	u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
+ 	u32 tx_encap_type[DP_ENCAP_TYPE_MAX];
+ 	u32 tx_encrypt_type[DP_ENCRYPT_TYPE_MAX];
+ 	u32 tx_desc_type[DP_DESC_TYPE_MAX];
+ };
+ 
+ /* DP arch ops to communicate from common module
+  * to arch specific module
+  */
+ struct ath12k_dp_arch_ops {
+ 	int (*service_srng)(struct ath12k_dp *dp,
+ 			    struct ath12k_ext_irq_grp *irq_grp,
+ 			    int budget);
+ 	u32 (*tx_get_vdev_bank_config)(struct ath12k_base *ab,
+ 				       struct ath12k_link_vif *arvif);
+ 	int (*reo_cmd_send)(struct ath12k_base *ab,
- 			    struct ath12k_dp_rx_tid *rx_tid,
++			    struct ath12k_dp_rx_tid_rxq *rx_tid,
+ 			    enum hal_reo_cmd_type type,
+ 			    struct ath12k_hal_reo_cmd *cmd,
+ 			    void (*cb)(struct ath12k_dp *dp, void *ctx,
+ 				       enum hal_reo_cmd_status status));
+ 	void (*setup_pn_check_reo_cmd)(struct ath12k_hal_reo_cmd *cmd,
+ 				       struct ath12k_dp_rx_tid *rx_tid,
+ 				       u32 cipher, enum set_key_cmd key_cmd);
+ 	void (*rx_peer_tid_delete)(struct ath12k_base *ab,
+ 				   struct ath12k_dp_link_peer *peer, u8 tid);
- 	void (*reo_cache_flush)(struct ath12k_base *ab,
- 				struct ath12k_dp_rx_tid *rx_tid);
++	int (*reo_cache_flush)(struct ath12k_base *ab,
++			       struct ath12k_dp_rx_tid_rxq *rx_tid);
+ 	int (*rx_link_desc_return)(struct ath12k_dp *dp,
+ 				   struct ath12k_buffer_addr *buf_addr_info,
+ 				   enum hal_wbm_rel_bm_act action);
+ 	void (*rx_frags_cleanup)(struct ath12k_dp_rx_tid *rx_tid,
+ 				 bool rel_link_desc);
+ 	int (*peer_rx_tid_reo_update)(struct ath12k_dp *dp,
+ 				      struct ath12k_dp_link_peer *peer,
+ 				      struct ath12k_dp_rx_tid *rx_tid,
+ 				      u32 ba_win_sz, u16 ssn,
+ 				      bool update_ssn);
+ 	int (*rx_assign_reoq)(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
+ 			      struct ath12k_dp_rx_tid *rx_tid,
+ 			      u16 ssn, enum hal_pn_type pn_type);
+ 	void (*peer_rx_tid_qref_setup)(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ 				       dma_addr_t paddr);
++	void (*peer_rx_tid_qref_reset)(struct ath12k_base *ab, u16 peer_id, u16 tid);
++	int (*rx_tid_delete_handler)(struct ath12k_base *ab,
++				     struct ath12k_dp_rx_tid_rxq *rx_tid);
+ };
+ 
+ struct ath12k_device_dp_tx_err_stats {
+ 	/* TCL Ring Descriptor unavailable */
+ 	u32 desc_na[DP_TCL_NUM_RING_MAX];
+ 	/* Other failures during dp_tx due to mem allocation failure
+ 	 * idr unavailable etc.
+ */ + atomic_t misc_fail; + }; + + struct ath12k_device_dp_stats { + u32 err_ring_pkts; + u32 invalid_rbm; + u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX]; + u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; + u32 hal_reo_error[DP_REO_DST_RING_MAX]; + struct ath12k_device_dp_tx_err_stats tx_err; + u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES]; + u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES]; + u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON]; + u32 fw_tx_status[MAX_FW_TX_STATUS]; + u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX]; + u32 tx_enqueued[DP_TCL_NUM_RING_MAX]; + u32 tx_completed[DP_TCL_NUM_RING_MAX]; ++ u32 reo_excep_msdu_buf_type; }; struct ath12k_dp { @@@ -427,1513 -504,124 +513,138 @@@ struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV]; struct ath12k_reo_q_addr_lut reoq_lut; struct ath12k_reo_q_addr_lut ml_reoq_lut; - }; - - /* HTT definitions */ - #define HTT_TAG_TCL_METADATA_VERSION 5 - - #define HTT_TCL_META_DATA_TYPE GENMASK(1, 0) - #define HTT_TCL_META_DATA_VALID_HTT BIT(2) - - /* vdev meta data */ - #define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3) - #define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11) - #define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13) - - /* peer meta data */ - #define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3) - - /* Global sequence number */ - #define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3 - #define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2) - #define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3) - #define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128 - - /* HTT tx completion is overlaid in wbm_release_ring */ - #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13) - #define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0) - #define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4) - - #define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24) - - struct htt_tx_wbm_completion { - __le32 rsvd0[2]; - __le32 info0; - __le32 info1; - __le32 info2; - __le32 info3; - __le32 info4; - __le32 rsvd1; - - } __packed; - - enum htt_h2t_msg_type { - HTT_H2T_MSG_TYPE_VERSION_REQ = 0, - HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb, - HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc, - HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10, - HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11, - HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a, - HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b, - }; - - #define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0) - #define HTT_OPTION_TCL_METADATA_VER_V1 1 - #define HTT_OPTION_TCL_METADATA_VER_V2 2 - #define HTT_OPTION_TAG GENMASK(7, 0) - #define HTT_OPTION_LEN GENMASK(15, 8) - #define HTT_OPTION_VALUE GENMASK(31, 16) - #define HTT_TCL_METADATA_VER_SZ 4 - - struct htt_ver_req_cmd { - __le32 ver_reg_info; - __le32 tcl_metadata_version; - } __packed; - - enum htt_srng_ring_type { - HTT_HW_TO_SW_RING, - HTT_SW_TO_HW_RING, - HTT_SW_TO_SW_RING, - }; - - enum htt_srng_ring_id { - HTT_RXDMA_HOST_BUF_RING, - HTT_RXDMA_MONITOR_STATUS_RING, - HTT_RXDMA_MONITOR_BUF_RING, - HTT_RXDMA_MONITOR_DESC_RING, - HTT_RXDMA_MONITOR_DEST_RING, - HTT_HOST1_TO_FW_RXBUF_RING, - HTT_HOST2_TO_FW_RXBUF_RING, - HTT_RXDMA_NON_MONITOR_DEST_RING, - HTT_RXDMA_HOST_BUF_RING2, - HTT_TX_MON_HOST2MON_BUF_RING, - HTT_TX_MON_MON2HOST_DEST_RING, - HTT_RX_MON_HOST2MON_BUF_RING, - HTT_RX_MON_MON2HOST_DEST_RING, - }; - - /* host -> target HTT_SRING_SETUP message - * - * After target is booted up, Host can send SRING setup message for - * each host facing LMAC SRING. Target setups up HW registers based - * on setup message and confirms back to Host if response_required is set. 
- * Host should wait for confirmation message before sending new SRING - * setup message - * - * The message would appear as follows: - * - * |31 24|23 20|19|18 16|15|14 8|7 0| - * |--------------- +-----------------+----------------+------------------| - * | ring_type | ring_id | pdev_id | msg_type | - * |----------------------------------------------------------------------| - * | ring_base_addr_lo | - * |----------------------------------------------------------------------| - * | ring_base_addr_hi | - * |----------------------------------------------------------------------| - * |ring_misc_cfg_flag|ring_entry_size| ring_size | - * |----------------------------------------------------------------------| - * | ring_head_offset32_remote_addr_lo | - * |----------------------------------------------------------------------| - * | ring_head_offset32_remote_addr_hi | - * |----------------------------------------------------------------------| - * | ring_tail_offset32_remote_addr_lo | - * |----------------------------------------------------------------------| - * | ring_tail_offset32_remote_addr_hi | - * |----------------------------------------------------------------------| - * | ring_msi_addr_lo | - * |----------------------------------------------------------------------| - * | ring_msi_addr_hi | - * |----------------------------------------------------------------------| - * | ring_msi_data | - * |----------------------------------------------------------------------| - * | intr_timer_th |IM| intr_batch_counter_th | - * |----------------------------------------------------------------------| - * | reserved |RR|PTCF| intr_low_threshold | - * |----------------------------------------------------------------------| - * Where - * IM = sw_intr_mode - * RR = response_required - * PTCF = prefetch_timer_cfg - * - * The message is interpreted as follows: - * dword0 - b'0:7 - msg_type: This will be set to - * HTT_H2T_MSG_TYPE_SRING_SETUP - * b'8:15 - pdev_id: - * 0 (for rings at SOC/UMAC level), - * 1/2/3 mac id (for rings at LMAC level) - * b'16:23 - ring_id: identify which ring is to setup, - * more details can be got from enum htt_srng_ring_id - * b'24:31 - ring_type: identify type of host rings, - * more details can be got from enum htt_srng_ring_type - * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address - * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address - * dword3 - b'0:15 - ring_size: size of the ring in unit of 4-bytes words - * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units - * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and - * SW_TO_HW_RING. - * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs. - * dword4 - b'0:31 - ring_head_off32_remote_addr_lo: - * Lower 32 bits of memory address of the remote variable - * storing the 4-byte word offset that identifies the head - * element within the ring. - * (The head offset variable has type u32.) - * Valid for HW_TO_SW and SW_TO_SW rings. - * dword5 - b'0:31 - ring_head_off32_remote_addr_hi: - * Upper 32 bits of memory address of the remote variable - * storing the 4-byte word offset that identifies the head - * element within the ring. - * (The head offset variable has type u32.) - * Valid for HW_TO_SW and SW_TO_SW rings. - * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo: - * Lower 32 bits of memory address of the remote variable - * storing the 4-byte word offset that identifies the tail - * element within the ring. - * (The tail offset variable has type u32.) 
- * Valid for HW_TO_SW and SW_TO_SW rings. - * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi: - * Upper 32 bits of memory address of the remote variable - * storing the 4-byte word offset that identifies the tail - * element within the ring. - * (The tail offset variable has type u32.) - * Valid for HW_TO_SW and SW_TO_SW rings. - * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address - * valid only for HW_TO_SW_RING and SW_TO_HW_RING - * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address - * valid only for HW_TO_SW_RING and SW_TO_HW_RING - * dword10 - b'0:31 - ring_msi_data: MSI data - * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs - * valid only for HW_TO_SW_RING and SW_TO_HW_RING - * dword11 - b'0:14 - intr_batch_counter_th: - * batch counter threshold is in units of 4-byte words. - * HW internally maintains and increments batch count. - * (see SRING spec for detail description). - * When batch count reaches threshold value, an interrupt - * is generated by HW. - * b'15 - sw_intr_mode: - * This configuration shall be static. - * Only programmed at power up. - * 0: generate pulse style sw interrupts - * 1: generate level style sw interrupts - * b'16:31 - intr_timer_th: - * The timer init value when timer is idle or is - * initialized to start downcounting. - * In 8us units (to cover a range of 0 to 524 ms) - * dword12 - b'0:15 - intr_low_threshold: - * Used only by Consumer ring to generate ring_sw_int_p. - * Ring entries low threshold water mark, that is used - * in combination with the interrupt timer as well as - * the clearing of the level interrupt. - * b'16:18 - prefetch_timer_cfg: - * Used only by Consumer ring to set timer mode to - * support Application prefetch handling. - * The external tail offset/pointer will be updated - * at following intervals: - * 3'b000: (Prefetch feature disabled; used only for debug) - * 3'b001: 1 usec - * 3'b010: 4 usec - * 3'b011: 8 usec (default) - * 3'b100: 16 usec - * Others: Reserved - * b'19 - response_required: - * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response - * b'20:31 - reserved: reserved for future use - */ - - #define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0) - #define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8) - #define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16) - #define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24) - - #define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0) - #define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16) - #define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25) - #define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27) - #define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28) - #define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29) - - #define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0) - #define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15) - #define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16) - - #define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0) - #define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16) - #define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19) - - struct htt_srng_setup_cmd { - __le32 info0; - __le32 ring_base_addr_lo; - __le32 ring_base_addr_hi; - __le32 info1; - __le32 ring_head_off32_remote_addr_lo; - __le32 ring_head_off32_remote_addr_hi; - __le32 ring_tail_off32_remote_addr_lo; - __le32 ring_tail_off32_remote_addr_hi; - __le32 ring_msi_addr_lo; - __le32 ring_msi_addr_hi; - __le32 msi_data; - __le32 intr_info; - 
__le32 info2; - } __packed; - - /* host -> target FW PPDU_STATS config message - * - * @details - * The following field definitions describe the format of the HTT host - * to target FW for PPDU_STATS_CFG msg. - * The message allows the host to configure the PPDU_STATS_IND messages - * produced by the target. - * - * |31 24|23 16|15 8|7 0| - * |-----------------------------------------------------------| - * | REQ bit mask | pdev_mask | msg type | - * |-----------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Purpose: identifies this is a req to configure ppdu_stats_ind from target - * Value: 0x11 - * - PDEV_MASK - * Bits 8:15 - * Purpose: identifies which pdevs this PPDU stats configuration applies to - * Value: This is a overloaded field, refer to usage and interpretation of - * PDEV in interface document. - * Bit 8 : Reserved for SOC stats - * Bit 9 - 15 : Indicates PDEV_MASK in DBDC - * Indicates MACID_MASK in DBS - * - REQ_TLV_BIT_MASK - * Bits 16:31 - * Purpose: each set bit indicates the corresponding PPDU stats TLV type - * needs to be included in the target's PPDU_STATS_IND messages. - * Value: refer htt_ppdu_stats_tlv_tag_t << 30 bits - * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs - * dword3 - b'0:31 - packet_type_enable_flags_1: - * Enable MGMT packet from 0b1010 to 0b1111 - * bits from low to high: FP, MD, MO - 3 bits - * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs - * dword4 - b'0:31 - packet_type_enable_flags_2: - * Enable CTRL packet from 0b0000 to 0b1001 - * bits from low to high: FP, MD, MO - 3 bits - * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs - * dword5 - b'0:31 - packet_type_enable_flags_3: - * Enable CTRL packet from 0b1010 to 0b1111, - * MCAST_DATA, UCAST_DATA, NULL_DATA - * bits from low to high: FP, MD, MO - 3 bits - * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs - * dword6 - b'0:31 - tlv_filter_in_flags: - * Filter in Attention/MPDU/PPDU/Header/User tlvs - * Refer to CFG_TLV_FILTER_IN_FLAG defs - */ - - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28) - - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22) - - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19) - - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0) - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1) - - #define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0) - #define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16) - #define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0) - #define 
HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16) - #define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0) - #define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16) - #define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0) - - #define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23) - #define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0) - #define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16) - #define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0) - - enum htt_rx_filter_tlv_flags { - HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0), - HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1), - HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2), - HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3), - HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4), - HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5), - HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6), - HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7), - HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8), - HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9), - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10), - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11), - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12), - HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13), - }; - - enum htt_rx_mgmt_pkt_filter_tlv_flags0 { - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29), - }; - - enum htt_rx_mgmt_pkt_filter_tlv_flags1 { - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5), - 
HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14), - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15), - HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16), - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17), - }; - - enum htt_rx_ctrl_pkt_filter_tlv_flags2 { - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29), - }; - - enum htt_rx_ctrl_pkt_filter_tlv_flags3 { - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12), - HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14), - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15), 
- HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16), - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17), - }; - - enum htt_rx_data_pkt_filter_tlv_flasg3 { - HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18), - HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19), - HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20), - HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21), - HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22), - HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23), - HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24), - HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25), - HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26), - }; - - #define HTT_RX_FP_MGMT_FILTER_FLAGS0 \ - (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) - - #define HTT_RX_MD_MGMT_FILTER_FLAGS0 \ - (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) - - #define HTT_RX_MO_MGMT_FILTER_FLAGS0 \ - (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM) - - #define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ - | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) - - #define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ - | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) - - #define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \ - | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK) - - #define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA) - - #define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA) - - #define HTT_RX_MO_CTRL_FILTER_FLASG2 
(HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA) - - #define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ - | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) - - #define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ - | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) - - #define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \ - | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK) - - #define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ - | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ - | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) - - #define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ - | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ - | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) - - #define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \ - | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \ - | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA) - - #define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \ - (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \ - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7) - - #define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \ - (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \ - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7) - - #define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \ - (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \ - HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15) - - #define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \ - (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \ - HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15) - - #define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \ - (HTT_RX_FP_CTRL_FILTER_FLASG2 | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \ - HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT) - - #define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \ - (HTT_RX_MO_CTRL_FILTER_FLASG2 | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \ - HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT) - - #define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3 - - #define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3 - - #define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3 - - #define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3 - - #define HTT_RX_MON_FILTER_TLV_FLAGS \ - 
(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE) - - #define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \ - (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE) - - #define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \ - (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ - HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \ - HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ - HTT_RX_FILTER_TLV_FLAGS_ATTENTION) - - #define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \ - (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ - HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \ - HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \ - HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO) - - /* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */ - #define HTT_RX_TLV_FLAGS_RXDMA_RING \ - (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ - HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ - HTT_RX_FILTER_TLV_FLAGS_MSDU_END) - - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8) - - struct htt_rx_ring_selection_cfg_cmd { - __le32 info0; - __le32 info1; - __le32 pkt_type_en_flags0; - __le32 pkt_type_en_flags1; - __le32 pkt_type_en_flags2; - __le32 pkt_type_en_flags3; - __le32 rx_filter_tlv; - __le32 rx_packet_offset; - __le32 rx_mpdu_offset; - __le32 rx_msdu_offset; - __le32 rx_attn_offset; - __le32 info2; - __le32 reserved[2]; - __le32 rx_mpdu_start_end_mask; - __le32 rx_msdu_end_word_mask; - __le32 info3; - } __packed; - - #define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32 - #define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7 - #define HTT_RX_RING_PKT_TLV_OFFSET 0x1 - - struct htt_rx_ring_tlv_filter { - u32 rx_filter; /* see htt_rx_filter_tlv_flags */ - u32 pkt_filter_flags0; /* MGMT */ - u32 pkt_filter_flags1; /* MGMT */ - u32 pkt_filter_flags2; /* CTRL */ - u32 pkt_filter_flags3; /* DATA */ - bool offset_valid; - u16 rx_packet_offset; - u16 rx_header_offset; - u16 rx_mpdu_end_offset; - u16 rx_mpdu_start_offset; - u16 rx_msdu_end_offset; - u16 rx_msdu_start_offset; - u16 rx_attn_offset; - u16 rx_mpdu_start_wmask; - u16 rx_mpdu_end_wmask; - u32 rx_msdu_end_wmask; - u32 conf_len_ctrl; - u32 conf_len_mgmt; - u32 conf_len_data; - u16 rx_drop_threshold; - bool enable_log_mgmt_type; - bool enable_log_ctrl_type; - bool enable_log_data_type; - bool enable_rx_tlv_offset; - u16 rx_tlv_offset; - bool drop_threshold_valid; - bool rxmon_disable; - }; - - #define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0 - #define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1 - #define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2 - #define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3 - - 
#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25) - - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22) - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25) - - #define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0) - - struct htt_tx_ring_selection_cfg_cmd { - __le32 info0; - __le32 info1; - __le32 info2; - __le32 tlv_filter_mask_in0; - __le32 tlv_filter_mask_in1; - __le32 tlv_filter_mask_in2; - __le32 tlv_filter_mask_in3; - __le32 reserved[3]; - } __packed; - - #define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0) - #define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4) - #define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8) - - #define HTT_TX_MON_FILTER_HYBRID_MODE \ - (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \ - HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \ - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \ - HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \ - HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2) - - struct htt_tx_ring_tlv_filter { - u32 tx_mon_downstream_tlv_flags; - u32 tx_mon_upstream_tlv_flags0; - u32 tx_mon_upstream_tlv_flags1; - u32 tx_mon_upstream_tlv_flags2; - bool tx_mon_mgmt_filter; - bool tx_mon_data_filter; - bool tx_mon_ctrl_filter; - u16 tx_mon_pkt_dma_len; - } __packed; - - enum htt_tx_mon_upstream_tlv_flags0 { - HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1), - HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10), - HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11), - HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12), - HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13), - HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14), - HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15), - HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16), - }; - - #define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11) - - /* HTT message target->host */ - - enum htt_t2h_msg_type { - HTT_T2H_MSG_TYPE_VERSION_CONF, - HTT_T2H_MSG_TYPE_PEER_MAP = 0x3, - HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4, - HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5, - HTT_T2H_MSG_TYPE_PKTLOG = 
0x8, - HTT_T2H_MSG_TYPE_SEC_IND = 0xb, - HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e, - HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f, - HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d, - HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c, - HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24, - HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28, - HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b, - HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c, - }; - - #define HTT_TARGET_VERSION_MAJOR 3 - - #define HTT_T2H_MSG_TYPE GENMASK(7, 0) - #define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8) - #define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16) - - struct htt_t2h_version_conf_msg { - __le32 version; - } __packed; - - #define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8) - #define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16) - #define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0) - #define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16) - #define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0) - #define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0) - #define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16) - #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16) - #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16 - - struct htt_t2h_peer_map_event { - __le32 info; - __le32 mac_addr_l32; - __le32 info1; - __le32 info2; - } __packed; - - #define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID - #define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID - #define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \ - HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 - #define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M - #define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S - - struct htt_t2h_peer_unmap_event { - __le32 info; - __le32 mac_addr_l32; - __le32 info1; - } __packed; - - struct htt_resp_msg { - union { - struct htt_t2h_version_conf_msg version_msg; - struct htt_t2h_peer_map_event peer_map_ev; - struct htt_t2h_peer_unmap_event peer_unmap_ev; - }; - } __packed; - - #define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\ - (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32))) - #define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0) - #define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8) - #define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16) - #define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0) - #define HTT_VDEV_TXRX_STATS_COMMON_TLV 0 - #define HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1 - - struct htt_t2h_vdev_txrx_stats_ind { - __le32 vdev_id; - __le32 rx_msdu_byte_cnt_lo; - __le32 rx_msdu_byte_cnt_hi; - __le32 rx_msdu_cnt_lo; - __le32 rx_msdu_cnt_hi; - __le32 tx_msdu_byte_cnt_lo; - __le32 tx_msdu_byte_cnt_hi; - __le32 tx_msdu_cnt_lo; - __le32 tx_msdu_cnt_hi; - __le32 tx_retry_cnt_lo; - __le32 tx_retry_cnt_hi; - __le32 tx_retry_byte_cnt_lo; - __le32 tx_retry_byte_cnt_hi; - __le32 tx_drop_cnt_lo; - __le32 tx_drop_cnt_hi; - __le32 tx_drop_byte_cnt_lo; - __le32 tx_drop_byte_cnt_hi; - __le32 msdu_ttl_cnt_lo; - __le32 msdu_ttl_cnt_hi; - __le32 msdu_ttl_byte_cnt_lo; - __le32 msdu_ttl_byte_cnt_hi; - } __packed; - - struct htt_t2h_vdev_common_stats_tlv { - __le32 soc_drop_count_lo; - __le32 soc_drop_count_hi; - } __packed; - - /* ppdu stats - * - * @details - * The following field definitions describe the format of the HTT target - * to host ppdu stats indication message. 
- * - * - * |31 16|15 12|11 10|9 8|7 0 | - * |----------------------------------------------------------------------| - * | payload_size | rsvd |pdev_id|mac_id | msg type | - * |----------------------------------------------------------------------| - * | ppdu_id | - * |----------------------------------------------------------------------| - * | Timestamp in us | - * |----------------------------------------------------------------------| - * | reserved | - * |----------------------------------------------------------------------| - * | type-specific stats info | - * | (see htt_ppdu_stats.h) | - * |----------------------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Purpose: Identifies this is a PPDU STATS indication - * message. - * Value: 0x1d - * - mac_id - * Bits 9:8 - * Purpose: mac_id of this ppdu_id - * Value: 0-3 - * - pdev_id - * Bits 11:10 - * Purpose: pdev_id of this ppdu_id - * Value: 0-3 - * 0 (for rings at SOC level), - * 1/2/3 PDEV -> 0/1/2 - * - payload_size - * Bits 31:16 - * Purpose: total tlv size - * Value: payload_size in bytes - */ - - #define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10) - #define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16) - - struct ath12k_htt_ppdu_stats_msg { - __le32 info; - __le32 ppdu_id; - __le32 timestamp; - __le32 rsvd; - u8 data[]; - } __packed; - - struct htt_tlv { - __le32 header; - u8 value[]; - } __packed; - - #define HTT_TLV_TAG GENMASK(11, 0) - #define HTT_TLV_LEN GENMASK(23, 12) - - enum HTT_PPDU_STATS_BW { - HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0, - HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1, - HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2, - HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3, - HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4, - HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */ - HTT_PPDU_STATS_BANDWIDTH_DYN = 6, - }; - - #define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0) - #define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8) - /* bw - HTT_PPDU_STATS_BW */ - #define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16) - - struct htt_ppdu_stats_common { - __le32 ppdu_id; - __le16 sched_cmdid; - u8 ring_id; - u8 num_users; - __le32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/ - __le32 chain_mask; - __le32 fes_duration_us; /* frame exchange sequence */ - __le32 ppdu_sch_eval_start_tstmp_us; - __le32 ppdu_sch_end_tstmp_us; - __le32 ppdu_start_tstmp_us; - /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted - * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted + const struct ath12k_hw_params *hw_params; + struct device *dev; + struct ath12k_hal *hal; + + /* RCU on dp_pdevs[] provides a teardown synchronization mechanism, + * ensuring in-flight data path readers complete before reclaim. Writers + * update internal fields under their own synchronization, while readers of + * internal fields may perform lockless read if occasional inconsistency + * is acceptable or use additional synchronization for a coherent view. + * + * RCU is used for dp_pdevs[] at this stage to align with + * ab->pdevs_active[]. However, if the teardown paths ensure quiescence, + * both dp_pdevs[] and pdevs_active[] can be converted to plain pointers, + * removing RCU synchronize overhead. 
+ * + * TODO: evaluate removal of RCU from dp_pdevs in the future */ - __le16 phy_mode; - __le16 bw_mhz; - } __packed; + struct ath12k_pdev_dp __rcu *dp_pdevs[MAX_RADIOS]; - enum htt_ppdu_stats_gi { - HTT_PPDU_STATS_SGI_0_8_US, - HTT_PPDU_STATS_SGI_0_4_US, - HTT_PPDU_STATS_SGI_1_6_US, - HTT_PPDU_STATS_SGI_3_2_US, - }; + struct ath12k_hw_group *ag; + u8 device_id; - #define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0) - #define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4) - - enum HTT_PPDU_STATS_PPDU_TYPE { - HTT_PPDU_STATS_PPDU_TYPE_SU, - HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO, - HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA, - HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA, - HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG, - HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN, - HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP, - HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG, - HTT_PPDU_STATS_PPDU_TYPE_UL_RESP, - HTT_PPDU_STATS_PPDU_TYPE_MAX - }; + /* Lock for protection of peers and rhead_peer_addr */ + spinlock_t dp_lock; - #define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0) - #define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1) - - #define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28) - #define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29) - - #define HTT_USR_RATE_PPDU_TYPE(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M) - #define HTT_USR_RATE_PREAMBLE(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M) - #define HTT_USR_RATE_BW(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M) - #define HTT_USR_RATE_NSS(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M) - #define HTT_USR_RATE_MCS(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M) - #define HTT_USR_RATE_GI(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M) - #define HTT_USR_RATE_DCM(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M) - - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28) - #define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29) - - struct htt_ppdu_stats_user_rate { - u8 tid_num; - u8 reserved0; - __le16 sw_peer_id; - __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/ - __le16 ru_end; - __le16 ru_start; - __le16 resp_ru_end; - __le16 resp_ru_start; - __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */ - __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */ - /* Note: 
resp_rate_info is only valid for if resp_type is UL */ - __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */ - } __packed; + struct ath12k_dp_arch_ops *ops; - #define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0) - #define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8) - #define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9) - #define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11) - #define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14) - #define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16) - - #define HTT_TX_INFO_IS_AMSDU(_flags) \ - u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M) - #define HTT_TX_INFO_BA_ACK_FAILED(_flags) \ - u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M) - #define HTT_TX_INFO_RATECODE(_flags) \ - u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M) - #define HTT_TX_INFO_PEERID(_flags) \ - u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M) - - enum htt_ppdu_stats_usr_compln_status { - HTT_PPDU_STATS_USER_STATUS_OK, - HTT_PPDU_STATS_USER_STATUS_FILTERED, - HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT, - HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH, - HTT_PPDU_STATS_USER_STATUS_ABORT, - }; - - #define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0) - #define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4) - #define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8) - #define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9) - - #define HTT_USR_CMPLTN_IS_AMPDU(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M) - #define HTT_USR_CMPLTN_LONG_RETRY(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M) - #define HTT_USR_CMPLTN_SHORT_RETRY(_val) \ - le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M) - - struct htt_ppdu_stats_usr_cmpltn_cmn { - u8 status; - u8 tid_num; - __le16 sw_peer_id; - /* RSSI value of last ack packet (units = dB above noise floor) */ - __le32 ack_rssi; - __le16 mpdu_tried; - __le16 mpdu_success; - __le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRIES*/ - } __packed; + /* Linked list of struct ath12k_dp_link_peer */ + struct list_head peers; - #define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0) - #define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9) - #define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25) + /* For rhash table init and deinit protection */ + struct mutex link_peer_rhash_tbl_lock; - #define HTT_PPDU_STATS_NON_QOS_TID 16 - - struct htt_ppdu_stats_usr_cmpltn_ack_ba_status { - __le32 ppdu_id; - __le16 sw_peer_id; - __le16 reserved0; - __le32 info; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_INFO_ */ - __le16 current_seq; - __le16 start_seq; - __le32 success_bytes; - } __packed; - - struct htt_ppdu_user_stats { - u16 peer_id; - u16 delay_ba; - u32 tlv_flags; - bool is_valid_peer_id; - struct htt_ppdu_stats_user_rate rate; - struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn; - struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba; + /* The rhashtable containing struct ath12k_link_peer keyed by mac addr */ + struct rhashtable *rhead_peer_addr; + struct rhashtable_params rhash_peer_addr_param; + struct ath12k_device_dp_stats device_stats; }; - #define HTT_PPDU_STATS_MAX_USERS 8 - #define HTT_PPDU_DESC_MAX_DEPTH 16 + static inline u32 ath12k_dp_arch_tx_get_vdev_bank_config(struct ath12k_dp *dp, + struct ath12k_link_vif *arvif) + { + return dp->ops->tx_get_vdev_bank_config(dp->ab, arvif); + } - struct htt_ppdu_stats { - struct 
htt_ppdu_stats_common common; - struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS]; - }; + static inline int ath12k_dp_arch_reo_cmd_send(struct ath12k_dp *dp, - struct ath12k_dp_rx_tid *rx_tid, ++ struct ath12k_dp_rx_tid_rxq *rx_tid, + enum hal_reo_cmd_type type, + struct ath12k_hal_reo_cmd *cmd, + void (*cb)(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status)) + { + return dp->ops->reo_cmd_send(dp->ab, rx_tid, type, cmd, cb); + } - struct htt_ppdu_stats_info { - u32 tlv_bitmap; - u32 ppdu_id; - u32 frame_type; - u32 frame_ctrl; - u32 delay_ba; - u32 bar_num_users; - struct htt_ppdu_stats ppdu_stats; - struct list_head list; - }; -static inline void ath12k_dp_arch_setup_pn_check_reo_cmd(struct ath12k_dp *dp, - struct ath12k_hal_reo_cmd *cmd, - struct ath12k_dp_rx_tid *rx_tid, - u32 cipher, - enum set_key_cmd key_cmd) ++static inline ++void ath12k_dp_arch_setup_pn_check_reo_cmd(struct ath12k_dp *dp, ++ struct ath12k_hal_reo_cmd *cmd, ++ struct ath12k_dp_rx_tid *rx_tid, ++ u32 cipher, ++ enum set_key_cmd key_cmd) + { + dp->ops->setup_pn_check_reo_cmd(cmd, rx_tid, cipher, key_cmd); + } - /* @brief target -> host MLO offset indiciation message - * - * @details - * The following field definitions describe the format of the HTT target - * to host mlo offset indication message. - * - * - * |31 29|28 |26|25 22|21 16|15 13|12 10 |9 8|7 0| - * |---------------------------------------------------------------------| - * | rsvd1 | mac_freq |chip_id |pdev_id|msgtype| - * |---------------------------------------------------------------------| - * | sync_timestamp_lo_us | - * |---------------------------------------------------------------------| - * | sync_timestamp_hi_us | - * |---------------------------------------------------------------------| - * | mlo_offset_lo | - * |---------------------------------------------------------------------| - * | mlo_offset_hi | - * |---------------------------------------------------------------------| - * | mlo_offset_clcks | - * |---------------------------------------------------------------------| - * | rsvd2 | mlo_comp_clks |mlo_comp_us | - * |---------------------------------------------------------------------| - * | rsvd3 |mlo_comp_timer | - * |---------------------------------------------------------------------| - * Header fields - * - MSG_TYPE - * Bits 7:0 - * Purpose: Identifies this is a MLO offset indication msg - * - PDEV_ID - * Bits 9:8 - * Purpose: Pdev of this MLO offset - * - CHIP_ID - * Bits 12:10 - * Purpose: chip_id of this MLO offset - * - MAC_FREQ - * Bits 28:13 - * - SYNC_TIMESTAMP_LO_US - * Purpose: clock frequency of the mac HW block in MHz - * Bits: 31:0 - * Purpose: lower 32 bits of the WLAN global time stamp at which - * last sync interrupt was received - * - SYNC_TIMESTAMP_HI_US - * Bits: 31:0 - * Purpose: upper 32 bits of WLAN global time stamp at which - * last sync interrupt was received - * - MLO_OFFSET_LO - * Bits: 31:0 - * Purpose: lower 32 bits of the MLO offset in us - * - MLO_OFFSET_HI - * Bits: 31:0 - * Purpose: upper 32 bits of the MLO offset in us - * - MLO_COMP_US - * Bits: 15:0 - * Purpose: MLO time stamp compensation applied in us - * - MLO_COMP_CLCKS - * Bits: 25:16 - * Purpose: MLO time stamp compensation applied in clock ticks - * - MLO_COMP_TIMER - * Bits: 21:0 - * Purpose: Periodic timer at which compensation is applied - */ + static inline void ath12k_dp_arch_rx_peer_tid_delete(struct ath12k_dp *dp, + struct ath12k_dp_link_peer *peer, + u8 tid) + { + dp->ops->rx_peer_tid_delete(dp->ab, 
peer, tid); + } - #define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0) - #define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8) - - struct ath12k_htt_mlo_offset_msg { - __le32 info; - __le32 sync_timestamp_lo_us; - __le32 sync_timestamp_hi_us; - __le32 mlo_offset_hi; - __le32 mlo_offset_lo; - __le32 mlo_offset_clks; - __le32 mlo_comp_clks; - __le32 mlo_comp_timer; - } __packed; -static inline void ath12k_dp_arch_reo_cache_flush(struct ath12k_dp *dp, - struct ath12k_dp_rx_tid *rx_tid) ++static inline int ath12k_dp_arch_reo_cache_flush(struct ath12k_dp *dp, ++ struct ath12k_dp_rx_tid_rxq *rx_tid) + { - dp->ops->reo_cache_flush(dp->ab, rx_tid); ++ return dp->ops->reo_cache_flush(dp->ab, rx_tid); + } - /* @brief host -> target FW extended statistics retrieve - * - * @details - * The following field definitions describe the format of the HTT host - * to target FW extended stats retrieve message. - * The message specifies the type of stats the host wants to retrieve. - * - * |31 24|23 16|15 8|7 0| - * |-----------------------------------------------------------| - * | reserved | stats type | pdev_mask | msg type | - * |-----------------------------------------------------------| - * | config param [0] | - * |-----------------------------------------------------------| - * | config param [1] | - * |-----------------------------------------------------------| - * | config param [2] | - * |-----------------------------------------------------------| - * | config param [3] | - * |-----------------------------------------------------------| - * | reserved | - * |-----------------------------------------------------------| - * | cookie LSBs | - * |-----------------------------------------------------------| - * | cookie MSBs | - * |-----------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Purpose: identifies this is a extended stats upload request message - * Value: 0x10 - * - PDEV_MASK - * Bits 8:15 - * Purpose: identifies the mask of PDEVs to retrieve stats from - * Value: This is a overloaded field, refer to usage and interpretation of - * PDEV in interface document. - * Bit 8 : Reserved for SOC stats - * Bit 9 - 15 : Indicates PDEV_MASK in DBDC - * Indicates MACID_MASK in DBS - * - STATS_TYPE - * Bits 23:16 - * Purpose: identifies which FW statistics to upload - * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h) - * - Reserved - * Bits 31:24 - * - CONFIG_PARAM [0] - * Bits 31:0 - * Purpose: give an opaque configuration value to the specified stats type - * Value: stats-type specific configuration value - * Refer to htt_stats.h for interpretation for each stats sub_type - * - CONFIG_PARAM [1] - * Bits 31:0 - * Purpose: give an opaque configuration value to the specified stats type - * Value: stats-type specific configuration value - * Refer to htt_stats.h for interpretation for each stats sub_type - * - CONFIG_PARAM [2] - * Bits 31:0 - * Purpose: give an opaque configuration value to the specified stats type - * Value: stats-type specific configuration value - * Refer to htt_stats.h for interpretation for each stats sub_type - * - CONFIG_PARAM [3] - * Bits 31:0 - * Purpose: give an opaque configuration value to the specified stats type - * Value: stats-type specific configuration value - * Refer to htt_stats.h for interpretation for each stats sub_type - * - Reserved [31:0] for future use. 
- * - COOKIE_LSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. - * Value: LSBs of the opaque cookie specified by the host-side requestor - * - COOKIE_MSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. - * Value: MSBs of the opaque cookie specified by the host-side requestor - */ + static inline + int ath12k_dp_arch_rx_link_desc_return(struct ath12k_dp *dp, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action) + { + return dp->ops->rx_link_desc_return(dp, buf_addr_info, action); + } - struct htt_ext_stats_cfg_hdr { - u8 msg_type; - u8 pdev_mask; - u8 stats_type; - u8 reserved; - } __packed; + static inline + void ath12k_dp_arch_rx_frags_cleanup(struct ath12k_dp *dp, + struct ath12k_dp_rx_tid *rx_tid, + bool rel_link_desc) + { + dp->ops->rx_frags_cleanup(rx_tid, rel_link_desc); + } - struct htt_ext_stats_cfg_cmd { - struct htt_ext_stats_cfg_hdr hdr; - __le32 cfg_param0; - __le32 cfg_param1; - __le32 cfg_param2; - __le32 cfg_param3; - __le32 reserved; - __le32 cookie_lsb; - __le32 cookie_msb; - } __packed; + static inline int ath12k_dp_arch_peer_rx_tid_reo_update(struct ath12k_dp *dp, + struct ath12k_dp_link_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + u32 ba_win_sz, u16 ssn, + bool update_ssn) + { + return dp->ops->peer_rx_tid_reo_update(dp, peer, rx_tid, + ba_win_sz, ssn, update_ssn); + } - /* htt stats config default params */ - #define HTT_STAT_DEFAULT_RESET_START_OFFSET 0 - #define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff - #define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff - #define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff - #define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff - #define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff - #define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00 - #define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00 - - /* HTT_DBG_EXT_STATS_PEER_INFO - * PARAMS: - * @config_param0: - * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request - * [Bit15 : Bit 1] htt_peer_stats_req_mode_t - * [Bit31 : Bit16] sw_peer_id - * @config_param1: - * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum) - * 0 bit htt_peer_stats_cmn_tlv - * 1 bit htt_peer_details_tlv - * 2 bit htt_tx_peer_rate_stats_tlv - * 3 bit htt_rx_peer_rate_stats_tlv - * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv - * 5 bit htt_rx_tid_stats_tlv - * 6 bit htt_msdu_flow_stats_tlv - * @config_param2: [Bit31 : Bit0] mac_addr31to0 - * @config_param3: [Bit15 : Bit0] mac_addr47to32 - * [Bit31 : Bit16] reserved - */ - #define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0) - #define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f - - /* Used to set different configs to the specified stats type.*/ - struct htt_ext_stats_cfg_params { - u32 cfg0; - u32 cfg1; - u32 cfg2; - u32 cfg3; - }; + static inline int ath12k_dp_arch_rx_assign_reoq(struct ath12k_dp *dp, + struct ath12k_dp_peer *dp_peer, + struct ath12k_dp_rx_tid *rx_tid, + u16 ssn, enum hal_pn_type pn_type) + { + return dp->ops->rx_assign_reoq(dp->ab, dp_peer, rx_tid, ssn, pn_type); + } - enum vdev_stats_offload_timer_duration { - ATH12K_STATS_TIMER_DUR_500MS = 1, - ATH12K_STATS_TIMER_DUR_1SEC = 2, - ATH12K_STATS_TIMER_DUR_2SEC = 3, - }; + static inline void ath12k_dp_arch_peer_rx_tid_qref_setup(struct ath12k_dp *dp, + u16 peer_id, u16 tid, + dma_addr_t paddr) + { + dp->ops->peer_rx_tid_qref_setup(dp->ab, peer_id, tid, paddr); + } - #define 
ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0) - #define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8) - #define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16) - #define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24) - #define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0) - #define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8) ++static inline void ath12k_dp_arch_peer_rx_tid_qref_reset(struct ath12k_dp *dp, ++ u16 peer_id, u16 tid) ++{ ++ dp->ops->peer_rx_tid_qref_reset(dp->ab, peer_id, tid); ++} + - struct htt_mac_addr { - __le32 mac_addr_l32; - __le32 mac_addr_h16; - } __packed; ++static inline ++int ath12k_dp_arch_rx_tid_delete_handler(struct ath12k_dp *dp, ++ struct ath12k_dp_rx_tid_rxq *rx_tid) ++{ ++ return dp->ops->rx_tid_delete_handler(dp->ab, rx_tid); ++} + static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr) { memcpy(addr, &addr_l32, 4); diff --cc drivers/net/wireless/ath/ath12k/dp_mon.c index 39d1967584db8,23ddba557001c..737287a9aa462 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@@ -2275,13 -486,16 +490,17 @@@ void ath12k_dp_mon_update_radiotap(stru rxs->mactime = ppduinfo->tsft; } + EXPORT_SYMBOL(ath12k_dp_mon_update_radiotap); - static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, - struct sk_buff *msdu, - const struct hal_rx_mon_ppdu_info *ppduinfo, - struct ieee80211_rx_status *status, - u8 decap) + void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, + struct napi_struct *napi, + struct sk_buff *msdu, ++ const struct hal_rx_mon_ppdu_info *ppduinfo, + struct ieee80211_rx_status *status, + u8 decap) { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_base *ab = dp->ab; static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), @@@ -2304,10 -521,15 +526,14 @@@ status->flag |= RX_FLAG_RADIOTAP_HE; } - spin_lock_bh(&ar->ab->base_lock); - peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id); + ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc); + + rcu_read_lock(); + spin_lock_bh(&dp->dp_lock); - rx_info.addr2_present = false; + peer = ath12k_dp_rx_h_find_link_peer(dp_pdev, msdu, &rx_info); if (peer && peer->sta) { pubsta = peer->sta; + memcpy(addr, peer->addr, ETH_ALEN); if (pubsta->valid_links) { status->link_valid = 1; status->link_id = peer->link_id; diff --cc drivers/net/wireless/ath/ath12k/dp_mon.h index e25595cbdcf37,394463ea19e0b..167028d27513b --- a/drivers/net/wireless/ath/ath12k/dp_mon.h +++ b/drivers/net/wireless/ath/ath12k/dp_mon.h @@@ -88,20 -89,39 +89,35 @@@ int ath12k_dp_mon_buf_replenish(struct int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab, struct dp_rxdma_mon_ring *rx_ring, int req_entries); - int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, - struct napi_struct *napi, int budget, - enum dp_monitor_mode monitor_mode); - struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void); - enum dp_mon_tx_tlv_status - ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag, - struct hal_tlv_hdr *tx_tlv, - u8 *num_users); - enum hal_rx_mon_status - ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, - struct ath12k_mon_data *pmon, - struct sk_buff *skb, - struct napi_struct *napi, - u32 ppdu_id); void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info); - int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi); + void + ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k_base *ab, + 
struct hal_rx_mon_ppdu_info *ppdu_info); + void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k_dp_link_peer *peer, + struct hal_rx_mon_ppdu_info *ppdu_info); + int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len); + struct sk_buff + *ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + int *buf_id); + u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id); + int + ath12k_dp_mon_parse_status_buf(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + const struct dp_mon_packet_info *packet_info); + void ath12k_dp_mon_update_radiotap(struct ath12k_pdev_dp *dp_pdev, + struct hal_rx_mon_ppdu_info *ppduinfo, + struct sk_buff *mon_skb, + struct ieee80211_rx_status *rxs); + void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, + struct napi_struct *napi, + struct sk_buff *msdu, - struct ieee80211_rx_status *status, - u8 decap); -void ath12k_dp_mon_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, - struct napi_struct *napi, - struct sk_buff *msdu, ++ const struct hal_rx_mon_ppdu_info *ppduinfo, + struct ieee80211_rx_status *status, + u8 decap); + struct sk_buff * + ath12k_dp_mon_rx_merg_msdus(struct ath12k_pdev_dp *dp_pdev, + struct dp_mon_mpdu *mon_mpdu, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct ieee80211_rx_status *rxs); #endif diff --cc drivers/net/wireless/ath/ath12k/dp_rx.c index d28d8ffec0f83,ef0369dafbc75..a32ee9f8061af --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@@ -19,248 -17,6 +17,9 @@@ #include "dp_mon.h" #include "debugfs_htt_stats.h" - #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) - +static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab, + struct ath12k_dp_rx_tid_rxq *rx_tid); + - static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc)) - return HAL_ENCRYPT_TYPE_OPEN; - - return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc); - } - - u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_decap_type(desc); - } - - static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc); - } - - static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); - } - - static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc); - } - - static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab, - struct sk_buff *skb) - { - struct ieee80211_hdr *hdr; - - hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); - return ieee80211_has_morefrags(hdr->frame_control); - } - - static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab, - struct sk_buff *skb) - { - struct ieee80211_hdr *hdr; - - hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz); - return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; - } - - static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc); - } - - static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->dp_rx_h_msdu_done(desc); - } - - static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab, - struct 
hal_rx_desc *desc) - { - return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc); - } - - static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc); - } - - static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc); - } - - u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc); - } - - static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_len(desc); - } - - static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc); - } - - static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc); - } - - static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc); - } - - static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc); - } - - static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc); - } - - static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc)); - } - - static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc); - } - - static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc); - } - - u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc); - } - - static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_first_msdu(desc); - } - - static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_last_msdu(desc); - } - - static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab, - struct hal_rx_desc *fdesc, - struct hal_rx_desc *ldesc) - { - ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc); - } - - static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab, - struct hal_rx_desc *desc, - u16 len) - { - ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len); - } - - u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, - struct hal_rx_desc *rx_desc) - { - return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); - } - - bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, - struct hal_rx_desc *rx_desc) - { - u32 tlv_tag; - - tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc); - - return tlv_tag == HAL_RX_MPDU_START; - } - - static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return (ath12k_dp_rx_h_first_msdu(ab, desc) && - ab->hal_rx_ops->rx_desc_is_da_mcbc(desc)); - } - - static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc); - } - - static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab, - struct 
hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc); - } - - static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab, - struct hal_rx_desc *desc, - struct ieee80211_hdr *hdr) - { - ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr); - } - - static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab, - struct hal_rx_desc *desc, - u8 *crypto_hdr, - enum hal_encrypt_type enctype) - { - ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype); - } - - static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, - struct hal_rx_desc *desc) - { - return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); - } - - static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) - { - struct sk_buff *skb; - - while ((skb = __skb_dequeue(skb_list))) - dev_kfree_skb_any(skb); - } - static size_t ath12k_dp_list_cut_nodes(struct list_head *list, struct list_head *head, size_t count) @@@ -584,39 -342,21 +345,41 @@@ static int ath12k_dp_rx_pdev_srng_alloc return 0; } - static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq, - struct ath12k_dp_rx_tid *rx_tid) ++void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq, ++ struct ath12k_dp_rx_tid *rx_tid, ++ bool active) +{ + rx_tid_rxq->tid = rx_tid->tid; - rx_tid_rxq->active = rx_tid->active; ++ rx_tid_rxq->active = active; + rx_tid_rxq->qbuf = rx_tid->qbuf; +} ++EXPORT_SYMBOL(ath12k_dp_init_rx_tid_rxq); + +static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab, + struct ath12k_reoq_buf *tid_qbuf) +{ + if (tid_qbuf->vaddr) { + dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned, + tid_qbuf->size, DMA_BIDIRECTIONAL); + kfree(tid_qbuf->vaddr); + tid_qbuf->vaddr = NULL; + } +} + void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab) { - struct ath12k_dp *dp = &ab->dp; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); struct ath12k_dp_rx_reo_cmd *cmd, *tmp; struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache; + struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue; - spin_lock_bh(&dp->reo_cmd_lock); - list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { - list_del(&cmd->list); - dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned, - cmd->data.qbuf.size, DMA_BIDIRECTIONAL); - kfree(cmd->data.qbuf.vaddr); - kfree(cmd); + spin_lock_bh(&dp->reo_rxq_flush_lock); + list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list, + list) { + list_del(&cmd_queue->list); + ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf); + kfree(cmd_queue); } - list_for_each_entry_safe(cmd_cache, tmp_cache, &dp->reo_cmd_cache_flush_list, list) { list_del(&cmd_cache->list); @@@ -635,153 -369,27 +398,54 @@@ spin_unlock_bh(&dp->reo_cmd_lock); } - static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, - enum hal_reo_cmd_status status) + void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status) { - struct ath12k_dp_rx_tid *rx_tid = ctx; + struct ath12k_dp_rx_tid_rxq *rx_tid = ctx; if (status != HAL_REO_CMD_SUCCESS) ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", rx_tid->tid, status); - dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->qbuf.vaddr); - rx_tid->qbuf.vaddr = NULL; + ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf); } + EXPORT_SYMBOL(ath12k_dp_reo_cmd_free); - static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, - struct ath12k_dp_rx_tid_rxq *rx_tid, - enum 
hal_reo_cmd_type type, - struct ath12k_hal_reo_cmd *cmd, - void (*cb)(struct ath12k_dp *dp, void *ctx, - enum hal_reo_cmd_status status)) - { - struct ath12k_dp *dp = &ab->dp; - struct ath12k_dp_rx_reo_cmd *dp_cmd; - struct hal_srng *cmd_ring; - int cmd_num; - - cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; - cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd); - - /* cmd_num should start from 1, during failure return the error code */ - if (cmd_num < 0) - return cmd_num; - - /* reo cmd ring descriptors has cmd_num starting from 1 */ - if (cmd_num == 0) - return -EINVAL; - - if (!cb) - return 0; - - /* Can this be optimized so that we keep the pending command list only - * for tid delete command to free up the resource on the command status - * indication? - */ - dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC); - - if (!dp_cmd) - return -ENOMEM; - - memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid)); - dp_cmd->cmd_num = cmd_num; - dp_cmd->handler = cb; - - spin_lock_bh(&dp->reo_cmd_lock); - list_add_tail(&dp_cmd->list, &dp->reo_cmd_list); - spin_unlock_bh(&dp->reo_cmd_lock); - - return 0; - } - - static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab, - struct ath12k_dp_rx_tid_rxq *rx_tid) - { - struct ath12k_hal_reo_cmd cmd = {}; - int ret; - - cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); - /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs - *in the bitmap will be forwarded/flushed to REO output rings - */ - cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS | - HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS; - - /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum - * window size of 1024. In such cases, the driver can issue a single - * 1KB descriptor flush command instead of sending multiple 128-byte - * flush commands for each QoS TID, improving efficiency. - */ - - if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID) - cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC; - - ret = ath12k_dp_reo_cmd_send(ab, rx_tid, - HAL_REO_CMD_FLUSH_CACHE, - &cmd, ath12k_dp_reo_cmd_free); - return ret; - } - - static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid) - { - struct ath12k_reo_queue_ref *qref; - struct ath12k_dp *dp = &ab->dp; - bool ml_peer = false; - - if (!ab->hw_params->reoq_lut_support) - return; - - if (peer_id & ATH12K_PEER_ML_ID_VALID) { - peer_id &= ~ATH12K_PEER_ML_ID_VALID; - ml_peer = true; - } - - if (ml_peer) - qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); - else - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); - - qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); - qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | - u32_encode_bits(tid, DP_REO_QREF_NUM); - } - - static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp) ++void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp) +{ + struct ath12k_base *ab = dp->ab; + struct dp_reo_update_rx_queue_elem *elem, *tmp; + + spin_lock_bh(&dp->reo_rxq_flush_lock); + + list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) { + if (elem->rx_tid.active) + continue; + + if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid)) + break; + - ath12k_peer_rx_tid_qref_reset(ab, - elem->is_ml_peer ? elem->ml_peer_id : - elem->peer_id, - elem->rx_tid.tid); ++ ath12k_dp_arch_peer_rx_tid_qref_reset(dp, ++ elem->is_ml_peer ? 
++ elem->ml_peer_id : elem->peer_id, ++ elem->rx_tid.tid); + + if (ab->hw_params->reoq_lut_support) + ath12k_hal_reo_shared_qaddr_cache_clear(ab); + + list_del(&elem->list); + kfree(elem); + } + + spin_unlock_bh(&dp->reo_rxq_flush_lock); +} ++EXPORT_SYMBOL(ath12k_dp_rx_process_reo_cmd_update_rx_queue_list); + - static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx, - enum hal_reo_cmd_status status) + void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status) { struct ath12k_base *ab = dp->ab; - struct ath12k_dp_rx_tid *rx_tid = ctx; + struct ath12k_dp_rx_tid_rxq *rx_tid = ctx; struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp; if (status == HAL_REO_CMD_DRAIN) { @@@ -793,13 -401,6 +457,13 @@@ return; } + /* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries + * in the pending queue list marked TID as inactive + */ - spin_lock_bh(&dp->ab->base_lock); ++ spin_lock_bh(&dp->dp_lock); + ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp); - spin_unlock_bh(&dp->ab->base_lock); ++ spin_unlock_bh(&dp->dp_lock); + elem = kzalloc(sizeof(*elem), GFP_ATOMIC); if (!elem) goto free_desc; @@@ -817,297 -418,60 +481,108 @@@ if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES || time_after(jiffies, elem->ts + msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) { + /* The reo_cmd_cache_flush_list is used in only two contexts, + * one is in this function called from napi and the + * other in ath12k_dp_free during core destroy. + * If cache command sent is success, delete the element in + * the cache list. ath12k_dp_rx_reo_cmd_list_cleanup + * will be called during core destroy. + */ + - if (ath12k_dp_reo_cache_flush(ab, &elem->data)) ++ if (ath12k_dp_arch_reo_cache_flush(dp, &elem->data)) + break; + list_del(&elem->list); dp->reo_cmd_cache_flush_count--; + - /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send() - * within ath12k_wifi7_dp_reo_cache_flush. - * The reo_cmd_cache_flush_list is used in only two contexts, - * one is in this function called from napi and the other in - * ath12k_dp_free during core destroy. - * Before dp_free, the irqs would be disabled and would wait to - * synchronize. Hence there wouldn’t be any race against add or - * delete to this list. Hence unlock-lock is safe here. 
- */ - spin_unlock_bh(&dp->reo_cmd_lock); - - ath12k_dp_arch_reo_cache_flush(dp, &elem->data); kfree(elem); - spin_lock_bh(&dp->reo_cmd_lock); } } - spin_unlock_bh(&dp->reo_cmd_lock); + spin_unlock_bh(&dp->reo_rxq_flush_lock); return; free_desc: - dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->qbuf.vaddr); - rx_tid->qbuf.vaddr = NULL; + ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf); } + EXPORT_SYMBOL(ath12k_dp_rx_tid_del_func); +static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab, + struct ath12k_dp_rx_tid_rxq *rx_tid) +{ - struct ath12k_hal_reo_cmd cmd = {}; - - cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; - cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; - /* Observed flush cache failure, to avoid that set vld bit during delete */ - cmd.upd1 |= HAL_REO_CMD_UPD1_VLD; - - return ath12k_dp_reo_cmd_send(ab, rx_tid, - HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, - ath12k_dp_rx_tid_del_func); - } - - static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, - dma_addr_t paddr) - { - struct ath12k_reo_queue_ref *qref; - struct ath12k_dp *dp = &ab->dp; - bool ml_peer = false; ++ struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + - if (!ab->hw_params->reoq_lut_support) - return; - - if (peer_id & ATH12K_PEER_ML_ID_VALID) { - peer_id &= ~ATH12K_PEER_ML_ID_VALID; - ml_peer = true; - } - - if (ml_peer) - qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); - else - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); - - qref->info0 = u32_encode_bits(lower_32_bits(paddr), - BUFFER_ADDR_INFO0_ADDR); - qref->info1 = u32_encode_bits(upper_32_bits(paddr), - BUFFER_ADDR_INFO1_ADDR) | - u32_encode_bits(tid, DP_REO_QREF_NUM); - ath12k_hal_reo_shared_qaddr_cache_clear(ab); ++ return ath12k_dp_arch_rx_tid_delete_handler(dp, rx_tid); +} + - static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid) ++void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid) +{ + struct dp_reo_update_rx_queue_elem *elem; + struct ath12k_dp_rx_tid_rxq *rx_tid; + + spin_lock_bh(&dp->reo_rxq_flush_lock); + list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) { + if (elem->peer_id == peer_id) { + rx_tid = &elem->rx_tid; + if (rx_tid->tid == tid) { + rx_tid->active = false; + break; + } + } + } + spin_unlock_bh(&dp->reo_rxq_flush_lock); +} ++EXPORT_SYMBOL(ath12k_dp_mark_tid_as_inactive); + - void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar, - struct ath12k_peer *peer, u8 tid) - { - struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid]; - struct ath12k_base *ab = ar->ab; - struct ath12k_dp *dp = &ab->dp; - - if (!rx_tid->active) - return; - - rx_tid->active = false; - - ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid); - ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp); - } - - int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, - struct ath12k_buffer_addr *buf_addr_info, - enum hal_wbm_rel_bm_act action) - { - struct hal_wbm_release_ring *desc; - struct ath12k_dp *dp = &ab->dp; - struct hal_srng *srng; - int ret = 0; - - srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; - - spin_lock_bh(&srng->lock); - - ath12k_hal_srng_access_begin(ab, srng); - - desc = ath12k_hal_srng_src_get_next_entry(ab, srng); - if (!desc) { - ret = -ENOBUFS; - goto exit; - } - - 
ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action); - - exit: - ath12k_hal_srng_access_end(ab, srng); - - spin_unlock_bh(&srng->lock); - - return ret; - } - - static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, - bool rel_link_desc) - { - struct ath12k_buffer_addr *buf_addr_info; - struct ath12k_base *ab = rx_tid->ab; - - lockdep_assert_held(&ab->base_lock); - - if (rx_tid->dst_ring_desc) { - if (rel_link_desc) { - buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info; - ath12k_dp_rx_link_desc_return(ab, buf_addr_info, - HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); - } - kfree(rx_tid->dst_ring_desc); - rx_tid->dst_ring_desc = NULL; - } - - rx_tid->cur_sn = 0; - rx_tid->last_frag_no = 0; - rx_tid->rx_frag_bitmap = 0; - __skb_queue_purge(&rx_tid->rx_frags); - } - - void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer) + void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer) { struct ath12k_dp_rx_tid *rx_tid; int i; + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); - lockdep_assert_held(&ar->ab->base_lock); + lockdep_assert_held(&dp->dp_lock); + + if (!peer->primary_link) + return; for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { - rx_tid = &peer->rx_tid[i]; + rx_tid = &peer->dp_peer->rx_tid[i]; - ath12k_dp_rx_peer_tid_delete(ar, peer, i); - ath12k_dp_rx_frags_cleanup(rx_tid, true); + ath12k_dp_arch_rx_peer_tid_delete(dp, peer, i); + ath12k_dp_arch_rx_frags_cleanup(dp, rx_tid, true); - spin_unlock_bh(&ar->ab->base_lock); + spin_unlock_bh(&dp->dp_lock); timer_delete_sync(&rx_tid->frag_timer); - spin_lock_bh(&ar->ab->base_lock); - } - } - - static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar, - struct ath12k_peer *peer, - struct ath12k_dp_rx_tid *rx_tid, - u32 ba_win_sz, u16 ssn, - bool update_ssn) - { - struct ath12k_hal_reo_cmd cmd = {}; - int ret; - struct ath12k_dp_rx_tid_rxq rx_tid_rxq; - - ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid); - - cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned); - cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; - cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; - cmd.ba_window_size = ba_win_sz; - - if (update_ssn) { - cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; - cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); - } - - ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq, - HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, - NULL); - if (ret) { - ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", - rx_tid_rxq.tid, ret); - return ret; - } - - rx_tid->ba_win_sz = ba_win_sz; - - return 0; - } - - static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab, - struct ath12k_sta *ahsta, - struct ath12k_dp_rx_tid *rx_tid, - u16 ssn, enum hal_pn_type pn_type) - { - u32 ba_win_sz = rx_tid->ba_win_sz; - struct ath12k_reoq_buf *buf; - void *vaddr, *vaddr_aligned; - dma_addr_t paddr_aligned; - u8 tid = rx_tid->tid; - u32 hw_desc_sz; - int ret; - - buf = &ahsta->reoq_bufs[tid]; - if (!buf->vaddr) { - /* TODO: Optimize the memory allocation for qos tid based on - * the actual BA window size in REO tid update path. 
- */ - if (tid == HAL_DESC_REO_NON_QOS_TID) - hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid); - else - hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); - - vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); - if (!vaddr) - return -ENOMEM; - - vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); - - ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz, - ssn, pn_type); - - paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz, - DMA_BIDIRECTIONAL); - ret = dma_mapping_error(ab->dev, paddr_aligned); - if (ret) { - kfree(vaddr); - return ret; - } - - buf->vaddr = vaddr; - buf->paddr_aligned = paddr_aligned; - buf->size = hw_desc_sz; + spin_lock_bh(&dp->dp_lock); } - - rx_tid->qbuf = *buf; - rx_tid->active = true; - - return 0; } +static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp, - struct ath12k_peer *peer, ++ struct ath12k_dp_link_peer *peer, + struct ath12k_dp_rx_tid *rx_tid) +{ + struct dp_reo_update_rx_queue_elem *elem; + - lockdep_assert_held(&dp->ab->base_lock); ++ lockdep_assert_held(&dp->dp_lock); + + elem = kzalloc(sizeof(*elem), GFP_ATOMIC); + if (!elem) + return -ENOMEM; + + elem->peer_id = peer->peer_id; + elem->is_ml_peer = peer->mlo; + elem->ml_peer_id = peer->ml_id; + - ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid); ++ ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid, ++ (peer->rx_tid_active_bitmask & (1 << rx_tid->tid))); + + spin_lock_bh(&dp->reo_rxq_flush_lock); + list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list); + spin_unlock_bh(&dp->reo_rxq_flush_lock); + + return 0; +} + int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, u8 tid, u32 ba_win_sz, u16 ssn, enum hal_pn_type pn_type) @@@ -1188,19 -550,8 +661,21 @@@ return ret; } + peer->rx_tid_active_bitmask |= (1 << tid); + + /* Pre-allocate the update_rxq_list for the corresponding tid + * This will be used during the tid delete. 
The reason we are not + * allocating during tid delete is that, if any alloc fail in update_rxq_list + * we may not be able to delete the tid vaddr/paddr and may lead to leak + */ + ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid); + if (ret) { + ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid); + ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf); - spin_unlock_bh(&ab->base_lock); ++ spin_unlock_bh(&dp->dp_lock); + return ret; + } + paddr_aligned = rx_tid->qbuf.paddr_aligned; if (ab->hw_params->reoq_lut_support) { /* Update the REO queue LUT at the corresponding peer id @@@ -1307,10 -665,10 +789,11 @@@ int ath12k_dp_rx_peer_pn_replay_config( { struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); struct ath12k_hal_reo_cmd cmd = {}; - struct ath12k_peer *peer; + struct ath12k_dp_link_peer *peer; struct ath12k_dp_rx_tid *rx_tid; + struct ath12k_dp_rx_tid_rxq rx_tid_rxq; u8 tid; int ret = 0; @@@ -1354,16 -691,16 +816,17 @@@ } for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { - rx_tid = &peer->rx_tid[tid]; - if (!rx_tid->active) + if (!(peer->rx_tid_active_bitmask & (1 << tid))) continue; - ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid); - cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned); - ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq, - HAL_REO_CMD_UPDATE_RX_QUEUE, - &cmd, NULL); + rx_tid = &peer->dp_peer->rx_tid[tid]; - ++ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid, ++ (peer->rx_tid_active_bitmask & (1 << tid))); + ath12k_dp_arch_setup_pn_check_reo_cmd(dp, &cmd, rx_tid, key->cipher, + key_cmd); - ret = ath12k_dp_arch_reo_cmd_send(dp, rx_tid, ++ ret = ath12k_dp_arch_reo_cmd_send(dp, &rx_tid_rxq, + HAL_REO_CMD_UPDATE_RX_QUEUE, + &cmd, NULL); if (ret) { ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n", tid, peer_addr, ret); @@@ -2649,13 -1161,9 +1289,15 @@@ void ath12k_dp_rx_h_ppdu(struct ath12k_ rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { rx_status->band = NL80211_BAND_5GHZ; - } else { + } + + if (unlikely(rx_status->band == NUM_NL80211_BANDS || - !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) { ++ !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rx_status->band])) { + struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev); + + ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n", + rx_status->band, channel_num, center_freq, ar->pdev_idx); + spin_lock_bh(&ar->data_lock); channel = ar->rx_channel; if (channel) { @@@ -2675,18 -1178,18 +1317,19 @@@ rx_status->freq = ieee80211_channel_to_frequency(channel_num, rx_status->band); +h_rate: - ath12k_dp_rx_h_rate(ar, rx_info); + ath12k_dp_rx_h_rate(dp_pdev, rx_info); } + EXPORT_SYMBOL(ath12k_dp_rx_h_ppdu); - static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, - struct sk_buff *msdu, - struct ath12k_dp_rx_info *rx_info) + void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info) { - struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = dp_pdev->dp; struct ieee80211_rx_status *rx_status; struct ieee80211_sta *pubsta; - struct ath12k_peer *peer; + struct ath12k_dp_peer *peer; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); struct ieee80211_rx_status *status = rx_info->rx_status; u8 decap = rx_info->decap_type; diff --cc 
drivers/net/wireless/ath/ath12k/dp_rx.h index 69d0a36a91d88,88651553120d3..1ec5382f59955 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@@ -28,18 -33,12 +33,18 @@@ struct ath12k_dp_rx_tid /* Timer info related to fragments */ struct timer_list frag_timer; - struct ath12k_base *ab; + struct ath12k_dp *dp; }; +struct ath12k_dp_rx_tid_rxq { + u8 tid; + bool active; + struct ath12k_reoq_buf qbuf; +}; + struct ath12k_dp_rx_reo_cache_flush_elem { struct list_head list; - struct ath12k_dp_rx_tid data; + struct ath12k_dp_rx_tid_rxq data; unsigned long ts; }; @@@ -167,25 -235,15 +249,20 @@@ u8 ath12k_dp_rx_h_decap_type(struct ath struct hal_rx_desc *desc); u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, struct hal_rx_desc *desc); - void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info); - int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); - int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); - - int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, - int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, - const void *ptr, void *data), - void *data); - void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc, - struct ath12k_dp_rx_info *rx_info); - - int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype); + int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype); u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab, struct hal_rx_desc *rx_desc); - bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, - struct hal_rx_desc *rx_desc); - int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab, - struct ath12k_buffer_addr *buf_addr_info, - enum hal_wbm_rel_bm_act action); - bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, - struct hal_rx_desc *rx_desc); + void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev, + struct hal_rx_desc_data *rx_info); + struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, + struct sk_buff *first); + void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status); + void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status); ++void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp); ++void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq, ++ struct ath12k_dp_rx_tid *rx_tid, ++ bool active); ++void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid); #endif /* ATH12K_DP_RX_H */ diff --cc drivers/net/wireless/ath/ath12k/hal.c index 6406fcf5d69fd,91d697ad1799d..c7a152490fa09 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@@ -1,1569 -1,152 +1,158 @@@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include - #include "hal_tx.h" - #include "hal_rx.h" #include "debug.h" - #include "hal_desc.h" #include "hif.h" - static const struct hal_srng_config hw_srng_config_template[] = { - /* TODO: max_rings can populated by querying HW capabilities */ - [HAL_REO_DST] = { - .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, - .max_rings = 8, - .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, - }, - [HAL_REO_EXCEPTION] = { - /* Designating REO2SW0 ring as exception ring. - * Any of theREO2SW rings can be used as exception ring. - */ - .start_ring_id = HAL_SRNG_RING_ID_REO2SW0, - .max_rings = 1, - .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE, - }, - [HAL_REO_REINJECT] = { - .start_ring_id = HAL_SRNG_RING_ID_SW2REO, - .max_rings = 4, - .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, - }, - [HAL_REO_CMD] = { - .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, - .max_rings = 1, - .entry_size = (sizeof(struct hal_tlv_64_hdr) + - sizeof(struct hal_reo_get_queue_stats)) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, - }, - [HAL_REO_STATUS] = { - .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, - .max_rings = 1, - .entry_size = (sizeof(struct hal_tlv_64_hdr) + - sizeof(struct hal_reo_get_queue_stats_status)) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, - }, - [HAL_TCL_DATA] = { - .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, - .max_rings = 6, - .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, - }, - [HAL_TCL_CMD] = { - .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, - .max_rings = 1, - .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, - }, - [HAL_TCL_STATUS] = { - .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, - .max_rings = 1, - .entry_size = (sizeof(struct hal_tlv_hdr) + - sizeof(struct hal_tcl_status_ring)) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, - }, - [HAL_CE_SRC] = { - .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, - .max_rings = 16, - .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, - }, - [HAL_CE_DST] = { - .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, - .max_rings = 16, - .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, - }, - [HAL_CE_DST_STATUS] = { - .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, - .max_rings = 16, - .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, - }, - [HAL_WBM_IDLE_LINK] = { - .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, - .max_rings = 1, - 
.entry_size = sizeof(struct hal_wbm_link_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, - }, - [HAL_SW2WBM_RELEASE] = { - .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE, - .max_rings = 2, - .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, - }, - [HAL_WBM2SW_RELEASE] = { - .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, - .max_rings = 8, - .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_UMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, - }, - [HAL_RXDMA_BUF] = { - .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0, - .max_rings = 1, - .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_DMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_RXDMA_DST] = { - .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, - .max_rings = 0, - .entry_size = 0, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_RXDMA_MONITOR_BUF] = { - .start_ring_id = HAL_SRNG_SW2RXMON_BUF0, - .max_rings = 1, - .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_RXDMA_MONITOR_STATUS] = { - .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, - .max_rings = 1, - .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_RXDMA_MONITOR_DESC] = { 0, }, - [HAL_RXDMA_DIR_BUF] = { - .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, - .max_rings = 2, - .entry_size = 8 >> 2, /* TODO: Define the struct */ - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_PPE2TCL] = { - .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1, - .max_rings = 1, - .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, - }, - [HAL_PPE_RELEASE] = { - .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE, - .max_rings = 1, - .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE, - }, - [HAL_TX_MONITOR_BUF] = { - .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, - .max_rings = 1, - .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_SRC, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_RXDMA_MONITOR_DST] = { - .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0, - .max_rings = 1, - .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - }, - [HAL_TX_MONITOR_DST] = { - .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, - .max_rings = 1, - .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, - .mac_type = ATH12K_HAL_SRNG_PMAC, - .ring_dir = HAL_SRNG_DIR_DST, - .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, - } - }; - - static const struct ath12k_hal_tcl_to_wbm_rbm_map - 
ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = { - { - .wbm_ring_num = 0, - .rbm_id = HAL_RX_BUF_RBM_SW0_BM, - }, - { - .wbm_ring_num = 1, - .rbm_id = HAL_RX_BUF_RBM_SW1_BM, - }, - { - .wbm_ring_num = 2, - .rbm_id = HAL_RX_BUF_RBM_SW2_BM, - }, - { - .wbm_ring_num = 4, - .rbm_id = HAL_RX_BUF_RBM_SW4_BM, - } - }; - - static const struct ath12k_hal_tcl_to_wbm_rbm_map - ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = { - { - .wbm_ring_num = 0, - .rbm_id = HAL_RX_BUF_RBM_SW0_BM, - }, - { - .wbm_ring_num = 2, - .rbm_id = HAL_RX_BUF_RBM_SW2_BM, - }, - { - .wbm_ring_num = 4, - .rbm_id = HAL_RX_BUF_RBM_SW4_BM, - }, - }; - - static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab) - { - return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab); - } - - static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_FIRST_MSDU); - } - - static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_LAST_MSDU); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) - { - return le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_L3_HDR_PADDING); - } - - static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, - RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); - } - - static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.mpdu_start.info2, - RX_MPDU_START_INFO2_ENC_TYPE); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info11, - RX_MSDU_END_INFO11_DECAP_FORMAT); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info11, - RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); - } - - static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, - 
RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); - } - - static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); - } - - static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_SEQ_NUM); - } - - static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info10, - RX_MSDU_END_INFO10_MSDU_LENGTH); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_SGI); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_RATE_MCS); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_RECV_BW); - } - - static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) - { - return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_PKT_TYPE); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) - { - return le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_INFO5_TID); - } - - static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id); - } - - static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, - struct hal_rx_desc *ldesc) - { - memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end, - sizeof(struct rx_msdu_end_qcn9274)); - } - - static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id); - } - - static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) - { - u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10); - - info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH; - info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH); - - desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info); - } - - static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) - { - return &desc->u.qcn9274.msdu_payload[0]; - } - - static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void) - { - return offsetof(struct hal_rx_desc_qcn9274, mpdu_start); - } - - static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void) - { - return offsetof(struct hal_rx_desc_qcn9274, msdu_end); - } - - static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) - { - return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR2_VALID; - } - - static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) - { - return desc->u.qcn9274.mpdu_start.addr2; - } - - static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) & - 
RX_MSDU_END_INFO5_DA_IS_MCBC; - } - - static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, - struct ieee80211_hdr *hdr) - { - hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl; - hdr->duration_id = desc->u.qcn9274.mpdu_start.duration; - ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1); - ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2); - ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3); - if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { - ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4); - } - hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl; - } - - static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, - u8 *crypto_hdr, - enum hal_encrypt_type enctype) - { - unsigned int key_id; - - switch (enctype) { - case HAL_ENCRYPT_TYPE_OPEN: - return; - case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: - case HAL_ENCRYPT_TYPE_TKIP_MIC: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]); - crypto_hdr[1] = 0; - crypto_hdr[2] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]); - break; - case HAL_ENCRYPT_TYPE_CCMP_128: - case HAL_ENCRYPT_TYPE_CCMP_256: - case HAL_ENCRYPT_TYPE_GCMP_128: - case HAL_ENCRYPT_TYPE_AES_GCMP_256: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]); - crypto_hdr[1] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]); - crypto_hdr[2] = 0; - break; - case HAL_ENCRYPT_TYPE_WEP_40: - case HAL_ENCRYPT_TYPE_WEP_104: - case HAL_ENCRYPT_TYPE_WEP_128: - case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: - case HAL_ENCRYPT_TYPE_WAPI: - return; - } - key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5, - RX_MPDU_START_INFO5_KEY_ID); - crypto_hdr[3] = 0x20 | (key_id << 6); - crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]); - crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]); - crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]); - crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]); - } - - static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab) - { - struct ath12k_hal *hal = &ab->hal; - struct hal_srng_config *s; - - hal->srng_config = kmemdup(hw_srng_config_template, - sizeof(hw_srng_config_template), - GFP_KERNEL); - if (!hal->srng_config) - return -ENOMEM; - - s = &hal->srng_config[HAL_REO_DST]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; - s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; - - s = &hal->srng_config[HAL_REO_EXCEPTION]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; - - s = &hal->srng_config[HAL_REO_REINJECT]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; - s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP; - - s = &hal->srng_config[HAL_REO_CMD]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; - - s = &hal->srng_config[HAL_REO_STATUS]; - s->reg_start[0] = 
HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; - - s = &hal->srng_config[HAL_TCL_DATA]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; - s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; - - s = &hal->srng_config[HAL_TCL_CMD]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; - - s = &hal->srng_config[HAL_TCL_STATUS]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; - - s = &hal->srng_config[HAL_CE_SRC]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); - - s = &hal->srng_config[HAL_CE_DST]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - - s = &hal->srng_config[HAL_CE_DST_STATUS]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + - HAL_CE_DST_STATUS_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - - s = &hal->srng_config[HAL_WBM_IDLE_LINK]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; - - s = &hal->srng_config[HAL_SW2WBM_RELEASE]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; - s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) - - HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP; - - s = &hal->srng_config[HAL_WBM2SW_RELEASE]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; - s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - - HAL_WBM0_RELEASE_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; - - /* Some LMAC rings are not accessed from the host: - * RXDMA_BUG, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS, - * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC, - * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA - */ - s = &hal->srng_config[HAL_PPE2TCL]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP; - - s = &hal->srng_config[HAL_PPE_RELEASE]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab); - s->reg_start[1] = 
HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP; - - return 0; - } - - static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void) - { - return QCN9274_MPDU_START_WMASK; - } - - static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void) - { - return QCN9274_MSDU_END_WMASK; - } - - static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void) - { - return &hal_rx_qcn9274_compact_ops; - } - - static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14, - RX_MSDU_END_INFO14_MSDU_DONE); - } - - static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13, - RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); - } - - static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13, - RX_MSDU_END_INFO13_IP_CKSUM_FAIL); - } - - static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) - { - return (le32_get_bits(desc->u.qcn9274.msdu_end.info14, - RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == - RX_DESC_DECRYPT_STATUS_CODE_OK); - } - - static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) - { - u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info13); - u32 errmap = 0; - - if (info & RX_MSDU_END_INFO13_FCS_ERR) - errmap |= HAL_RX_MPDU_ERR_FCS; - - if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) - errmap |= HAL_RX_MPDU_ERR_DECRYPT; - - if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) - errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; - - if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) - errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; - - if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) - errmap |= HAL_RX_MPDU_ERR_OVERFLOW; - - if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; - - if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; - - return errmap; - } - - static u32 ath12k_hw_qcn9274_get_rx_desc_size(void) - { - return sizeof(struct hal_rx_desc_qcn9274); - } - - static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) - { - return 0; - } - - const struct hal_rx_ops hal_rx_qcn9274_ops = { - .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu, - .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu, - .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes, - .rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid, - .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type, - .rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type, - .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl, - .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld, - .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid, - .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no, - .rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len, - .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi, - .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs, - .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw, - .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq, - .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type, - .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss, - .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid, - .rx_desc_get_mpdu_peer_id = 
ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id, - .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv, - .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id, - .rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len, - .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload, - .rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset, - .rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset, - .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid, - .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2, - .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc, - .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr, - .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr, - .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done, - .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail, - .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail, - .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted, - .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err, - .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size, - .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link, - }; - - static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, - RX_MSDU_END_INFO5_FIRST_MSDU); - } - - static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, - RX_MSDU_END_INFO5_LAST_MSDU); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) - { - return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, - RX_MSDU_END_INFO5_L3_HDR_PADDING); - } - - static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, - RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); - } - - static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2, - RX_MPDU_START_INFO2_ENC_TYPE); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11, - RX_MSDU_END_INFO11_DECAP_FORMAT); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274.msdu_end.info11, - RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); - } - - static bool - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); - } - - static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); - } - - static u16 - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_SEQ_NUM); - } - - static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10, - RX_MSDU_END_INFO10_MSDU_LENGTH); - } - - static u8 
ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_INFO12_SGI); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_INFO12_RATE_MCS); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_INFO12_RECV_BW); - } - - static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) - { - return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_INFO12_PKT_TYPE); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) - { - return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, - RX_MSDU_END_INFO5_TID); - } - - static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id); - } - - static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, - struct hal_rx_desc *ldesc) - { - fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end; - } - - static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id); - } - - static void - ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) - { - u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10); - - info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH); - desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info); - } - - static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) - { - return &desc->u.qcn9274_compact.msdu_payload[0]; - } - - static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void) - { - return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start); - } - - static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void) - { - return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end); - } - - static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) - { - return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR2_VALID; - } - - static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) - { - return desc->u.qcn9274_compact.mpdu_start.addr2; - } - - static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) - { - return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) & - RX_MSDU_END_INFO5_DA_IS_MCBC; - } - - static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, - struct ieee80211_hdr *hdr) - { - hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl; - hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration; - ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1); - ether_addr_copy(hdr->addr2, 
desc->u.qcn9274_compact.mpdu_start.addr2); - ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3); - if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { - ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4); - } - hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl; - } - - static void - ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, - u8 *crypto_hdr, - enum hal_encrypt_type enctype) - { - unsigned int key_id; - - switch (enctype) { - case HAL_ENCRYPT_TYPE_OPEN: - return; - case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: - case HAL_ENCRYPT_TYPE_TKIP_MIC: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); - crypto_hdr[1] = 0; - crypto_hdr[2] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); - break; - case HAL_ENCRYPT_TYPE_CCMP_128: - case HAL_ENCRYPT_TYPE_CCMP_256: - case HAL_ENCRYPT_TYPE_GCMP_128: - case HAL_ENCRYPT_TYPE_AES_GCMP_256: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); - crypto_hdr[1] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); - crypto_hdr[2] = 0; - break; - case HAL_ENCRYPT_TYPE_WEP_40: - case HAL_ENCRYPT_TYPE_WEP_104: - case HAL_ENCRYPT_TYPE_WEP_128: - case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: - case HAL_ENCRYPT_TYPE_WAPI: - return; - } - key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5, - RX_MPDU_START_INFO5_KEY_ID); - crypto_hdr[3] = 0x20 | (key_id << 6); - crypto_hdr[4] = - HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]); - crypto_hdr[5] = - HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]); - crypto_hdr[6] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]); - crypto_hdr[7] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]); - } - - static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, - RX_MSDU_END_INFO14_MSDU_DONE); - } - - static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, - RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); - } - - static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, - RX_MSDU_END_INFO13_IP_CKSUM_FAIL); - } - - static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) - { - return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, - RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == - RX_DESC_DECRYPT_STATUS_CODE_OK); - } - - static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) - { - u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13); - u32 errmap = 0; - - if (info & RX_MSDU_END_INFO13_FCS_ERR) - errmap |= HAL_RX_MPDU_ERR_FCS; - - if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) - errmap |= HAL_RX_MPDU_ERR_DECRYPT; - - if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) - errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; - - if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) - errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; - - if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) - errmap |= HAL_RX_MPDU_ERR_OVERFLOW; - - if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; - - if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; - - return 
errmap; - } - - static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void) - { - return sizeof(struct hal_rx_desc_qcn9274_compact); - } - - static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) - { - return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag, - RX_MSDU_END_64_TLV_SRC_LINK_ID); - } - - const struct hal_rx_ops hal_rx_qcn9274_compact_ops = { - .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu, - .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu, - .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes, - .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid, - .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type, - .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type, - .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl, - .rx_desc_get_mpdu_seq_ctl_vld = - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld, - .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid, - .rx_desc_get_mpdu_start_seq_no = - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no, - .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len, - .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi, - .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs, - .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw, - .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq, - .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type, - .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss, - .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid, - .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id, - .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv, - .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id, - .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len, - .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload, - .rx_desc_get_mpdu_start_offset = - ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset, - .rx_desc_get_msdu_end_offset = - ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset, - .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid, - .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2, - .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc, - .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr, - .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr, - .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done, - .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail, - .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail, - .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted, - .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err, - .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size, - .rx_desc_get_msdu_src_link_id = - ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link, - }; - - const struct hal_ops hal_qcn9274_ops = { - .create_srng_config = ath12k_hal_srng_create_config_qcn9274, - .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map, - .rxdma_ring_wmask_rx_mpdu_start = 
ath12k_hal_qcn9274_rx_mpdu_start_wmask_get, - .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get, - .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops, - }; - - static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_FIRST_MSDU); - } - - static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc) - { - return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_LAST_MSDU); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc) - { - return le16_get_bits(desc->u.wcn7850.msdu_end.info5, - RX_MSDU_END_INFO5_L3_HDR_PADDING); - } - - static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, - RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); - } - - static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, - RX_MPDU_START_INFO2_ENC_TYPE); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info11, - RX_MSDU_END_INFO11_DECAP_FORMAT); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info11, - RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); - } - - static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); - } - - static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc) - { - return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); - } - - static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.mpdu_start.info4, - RX_MPDU_START_INFO4_MPDU_SEQ_NUM); - } - - static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info10, - RX_MSDU_END_INFO10_MSDU_LENGTH); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_SGI); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_RATE_MCS); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_RECV_BW); - } - - static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc) - { - return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) - { - return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_PKT_TYPE); - } - - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) + static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab, + struct hal_srng *srng, int ring_num) { - return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + ab->hal.ops->ce_dst_setup(ab, srng, ring_num); } - static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) + static void ath12k_hal_srng_src_hw_init(struct 
ath12k_base *ab, + struct hal_srng *srng) { - return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, - RX_MPDU_START_INFO2_TID); + ab->hal.ops->srng_src_hw_init(ab, srng); } - static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) + static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab, + struct hal_srng *srng) { - return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id); + ab->hal.ops->srng_dst_hw_init(ab, srng); } - static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, - struct hal_rx_desc *ldesc) + static void ath12k_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab, + struct hal_srng *srng) { - memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end, - sizeof(struct rx_msdu_end_qcn9274)); + ab->hal.ops->set_umac_srng_ptr_addr(ab, srng); } - static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) + static int ath12k_hal_srng_get_ring_id(struct ath12k_hal *hal, + enum hal_ring_type type, + int ring_num, int mac_id) { - return le64_get_bits(desc->u.wcn7850.mpdu_start_tag, - HAL_TLV_HDR_TAG); + return hal->ops->srng_get_ring_id(hal, type, ring_num, mac_id); } - static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc) + int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab, + enum hal_ring_type ring_type, + int ring_num) { - return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id); + return ab->hal.ops->srng_update_shadow_config(ab, ring_type, + ring_num); } - static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len) + u32 ath12k_hal_ce_get_desc_size(struct ath12k_hal *hal, enum hal_ce_desc type) { - u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10); - - info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH; - info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH); - - desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info); + return hal->ops->ce_get_desc_size(type); } - static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc) + void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id) { - return &desc->u.wcn7850.msdu_payload[0]; + ab->hal.ops->tx_set_dscp_tid_map(ab, id); } - static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void) + void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, + u32 bank_config, u8 bank_id) { - return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag); + ab->hal.ops->tx_configure_bank_register(ab, bank_config, bank_id); } - static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void) + void ath12k_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab) { - return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag); + ab->hal.ops->reoq_lut_addr_read_enable(ab); } - static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc) + void ath12k_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab) { - return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR2_VALID; + ab->hal.ops->reoq_lut_set_max_peerid(ab); } - static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc) + void ath12k_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr) { - return desc->u.wcn7850.mpdu_start.addr2; + ab->hal.ops->write_ml_reoq_lut_addr(ab, paddr); } - static bool ath12k_hw_wcn7850_rx_desc_is_da_mcbc(struct hal_rx_desc *desc) + void ath12k_hal_write_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr) { - return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) & - RX_MSDU_END_INFO13_MCAST_BCAST; + 
ab->hal.ops->write_reoq_lut_addr(ab, paddr); } - static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc, - struct ieee80211_hdr *hdr) + void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset) { - hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl; - hdr->duration_id = desc->u.wcn7850.mpdu_start.duration; - ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1); - ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2); - ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3); - if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & - RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { - ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4); - } - hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl; + ab->hal.ops->setup_link_idle_list(ab, sbuf, nsbufs, tot_link_desc, + end_offset); } - static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc, - u8 *crypto_hdr, - enum hal_encrypt_type enctype) + void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map) { - unsigned int key_id; - - switch (enctype) { - case HAL_ENCRYPT_TYPE_OPEN: - return; - case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: - case HAL_ENCRYPT_TYPE_TKIP_MIC: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); - crypto_hdr[1] = 0; - crypto_hdr[2] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); - break; - case HAL_ENCRYPT_TYPE_CCMP_128: - case HAL_ENCRYPT_TYPE_CCMP_256: - case HAL_ENCRYPT_TYPE_GCMP_128: - case HAL_ENCRYPT_TYPE_AES_GCMP_256: - crypto_hdr[0] = - HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); - crypto_hdr[1] = - HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); - crypto_hdr[2] = 0; - break; - case HAL_ENCRYPT_TYPE_WEP_40: - case HAL_ENCRYPT_TYPE_WEP_104: - case HAL_ENCRYPT_TYPE_WEP_128: - case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: - case HAL_ENCRYPT_TYPE_WAPI: - return; - } - key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5), - RX_MPDU_START_INFO5_KEY_ID); - crypto_hdr[3] = 0x20 | (key_id << 6); - crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]); - crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]); - crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]); - crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]); + ab->hal.ops->reo_hw_setup(ab, ring_hash_map); } - static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab) + void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, struct hal_srng *srng) { - struct ath12k_hal *hal = &ab->hal; - struct hal_srng_config *s; - - hal->srng_config = kmemdup(hw_srng_config_template, - sizeof(hw_srng_config_template), - GFP_KERNEL); - if (!hal->srng_config) - return -ENOMEM; - - s = &hal->srng_config[HAL_REO_DST]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; - s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; - - s = &hal->srng_config[HAL_REO_EXCEPTION]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; - - s = &hal->srng_config[HAL_REO_REINJECT]; - s->max_rings = 1; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + 
HAL_SW2REO_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; - - s = &hal->srng_config[HAL_REO_CMD]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; - - s = &hal->srng_config[HAL_REO_STATUS]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; - - s = &hal->srng_config[HAL_TCL_DATA]; - s->max_rings = 5; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; - s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; - - s = &hal->srng_config[HAL_TCL_CMD]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; - - s = &hal->srng_config[HAL_TCL_STATUS]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; - - s = &hal->srng_config[HAL_CE_SRC]; - s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); - - s = &hal->srng_config[HAL_CE_DST]; - s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - - s = &hal->srng_config[HAL_CE_DST_STATUS]; - s->max_rings = 12; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + - HAL_CE_DST_STATUS_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP; - s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); - - s = &hal->srng_config[HAL_WBM_IDLE_LINK]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; - - s = &hal->srng_config[HAL_SW2WBM_RELEASE]; - s->max_rings = 1; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + - HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; - - s = &hal->srng_config[HAL_WBM2SW_RELEASE]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; - s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - - HAL_WBM0_RELEASE_RING_BASE_LSB(ab); - s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; - - s = &hal->srng_config[HAL_RXDMA_BUF]; - s->max_rings = 2; - s->mac_type = ATH12K_HAL_SRNG_PMAC; - - s = &hal->srng_config[HAL_RXDMA_DST]; - s->max_rings = 1; - s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2; - - /* below rings are not used */ - s = &hal->srng_config[HAL_RXDMA_DIR_BUF]; - s->max_rings = 0; - - s = 
&hal->srng_config[HAL_PPE2TCL]; - s->max_rings = 0; - - s = &hal->srng_config[HAL_PPE_RELEASE]; - s->max_rings = 0; - - s = &hal->srng_config[HAL_TX_MONITOR_BUF]; - s->max_rings = 0; - - s = &hal->srng_config[HAL_TX_MONITOR_DST]; - s->max_rings = 0; - - s = &hal->srng_config[HAL_PPE2TCL]; - s->max_rings = 0; - - return 0; + ab->hal.ops->reo_init_cmd_ring(ab, srng); } - static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc) ++void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab) +{ - return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14, - RX_MSDU_END_INFO14_MSDU_DONE); ++ ab->hal.ops->reo_shared_qaddr_cache_clear(ab); +} ++EXPORT_SYMBOL(ath12k_hal_reo_shared_qaddr_cache_clear); + - static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc) + void ath12k_hal_rx_buf_addr_info_set(struct ath12k_hal *hal, + struct ath12k_buffer_addr *binfo, + dma_addr_t paddr, u32 cookie, u8 manager) { - return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, - RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); + hal->ops->rx_buf_addr_info_set(binfo, paddr, cookie, manager); } - static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc) + void ath12k_hal_rx_buf_addr_info_get(struct ath12k_hal *hal, + struct ath12k_buffer_addr *binfo, + dma_addr_t *paddr, u32 *msdu_cookies, + u8 *rbm) { - return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, - RX_MSDU_END_INFO13_IP_CKSUM_FAIL); + hal->ops->rx_buf_addr_info_get(binfo, paddr, msdu_cookies, rbm); } - static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc) + void ath12k_hal_rx_msdu_list_get(struct ath12k_hal *hal, struct ath12k *ar, + void *link_desc, + void *msdu_list, + u16 *num_msdus) { - return (le32_get_bits(desc->u.wcn7850.msdu_end.info14, - RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == - RX_DESC_DECRYPT_STATUS_CODE_OK); + hal->ops->rx_msdu_list_get(ar, link_desc, msdu_list, num_msdus); } - static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc) + void ath12k_hal_rx_reo_ent_buf_paddr_get(struct ath12k_hal *hal, void *rx_desc, + dma_addr_t *paddr, + u32 *sw_cookie, + struct ath12k_buffer_addr **pp_buf_addr, + u8 *rbm, u32 *msdu_cnt) { - u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13); - u32 errmap = 0; - - if (info & RX_MSDU_END_INFO13_FCS_ERR) - errmap |= HAL_RX_MPDU_ERR_FCS; - - if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) - errmap |= HAL_RX_MPDU_ERR_DECRYPT; - - if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) - errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; - - if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) - errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; - - if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) - errmap |= HAL_RX_MPDU_ERR_OVERFLOW; - - if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; - - if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) - errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; - - return errmap; + hal->ops->rx_reo_ent_buf_paddr_get(rx_desc, paddr, sw_cookie, + pp_buf_addr, rbm, msdu_cnt); } - static u32 ath12k_hw_wcn7850_get_rx_desc_size(void) + void ath12k_hal_cc_config(struct ath12k_base *ab) { - return sizeof(struct hal_rx_desc_wcn7850); + ab->hal.ops->cc_config(ab); } - static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc) + enum hal_rx_buf_return_buf_manager + ath12k_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id) { - return 0; + return hal->ops->get_idle_link_rbm(hal, device_id); } - const struct hal_rx_ops hal_rx_wcn7850_ops = { - .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu, - 
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu, - .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes, - .rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid, - .rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type, - .rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type, - .rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl, - .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld, - .rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid, - .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no, - .rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len, - .rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi, - .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs, - .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw, - .rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq, - .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type, - .rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss, - .rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid, - .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id, - .rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv, - .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag, - .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id, - .rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len, - .rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload, - .rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset, - .rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset, - .rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid, - .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2, - .rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc, - .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr, - .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr, - .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done, - .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail, - .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail, - .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted, - .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err, - .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size, - .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link, - }; - - const struct hal_ops hal_wcn7850_ops = { - .create_srng_config = ath12k_hal_srng_create_config_wcn7850, - .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map, - .rxdma_ring_wmask_rx_mpdu_start = NULL, - .rxdma_ring_wmask_rx_msdu_end = NULL, - .get_hal_rx_compact_ops = NULL, - }; - - static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab) + static int ath12k_hal_alloc_cont_rdp(struct ath12k_hal *hal) { - struct ath12k_hal *hal = &ab->hal; size_t size; size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; diff --cc drivers/net/wireless/ath/ath12k/hal.h index efe00e1679986,ce038906bd069..f23ba1f9eaac2 --- a/drivers/net/wireless/ath/ath12k/hal.h +++ b/drivers/net/wireless/ath/ath12k/hal.h @@@ -1127,6 -1406,26 +1406,27 @@@ struct hal_ops struct hal_wbm_idle_scatter_list *sbuf, u32 nsbufs, u32 tot_link_desc, u32 end_offset); + void (*reo_init_cmd_ring)(struct ath12k_base *ab, + struct hal_srng *srng); ++ void 
(*reo_shared_qaddr_cache_clear)(struct ath12k_base *ab); + void (*reo_hw_setup)(struct ath12k_base *ab, u32 ring_hash_map); + void (*rx_buf_addr_info_set)(struct ath12k_buffer_addr *binfo, + dma_addr_t paddr, u32 cookie, u8 manager); + void (*rx_buf_addr_info_get)(struct ath12k_buffer_addr *binfo, + dma_addr_t *paddr, u32 *msdu_cookies, + u8 *rbm); + void (*cc_config)(struct ath12k_base *ab); + enum hal_rx_buf_return_buf_manager + (*get_idle_link_rbm)(struct ath12k_hal *hal, u8 device_id); + void (*rx_msdu_list_get)(struct ath12k *ar, + void *link_desc, + void *msdu_list, + u16 *num_msdus); + void (*rx_reo_ent_buf_paddr_get)(void *rx_desc, dma_addr_t *paddr, + u32 *sw_cookie, + struct ath12k_buffer_addr **pp_buf_addr, + u8 *rbm, u32 *msdu_cnt); + }; dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab, struct hal_srng *srng); diff --cc drivers/net/wireless/ath/ath12k/htc.c index d13616bf07f43,fe8218a56125b..92138caa2a82f --- a/drivers/net/wireless/ath/ath12k/htc.c +++ b/drivers/net/wireless/ath/ath12k/htc.c @@@ -1,7 -1,8 +1,7 @@@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. -- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include #include diff --cc drivers/net/wireless/ath/ath12k/mac.c index f7a2a544bef20,42e7503769267..2f4daee9e2f0e --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@@ -12010,14 -11741,12 +12080,14 @@@ ath12k_set_vdev_param_to_all_vifs(struc /* mac80211 stores device specific RTS/Fragmentation threshold value, * this is set interface specific to firmware from ath12k driver */ - static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, - int radio_idx, u32 value) + int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, + int radio_idx, u32 value) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct wiphy *wiphy = hw->wiphy; struct ath12k *ar; - int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i; + int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; + int ret = 0, ret_err, i; lockdep_assert_wiphy(hw->wiphy); @@@ -12722,12 -12294,14 +12795,14 @@@ ath12k_mac_validate_fixed_rate_settings const struct cfg80211_bitrate_mask *mask, unsigned int link_id) { - bool he_fixed_rate = false, vht_fixed_rate = false; - const u16 *vht_mcs_mask, *he_mcs_mask; + bool eht_fixed_rate = false, he_fixed_rate = false, vht_fixed_rate = false; + const u16 *vht_mcs_mask, *he_mcs_mask, *eht_mcs_mask; struct ieee80211_link_sta *link_sta; - struct ath12k_peer *peer, *tmp; + struct ath12k_dp_link_peer *peer, *tmp; - u8 vht_nss, he_nss; + u8 vht_nss, he_nss, eht_nss; int ret = true; + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; @@@ -12747,11 -12317,10 +12822,11 @@@ vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask); he_nss = ath12k_mac_max_he_nss(he_mcs_mask); + eht_nss = ath12k_mac_max_eht_nss(eht_mcs_mask); rcu_read_lock(); - spin_lock_bh(&ar->ab->base_lock); - list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) { + spin_lock_bh(&dp->dp_lock); + list_for_each_entry_safe(peer, tmp, &dp->peers, list) { if (peer->sta) { link_sta = rcu_dereference(peer->sta->link[link_id]); if (!link_sta) { @@@ -13131,37 -12674,20 +13208,41 @@@ int ath12k_mac_op_get_survey(struct iee return 0; } + EXPORT_SYMBOL(ath12k_mac_op_get_survey); +static void 
ath12k_mac_put_chain_rssi(struct station_info *sinfo, + struct ath12k_link_sta *arsta) +{ + s8 rssi; + int i; + + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chains &= ~BIT(i); + rssi = arsta->chain_signal[i]; + + if (rssi != ATH12K_DEFAULT_NOISE_FLOOR && + rssi != ATH12K_INVALID_RSSI_FULL && + rssi != ATH12K_INVALID_RSSI_EMPTY && + rssi != 0) { + sinfo->chain_signal[i] = rssi; + sinfo->chains |= BIT(i); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + } + } +} + - static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) + void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) { struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_dp_link_peer_rate_info rate_info = {}; struct ath12k_fw_stats_req_params params = {}; + struct ath12k_dp_link_peer *peer; struct ath12k_link_sta *arsta; s8 signal, noise_floor; + struct ath12k_dp *dp; struct ath12k *ar; bool db2dbm; @@@ -13293,18 -12830,28 +13395,32 @@@ void ath12k_mac_op_link_sta_statistics( link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } + link_sinfo->signal_avg = ewma_avg_rssi_read(&peer->avg_rssi); ++ + if (!db2dbm) + link_sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR; ++ + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + + link_sinfo->tx_retries = peer->tx_retry_count; + link_sinfo->tx_failed = peer->tx_retry_failed; + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); + link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + /* TODO: Use real NF instead of default one. */ - signal = arsta->rssi_comb; + signal = peer->rssi_comb; - params.pdev_id = ar->pdev->pdev_id; - params.vdev_id = 0; - params.stats_id = WMI_REQUEST_VDEV_STAT; + spin_unlock_bh(&ar->ab->dp->dp_lock); - if (!signal && - ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && - !(ath12k_mac_get_fw_stats(ar, ¶ms))) { - signal = arsta->rssi_beacon; - ath12k_fw_stats_reset(ar); + if (!signal && ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA) { + params.pdev_id = ar->pdev->pdev_id; + params.vdev_id = 0; + params.stats_id = WMI_REQUEST_VDEV_STAT; + - if (!ath12k_mac_get_fw_stats(ar, ¶ms)) ++ if (!ath12k_mac_get_fw_stats(ar, ¶ms)) { + signal = arsta->rssi_beacon; ++ ath12k_fw_stats_reset(ar); ++ } } if (signal) { diff --cc drivers/net/wireless/ath/ath12k/mhi.c index 08f44baf182a5,1f680f6e65d30..45c0f66dcc5ea --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@@ -1,7 -1,7 +1,7 @@@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. -- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. ++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include diff --cc drivers/net/wireless/ath/ath12k/mhi.h index 7358b8477536a,5e1363650a9a7..3674326763858 --- a/drivers/net/wireless/ath/ath12k/mhi.h +++ b/drivers/net/wireless/ath/ath12k/mhi.h @@@ -1,7 -1,8 +1,7 @@@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. -- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #ifndef _ATH12K_MHI_H #define _ATH12K_MHI_H diff --cc drivers/net/wireless/ath/ath12k/pci.c index a12c8379cb466,0509339d2122f..a42c4289c6b27 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@@ -1862,34 -1789,44 +1807,48 @@@ static const struct dev_pm_ops __maybe_ ath12k_pci_pm_resume_early) }; - static struct pci_driver ath12k_pci_driver = { - .name = "ath12k_pci", - .id_table = ath12k_pci_id_table, - .probe = ath12k_pci_probe, - .remove = ath12k_pci_remove, - .shutdown = ath12k_pci_shutdown, - .driver.pm = &ath12k_pci_pm_ops, - }; - - int ath12k_pci_init(void) + int ath12k_pci_register_driver(const enum ath12k_device_family device_id, + struct ath12k_pci_driver *driver) { - int ret; + struct pci_driver *pci_driver; - ret = pci_register_driver(&ath12k_pci_driver); - if (ret) { - pr_err("failed to register ath12k pci driver: %d\n", - ret); - return ret; + if (device_id >= ATH12K_DEVICE_FAMILY_MAX) + return -EINVAL; + + if (!driver || !driver->ops.probe || + !driver->ops.arch_init || !driver->ops.arch_deinit) + return -EINVAL; + + if (ath12k_pci_family_drivers[device_id]) { + pr_err("Driver already registered for %d\n", device_id); + return -EALREADY; } - return 0; + ath12k_pci_family_drivers[device_id] = driver; + + pci_driver = &ath12k_pci_family_drivers[device_id]->driver; + pci_driver->name = driver->name; + pci_driver->id_table = driver->id_table; + pci_driver->probe = ath12k_pci_probe; + pci_driver->remove = ath12k_pci_remove; + pci_driver->shutdown = ath12k_pci_shutdown; + pci_driver->driver.pm = &ath12k_pci_pm_ops; + + return pci_register_driver(pci_driver); } + EXPORT_SYMBOL(ath12k_pci_register_driver); - void ath12k_pci_exit(void) + void ath12k_pci_unregister_driver(const enum ath12k_device_family device_id) { - pci_unregister_driver(&ath12k_pci_driver); + if (device_id >= ATH12K_DEVICE_FAMILY_MAX || + !ath12k_pci_family_drivers[device_id]) + return; + + pci_unregister_driver(&ath12k_pci_family_drivers[device_id]->driver); + ath12k_pci_family_drivers[device_id] = NULL; } + EXPORT_SYMBOL(ath12k_pci_unregister_driver); + +/* firmware files */ +MODULE_FIRMWARE(ATH12K_FW_DIR "/QCN9274/hw2.0/*"); +MODULE_FIRMWARE(ATH12K_FW_DIR "/WCN7850/hw2.0/*"); diff --cc drivers/net/wireless/ath/ath12k/wifi7/dp.c index 0000000000000,0b2c7f37c7566..2b194879ee80d mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/dp.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/dp.c @@@ -1,0 -1,179 +1,181 @@@ + // SPDX-License-Identifier: BSD-3-Clause-Clear + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + #include "../core.h" + #include "../debug.h" + #include "../dp_rx.h" + #include "../dp_tx.h" + #include "hal_desc.h" + #include "../dp_mon.h" + #include "dp_mon.h" + #include "../dp_cmn.h" + #include "dp_rx.h" + #include "dp.h" + #include "dp_tx.h" + #include "hal.h" + + static int ath12k_wifi7_dp_service_srng(struct ath12k_dp *dp, + struct ath12k_ext_irq_grp *irq_grp, + int budget) + { + struct napi_struct *napi = &irq_grp->napi; + int grp_id = irq_grp->grp_id; + int work_done = 0; + int i = 0, j; + int tot_work_done = 0; + enum dp_monitor_mode monitor_mode; + u8 ring_mask; + + if (dp->hw_params->ring_mask->tx[grp_id]) { + i = fls(dp->hw_params->ring_mask->tx[grp_id]) - 1; + ath12k_wifi7_dp_tx_completion_handler(dp, i); + } + + if (dp->hw_params->ring_mask->rx_err[grp_id]) { + work_done = ath12k_wifi7_dp_rx_process_err(dp, napi, budget); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + + if (dp->hw_params->ring_mask->rx_wbm_rel[grp_id]) { + work_done = ath12k_wifi7_dp_rx_process_wbm_err(dp, napi, budget); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + + if (dp->hw_params->ring_mask->rx[grp_id]) { + i = fls(dp->hw_params->ring_mask->rx[grp_id]) - 1; + work_done = ath12k_wifi7_dp_rx_process(dp, i, napi, budget); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + + if (dp->hw_params->ring_mask->rx_mon_status[grp_id]) { + ring_mask = dp->hw_params->ring_mask->rx_mon_status[grp_id]; + for (i = 0; i < dp->ab->num_radios; i++) { + for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) { + int id = i * dp->hw_params->num_rxdma_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_wifi7_dp_mon_process_ring(dp, id, napi, + budget, + 0); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + } + } + } + + if (dp->hw_params->ring_mask->rx_mon_dest[grp_id]) { + monitor_mode = ATH12K_DP_RX_MONITOR_MODE; + ring_mask = dp->hw_params->ring_mask->rx_mon_dest[grp_id]; + for (i = 0; i < dp->ab->num_radios; i++) { + for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) { + int id = i * dp->hw_params->num_rxdma_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_wifi7_dp_mon_process_ring(dp, id, napi, + budget, + monitor_mode); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + } + } + } + + if (dp->hw_params->ring_mask->tx_mon_dest[grp_id]) { + monitor_mode = ATH12K_DP_TX_MONITOR_MODE; + ring_mask = dp->hw_params->ring_mask->tx_mon_dest[grp_id]; + for (i = 0; i < dp->ab->num_radios; i++) { + for (j = 0; j < dp->hw_params->num_rxdma_per_pdev; j++) { + int id = i * dp->hw_params->num_rxdma_per_pdev + j; + + if (ring_mask & BIT(id)) { + work_done = + ath12k_wifi7_dp_mon_process_ring(dp, id, + napi, budget, + monitor_mode); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + } + } + } + + if (dp->hw_params->ring_mask->reo_status[grp_id]) + ath12k_wifi7_dp_rx_process_reo_status(dp); + + if (dp->hw_params->ring_mask->host2rxdma[grp_id]) { + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + LIST_HEAD(list); + + ath12k_dp_rx_bufs_replenish(dp, rx_ring, &list, 0); + } + + /* TODO: Implement handler for other interrupts */ + + done: + return tot_work_done; + } + + static struct ath12k_dp_arch_ops ath12k_wifi7_dp_arch_ops = { + .service_srng = ath12k_wifi7_dp_service_srng, + .tx_get_vdev_bank_config = 
ath12k_wifi7_dp_tx_get_vdev_bank_config, + .reo_cmd_send = ath12k_wifi7_dp_reo_cmd_send, + .setup_pn_check_reo_cmd = ath12k_wifi7_dp_setup_pn_check_reo_cmd, + .rx_peer_tid_delete = ath12k_wifi7_dp_rx_peer_tid_delete, + .reo_cache_flush = ath12k_wifi7_dp_reo_cache_flush, + .rx_link_desc_return = ath12k_wifi7_dp_rx_link_desc_return, + .rx_frags_cleanup = ath12k_wifi7_dp_rx_frags_cleanup, + .peer_rx_tid_reo_update = ath12k_wifi7_peer_rx_tid_reo_update, + .rx_assign_reoq = ath12k_wifi7_dp_rx_assign_reoq, + .peer_rx_tid_qref_setup = ath12k_wifi7_peer_rx_tid_qref_setup, ++ .peer_rx_tid_qref_reset = ath12k_wifi7_peer_rx_tid_qref_reset, ++ .rx_tid_delete_handler = ath12k_wifi7_dp_rx_tid_delete_handler, + }; + + /* TODO: remove export once this file is built with wifi7 ko */ + struct ath12k_dp *ath12k_wifi7_dp_device_alloc(struct ath12k_base *ab) + { + struct ath12k_dp *dp; + + /* TODO: align dp later if cache alignment becomes a bottleneck */ + dp = kzalloc(sizeof(*dp), GFP_KERNEL); + if (!dp) + return NULL; + + dp->ab = ab; + dp->dev = ab->dev; + dp->hw_params = ab->hw_params; + dp->hal = &ab->hal; + + dp->ops = &ath12k_wifi7_dp_arch_ops; + + return dp; + } + + void ath12k_wifi7_dp_device_free(struct ath12k_dp *dp) + { + kfree(dp); + } diff --cc drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c index 0000000000000,9878553289c47..bd741532b7dc8 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_mon.c @@@ -1,0 -1,3354 +1,3385 @@@ + // SPDX-License-Identifier: BSD-3-Clause-Clear + /* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + + #include "hal_desc.h" + #include "../dp_mon.h" + #include "dp_mon.h" + #include "../debug.h" + #include "hal_qcn9274.h" + #include "dp_rx.h" + #include "../dp_tx.h" + #include "../peer.h" + + static void + ath12k_wifi7_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info, + u16 tlv_len, const void *tlv_data) + { + if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) { + memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len, + tlv_data, tlv_len); + ppdu_info->tlv_aggr.cur_len += tlv_len; + } + } + + static void + ath12k_wifi7_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info) + { + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; + } + + /* Hardware fill buffer with 128 bytes aligned. So need to reap it + * with 128 bytes aligned. 
+ */ + #define RXDMA_DATA_DMA_BLOCK_SIZE 128 + + static void + ath12k_wifi7_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, + bool *is_frag, u32 *total_len, + u32 *frag_len, u32 *msdu_cnt) + { + if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { + *is_frag = true; + *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE - + sizeof(struct hal_rx_desc)) & + ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1); + *total_len += *frag_len; + } else { + if (*is_frag) + *frag_len = info->msdu_len - *total_len; + else + *frag_len = info->msdu_len; + + *msdu_cnt -= 1; + } + } + + static void + ath12k_wifi7_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, + struct hal_rx_user_status *rx_user_status) + { + rx_user_status->ul_ofdma_user_v0_word0 = + __le32_to_cpu(ppdu_end_user->usr_resp_ref); + rx_user_status->ul_ofdma_user_v0_word1 = + __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext); + } + + static void + ath12k_wifi7_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *stats, + void *ppduinfo, + struct hal_rx_user_status *rx_user_status) + { + rx_user_status->mpdu_ok_byte_count = + le32_get_bits(stats->info7, + HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT); + rx_user_status->mpdu_err_byte_count = + le32_get_bits(stats->info8, + HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT); + } + + static void + ath12k_wifi7_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct hal_rx_user_status *rx_user_status) + { + rx_user_status->ast_index = ppdu_info->ast_index; + rx_user_status->tid = ppdu_info->tid; + rx_user_status->tcp_ack_msdu_count = + ppdu_info->tcp_ack_msdu_count; + rx_user_status->tcp_msdu_count = + ppdu_info->tcp_msdu_count; + rx_user_status->udp_msdu_count = + ppdu_info->udp_msdu_count; + rx_user_status->other_msdu_count = + ppdu_info->other_msdu_count; + rx_user_status->frame_control = ppdu_info->frame_control; + rx_user_status->frame_control_info_valid = + ppdu_info->frame_control_info_valid; + rx_user_status->data_sequence_control_info_valid = + ppdu_info->data_sequence_control_info_valid; + rx_user_status->first_data_seq_ctrl = + ppdu_info->first_data_seq_ctrl; + rx_user_status->preamble_type = ppdu_info->preamble_type; + rx_user_status->ht_flags = ppdu_info->ht_flags; + rx_user_status->vht_flags = ppdu_info->vht_flags; + rx_user_status->he_flags = ppdu_info->he_flags; + rx_user_status->rs_flags = ppdu_info->rs_flags; + + rx_user_status->mpdu_cnt_fcs_ok = + ppdu_info->num_mpdu_fcs_ok; + rx_user_status->mpdu_cnt_fcs_err = + ppdu_info->num_mpdu_fcs_err; + memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0], + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0])); + + ath12k_wifi7_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status); + } + + static inline enum ath12k_eht_ru_size + hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size) + { + switch (hal_ru_size) { + case HAL_EHT_RU_26: + return ATH12K_EHT_RU_26; + case HAL_EHT_RU_52: + return ATH12K_EHT_RU_52; + case HAL_EHT_RU_78: + return ATH12K_EHT_RU_52_26; + case HAL_EHT_RU_106: + return ATH12K_EHT_RU_106; + case HAL_EHT_RU_132: + return ATH12K_EHT_RU_106_26; + case HAL_EHT_RU_242: + return ATH12K_EHT_RU_242; + case HAL_EHT_RU_484: + return ATH12K_EHT_RU_484; + case HAL_EHT_RU_726: + return ATH12K_EHT_RU_484_242; + case HAL_EHT_RU_996: + return ATH12K_EHT_RU_996; + case HAL_EHT_RU_996x2: + return ATH12K_EHT_RU_996x2; + case HAL_EHT_RU_996x3: + 
return ATH12K_EHT_RU_996x3; + case HAL_EHT_RU_996x4: + return ATH12K_EHT_RU_996x4; + case HAL_EHT_RU_NONE: + return ATH12K_EHT_RU_INVALID; + case HAL_EHT_RU_996_484: + return ATH12K_EHT_RU_996_484; + case HAL_EHT_RU_996x2_484: + return ATH12K_EHT_RU_996x2_484; + case HAL_EHT_RU_996x3_484: + return ATH12K_EHT_RU_996x3_484; + case HAL_EHT_RU_996_484_242: + return ATH12K_EHT_RU_996_484_242; + default: + return ATH12K_EHT_RU_INVALID; + } + } + + static inline u32 + hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size) + { + switch (ru_size) { + case ATH12K_EHT_RU_26: + return RU_26; + case ATH12K_EHT_RU_52: + return RU_52; + case ATH12K_EHT_RU_52_26: + return RU_52_26; + case ATH12K_EHT_RU_106: + return RU_106; + case ATH12K_EHT_RU_106_26: + return RU_106_26; + case ATH12K_EHT_RU_242: + return RU_242; + case ATH12K_EHT_RU_484: + return RU_484; + case ATH12K_EHT_RU_484_242: + return RU_484_242; + case ATH12K_EHT_RU_996: + return RU_996; + case ATH12K_EHT_RU_996_484: + return RU_996_484; + case ATH12K_EHT_RU_996_484_242: + return RU_996_484_242; + case ATH12K_EHT_RU_996x2: + return RU_2X996; + case ATH12K_EHT_RU_996x2_484: + return RU_2X996_484; + case ATH12K_EHT_RU_996x3: + return RU_3X996; + case ATH12K_EHT_RU_996x3_484: + return RU_3X996_484; + case ATH12K_EHT_RU_996x4: + return RU_4X996; + default: + return RU_INVALID; + } + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info, + u16 user_id, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_user_status *mon_rx_user_status = NULL; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID; + u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID; + u32 ru_type_80_0, ru_start_index_80_0; + u32 ru_type_80_1, ru_start_index_80_1; + u32 ru_type_80_2, ru_start_index_80_2; + u32 ru_type_80_3, ru_start_index_80_3; + u32 ru_size = 0, num_80mhz_with_ru = 0; + u64 ru_index_320mhz = 0; + u32 ru_index_per80mhz; + + reception_type = le32_get_bits(rx_usr_info->info0, + HAL_RX_USR_INFO0_RECEPTION_TYPE); + + switch (reception_type) { + case HAL_RECEPTION_TYPE_SU: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; + break; + case HAL_RECEPTION_TYPE_DL_MU_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFMA: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO; + } + + ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC); + ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC); + ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM); + ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW); + ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS); + ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1; + + if (user_id < HAL_MAX_UL_MU_USERS) { + mon_rx_user_status = &ppdu_info->userstats[user_id]; + mon_rx_user_status->mcs = ppdu_info->mcs; + mon_rx_user_status->nss = ppdu_info->nss; + } + + if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || + ppdu_info->reception_type == 
HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)) + return; + + /* RU allocation present only for OFDMA reception */ + ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0); + ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_0); + if (ru_type_80_0 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_0; + ru_index_per80mhz = ru_start_index_80_0; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1); + ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_1); + if (ru_type_80_1 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_1; + ru_index_per80mhz = ru_start_index_80_1; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2); + ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_2); + if (ru_type_80_2 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_2; + ru_index_per80mhz = ru_start_index_80_2; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3); + ru_start_index_80_3 = le32_get_bits(rx_usr_info->info2, + HAL_RX_USR_INFO3_RU_START_IDX_80_3); + if (ru_type_80_3 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_3; + ru_index_per80mhz = ru_start_index_80_3; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + if (num_80mhz_with_ru > 1) { + /* Calculate the MRU index */ + switch (ru_index_320mhz) { + case HAL_EHT_RU_996_484_0: + case HAL_EHT_RU_996x2_484_0: + case HAL_EHT_RU_996x3_484_0: + ru_index = 0; + break; + case HAL_EHT_RU_996_484_1: + case HAL_EHT_RU_996x2_484_1: + case HAL_EHT_RU_996x3_484_1: + ru_index = 1; + break; + case HAL_EHT_RU_996_484_2: + case HAL_EHT_RU_996x2_484_2: + case HAL_EHT_RU_996x3_484_2: + ru_index = 2; + break; + case HAL_EHT_RU_996_484_3: + case HAL_EHT_RU_996x2_484_3: + case HAL_EHT_RU_996x3_484_3: + ru_index = 3; + break; + case HAL_EHT_RU_996_484_4: + case HAL_EHT_RU_996x2_484_4: + case HAL_EHT_RU_996x3_484_4: + ru_index = 4; + break; + case HAL_EHT_RU_996_484_5: + case HAL_EHT_RU_996x2_484_5: + case HAL_EHT_RU_996x3_484_5: + ru_index = 5; + break; + case HAL_EHT_RU_996_484_6: + case HAL_EHT_RU_996x2_484_6: + case HAL_EHT_RU_996x3_484_6: + ru_index = 6; + break; + case HAL_EHT_RU_996_484_7: + case HAL_EHT_RU_996x2_484_7: + case HAL_EHT_RU_996x3_484_7: + ru_index = 7; + break; + case HAL_EHT_RU_996x2_484_8: + ru_index = 8; + break; + case HAL_EHT_RU_996x2_484_9: + ru_index = 9; + break; + case HAL_EHT_RU_996x2_484_10: + ru_index = 10; + break; + case HAL_EHT_RU_996x2_484_11: + ru_index = 11; + break; + default: + ru_index = HAL_EHT_RU_INVALID; + break; + } + + ru_size += 4; + } + + rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size); + if (rtap_ru_size != ATH12K_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(rtap_ru_size, + IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE); + eht->data[1] = cpu_to_le32(data); + } + + 
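+ /* Also mark the decoded RU/MRU index as known in the radiotap EHT
+  * header so monitor-mode captures expose the OFDMA allocation.
+  */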
if (ru_index != HAL_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(rtap_ru_size, + IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX); + eht->data[1] = cpu_to_le32(data); + } + + if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID && + rtap_ru_size != ATH12K_EHT_RU_INVALID) { + mon_rx_user_status->ul_ofdma_ru_start_index = ru_index; + mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size; + + ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size); + + mon_rx_user_status->ul_ofdma_ru_width = ru_width; + mon_rx_user_status->ofdma_info_valid = 1; + } + } + + static void + ath12k_wifi7_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0 = __le32_to_cpu(lsigb->info0); + u8 rate; + + rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE); + switch (rate) { + case 1: + rate = HAL_RX_LEGACY_RATE_1_MBPS; + break; + case 2: + case 5: + rate = HAL_RX_LEGACY_RATE_2_MBPS; + break; + case 3: + case 6: + rate = HAL_RX_LEGACY_RATE_5_5_MBPS; + break; + case 4: + case 7: + rate = HAL_RX_LEGACY_RATE_11_MBPS; + break; + default: + rate = HAL_RX_LEGACY_RATE_INVALID; + } + + ppdu_info->rate = rate; + ppdu_info->cck_flag = 1; + } + + static void + ath12k_wifi7_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0 = __le32_to_cpu(lsiga->info0); + u8 rate; + + rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE); + switch (rate) { + case 8: + rate = HAL_RX_LEGACY_RATE_48_MBPS; + break; + case 9: + rate = HAL_RX_LEGACY_RATE_24_MBPS; + break; + case 10: + rate = HAL_RX_LEGACY_RATE_12_MBPS; + break; + case 11: + rate = HAL_RX_LEGACY_RATE_6_MBPS; + break; + case 12: + rate = HAL_RX_LEGACY_RATE_54_MBPS; + break; + case 13: + rate = HAL_RX_LEGACY_RATE_36_MBPS; + break; + case 14: + rate = HAL_RX_LEGACY_RATE_18_MBPS; + break; + case 15: + rate = HAL_RX_LEGACY_RATE_9_MBPS; + break; + default: + rate = HAL_RX_LEGACY_RATE_INVALID; + } + + ppdu_info->rate = rate; + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 common; + + ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BW); + ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_UL_DL); + + common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common); + common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_PHY_VERSION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) | + u32_encode_bits(ppdu_info->u_sig_info.bw, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) | + u32_encode_bits(ppdu_info->u_sig_info.ul_dl, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_TXOP, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + ppdu_info->u_sig_info.usig.common = cpu_to_le32(common); + + switch (ppdu_info->u_sig_info.bw) { + default: + fallthrough; + case HAL_EHT_BW_20: + ppdu_info->bw = 
HAL_RX_BW_20MHZ; + break; + case HAL_EHT_BW_40: + ppdu_info->bw = HAL_RX_BW_40MHZ; + break; + case HAL_EHT_BW_80: + ppdu_info->bw = HAL_RX_BW_80MHZ; + break; + case HAL_EHT_BW_160: + ppdu_info->bw = HAL_RX_BW_160MHZ; + break; + case HAL_EHT_BW_320_1: + case HAL_EHT_BW_320_2: + ppdu_info->bw = HAL_RX_BW_320MHZ; + break; + } + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2; + u32 common, value, mask; + + spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1; + spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE); + + common |= ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1, + spatial_reuse1) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2, + spatial_reuse2) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + spatial_reuse1 | spatial_reuse2 | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_mu sig_symb, punc; + u32 common, value, mask; + + sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS; + punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + ppdu_info->u_sig_info.eht_sig_mcs = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS); + ppdu_info->u_sig_info.num_eht_sig_sym = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM); + + common |= ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + 
u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO, + punc) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) | + u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym, + sig_symb) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + punc | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS | + sig_symb | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u8 comp_mode; + + ppdu_info->eht_usig = true; + + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info); + + comp_mode = le32_get_bits(usig->non_cmn.mu.info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + + if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl) + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info); + else + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info); + } + + static void + ath12k_wifi7_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 nsts, info0, info1; + u8 gi_setting; + + info0 = __le32_to_cpu(vht_sig->info0); + info1 = __le32_to_cpu(vht_sig->info1); + + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); + ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS); + gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING); + switch (gi_setting) { + case HAL_RX_VHT_SIG_A_NORMAL_GI: + ppdu_info->gi = HAL_RX_GI_0_8_US; + break; + case HAL_RX_VHT_SIG_A_SHORT_GI: + case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY: + ppdu_info->gi = HAL_RX_GI_0_4_US; + break; + } + + ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC); + nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS); + if (ppdu_info->is_stbc && nsts > 0) + nsts = ((nsts + 1) >> 1) - 1; + - ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK); ++ ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1; + ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); + ppdu_info->beamformed = u32_get_bits(info1, + HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); + ppdu_info->vht_flag_values5 = u32_get_bits(info0, + HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); + ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) | + ppdu_info->nss); + ppdu_info->vht_flag_values2 = ppdu_info->bw; + ppdu_info->vht_flag_values4 = + u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); + } + + static void + ath12k_wifi7_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0 = 
__le32_to_cpu(ht_sig->info0); + u32 info1 = __le32_to_cpu(ht_sig->info1); + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS); + ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW); + ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC); + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); + ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); - ppdu_info->nss = (ppdu_info->mcs >> 3); ++ ppdu_info->nss = (ppdu_info->mcs >> 3) + 1; + } + + static void + ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0, value; + + info0 = __le32_to_cpu(ofdma->info0); + + ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN; + + /* HE-data2 */ + ppdu_info->he_data2 |= HE_TXBF_KNOWN; + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS); + value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM); + value = value << HE_DCM_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING); + ppdu_info->ldpc = value; + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + + /* HE-data4 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID); + value = value << HE_STA_ID_SHIFT; + ppdu_info->he_data4 |= value; + - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); ++ ppdu_info->nss = ++ u32_get_bits(info0, ++ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1; + ppdu_info->beamformed = u32_get_bits(info0, + HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); + } + + static void + ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0, value; + + info0 = __le32_to_cpu(he_sig_b2_mu->info0); + + ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN; + + ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS); + value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING); + ppdu_info->ldpc = value; + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID); + value = value << HE_STA_ID_SHIFT; + ppdu_info->he_data4 |= value; + - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS); ++ ppdu_info->nss = ++ u32_get_bits(info0, ++ HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1; + } + + static void + ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0); + u16 ru_tones; + + ru_tones = u32_get_bits(info0, + HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION); + ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); + ppdu_info->he_RU[0] = ru_tones; + } + + static void + ath12k_wifi7_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0, info1, value; + u16 he_gi = 0, he_ltf = 0; + + info0 = __le32_to_cpu(he_sig_a_mu_dl->info0); + info1 = __le32_to_cpu(he_sig_a_mu_dl->info1); + + ppdu_info->he_mu_flags = 1; + + ppdu_info->he_data1 = HE_MU_FORMAT_TYPE; + 
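+ /* Advertise which HE radiotap fields HE-SIG-A (MU DL) supplies */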
ppdu_info->he_data1 |= + HE_BSS_COLOR_KNOWN | + HE_DL_UL_KNOWN | + HE_LDPC_EXTRA_SYMBOL_KNOWN | + HE_STBC_KNOWN | + HE_DATA_BW_RU_KNOWN | + HE_DOPPLER_KNOWN; + + ppdu_info->he_data2 = + HE_GI_KNOWN | + HE_LTF_SYMBOLS_KNOWN | + HE_PRE_FEC_PADDING_KNOWN | + HE_PE_DISAMBIGUITY_KNOWN | + HE_TXOP_KNOWN | + HE_MIDABLE_PERIODICITY_KNOWN; + + /* data3 */ + ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR); + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG); + value = value << HE_DL_UL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA); + value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC); + value = value << HE_STBC_SHIFT; + ppdu_info->he_data3 |= value; + + /* data4 */ + ppdu_info->he_data4 = u32_get_bits(info0, + HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE); + ppdu_info->he_data4 = value; + + /* data5 */ + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW); + ppdu_info->he_data5 = value; + ppdu_info->bw = value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + break; + } + + ppdu_info->gi = he_gi; + value = he_gi << HE_GI_SHIFT; + ppdu_info->he_data5 |= value; + + value = he_ltf << HE_LTF_SIZE_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB); + value = (value << HE_LTF_SYM_SHIFT); + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR); + value = value << HE_PRE_FEC_PAD_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM); + value = value << HE_PE_DISAMBIGUITY_SHIFT; + ppdu_info->he_data5 |= value; + + /*data6*/ + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION); + value = value << HE_DOPPLER_SHIFT; + ppdu_info->he_data6 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION); + value = value << HE_TXOP_SHIFT; + ppdu_info->he_data6 |= value; + + /* HE-MU Flags */ + /* HE-MU-flags1 */ + ppdu_info->he_flags1 = + HE_SIG_B_MCS_KNOWN | + HE_SIG_B_DCM_KNOWN | + HE_SIG_B_COMPRESSION_FLAG_1_KNOWN | + HE_SIG_B_SYM_NUM_KNOWN | + HE_RU_0_KNOWN; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB); + ppdu_info->he_flags1 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB); + value = value << HE_DCM_FLAG_1_SHIFT; + ppdu_info->he_flags1 |= value; + + /* HE-MU-flags2 */ + ppdu_info->he_flags2 = HE_BW_KNOWN; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW); + ppdu_info->he_flags2 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB); + value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT; + ppdu_info->he_flags2 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB); + value = value - 1; + value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT; + ppdu_info->he_flags2 |= value; + + ppdu_info->is_stbc = info1 & + HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC; + } + + static void + ath12k_wifi7_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, + struct 
hal_rx_mon_ppdu_info *ppdu_info) + { + u32 info0, info1, value; + u32 dcm; + u8 he_dcm = 0, he_stbc = 0; + u16 he_gi = 0, he_ltf = 0; + + ppdu_info->he_flags = 1; + + info0 = __le32_to_cpu(he_sig_a->info0); + info1 = __le32_to_cpu(he_sig_a->info1); + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND); + if (value == 0) + ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE; + else + ppdu_info->he_data1 = HE_SU_FORMAT_TYPE; + + ppdu_info->he_data1 |= + HE_BSS_COLOR_KNOWN | + HE_BEAM_CHANGE_KNOWN | + HE_DL_UL_KNOWN | + HE_MCS_KNOWN | + HE_DCM_KNOWN | + HE_CODING_KNOWN | + HE_LDPC_EXTRA_SYMBOL_KNOWN | + HE_STBC_KNOWN | + HE_DATA_BW_RU_KNOWN | + HE_DOPPLER_KNOWN; + + ppdu_info->he_data2 |= + HE_GI_KNOWN | + HE_TXBF_KNOWN | + HE_PE_DISAMBIGUITY_KNOWN | + HE_TXOP_KNOWN | + HE_LTF_SYMBOLS_KNOWN | + HE_PRE_FEC_PADDING_KNOWN | + HE_MIDABLE_PERIODICITY_KNOWN; + + ppdu_info->he_data3 = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR); + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE); + value = value << HE_BEAM_CHANGE_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG); + value = value << HE_DL_UL_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS); + ppdu_info->mcs = value; + value = value << HE_TRANSMIT_MCS_SHIFT; + ppdu_info->he_data3 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); + he_dcm = value; + value = value << HE_DCM_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING); + value = value << HE_CODING_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA); + value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT; + ppdu_info->he_data3 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC); + he_stbc = value; + value = value << HE_STBC_SHIFT; + ppdu_info->he_data3 |= value; + + /* data4 */ + ppdu_info->he_data4 = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE); + + /* data5 */ + value = u32_get_bits(info0, + HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW); + ppdu_info->he_data5 = value; + ppdu_info->bw = value; + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE); + switch (value) { + case 0: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_1_X; + break; + case 1: + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_2_X; + break; + case 2: + he_gi = HE_GI_1_6; + he_ltf = HE_LTF_2_X; + break; + case 3: + if (he_dcm && he_stbc) { + he_gi = HE_GI_0_8; + he_ltf = HE_LTF_4_X; + } else { + he_gi = HE_GI_3_2; + he_ltf = HE_LTF_4_X; + } + break; + } + ppdu_info->gi = he_gi; + value = he_gi << HE_GI_SHIFT; + ppdu_info->he_data5 |= value; + value = he_ltf << HE_LTF_SIZE_SHIFT; + ppdu_info->ltf_size = he_ltf; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + value = (value << HE_LTF_SYM_SHIFT); + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR); + value = value << HE_PRE_FEC_PAD_SHIFT; + ppdu_info->he_data5 |= value; + + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF); + value = value << HE_TXBF_SHIFT; + ppdu_info->he_data5 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM); + value = value << HE_PE_DISAMBIGUITY_SHIFT; + ppdu_info->he_data5 |= value; + + /* data6 */ + value = u32_get_bits(info0, 
HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); + value++; + ppdu_info->he_data6 = value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND); + value = value << HE_DOPPLER_SHIFT; + ppdu_info->he_data6 |= value; + value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION); + value = value << HE_TXOP_SHIFT; + ppdu_info->he_data6 |= value; + + ppdu_info->mcs = + u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS); + ppdu_info->bw = + u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW); + ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING); + ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC); + ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF); + dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); - ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); ++ ppdu_info->nss = u32_get_bits(info0, ++ HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1; + ppdu_info->dcm = dcm; + } + + static inline bool + ath12k_wifi7_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info) + { + u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode; + u32 ul_dl = usig_info->ul_dl; + + if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1)) + return true; + + return false; + } + + static inline bool + ath12k_wifi7_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info) + { + if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0) + return true; + + return false; + } + + static inline bool + ath12k_wifi7_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info) + { + if (usig_info->ppdu_type_comp_mode == 1 && + usig_info->eht_sig_mcs == 0 && + usig_info->num_eht_sig_sym == 0) + return true; + + return false; + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S | + IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 | + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + /* GI and LTF size are separately indicated in radiotap header + * and hence will be parsed from other TLV + */ + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S); + eht->data[0] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + 
HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + eht->data[7] = cpu_to_le32(data); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* GI and LTF size are separately indicated in radiotap header + * and hence will be parsed from other TLV + */ + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O); + eht->data[0] = cpu_to_le32(data); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eb->info0, + HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + eht->data[7] = cpu_to_le32(data); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M); + + ppdu_info->mcs = le32_get_bits(user->info0, + 
HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + ppdu_info->mcs = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS); + + ppdu_info->nss = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1; + } + + static inline bool + ath12k_wifi7_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info) + { + if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU && + usig_info->ul_dl == 1) + return true; + + return false; + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv; + + ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_wifi7_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info); + + if (ath12k_wifi7_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info)) + ath12k_wifi7_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo, + ppdu_info); + else + ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo, + ppdu_info); + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1; + const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126; + enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111; + u32 data; + + ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3; + ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4; + ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5; + ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6; + ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1; + ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2; + ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2; + ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1; + + switch (ppdu_info->u_sig_info.bw) { + case HAL_EHT_BW_320_2: + case HAL_EHT_BW_320_1: + data = __le32_to_cpu(eht->data[4]); + /* CC1 2::3 */ + data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN | + 
ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3, + ru_123); + eht->data[4] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[5]); + /* CC1 2::4 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4, + ru_124); + + /* CC1 2::5 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5, + ru_125); + eht->data[5] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[6]); + /* CC1 2::6 */ + data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6, + ru_126); + eht->data[6] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_160: + data = __le32_to_cpu(eht->data[3]); + /* CC1 2::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1, + ru_121); + /* CC1 2::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2, + ru_122); + eht->data[3] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_80: + data = __le32_to_cpu(eht->data[2]); + /* CC1 1::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2, + ru_112); + eht->data[2] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_40: + fallthrough; + case HAL_EHT_BW_20: + data = __le32_to_cpu(eht->data[1]); + /* CC1 1::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1, + ru_111); + eht->data[1] = cpu_to_le32(data); + break; + default: + break; + } + } + + static void + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) + { + const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv; + + ath12k_wifi7_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_wifi7_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info); + + ath12k_wifi7_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo, + ppdu_info); + } + + static void + ath12k_wifi7_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info, + const void *tlv_data) + { + ppdu_info->is_eht = true; + + if (ath12k_wifi7_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info)) + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info); + else if (ath12k_wifi7_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info)) + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info); + else if (ath12k_wifi7_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info)) + ath12k_wifi7_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info); + } + + static void ath12k_wifi7_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap) + { + if (info & RX_MSDU_END_INFO13_FCS_ERR) + *errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + *errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + *errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + *errmap |= 
HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + } + ++static void ++ath12k_wifi7_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info, ++ struct hal_rx_mon_ppdu_info *ppdu_info) ++{ ++ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; ++ u32 known, data, cp_setting, ltf_size; ++ ++ known = __le32_to_cpu(eht->known); ++ known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI | ++ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF; ++ eht->known = cpu_to_le32(known); ++ ++ cp_setting = le32_get_bits(cmn_usr_info->info0, ++ HAL_RX_CMN_USR_INFO0_CP_SETTING); ++ ltf_size = le32_get_bits(cmn_usr_info->info0, ++ HAL_RX_CMN_USR_INFO0_LTF_SIZE); ++ ++ data = __le32_to_cpu(eht->data[0]); ++ data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI); ++ data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF); ++ eht->data[0] = cpu_to_le32(data); ++ ++ if (!ppdu_info->ltf_size) ++ ppdu_info->ltf_size = ltf_size; ++ if (!ppdu_info->gi) ++ ppdu_info->gi = cp_setting; ++} ++ + static void + ath12k_wifi7_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon, + const struct hal_rx_msdu_end *msdu_end) + { + ath12k_wifi7_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2), + &pmon->err_bitmap); + pmon->decap_format = le32_get_bits(msdu_end->info1, + RX_MSDU_END_INFO11_DECAP_FORMAT); + } + + static enum hal_rx_mon_status + ath12k_wifi7_dp_mon_rx_parse_status_tlv(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + const struct hal_tlv_64_hdr *tlv) + { + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + const void *tlv_data = tlv->value; + u32 info[7], userid; + u16 tlv_tag, tlv_len; + + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); + userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); + + if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) { + ath12k_wifi7_dp_mon_parse_eht_sig_hdr(ppdu_info, + ppdu_info->tlv_aggr.buf); + + ppdu_info->tlv_aggr.in_progress = false; + ppdu_info->tlv_aggr.cur_len = 0; + } + + switch (tlv_tag) { + case HAL_RX_PPDU_START: { + const struct hal_rx_ppdu_start *ppdu_start = tlv_data; + + u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32, + ppdu_start->ppdu_start_ts_31_0); + + info[0] = __le32_to_cpu(ppdu_start->info0); + + ppdu_info->ppdu_id = u32_get_bits(info[0], + HAL_RX_PPDU_START_INFO0_PPDU_ID); + + info[1] = __le32_to_cpu(ppdu_start->info1); + ppdu_info->chan_num = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_NUM); + ppdu_info->freq = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_FREQ); + ppdu_info->ppdu_ts = ppdu_ts; + + if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) { + ppdu_info->last_ppdu_id = ppdu_info->ppdu_id; + ppdu_info->num_users = 0; + memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0, + HAL_RX_NUM_WORDS_PER_PPDU_BITMAP * + sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0])); + } + break; + } + case HAL_RX_PPDU_END_USER_STATS: { + const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data; + u32 tid_bitmap; + + info[0] = __le32_to_cpu(eu_stats->info0); + info[1] = __le32_to_cpu(eu_stats->info1); + info[2] = __le32_to_cpu(eu_stats->info2); + info[4] = __le32_to_cpu(eu_stats->info4); + info[5] = __le32_to_cpu(eu_stats->info5); + info[6] = __le32_to_cpu(eu_stats->info6); + + ppdu_info->ast_index = + u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX); + ppdu_info->fc_valid = + u32_get_bits(info[1], 
HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID); + tid_bitmap = u32_get_bits(info[6], + HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP); + ppdu_info->tid = ffs(tid_bitmap) - 1; + ppdu_info->tcp_msdu_count = + u32_get_bits(info[4], + HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT); + ppdu_info->udp_msdu_count = + u32_get_bits(info[4], + HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT); + ppdu_info->other_msdu_count = + u32_get_bits(info[5], + HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT); + ppdu_info->tcp_ack_msdu_count = + u32_get_bits(info[5], + HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT); + ppdu_info->preamble_type = + u32_get_bits(info[1], + HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE); + ppdu_info->num_mpdu_fcs_ok = + u32_get_bits(info[1], + HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK); + ppdu_info->num_mpdu_fcs_err = + u32_get_bits(info[0], + HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR); + ppdu_info->peer_id = + u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID); + + switch (ppdu_info->preamble_type) { + case HAL_RX_PREAMBLE_11N: + ppdu_info->ht_flags = 1; + break; + case HAL_RX_PREAMBLE_11AC: + ppdu_info->vht_flags = 1; + break; + case HAL_RX_PREAMBLE_11AX: + ppdu_info->he_flags = 1; + break; + case HAL_RX_PREAMBLE_11BE: + ppdu_info->is_eht = true; + break; + default: + break; + } + + if (userid < HAL_MAX_UL_MU_USERS) { + struct hal_rx_user_status *rxuser_stats = + &ppdu_info->userstats[userid]; + + if (ppdu_info->num_mpdu_fcs_ok > 1 || + ppdu_info->num_mpdu_fcs_err > 1) + ppdu_info->userstats[userid].ampdu_present = true; + + ppdu_info->num_users += 1; + + ath12k_wifi7_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); + ath12k_wifi7_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info, + rxuser_stats); + } + ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]); + ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]); + break; + } + case HAL_RX_PPDU_END_USER_STATS_EXT: { + const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data; + + ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1); + ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2); + ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3); + ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4); + ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5); + ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6); + break; + } + case HAL_PHYRX_HT_SIG: + ath12k_wifi7_dp_mon_parse_ht_sig(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_L_SIG_B: + ath12k_wifi7_dp_mon_parse_l_sig_b(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_L_SIG_A: + ath12k_wifi7_dp_mon_parse_l_sig_a(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_VHT_SIG_A: + ath12k_wifi7_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_A_SU: + ath12k_wifi7_dp_mon_parse_he_sig_su(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_A_MU_DL: + ath12k_wifi7_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B1_MU: + ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B2_MU: + ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_HE_SIG_B2_OFDMA: + ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info); + break; + + case HAL_PHYRX_RSSI_LEGACY: { + const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data; + + info[0] = __le32_to_cpu(rssi->info0); - info[1] = __le32_to_cpu(rssi->info1); ++ 
info[2] = __le32_to_cpu(rssi->info2); + + /* TODO: Please note that the combined rssi will not be accurate + * in MU case. Rssi in MU needs to be retrieved from + * PHYRX_OTHER_RECEIVE_INFO TLV. + */ + ppdu_info->rssi_comb = - u32_get_bits(info[1], - HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB); ++ u32_get_bits(info[2], ++ HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU); + + ppdu_info->bw = u32_get_bits(info[0], - HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); ++ HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW); + break; + } - case HAL_PHYRX_OTHER_RECEIVE_INFO: { - const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data; - - ppdu_info->gi = le32_get_bits(cmn_usr_info->info0, - HAL_RX_PHY_CMN_USER_INFO0_GI); ++ case HAL_PHYRX_COMMON_USER_INFO: { ++ ath12k_wifi7_parse_cmn_usr_info(tlv_data, ppdu_info); + break; + } + case HAL_RX_PPDU_START_USER_INFO: + ath12k_wifi7_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info); + break; + + case HAL_RXPCU_PPDU_END_INFO: { + const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; + + info[0] = __le32_to_cpu(ppdu_rx_duration->info0); + ppdu_info->rx_duration = + u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION); + ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]); + ppdu_info->tsft = (ppdu_info->tsft << 32) | + __le32_to_cpu(ppdu_rx_duration->rsvd0[0]); + break; + } + case HAL_RX_MPDU_START: { + const struct hal_rx_mpdu_start *mpdu_start = tlv_data; + u16 peer_id; + + info[1] = __le32_to_cpu(mpdu_start->info1); + peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID); + if (peer_id) + ppdu_info->peer_id = peer_id; + + ppdu_info->mpdu_len += u32_get_bits(info[1], + HAL_RX_MPDU_START_INFO2_MPDU_LEN); + if (userid < HAL_MAX_UL_MU_USERS) { + info[0] = __le32_to_cpu(mpdu_start->info0); + ppdu_info->userid = userid; + ppdu_info->userstats[userid].ampdu_id = + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID); + } + + return HAL_RX_MON_STATUS_MPDU_START; + } + case HAL_RX_MSDU_START: + /* TODO: add msdu start parsing logic */ + break; + case HAL_MON_BUF_ADDR: + return HAL_RX_MON_STATUS_BUF_ADDR; + case HAL_RX_MSDU_END: + ath12k_wifi7_dp_mon_parse_status_msdu_end(pmon, tlv_data); + return HAL_RX_MON_STATUS_MSDU_END; + case HAL_RX_MPDU_END: + return HAL_RX_MON_STATUS_MPDU_END; + case HAL_PHYRX_GENERIC_U_SIG: + ath12k_wifi7_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info); + break; + case HAL_PHYRX_GENERIC_EHT_SIG: + /* Handle the case where aggregation is in progress + * or the current TLV is one of the TLVs which should be + * aggregated + */ + if (!ppdu_info->tlv_aggr.in_progress) { + ppdu_info->tlv_aggr.in_progress = true; + ppdu_info->tlv_aggr.tlv_tag = tlv_tag; + ppdu_info->tlv_aggr.cur_len = 0; + } + + ppdu_info->is_eht = true; + + ath12k_wifi7_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data); + break; + case HAL_DUMMY: + return HAL_RX_MON_STATUS_BUF_DONE; + case HAL_RX_PPDU_END_STATUS_DONE: + case 0: + return HAL_RX_MON_STATUS_PPDU_DONE; + default: + break; + } + + return HAL_RX_MON_STATUS_PPDU_NOT_DONE; + } + + static int + ath12k_wifi7_dp_mon_parse_rx_dest_tlv(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + enum hal_rx_mon_status hal_status, + const void *tlv_data) + { + switch (hal_status) { + case HAL_RX_MON_STATUS_MPDU_START: + if (WARN_ON_ONCE(pmon->mon_mpdu)) + break; + + pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC); + if (!pmon->mon_mpdu) + return -ENOMEM; + break; + case HAL_RX_MON_STATUS_BUF_ADDR: + return ath12k_dp_mon_parse_status_buf(dp_pdev, pmon, tlv_data); + case 
HAL_RX_MON_STATUS_MPDU_END: + /* If no MSDU then free empty MPDU */ + if (pmon->mon_mpdu->tail) { + pmon->mon_mpdu->tail->next = NULL; + list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); + } else { + kfree(pmon->mon_mpdu); + } + pmon->mon_mpdu = NULL; + break; + case HAL_RX_MON_STATUS_MSDU_END: + pmon->mon_mpdu->decap_format = pmon->decap_format; + pmon->mon_mpdu->err_bitmap = pmon->err_bitmap; + break; + default: + break; + } + + return 0; + } + + static struct dp_mon_tx_ppdu_info * + ath12k_wifi7_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon, + unsigned int ppdu_id, + enum dp_mon_tx_ppdu_info_type type) + { + struct dp_mon_tx_ppdu_info *tx_ppdu_info; + + if (type == DP_MON_TX_PROT_PPDU_INFO) { + tx_ppdu_info = pmon->tx_prot_ppdu_info; + + if (tx_ppdu_info && !tx_ppdu_info->is_used) + return tx_ppdu_info; + kfree(tx_ppdu_info); + } else { + tx_ppdu_info = pmon->tx_data_ppdu_info; + + if (tx_ppdu_info && !tx_ppdu_info->is_used) + return tx_ppdu_info; + kfree(tx_ppdu_info); + } + + /* allocate new tx_ppdu_info */ + tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC); + if (!tx_ppdu_info) + return NULL; + + tx_ppdu_info->is_used = 0; + tx_ppdu_info->ppdu_id = ppdu_id; + + if (type == DP_MON_TX_PROT_PPDU_INFO) + pmon->tx_prot_ppdu_info = tx_ppdu_info; + else + pmon->tx_data_ppdu_info = tx_ppdu_info; + + return tx_ppdu_info; + } + + static struct dp_mon_tx_ppdu_info * + ath12k_wifi7_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon, + u16 tlv_tag) + { + switch (tlv_tag) { + case HAL_TX_FES_SETUP: + case HAL_TX_FLUSH: + case HAL_PCU_PPDU_SETUP_INIT: + case HAL_TX_PEER_ENTRY: + case HAL_TX_QUEUE_EXTENSION: + case HAL_TX_MPDU_START: + case HAL_TX_MSDU_START: + case HAL_TX_DATA: + case HAL_MON_BUF_ADDR: + case HAL_TX_MPDU_END: + case HAL_TX_LAST_MPDU_FETCHED: + case HAL_TX_LAST_MPDU_END: + case HAL_COEX_TX_REQ: + case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP: + case HAL_SCH_CRITICAL_TLV_REFERENCE: + case HAL_TX_FES_SETUP_COMPLETE: + case HAL_TQM_MPDU_GLOBAL_START: + case HAL_SCHEDULER_END: + case HAL_TX_FES_STATUS_USER_PPDU: + break; + case HAL_TX_FES_STATUS_PROT: { + if (!pmon->tx_prot_ppdu_info->is_used) + pmon->tx_prot_ppdu_info->is_used = true; + + return pmon->tx_prot_ppdu_info; + } + } + + if (!pmon->tx_data_ppdu_info->is_used) + pmon->tx_data_ppdu_info->is_used = true; + + return pmon->tx_data_ppdu_info; + } + + #define MAX_MONITOR_HEADER 512 + #define MAX_DUMMY_FRM_BODY 128 + + static struct + sk_buff *ath12k_wifi7_dp_mon_tx_alloc_skb(void) + { + struct sk_buff *skb; + + skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY); + if (!skb) + return NULL; + + skb_reserve(skb, MAX_MONITOR_HEADER); + + if (!IS_ALIGNED((unsigned long)skb->data, 4)) + skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data); + + return skb; + } + + static int + ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct sk_buff *skb; + struct ieee80211_cts *cts; + + skb = ath12k_wifi7_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + cts = (struct ieee80211_cts *)skb->data; + memset(cts, 0, MAX_DUMMY_FRM_BODY); + cts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); + cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra)); + + skb_put(skb, sizeof(*cts)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; + } + + 
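+ /* Build a minimal RTS frame: RA, TA and duration are taken from the
+  * recorded TX monitor status, and the frame is queued on the TX monitor
+  * MPDU list.
+  */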
static int + ath12k_wifi7_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct sk_buff *skb; + struct ieee80211_rts *rts; + + skb = ath12k_wifi7_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + rts = (struct ieee80211_rts *)skb->data; + memset(rts, 0, MAX_DUMMY_FRM_BODY); + rts->frame_control = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra)); + memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta)); + + skb_put(skb, sizeof(*rts)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; + } + + static int + ath12k_wifi7_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct sk_buff *skb; + struct ieee80211_qos_hdr *qhdr; + + skb = ath12k_wifi7_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + qhdr = (struct ieee80211_qos_hdr *)skb->data; + memset(qhdr, 0, MAX_DUMMY_FRM_BODY); + qhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); + qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN); + memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN); + + skb_put(skb, sizeof(*qhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; + } + + static int + ath12k_wifi7_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct sk_buff *skb; + struct dp_mon_qosframe_addr4 *qhdr; + + skb = ath12k_wifi7_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + qhdr = (struct dp_mon_qosframe_addr4 *)skb->data; + memset(qhdr, 0, MAX_DUMMY_FRM_BODY); + qhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); + qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration); + memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN); + memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN); + memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN); + + skb_put(skb, sizeof(*qhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; + } + + static int + ath12k_wifi7_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct sk_buff *skb; + struct dp_mon_frame_min_one *fbmhdr; + + skb = ath12k_wifi7_dp_mon_tx_alloc_skb(); + if (!skb) + return -ENOMEM; + + fbmhdr = (struct dp_mon_frame_min_one *)skb->data; + memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY); + fbmhdr->frame_control = + cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK); + memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN); + + /* set duration zero for ack frame */ + fbmhdr->duration = 0; + + skb_put(skb, sizeof(*fbmhdr)); + tx_ppdu_info->tx_mon_mpdu->head = skb; + tx_ppdu_info->tx_mon_mpdu->tail = NULL; + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + + return 0; + } + + static int + ath12k_wifi7_dp_mon_tx_gen_prot_frame(struct 
dp_mon_tx_ppdu_info *tx_ppdu_info) + { + int ret = 0; + + switch (tx_ppdu_info->rx_status.medium_prot_type) { + case DP_MON_TX_MEDIUM_RTS_LEGACY: + case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW: + case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW: + ret = ath12k_wifi7_dp_mon_tx_gen_rts_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_CTS2SELF: + ret = ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR: + ret = ath12k_wifi7_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info); + break; + case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR: + ret = ath12k_wifi7_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info); + break; + } + + return ret; + } + + static enum dp_mon_tx_tlv_status + ath12k_wifi7_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, + struct ath12k_mon_data *pmon, + u16 tlv_tag, const void *tlv_data, + u32 userid) + { + struct dp_mon_tx_ppdu_info *tx_ppdu_info; + enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + u32 info[7]; + + tx_ppdu_info = ath12k_wifi7_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag); + + switch (tlv_tag) { + case HAL_TX_FES_SETUP: { + const struct hal_tx_fes_setup *tx_fes_setup = tlv_data; + + info[0] = __le32_to_cpu(tx_fes_setup->info0); + tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id); + tx_ppdu_info->num_users = + u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS); + status = DP_MON_TX_FES_SETUP; + break; + } + + case HAL_TX_FES_STATUS_END: { + const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data; + u32 tst_15_0, tst_31_16; + + info[0] = __le32_to_cpu(tx_fes_status_end->info0); + tst_15_0 = + u32_get_bits(info[0], + HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0); + tst_31_16 = + u32_get_bits(info[0], + HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16); + + tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16)); + status = DP_MON_TX_FES_STATUS_END; + break; + } + + case HAL_RX_RESPONSE_REQUIRED_INFO: { + const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data; + u32 addr_32; + u16 addr_16; + + info[0] = __le32_to_cpu(rx_resp_req_info->info0); + info[1] = __le32_to_cpu(rx_resp_req_info->info1); + info[2] = __le32_to_cpu(rx_resp_req_info->info2); + info[3] = __le32_to_cpu(rx_resp_req_info->info3); + info[4] = __le32_to_cpu(rx_resp_req_info->info4); + info[5] = __le32_to_cpu(rx_resp_req_info->info5); + + tx_ppdu_info->rx_status.ppdu_id = + u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID); + tx_ppdu_info->rx_status.reception_type = + u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE); + tx_ppdu_info->rx_status.rx_duration = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION); + tx_ppdu_info->rx_status.mcs = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS); + tx_ppdu_info->rx_status.sgi = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI); + tx_ppdu_info->rx_status.is_stbc = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC); + tx_ppdu_info->rx_status.ldpc = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC); + tx_ppdu_info->rx_status.is_ampdu = + u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU); + tx_ppdu_info->rx_status.num_users = + u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER); + + addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0); + addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1); + + addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0); + addr_32 = u32_get_bits(info[5], 
HAL_RX_RESP_REQ_INFO5_ADDR1_47_16); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2); + + if (tx_ppdu_info->rx_status.reception_type == 0) + ath12k_wifi7_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info); + status = DP_MON_RX_RESPONSE_REQUIRED_INFO; + break; + } + + case HAL_PCU_PPDU_SETUP_INIT: { + const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data; + u32 addr_32; + u16 addr_16; + + info[0] = __le32_to_cpu(ppdu_setup->info0); + info[1] = __le32_to_cpu(ppdu_setup->info1); + info[2] = __le32_to_cpu(ppdu_setup->info2); + info[3] = __le32_to_cpu(ppdu_setup->info3); + info[4] = __le32_to_cpu(ppdu_setup->info4); + info[5] = __le32_to_cpu(ppdu_setup->info5); + info[6] = __le32_to_cpu(ppdu_setup->info6); + + /* protection frame address 1 */ + addr_32 = u32_get_bits(info[1], + HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0); + addr_16 = u32_get_bits(info[2], + HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1); + + /* protection frame address 2 */ + addr_16 = u32_get_bits(info[2], + HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0); + addr_32 = u32_get_bits(info[3], + HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2); + + /* protection frame address 3 */ + addr_32 = u32_get_bits(info[4], + HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0); + addr_16 = u32_get_bits(info[5], + HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3); + + /* protection frame address 4 */ + addr_16 = u32_get_bits(info[5], + HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0); + addr_32 = u32_get_bits(info[6], + HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4); + + status = u32_get_bits(info[0], + HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE); + break; + } + + case HAL_TX_QUEUE_EXTENSION: { + const struct hal_tx_queue_exten *tx_q_exten = tlv_data; + + info[0] = __le32_to_cpu(tx_q_exten->info0); + + tx_ppdu_info->rx_status.frame_control = + u32_get_bits(info[0], + HAL_TX_Q_EXT_INFO0_FRAME_CTRL); + tx_ppdu_info->rx_status.fc_valid = true; + break; + } + + case HAL_TX_FES_STATUS_START: { + const struct hal_tx_fes_status_start *tx_fes_start = tlv_data; + + info[0] = __le32_to_cpu(tx_fes_start->info0); + + tx_ppdu_info->rx_status.medium_prot_type = + u32_get_bits(info[0], + HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE); + break; + } + + case HAL_TX_FES_STATUS_PROT: { + const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data; + u32 start_timestamp; + u32 end_timestamp; + + info[0] = __le32_to_cpu(tx_fes_status->info0); + info[1] = __le32_to_cpu(tx_fes_status->info1); + + start_timestamp = + u32_get_bits(info[0], + HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0); + start_timestamp |= + u32_get_bits(info[0], + HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 15; + end_timestamp = + u32_get_bits(info[1], + HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0); + end_timestamp |= + u32_get_bits(info[1], + HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 15; + tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp; + + ath12k_wifi7_dp_mon_tx_gen_prot_frame(tx_ppdu_info); + break; + } + + case HAL_TX_FES_STATUS_START_PPDU: + case HAL_TX_FES_STATUS_START_PROT: { + const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data; + u64 ppdu_ts; + + info[0] = __le32_to_cpu(tx_fes_stat_start->info0); + + 
tx_ppdu_info->rx_status.ppdu_ts = + u32_get_bits(info[0], + HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32); + ppdu_ts = (u32_get_bits(info[1], + HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32)); + tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32; + break; + } + + case HAL_TX_FES_STATUS_USER_PPDU: { + const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data; + + info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0); + + tx_ppdu_info->rx_status.rx_duration = + u32_get_bits(info[0], + HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION); + break; + } + + case HAL_MACTX_HE_SIG_A_SU: + ath12k_wifi7_dp_mon_parse_he_sig_su(tlv_data, + &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_HE_SIG_A_MU_DL: + ath12k_wifi7_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_HE_SIG_B1_MU: + ath12k_wifi7_dp_mon_parse_he_sig_b1_mu(tlv_data, + &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_HE_SIG_B2_MU: + ath12k_wifi7_dp_mon_parse_he_sig_b2_mu(tlv_data, + &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_HE_SIG_B2_OFDMA: + ath12k_wifi7_dp_mon_parse_he_sig_b2_ofdma(tlv_data, + &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_VHT_SIG_A: + ath12k_wifi7_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_L_SIG_A: + ath12k_wifi7_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status); + break; + + case HAL_MACTX_L_SIG_B: + ath12k_wifi7_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status); + break; + + case HAL_RX_FRAME_BITMAP_ACK: { + const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data; + u32 addr_32; + u16 addr_16; + + info[0] = __le32_to_cpu(fbm_ack->info0); + info[1] = __le32_to_cpu(fbm_ack->info1); + + addr_32 = u32_get_bits(info[0], + HAL_RX_FBM_ACK_INFO0_ADDR1_31_0); + addr_16 = u32_get_bits(info[1], + HAL_RX_FBM_ACK_INFO1_ADDR1_47_32); + ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1); + + ath12k_wifi7_dp_mon_tx_gen_ack_frame(tx_ppdu_info); + break; + } + + case HAL_MACTX_PHY_DESC: { + const struct hal_tx_phy_desc *tx_phy_desc = tlv_data; + + info[0] = __le32_to_cpu(tx_phy_desc->info0); + info[1] = __le32_to_cpu(tx_phy_desc->info1); + info[2] = __le32_to_cpu(tx_phy_desc->info2); + info[3] = __le32_to_cpu(tx_phy_desc->info3); + + tx_ppdu_info->rx_status.beamformed = + u32_get_bits(info[0], + HAL_TX_PHY_DESC_INFO0_BF_TYPE); + tx_ppdu_info->rx_status.preamble_type = + u32_get_bits(info[0], + HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B); + tx_ppdu_info->rx_status.mcs = + u32_get_bits(info[1], + HAL_TX_PHY_DESC_INFO1_MCS); + tx_ppdu_info->rx_status.ltf_size = + u32_get_bits(info[3], + HAL_TX_PHY_DESC_INFO3_LTF_SIZE); + tx_ppdu_info->rx_status.nss = + u32_get_bits(info[2], + HAL_TX_PHY_DESC_INFO2_NSS); + tx_ppdu_info->rx_status.chan_num = + u32_get_bits(info[3], + HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL); + tx_ppdu_info->rx_status.bw = + u32_get_bits(info[0], + HAL_TX_PHY_DESC_INFO0_BANDWIDTH); + break; + } + + case HAL_TX_MPDU_START: { + struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu; + + mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); + if (!mon_mpdu) + return DP_MON_TX_STATUS_PPDU_NOT_DONE; + status = DP_MON_TX_MPDU_START; + break; + } + + case HAL_TX_MPDU_END: + list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, + &tx_ppdu_info->dp_tx_mon_mpdu_list); + break; + } + + return status; + } + + static enum dp_mon_tx_tlv_status + ath12k_wifi7_dp_mon_tx_status_get_num_user(u16 tlv_tag, + struct hal_tlv_hdr *tx_tlv, + u8 *num_users) + { + u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + u32 info0; + 
+ switch (tlv_tag) { + case HAL_TX_FES_SETUP: { + struct hal_tx_fes_setup *tx_fes_setup = + (struct hal_tx_fes_setup *)tx_tlv; + + info0 = __le32_to_cpu(tx_fes_setup->info0); + + *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS); + tlv_status = DP_MON_TX_FES_SETUP; + break; + } + + case HAL_RX_RESPONSE_REQUIRED_INFO: { + /* TODO: need to update *num_users */ + tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO; + break; + } + } + + return tlv_status; + } + + static int + ath12k_wifi7_dp_mon_rx_deliver(struct ath12k_pdev_dp *dp_pdev, + struct dp_mon_mpdu *mon_mpdu, + struct hal_rx_mon_ppdu_info *ppduinfo, + struct napi_struct *napi) + { + struct sk_buff *mon_skb, *skb_next, *header; + struct ieee80211_rx_status *rxs = &dp_pdev->rx_status; + u8 decap = DP_RX_DECAP_TYPE_RAW; + + mon_skb = ath12k_dp_mon_rx_merg_msdus(dp_pdev, mon_mpdu, ppduinfo, rxs); + if (!mon_skb) + goto mon_deliver_fail; + + header = mon_skb; + rxs->flag = 0; + + if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS) + rxs->flag = RX_FLAG_FAILED_FCS_CRC; + + do { + skb_next = mon_skb->next; + if (!skb_next) + rxs->flag &= ~RX_FLAG_AMSDU_MORE; + else + rxs->flag |= RX_FLAG_AMSDU_MORE; + + if (mon_skb == header) { + header = NULL; + rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; + } else { + rxs->flag |= RX_FLAG_ALLOW_SAME_PN; + } + rxs->flag |= RX_FLAG_ONLY_MONITOR; + + if (!(rxs->flag & RX_FLAG_ONLY_MONITOR)) + decap = mon_mpdu->decap_format; + + ath12k_dp_mon_update_radiotap(dp_pdev, ppduinfo, mon_skb, rxs); - ath12k_dp_mon_rx_deliver_msdu(dp_pdev, napi, mon_skb, rxs, decap); ++ ath12k_dp_mon_rx_deliver_msdu(dp_pdev, napi, mon_skb, ppduinfo, ++ rxs, decap); + mon_skb = skb_next; + } while (mon_skb); + rxs->flag = 0; + + return 0; + + mon_deliver_fail: + mon_skb = mon_mpdu->head; + while (mon_skb) { + skb_next = mon_skb->next; + dev_kfree_skb_any(mon_skb); + mon_skb = skb_next; + } + return -EINVAL; + } + + static void + ath12k_wifi7_dp_mon_tx_process_ppdu_info(struct ath12k_pdev_dp *dp_pdev, + struct napi_struct *napi, + struct dp_mon_tx_ppdu_info *tx_ppdu_info) + { + struct dp_mon_mpdu *tmp, *mon_mpdu; + + list_for_each_entry_safe(mon_mpdu, tmp, + &tx_ppdu_info->dp_tx_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + + if (mon_mpdu->head) + ath12k_wifi7_dp_mon_rx_deliver(dp_pdev, mon_mpdu, + &tx_ppdu_info->rx_status, napi); + + kfree(mon_mpdu); + } + } + + enum hal_rx_mon_status + ath12k_wifi7_dp_mon_tx_parse_mon_status(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + struct sk_buff *skb, + struct napi_struct *napi, + u32 ppdu_id) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_base *ab = dp->ab; + struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info; + struct hal_tlv_hdr *tlv; + u8 *ptr = skb->data; + u16 tlv_tag; + u16 tlv_len; + u32 tlv_userid = 0; + u8 num_user; + u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE; + + tx_prot_ppdu_info = + ath12k_wifi7_dp_mon_tx_get_ppdu_info(pmon, ppdu_id, + DP_MON_TX_PROT_PPDU_INFO); + if (!tx_prot_ppdu_info) + return -ENOMEM; + + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); + + tlv_status = ath12k_wifi7_dp_mon_tx_status_get_num_user(tlv_tag, tlv, + &num_user); + if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user) + return -EINVAL; + + tx_data_ppdu_info = + ath12k_wifi7_dp_mon_tx_get_ppdu_info(pmon, ppdu_id, + DP_MON_TX_DATA_PPDU_INFO); + if (!tx_data_ppdu_info) + return -ENOMEM; + + do { + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); + tlv_len = 
le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN); + tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID); + + tlv_status = ath12k_wifi7_dp_mon_tx_parse_status_tlv(ab, pmon, + tlv_tag, ptr, + tlv_userid); + ptr += tlv_len; + ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN); + if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE) + break; + } while (tlv_status != DP_MON_TX_FES_STATUS_END); + + ath12k_wifi7_dp_mon_tx_process_ppdu_info(dp_pdev, napi, tx_data_ppdu_info); + ath12k_wifi7_dp_mon_tx_process_ppdu_info(dp_pdev, napi, tx_prot_ppdu_info); + + return tlv_status; + } + + static void + ath12k_wifi7_dp_mon_next_link_desc_get(struct ath12k_base *ab, + struct hal_rx_msdu_link *msdu_link, + dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm, + struct ath12k_buffer_addr **pp_buf_addr_info) + { + struct ath12k_buffer_addr *buf_addr_info; + + buf_addr_info = &msdu_link->buf_addr_info; + + ath12k_wifi7_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); + + *pp_buf_addr_info = buf_addr_info; + } + + static u32 + ath12k_wifi7_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id, + void *ring_entry, struct sk_buff **head_msdu, + struct sk_buff **tail_msdu, + struct list_head *used_list, + u32 *npackets, u32 *ppdu_id) + { + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info; + u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0; + u32 rx_buf_size, rx_pkt_offset, sw_cookie; + bool is_frag, is_first_msdu, drop_mpdu = false; + struct hal_reo_entrance_ring *ent_desc = + (struct hal_reo_entrance_ring *)ring_entry; + u32 rx_bufs_used = 0, i = 0, desc_bank = 0; + struct hal_rx_desc *rx_desc, *tail_rx_desc; + struct hal_rx_msdu_link *msdu_link_desc; + struct sk_buff *msdu = NULL, *last = NULL; + struct ath12k_rx_desc_info *desc_info; + struct ath12k_buffer_addr buf_info; + struct hal_rx_msdu_list msdu_list; + struct ath12k_skb_rxcb *rxcb; + u16 num_msdus = 0; + dma_addr_t paddr; + u8 rbm; + + ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, + &sw_cookie, + &p_last_buf_addr_info, + &rbm, + &msdu_cnt); + + spin_lock_bh(&pmon->mon_lock); + + if (le32_get_bits(ent_desc->info1, + HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) == + HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { + u8 rxdma_err = le32_get_bits(ent_desc->info1, + HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE); + if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { + drop_mpdu = true; + pmon->rx_mon_stats.dest_mpdu_drop++; + } + } + + is_frag = false; + is_first_msdu = true; + rx_pkt_offset = sizeof(struct hal_rx_desc); + + do { + if (pmon->mon_last_linkdesc_paddr == paddr) { + pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; + spin_unlock_bh(&pmon->mon_lock); + return rx_bufs_used; + } + + desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK); + msdu_link_desc = + dp->link_desc_banks[desc_bank].vaddr + + (paddr - dp->link_desc_banks[desc_bank].paddr); + + ath12k_wifi7_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list, + &num_msdus); + desc_info = ath12k_dp_get_rx_desc(ar->ab->dp, + msdu_list.sw_cookie[num_msdus - 1]); + tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data; + + for (i = 0; i < num_msdus; i++) { + u32 l2_hdr_offset; + + if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d last_cookie 
%d is same\n", + i, pmon->mon_last_buf_cookie); + drop_mpdu = true; + pmon->rx_mon_stats.dup_mon_buf_cnt++; + continue; + } + + desc_info = + ath12k_dp_get_rx_desc(ar->ab->dp, msdu_list.sw_cookie[i]); + msdu = desc_info->skb; + + if (!msdu) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "msdu_pop: invalid msdu (%d/%d)\n", + i + 1, num_msdus); + goto next_msdu; + } + rxcb = ATH12K_SKB_RXCB(msdu); + if (rxcb->paddr != msdu_list.paddr[i]) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d paddr %lx != %lx\n", + i, (unsigned long)rxcb->paddr, + (unsigned long)msdu_list.paddr[i]); + drop_mpdu = true; + continue; + } + if (!rxcb->unmapped) { + dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + + skb_tailroom(msdu), + DMA_FROM_DEVICE); + rxcb->unmapped = 1; + } + if (drop_mpdu) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "i %d drop msdu %p *ppdu_id %x\n", + i, msdu, *ppdu_id); + dev_kfree_skb_any(msdu); + msdu = NULL; + goto next_msdu; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc); + if (is_first_msdu) { + if (!ath12k_wifi7_dp_rxdesc_mpdu_valid(ar->ab, + rx_desc)) { + drop_mpdu = true; + dev_kfree_skb_any(msdu); + msdu = NULL; + pmon->mon_last_linkdesc_paddr = paddr; + goto next_msdu; + } + msdu_ppdu_id = + ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); + + if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id, + ppdu_id)) { + spin_unlock_bh(&pmon->mon_lock); + return rx_bufs_used; + } + pmon->mon_last_linkdesc_paddr = paddr; + is_first_msdu = false; + } + ath12k_wifi7_dp_mon_get_buf_len(&msdu_list.msdu_info[i], + &is_frag, &total_len, + &frag_len, &msdu_cnt); + rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; + + if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) { + dev_kfree_skb_any(msdu); + goto next_msdu; + } + + if (!(*head_msdu)) + *head_msdu = msdu; + else if (last) + last->next = msdu; + + last = msdu; + next_msdu: + pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; + rx_bufs_used++; + desc_info->skb = NULL; + list_add_tail(&desc_info->list, used_list); + } + + ath12k_wifi7_hal_rx_buf_addr_info_set(&buf_info, paddr, + sw_cookie, rbm); + + ath12k_wifi7_dp_mon_next_link_desc_get(ab, + msdu_link_desc, &paddr, + &sw_cookie, &rbm, + &p_buf_addr_info); + + ath12k_dp_arch_rx_link_desc_return(ar->ab->dp, &buf_info, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + + p_last_buf_addr_info = p_buf_addr_info; + + } while (paddr && msdu_cnt); + + spin_unlock_bh(&pmon->mon_lock); + + if (last) + last->next = NULL; + + *tail_msdu = msdu; + + if (msdu_cnt == 0) + *npackets = 1; + + return rx_bufs_used; + } + + /* The destination ring processing is stuck if the destination is not + * moving while status ring moves 16 PPDU. The destination ring processing + * skips this destination ring PPDU as a workaround. 
+ */ + #define MON_DEST_RING_STUCK_MAX_CNT 16 + + static void + ath12k_wifi7_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id, + u32 quota, struct napi_struct *napi) + { + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_pdev_mon_stats *rx_mon_stats; + u32 ppdu_id, rx_bufs_used = 0, ring_id; + u32 mpdu_rx_bufs_used, npackets = 0; + struct ath12k_base *ab = ar->ab; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + void *ring_entry, *mon_dst_srng; + struct dp_mon_mpdu *tmp_mpdu; + LIST_HEAD(rx_desc_used_list); + struct hal_srng *srng; + + ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; + srng = &ab->hal.srng_list[ring_id]; + + mon_dst_srng = &ab->hal.srng_list[ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, mon_dst_srng); + + ppdu_id = pmon->mon_ppdu_info.ppdu_id; + rx_mon_stats = &pmon->rx_mon_stats; + + while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { + struct sk_buff *head_msdu, *tail_msdu; + + head_msdu = NULL; + tail_msdu = NULL; + + mpdu_rx_bufs_used = ath12k_wifi7_dp_rx_mon_mpdu_pop(ar, mac_id, + ring_entry, + &head_msdu, + &tail_msdu, + &rx_desc_used_list, + &npackets, + &ppdu_id); + + rx_bufs_used += mpdu_rx_bufs_used; + + if (mpdu_rx_bufs_used) { + dp->mon_dest_ring_stuck_cnt = 0; + } else { + dp->mon_dest_ring_stuck_cnt++; + rx_mon_stats->dest_mon_not_reaped++; + } + + if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) { + rx_mon_stats->dest_mon_stuck++; + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n", + pmon->mon_ppdu_info.ppdu_id, ppdu_id, + dp->mon_dest_ring_stuck_cnt, + rx_mon_stats->dest_mon_not_reaped, + rx_mon_stats->dest_mon_stuck); + spin_lock_bh(&pmon->mon_lock); + pmon->mon_ppdu_info.ppdu_id = ppdu_id; + spin_unlock_bh(&pmon->mon_lock); + continue; + } + + if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { + spin_lock_bh(&pmon->mon_lock); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + spin_unlock_bh(&pmon->mon_lock); + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n", + ppdu_id, pmon->mon_ppdu_info.ppdu_id, + rx_mon_stats->dest_mon_not_reaped, + rx_mon_stats->dest_mon_stuck); + break; + } + + if (head_msdu && tail_msdu) { + tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC); + if (!tmp_mpdu) + break; + + tmp_mpdu->head = head_msdu; + tmp_mpdu->tail = tail_msdu; + tmp_mpdu->err_bitmap = pmon->err_bitmap; + tmp_mpdu->decap_format = pmon->decap_format; + ath12k_wifi7_dp_mon_rx_deliver(&ar->dp, tmp_mpdu, + &pmon->mon_ppdu_info, napi); + rx_mon_stats->dest_mpdu_done++; + kfree(tmp_mpdu); + } + + ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab, + mon_dst_srng); + } + ath12k_hal_srng_access_end(ar->ab, mon_dst_srng); + + spin_unlock_bh(&srng->lock); + + if (rx_bufs_used) { + rx_mon_stats->dest_ppdu_done++; + ath12k_dp_rx_bufs_replenish(ar->ab->dp, + &dp->rx_refill_buf_ring, + &rx_desc_used_list, + rx_bufs_used); + } + } + + static enum dp_mon_status_buf_state + ath12k_wifi7_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng, + struct dp_rxdma_mon_ring *rx_ring) + { + struct ath12k_skb_rxcb *rxcb; + struct hal_tlv_64_hdr *tlv; + struct sk_buff *skb; + void *status_desc; + dma_addr_t paddr; + u32 cookie; + int buf_id; + u8 rbm; + + status_desc = ath12k_hal_srng_src_next_peek(ab, srng); + if (!status_desc) + return DP_MON_STATUS_NO_DMA; + + 
ath12k_wifi7_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); + + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) + return DP_MON_STATUS_NO_DMA; + + rxcb = ATH12K_SKB_RXCB(skb); + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_64_hdr *)skb->data; + if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE) + return DP_MON_STATUS_NO_DMA; + + return DP_MON_STATUS_REPLINISH; + } + + static enum hal_rx_mon_status + ath12k_wifi7_dp_mon_parse_rx_dest(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + struct sk_buff *skb) + { + struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev); + struct hal_tlv_64_hdr *tlv; + struct ath12k_skb_rxcb *rxcb; + enum hal_rx_mon_status hal_status; + u16 tlv_tag, tlv_len; + u8 *ptr = skb->data; + + do { + tlv = (struct hal_tlv_64_hdr *)ptr; + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); + + /* The actual length of PPDU_END is the combined length of many PHY + * TLVs that follow. Skip the TLV header and + * rx_rxpcu_classification_overview that follows the header to get to + * next TLV. + */ + + if (tlv_tag == HAL_RX_PPDU_END) + tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); + else + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); + + hal_status = ath12k_wifi7_dp_mon_rx_parse_status_tlv(dp_pdev, pmon, + tlv); + + if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable && + ath12k_wifi7_dp_mon_parse_rx_dest_tlv(dp_pdev, pmon, hal_status, + tlv->value)) + return HAL_RX_MON_STATUS_PPDU_DONE; + + ptr += sizeof(*tlv) + tlv_len; + ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); + + if ((ptr - skb->data) > skb->len) + break; + + } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) || + (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) || + (hal_status == HAL_RX_MON_STATUS_MPDU_START) || + (hal_status == HAL_RX_MON_STATUS_MPDU_END) || + (hal_status == HAL_RX_MON_STATUS_MSDU_END)); + + rxcb = ATH12K_SKB_RXCB(skb); + if (rxcb->is_end_of_ppdu) + hal_status = HAL_RX_MON_STATUS_PPDU_DONE; + + return hal_status; + } + + static enum hal_rx_mon_status + ath12k_wifi7_dp_mon_rx_parse_mon_status(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_mon_data *pmon, + struct sk_buff *skb, + struct napi_struct *napi) + { + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + struct dp_mon_mpdu *tmp; + struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; + enum hal_rx_mon_status hal_status; + + hal_status = ath12k_wifi7_dp_mon_parse_rx_dest(dp_pdev, pmon, skb); + if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) + return hal_status; + + list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + + if (mon_mpdu->head && mon_mpdu->tail) + ath12k_wifi7_dp_mon_rx_deliver(dp_pdev, mon_mpdu, + ppdu_info, napi); + + kfree(mon_mpdu); + } + + return hal_status; + } + + static int + ath12k_wifi7_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id, + int *budget, struct sk_buff_head *skb_list) + { + const struct ath12k_hw_hal_params *hal_params; + int buf_id, srng_id, num_buffs_reaped = 0; + enum dp_mon_status_buf_state reap_status; + struct dp_rxdma_mon_ring *rx_ring; + struct ath12k_mon_data *pmon; + struct ath12k_skb_rxcb *rxcb; + struct hal_tlv_64_hdr *tlv; + void *rx_mon_status_desc; + struct hal_srng *srng; + struct ath12k_dp *dp; + struct sk_buff *skb; + 
struct ath12k *ar; + dma_addr_t paddr; + u32 cookie; + u8 rbm; + + ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar; + dp = ath12k_ab_to_dp(ab); + pmon = &ar->dp.mon_data; + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); + rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (*budget) { + *budget -= 1; + rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng); + if (!rx_mon_status_desc) { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + break; + } + ath12k_wifi7_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, + &cookie, &rbm); + if (paddr) { + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) { + ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n", + buf_id); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; + } + + rxcb = ATH12K_SKB_RXCB(skb); + + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_64_hdr *)skb->data; + if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != + HAL_RX_STATUS_BUFFER_DONE) { + pmon->buf_state = DP_MON_STATUS_NO_DMA; + ath12k_warn(ab, + "mon status DONE not set %llx, buf_id %d\n", + le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG), + buf_id); + /* RxDMA status done bit might not be set even + * though tp is moved by HW. + */ + + /* If done status is missing: + * 1. As per MAC team's suggestion, + * when HP + 1 entry is peeked and if DMA + * is not done and if HP + 2 entry's DMA done + * is set. skip HP + 1 entry and + * start processing in next interrupt. + * 2. If HP + 2 entry's DMA done is not set, + * poll onto HP + 1 entry DMA done to be set. 
+ * Check status for same buffer for next time + * dp_rx_mon_status_srng_process + */ + reap_status = ath12k_wifi7_dp_rx_mon_buf_done(ab, srng, + rx_ring); + if (reap_status == DP_MON_STATUS_NO_DMA) + continue; + + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; + } + + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) { + dev_kfree_skb_any(skb); + goto move_next; + } + __skb_queue_tail(skb_list, skb); + } else { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + } + move_next: + skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring, + &buf_id); + hal_params = ab->hal.hal_params; + + if (!skb) { + ath12k_warn(ab, "failed to alloc buffer for status ring\n"); + ath12k_wifi7_hal_rx_buf_addr_info_set(rx_mon_status_desc, + 0, 0, + hal_params->rx_buf_rbm); + num_buffs_reaped++; + break; + } + rxcb = ATH12K_SKB_RXCB(skb); + + cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) | + u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID); + + ath12k_wifi7_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, + cookie, hal_params->rx_buf_rbm); + ath12k_hal_srng_src_get_next_entry(ab, srng); + num_buffs_reaped++; + } + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return num_buffs_reaped; + } + + static int + __ath12k_wifi7_dp_mon_process_ring(struct ath12k *ar, int mac_id, + struct napi_struct *napi, int *budget) + { + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data; + struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + enum hal_rx_mon_status hal_status; + struct sk_buff_head skb_list; + int num_buffs_reaped; + struct sk_buff *skb; + + __skb_queue_head_init(&skb_list); + + num_buffs_reaped = ath12k_wifi7_dp_rx_reap_mon_status_ring(ar->ab, mac_id, + budget, &skb_list); + if (!num_buffs_reaped) + goto exit; + + while ((skb = __skb_dequeue(&skb_list))) { + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; + + hal_status = ath12k_wifi7_dp_mon_parse_rx_dest(&ar->dp, pmon, skb); + + if (ar->monitor_started && + pmon->mon_ppdu_status == DP_PPDU_STATUS_START && + hal_status == HAL_TLV_STATUS_PPDU_DONE) { + rx_mon_stats->status_ppdu_done++; + pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; + ath12k_wifi7_dp_rx_mon_dest_process(ar, mac_id, *budget, napi); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + } + + dev_kfree_skb_any(skb); + } + + exit: + return num_buffs_reaped; + } + + static int + ath12k_wifi7_dp_mon_srng_process(struct ath12k_pdev_dp *pdev_dp, int *budget, + struct napi_struct *napi) + { + struct ath12k_dp *dp = pdev_dp->dp; + struct ath12k_base *ab = dp->ab; + struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data; + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + struct hal_mon_dest_desc *mon_dst_desc; + struct sk_buff *skb; + struct ath12k_skb_rxcb *rxcb; + struct dp_srng *mon_dst_ring; + struct hal_srng *srng; + struct dp_rxdma_mon_ring *buf_ring; + struct ath12k_dp_link_peer *peer; + struct sk_buff_head skb_list; + u64 
cookie; + int num_buffs_reaped = 0, srng_id, buf_id; + u32 hal_status, end_offset, info0, end_reason; + u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, pdev_dp->mac_id); + + __skb_queue_head_init(&skb_list); + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx); + mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; + buf_ring = &dp->rxdma_mon_buf_ring; + + srng = &ab->hal.srng_list[mon_dst_ring->ring_id]; + spin_lock_bh(&srng->lock); + ath12k_hal_srng_access_begin(ab, srng); + + while (likely(*budget)) { + mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng); + if (unlikely(!mon_dst_desc)) + break; + + /* In case of empty descriptor, the cookie in the ring descriptor + * is invalid. Therefore, this entry is skipped, and ring processing + * continues. + */ + info0 = le32_to_cpu(mon_dst_desc->info0); + if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC)) + goto move_next; + + cookie = le32_to_cpu(mon_dst_desc->cookie); + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&buf_ring->idr_lock); + skb = idr_remove(&buf_ring->bufs_idr, buf_id); + spin_unlock_bh(&buf_ring->idr_lock); + + if (unlikely(!skb)) { + ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", + buf_id); + goto move_next; + } + + rxcb = ATH12K_SKB_RXCB(skb); + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON); + + /* HAL_MON_FLUSH_DETECTED implies that an rx flush received at the end of + * rx PPDU and HAL_MON_PPDU_TRUNCATED implies that the PPDU got + * truncated due to a system level error. In both the cases, buffer data + * can be discarded + */ + if ((end_reason == HAL_MON_FLUSH_DETECTED) || + (end_reason == HAL_MON_PPDU_TRUNCATED)) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Monitor dest descriptor end reason %d", end_reason); + dev_kfree_skb_any(skb); + goto move_next; + } + + /* Calculate the budget when the ring descriptor with the + * HAL_MON_END_OF_PPDU to ensure that one PPDU worth of data is always + * reaped. This helps to efficiently utilize the NAPI budget. + */ + if (end_reason == HAL_MON_END_OF_PPDU) { + *budget -= 1; + rxcb->is_end_of_ppdu = true; + } + + end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET); + if (likely(end_offset <= DP_RX_BUFFER_SIZE)) { + skb_put(skb, end_offset); + } else { + ath12k_warn(ab, + "invalid offset on mon stats destination %u\n", + end_offset); + skb_put(skb, DP_RX_BUFFER_SIZE); + } + + __skb_queue_tail(&skb_list, skb); + + move_next: + ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); + ath12k_hal_srng_dst_get_next_entry(ab, srng); + num_buffs_reaped++; + } + + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + if (!num_buffs_reaped) + return 0; + + /* In some cases, one PPDU worth of data can be spread across multiple NAPI + * schedules, To avoid losing existing parsed ppdu_info information, skip + * the memset of the ppdu_info structure and continue processing it. 
+ */ + if (!ppdu_info->ppdu_continuation) + ath12k_wifi7_dp_mon_rx_memset_ppdu_info(ppdu_info); + + while ((skb = __skb_dequeue(&skb_list))) { + hal_status = ath12k_wifi7_dp_mon_rx_parse_mon_status(pdev_dp, pmon, + skb, napi); + if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { + ppdu_info->ppdu_continuation = true; + dev_kfree_skb_any(skb); + continue; + } + + if (ppdu_info->peer_id == HAL_INVALID_PEERID) + goto free_skb; + + rcu_read_lock(); + peer = ath12k_dp_link_peer_find_by_peerid(pdev_dp, ppdu_info->peer_id); + if (!peer || !peer->sta) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "failed to find the peer with monitor peer_id %d\n", + ppdu_info->peer_id); + goto next_skb; + } + + if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { + ath12k_dp_mon_rx_update_peer_su_stats(peer, ppdu_info); + } else if ((ppdu_info->fc_valid) && + (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) { + ath12k_dp_mon_rx_process_ulofdma(ppdu_info); + ath12k_dp_mon_rx_update_peer_mu_stats(ab, ppdu_info); + } + + next_skb: + rcu_read_unlock(); + free_skb: + dev_kfree_skb_any(skb); + ath12k_wifi7_dp_mon_rx_memset_ppdu_info(ppdu_info); + } + + return num_buffs_reaped; + } + + int ath12k_wifi7_dp_mon_process_ring(struct ath12k_dp *dp, int mac_id, + struct napi_struct *napi, int budget, + enum dp_monitor_mode monitor_mode) + { + u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, mac_id); + struct ath12k_pdev_dp *dp_pdev; + struct ath12k *ar; + int num_buffs_reaped = 0; + + rcu_read_lock(); + + dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx); + if (!dp_pdev) { + rcu_read_unlock(); + return 0; + } + + if (dp->hw_params->rxdma1_enable) { + if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) + num_buffs_reaped = ath12k_wifi7_dp_mon_srng_process(dp_pdev, + &budget, + napi); + } else { + ar = ath12k_pdev_dp_to_ar(dp_pdev); + + if (ar->monitor_started) + num_buffs_reaped = + __ath12k_wifi7_dp_mon_process_ring(ar, mac_id, napi, + &budget); + } + + rcu_read_unlock(); + + return num_buffs_reaped; + } diff --cc drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c index 0000000000000,af50dafc03499..a1ca55fe51c06 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.c @@@ -1,0 -1,2154 +1,2202 @@@ + // SPDX-License-Identifier: BSD-3-Clause-Clear + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + + #include "dp_rx.h" + #include "../dp_tx.h" + #include "../peer.h" + #include "hal_qcn9274.h" + #include "hal_wcn7850.h" + + static u16 ath12k_wifi7_dp_rx_get_peer_id(struct ath12k_dp *dp, + enum ath12k_peer_metadata_version ver, + __le32 peer_metadata) + { + switch (ver) { + default: + ath12k_warn(dp->ab, "Unknown peer metadata version: %d", ver); + fallthrough; + case ATH12K_PEER_METADATA_V0: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V0_PEER_ID); + case ATH12K_PEER_METADATA_V1: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1_PEER_ID); + case ATH12K_PEER_METADATA_V1A: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1A_PEER_ID); + case ATH12K_PEER_METADATA_V1B: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1B_PEER_ID); + } + } + + void ath12k_wifi7_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, + dma_addr_t paddr) + { + struct ath12k_reo_queue_ref *qref; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + bool ml_peer = false; + + if (!ab->hw_params->reoq_lut_support) + return; + + if (peer_id & ATH12K_PEER_ML_ID_VALID) { + peer_id &= ~ATH12K_PEER_ML_ID_VALID; + ml_peer = true; + } + + if (ml_peer) + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + else + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + + qref->info0 = u32_encode_bits(lower_32_bits(paddr), + BUFFER_ADDR_INFO0_ADDR); + qref->info1 = u32_encode_bits(upper_32_bits(paddr), + BUFFER_ADDR_INFO1_ADDR) | + u32_encode_bits(tid, DP_REO_QREF_NUM); + + ath12k_hal_reo_shared_qaddr_cache_clear(ab); + } + -static void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab, - u16 peer_id, u16 tid) ++void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab, ++ u16 peer_id, u16 tid) + { + struct ath12k_reo_queue_ref *qref; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + bool ml_peer = false; + + if (!ab->hw_params->reoq_lut_support) + return; + + if (peer_id & ATH12K_PEER_ML_ID_VALID) { + peer_id &= ~ATH12K_PEER_ML_ID_VALID; + ml_peer = true; + } + + if (ml_peer) + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + else + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); + + qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); + qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | + u32_encode_bits(tid, DP_REO_QREF_NUM); + } + + void ath12k_wifi7_dp_rx_peer_tid_delete(struct ath12k_base *ab, + struct ath12k_dp_link_peer *peer, u8 tid) + { - struct ath12k_hal_reo_cmd cmd = {}; - struct ath12k_dp_rx_tid *rx_tid = &peer->dp_peer->rx_tid[tid]; - int ret; ++ struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + + if (!(peer->rx_tid_active_bitmask & (1 << tid))) + return; + - cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; - cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.upd0 = HAL_REO_CMD_UPD0_VLD; - ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid, - HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, - ath12k_dp_rx_tid_del_func); - if (ret) { - ath12k_err(ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", - tid, ret); - dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, - rx_tid->qbuf.size, DMA_BIDIRECTIONAL); - kfree(rx_tid->qbuf.vaddr); - rx_tid->qbuf.vaddr = NULL; - } - - if (peer->mlo) - ath12k_wifi7_peer_rx_tid_qref_reset(ab, 
peer->ml_id, tid); - else - ath12k_wifi7_peer_rx_tid_qref_reset(ab, peer->peer_id, tid); - - peer->rx_tid_active_bitmask &= ~(1 << tid); ++ ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid); ++ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp); + } + + int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action) + { + struct ath12k_base *ab = dp->ab; + struct hal_wbm_release_ring *desc; + struct hal_srng *srng; + int ret = 0; + + srng = &dp->hal->srng_list[dp->wbm_desc_rel_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + desc = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -ENOBUFS; + goto exit; + } + + ath12k_wifi7_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action); + + exit: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return ret; + } + + int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab, - struct ath12k_dp_rx_tid *rx_tid, ++ struct ath12k_dp_rx_tid_rxq *rx_tid, + enum hal_reo_cmd_type type, + struct ath12k_hal_reo_cmd *cmd, + void (*cb)(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status)) + { + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + struct ath12k_dp_rx_reo_cmd *dp_cmd; + struct hal_srng *cmd_ring; + int cmd_num; + + cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; + cmd_num = ath12k_wifi7_hal_reo_cmd_send(ab, cmd_ring, type, cmd); + + /* cmd_num should start from 1, during failure return the error code */ + if (cmd_num < 0) + return cmd_num; + + /* reo cmd ring descriptors has cmd_num starting from 1 */ + if (cmd_num == 0) + return -EINVAL; + + if (!cb) + return 0; + + /* Can this be optimized so that we keep the pending command list only + * for tid delete command to free up the resource on the command status + * indication? 
+ */ + dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC); + + if (!dp_cmd) + return -ENOMEM; + + memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid)); + dp_cmd->cmd_num = cmd_num; + dp_cmd->handler = cb; + + spin_lock_bh(&dp->reo_cmd_lock); + list_add_tail(&dp_cmd->list, &dp->reo_cmd_list); + spin_unlock_bh(&dp->reo_cmd_lock); + + return 0; + } + + int ath12k_wifi7_peer_rx_tid_reo_update(struct ath12k_dp *dp, + struct ath12k_dp_link_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + u32 ba_win_sz, u16 ssn, + bool update_ssn) + { + struct ath12k_hal_reo_cmd cmd = {}; + struct ath12k_base *ab = dp->ab; + int ret; ++ struct ath12k_dp_rx_tid_rxq rx_tid_rxq; + - cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); ++ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid, ++ (peer->rx_tid_active_bitmask & (1 << rx_tid->tid))); ++ ++ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned); ++ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned); + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; + cmd.ba_window_size = ba_win_sz; + + if (update_ssn) { + cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; + cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN); + } + - ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid, ++ ret = ath12k_wifi7_dp_reo_cmd_send(ab, &rx_tid_rxq, + HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, + NULL); + if (ret) { + ath12k_warn(ab, "failed to update rx tid queue, tid %d (%d)\n", - rx_tid->tid, ret); ++ rx_tid_rxq.tid, ret); + return ret; + } + + rx_tid->ba_win_sz = ba_win_sz; + + return 0; + } + -void ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab, - struct ath12k_dp_rx_tid *rx_tid) ++int ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab, ++ struct ath12k_dp_rx_tid_rxq *rx_tid) + { + struct ath12k_hal_reo_cmd cmd = {}; - unsigned long tot_desc_sz, desc_sz; + int ret; + - tot_desc_sz = rx_tid->qbuf.size; - desc_sz = ath12k_wifi7_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); - - while (tot_desc_sz > desc_sz) { - tot_desc_sz -= desc_sz; - cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz); - cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); - ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid, - HAL_REO_CMD_FLUSH_CACHE, &cmd, - NULL); - if (ret) - ath12k_warn(ab, - "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", - rx_tid->tid, ret); - } - - memset(&cmd, 0, sizeof(cmd)); + cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); + cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); - cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; ++ /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs ++ *in the bitmap will be forwarded/flushed to REO output rings ++ */ ++ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS | ++ HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS; ++ ++ /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum ++ * window size of 1024. In such cases, the driver can issue a single ++ * 1KB descriptor flush command instead of sending multiple 128-byte ++ * flush commands for each QoS TID, improving efficiency. 
++ */ ++ ++ if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID) ++ cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC; ++ + ret = ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid, + HAL_REO_CMD_FLUSH_CACHE, + &cmd, ath12k_dp_reo_cmd_free); - if (ret) { - ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", - rx_tid->tid, ret); - dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->qbuf.vaddr); - rx_tid->qbuf.vaddr = NULL; - } ++ return ret; + } + + int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer, + struct ath12k_dp_rx_tid *rx_tid, + u16 ssn, enum hal_pn_type pn_type) + { + u32 ba_win_sz = rx_tid->ba_win_sz; + struct ath12k_reoq_buf *buf; + void *vaddr, *vaddr_aligned; + dma_addr_t paddr_aligned; + u8 tid = rx_tid->tid; + u32 hw_desc_sz; + int ret; + + buf = &dp_peer->reoq_bufs[tid]; + if (!buf->vaddr) { + /* TODO: Optimize the memory allocation for qos tid based on + * the actual BA window size in REO tid update path. + */ + if (tid == HAL_DESC_REO_NON_QOS_TID) + hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(ba_win_sz, tid); + else + hw_desc_sz = ath12k_wifi7_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, + tid); + + vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); + if (!vaddr) + return -ENOMEM; + + vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); + + ath12k_wifi7_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz, + ssn, pn_type); + + paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(ab->dev, paddr_aligned); + if (ret) { + kfree(vaddr); + return ret; + } + + buf->vaddr = vaddr; + buf->paddr_aligned = paddr_aligned; + buf->size = hw_desc_sz; + } + + rx_tid->qbuf = *buf; + + return 0; + } + ++int ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base *ab, ++ struct ath12k_dp_rx_tid_rxq *rx_tid) ++{ ++ struct ath12k_hal_reo_cmd cmd = {}; ++ ++ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; ++ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); ++ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); ++ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; ++ /* Observed flush cache failure, to avoid that set vld bit during delete */ ++ cmd.upd1 |= HAL_REO_CMD_UPD1_VLD; ++ ++ return ath12k_wifi7_dp_reo_cmd_send(ab, rx_tid, ++ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, ++ ath12k_dp_rx_tid_del_func); ++} ++ + static void ath12k_wifi7_dp_rx_h_csum_offload(struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info) + { + msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ? + CHECKSUM_NONE : CHECKSUM_UNNECESSARY; + } + + static void ath12k_wifi7_dp_rx_h_mpdu(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_rx_desc *rx_desc, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_skb_rxcb *rxcb; + enum hal_encrypt_type enctype; + bool is_decrypted = false; + struct ieee80211_hdr *hdr; + struct ath12k_dp_peer *peer; + struct ieee80211_rx_status *rx_status = rx_info->rx_status; + u32 err_bitmap = rx_info->err_bitmap; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "dp_rx_h_mpdu called without rcu lock"); + + /* PN for multicast packets will be checked in mac80211 */ + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->is_mcbc = rx_info->is_mcbc; + + if (rxcb->is_mcbc) + rxcb->peer_id = rx_info->peer_id; + + peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rxcb->peer_id); + if (peer) { + /* resetting mcbc bit because mcbc packets are unicast + * packets only for AP as STA sends unicast packets. 
+ */ + rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only; + + if (rxcb->is_mcbc) + enctype = peer->sec_type_grp; + else + enctype = peer->sec_type; + } else { + enctype = HAL_ENCRYPT_TYPE_OPEN; + } + + if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) + is_decrypted = rx_info->is_decrypted; + + /* Clear per-MPDU flags while leaving per-PPDU flags intact */ + rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED); + + if (err_bitmap & HAL_RX_MPDU_ERR_FCS) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) + rx_status->flag |= RX_FLAG_MMIC_ERROR; + + if (is_decrypted) { + rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; + + if (rx_info->is_mcbc) + rx_status->flag |= RX_FLAG_MIC_STRIPPED | + RX_FLAG_ICV_STRIPPED; + else + rx_status->flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_PN_VALIDATED; + } + + ath12k_wifi7_dp_rx_h_csum_offload(msdu, rx_info); + ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc, + enctype, is_decrypted, rx_info); + + if (!is_decrypted || rx_info->is_mcbc) + return; + + if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } + } + + static int ath12k_wifi7_dp_rx_msdu_coalesce(struct ath12k_hal *hal, + struct sk_buff_head *msdu_list, + struct sk_buff *first, struct sk_buff *last, + u8 l3pad_bytes, int msdu_len, + struct hal_rx_desc_data *rx_info) + { + struct sk_buff *skb; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first); + int buf_first_hdr_len, buf_first_len; + struct hal_rx_desc *ldesc; + int space_extra, rem_len, buf_len; + u32 hal_rx_desc_sz = hal->hal_desc_sz; + bool is_continuation; + + /* As the msdu is spread across multiple rx buffers, + * find the offset to the start of msdu for computing + * the length of the msdu in the first buffer. + */ + buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; + buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; + + if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { + skb_put(first, buf_first_hdr_len + msdu_len); + skb_pull(first, buf_first_hdr_len); + return 0; + } + + ldesc = (struct hal_rx_desc *)last->data; + rxcb->is_first_msdu = rx_info->is_first_msdu; + rxcb->is_last_msdu = rx_info->is_last_msdu; + + /* MSDU spans over multiple buffers because the length of the MSDU + * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data + * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. + */ + skb_put(first, DP_RX_BUFFER_SIZE); + skb_pull(first, buf_first_hdr_len); + + /* When an MSDU spread over multiple buffers MSDU_END + * tlvs are valid only in the last buffer. Copy those tlvs. 
+ */ + ath12k_dp_rx_desc_end_tlv_copy(hal, rxcb->rx_desc, ldesc); + + space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); + if (space_extra > 0 && + (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { + /* Free up all buffers of the MSDU */ + while ((skb = __skb_dequeue(msdu_list)) != NULL) { + rxcb = ATH12K_SKB_RXCB(skb); + if (!rxcb->is_continuation) { + dev_kfree_skb_any(skb); + break; + } + dev_kfree_skb_any(skb); + } + return -ENOMEM; + } + + rem_len = msdu_len - buf_first_len; + while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { + rxcb = ATH12K_SKB_RXCB(skb); + is_continuation = rxcb->is_continuation; + if (is_continuation) + buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; + else + buf_len = rem_len; + + if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { + WARN_ON_ONCE(1); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + skb_put(skb, buf_len + hal_rx_desc_sz); + skb_pull(skb, hal_rx_desc_sz); + skb_copy_from_linear_data(skb, skb_put(first, buf_len), + buf_len); + dev_kfree_skb_any(skb); + + rem_len -= buf_len; + if (!is_continuation) + break; + } + + return 0; + } + + static int ath12k_wifi7_dp_rx_process_msdu(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct sk_buff_head *msdu_list, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct hal_rx_desc *rx_desc, *lrx_desc; + struct ath12k_skb_rxcb *rxcb; + struct sk_buff *last_buf; + struct ath12k_hal *hal = dp->hal; + u8 l3_pad_bytes; + u16 msdu_len; + int ret; + u32 hal_rx_desc_sz = hal->hal_desc_sz; + + last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu); + if (!last_buf) { + ath12k_warn(dp->ab, + "No valid Rx buffer to access MSDU_END tlv\n"); + ret = -EIO; + goto free_out; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + lrx_desc = (struct hal_rx_desc *)last_buf->data; + + ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, lrx_desc); + if (!rx_info->msdu_done) { + ath12k_warn(dp->ab, "msdu_done bit in msdu_end is not set\n"); + ret = -EIO; + goto free_out; + } + + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->rx_desc = rx_desc; + msdu_len = rx_info->msdu_len; + l3_pad_bytes = rx_info->l3_pad_bytes; + + if (rxcb->is_frag) { + skb_pull(msdu, hal_rx_desc_sz); + } else if (!rxcb->is_continuation) { + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + ret = -EINVAL; + ath12k_warn(dp->ab, "invalid msdu len %u\n", msdu_len); + ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, + sizeof(*rx_desc)); + goto free_out; + } + skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); + } else { + ret = ath12k_wifi7_dp_rx_msdu_coalesce(hal, msdu_list, + msdu, last_buf, + l3_pad_bytes, msdu_len, + rx_info); + if (ret) { + ath12k_warn(dp->ab, + "failed to coalesce msdu rx buffer%d\n", ret); + goto free_out; + } + } + + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu, + rx_info))) { + ret = -EINVAL; + goto free_out; + } + + ath12k_dp_rx_h_ppdu(dp_pdev, rx_info); + ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, rx_desc, rx_info); + + rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; + + return 0; + + free_out: + return ret; + } + + static void + ath12k_wifi7_dp_rx_process_received_packets(struct ath12k_dp *dp, + struct napi_struct *napi, + struct sk_buff_head *msdu_list, + int ring_id) + { + struct ath12k_hw_group *ag = dp->ag; + struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp; + struct ieee80211_rx_status rx_status = {}; + struct ath12k_skb_rxcb 
*rxcb; + struct sk_buff *msdu; + struct ath12k *ar; + struct ath12k_pdev_dp *dp_pdev; + struct ath12k_hw_link *hw_links = ag->hw_links; + struct ath12k_base *partner_ab; + struct hal_rx_desc_data rx_info; + struct ath12k_dp *partner_dp; + u8 hw_link_id, pdev_idx; + int ret; + + if (skb_queue_empty(msdu_list)) + return; + + rx_info.addr2_present = false; + rx_info.rx_status = &rx_status; + + rcu_read_lock(); + + while ((msdu = __skb_dequeue(msdu_list))) { + rxcb = ATH12K_SKB_RXCB(msdu); + hw_link_id = rxcb->hw_link_id; + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, + hw_links[hw_link_id].device_id); + pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params, + hw_links[hw_link_id].pdev_idx); + partner_ab = partner_dp->ab; + ar = partner_ab->pdevs[pdev_idx].ar; + if (!rcu_dereference(partner_ab->pdevs_active[pdev_idx])) { + dev_kfree_skb_any(msdu); + continue; + } + + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + continue; + } + + dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx); + if (!dp_pdev) { + dev_kfree_skb_any(msdu); + continue; + } + + ret = ath12k_wifi7_dp_rx_process_msdu(dp_pdev, msdu, msdu_list, &rx_info); + if (ret) { + ath12k_dbg(dp->ab, ATH12K_DBG_DATA, + "Unable to process msdu %d", ret); + dev_kfree_skb_any(msdu); + continue; + } + + ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info); + } + + rcu_read_unlock(); + } + + int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int ring_id, + struct napi_struct *napi, int budget) + { + struct ath12k_hw_group *ag = dp->ag; + struct ath12k_base *ab = dp->ab; + struct ath12k_hal *hal = dp->hal; + struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp; + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; + struct ath12k_hw_link *hw_links = ag->hw_links; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; + struct ath12k_rx_desc_info *desc_info; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + struct hal_reo_dest_ring *desc; + struct ath12k_dp *partner_dp; + struct sk_buff_head msdu_list; + struct ath12k_skb_rxcb *rxcb; + int total_msdu_reaped = 0; + u8 hw_link_id, device_id; + struct hal_srng *srng; + struct sk_buff *msdu; + bool done = false; + u64 desc_va; + + __skb_queue_head_init(&msdu_list); + + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + + srng = &hal->srng_list[dp->reo_dst_ring[ring_id].ring_id]; + + spin_lock_bh(&srng->lock); + + try_again: + ath12k_hal_srng_access_begin(ab, srng); + + while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + struct rx_mpdu_desc *mpdu_info; + struct rx_msdu_desc *msdu_info; + enum hal_reo_dest_ring_push_reason push_reason; + u32 cookie; + + cookie = le32_get_bits(desc->buf_addr_info.info1, + BUFFER_ADDR_INFO1_SW_COOKIE); + + hw_link_id = le32_get_bits(desc->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + + desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | + le32_to_cpu(desc->buf_va_lo)); + desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); + + device_id = hw_links[hw_link_id].device_id; + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + if (unlikely(!partner_dp)) { + if (desc_info->skb) { + dev_kfree_skb_any(desc_info->skb); + desc_info->skb = NULL; + } + + continue; + } + + /* retry manual desc retrieval */ + if (!desc_info) { + desc_info = ath12k_dp_get_rx_desc(partner_dp, cookie); + if (!desc_info) { + ath12k_warn(partner_dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", + cookie); + continue; + } + 
} + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) + ath12k_warn(ab, "Check HW CC implementation"); + + msdu = desc_info->skb; + desc_info->skb = NULL; + + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(partner_dp->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + num_buffs_reaped[device_id]++; + dp->device_stats.reo_rx[ring_id][dp->device_id]++; + + push_reason = le32_get_bits(desc->info0, + HAL_REO_DEST_RING_INFO0_PUSH_REASON); + if (push_reason != + HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { + dev_kfree_skb_any(msdu); + dp->device_stats.hal_reo_error[ring_id]++; + continue; + } + + msdu_info = &desc->rx_msdu_info; + mpdu_info = &desc->rx_mpdu_info; + + rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & + RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); + rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & + RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); + rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & + RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); + rxcb->hw_link_id = hw_link_id; + rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver, + mpdu_info->peer_meta_data); + rxcb->tid = le32_get_bits(mpdu_info->info0, + RX_MPDU_DESC_INFO0_TID); + + __skb_queue_tail(&msdu_list, msdu); + + if (!rxcb->is_continuation) { + total_msdu_reaped++; + done = true; + } else { + done = false; + } + + if (total_msdu_reaped >= budget) + break; + } + + /* Hw might have updated the head pointer after we cached it. + * In this case, even though there are entries in the ring we'll + * get rx_desc NULL. Give the read another try with updated cached + * head pointer so that we can reap complete MPDU in the current + * rx processing. + */ + if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) { + ath12k_hal_srng_access_end(ab, srng); + goto try_again; + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (!total_msdu_reaped) + goto exit; + + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + rx_ring = &partner_dp->rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } + + ath12k_wifi7_dp_rx_process_received_packets(dp, napi, &msdu_list, + ring_id); + + exit: + return total_msdu_reaped; + } + + static bool + ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_dp_rx_tid *rx_tid, + enum hal_encrypt_type encrypt_type) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct sk_buff *first_frag, *skb; + u64 last_pn; + u64 cur_pn; + + first_frag = skb_peek(&rx_tid->rx_frags); + + if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && + encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && + encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && + encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) + return true; + + last_pn = ath12k_dp_rx_h_get_pn(dp, first_frag); + skb_queue_walk(&rx_tid->rx_frags, skb) { + if (skb == first_frag) + continue; + + cur_pn = ath12k_dp_rx_h_get_pn(dp, skb); + if (cur_pn != last_pn + 1) + return false; + last_pn = cur_pn; + } + return true; + } + + static int ath12k_wifi7_dp_rx_h_defrag_reo_reinject(struct ath12k_dp *dp, + struct ath12k_dp_rx_tid *rx_tid, + struct sk_buff *defrag_skb) + { + struct ath12k_base *ab = dp->ab; + struct ath12k_hal *hal = dp->hal; + struct hal_rx_desc *rx_desc = (struct hal_rx_desc 
*)defrag_skb->data; + struct hal_reo_entrance_ring *reo_ent_ring; + struct hal_reo_dest_ring *reo_dest_ring; + struct dp_link_desc_bank *link_desc_banks; + struct hal_rx_msdu_link *msdu_link; + struct hal_rx_msdu_details *msdu0; + struct hal_srng *srng; + dma_addr_t link_paddr, buf_paddr; + u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; + u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; + int ret; + struct ath12k_rx_desc_info *desc_info; + enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; + u8 dst_ind; + + hal_rx_desc_sz = hal->hal_desc_sz; + link_desc_banks = dp->link_desc_banks; + reo_dest_ring = rx_tid->dst_ring_desc; + + ath12k_wifi7_hal_rx_reo_ent_paddr_get(&reo_dest_ring->buf_addr_info, + &link_paddr, &cookie); + desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); + + msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + + (link_paddr - link_desc_banks[desc_bank].paddr)); + msdu0 = &msdu_link->msdu_link[0]; + msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0); + dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND); + + memset(msdu0, 0, sizeof(*msdu0)); + + msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) | + u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) | + u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) | + u32_encode_bits(defrag_skb->len - hal_rx_desc_sz, + RX_MSDU_DESC_INFO0_MSDU_LENGTH) | + u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) | + u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA); + msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info); + msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info); + + /* change msdu len in hal rx desc */ + ath12k_dp_rxdesc_set_msdu_len(hal, rx_desc, defrag_skb->len - hal_rx_desc_sz); + + buf_paddr = dma_map_single(dp->dev, defrag_skb->data, + defrag_skb->len + skb_tailroom(defrag_skb), + DMA_TO_DEVICE); + if (dma_mapping_error(dp->dev, buf_paddr)) + return -ENOMEM; + + spin_lock_bh(&dp->rx_desc_lock); + desc_info = list_first_entry_or_null(&dp->rx_desc_free_list, + struct ath12k_rx_desc_info, + list); + if (!desc_info) { + spin_unlock_bh(&dp->rx_desc_lock); + ath12k_warn(ab, "failed to find rx desc for reinject\n"); + ret = -ENOMEM; + goto err_unmap_dma; + } + + desc_info->skb = defrag_skb; + desc_info->in_use = true; + + list_del(&desc_info->list); + spin_unlock_bh(&dp->rx_desc_lock); + + ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; + + ath12k_wifi7_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr, + desc_info->cookie, + HAL_RX_BUF_RBM_SW3_BM); + + /* Fill mpdu details into reo entrance ring */ + srng = &hal->srng_list[dp->reo_reinject_ring.ring_id]; + + spin_lock_bh(&srng->lock); + ath12k_hal_srng_access_begin(ab, srng); + + reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng); + if (!reo_ent_ring) { + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + ret = -ENOSPC; + goto err_free_desc; + } + memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); + + ath12k_wifi7_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, + cookie, idle_link_rbm); + + mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | + u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | + u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) | + u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) | + u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID); + + reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info); + reo_ent_ring->rx_mpdu_info.peer_meta_data = + 
reo_dest_ring->rx_mpdu_info.peer_meta_data; + + if (dp->hw_params->reoq_lut_support) { + reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; + queue_addr_hi = 0; + } else { + reo_ent_ring->queue_addr_lo = + cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned)); + queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); + } + + reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, + HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(dst_ind, + HAL_REO_ENTR_RING_INFO0_DEST_IND); + + reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, + HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM); + dest_ring_info0 = le32_get_bits(reo_dest_ring->info0, + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); + reo_ent_ring->info2 = + cpu_to_le32(u32_get_bits(dest_ring_info0, + HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID)); + + ath12k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return 0; + + err_free_desc: + spin_lock_bh(&dp->rx_desc_lock); + desc_info->in_use = false; + desc_info->skb = NULL; + list_add_tail(&desc_info->list, &dp->rx_desc_free_list); + spin_unlock_bh(&dp->rx_desc_lock); + err_unmap_dma: + dma_unmap_single(dp->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), + DMA_TO_DEVICE); + return ret; + } + + static int ath12k_wifi7_dp_rx_h_verify_tkip_mic(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_dp_peer *peer, + enum hal_encrypt_type enctype, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_hal *hal = dp->hal; + struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; + struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); + struct ieee80211_key_conf *key_conf; + struct ieee80211_hdr *hdr; + u8 mic[IEEE80211_CCMP_MIC_LEN]; + int head_len, tail_len, ret; + size_t data_len; + u32 hdr_len, hal_rx_desc_sz = hal->hal_desc_sz; + u8 *key, *data; + u8 key_idx; + + if (enctype != HAL_ENCRYPT_TYPE_TKIP_MIC) + return 0; + + rx_info->addr2_present = false; + rx_info->rx_status = rxs; + + hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); + hdr_len = ieee80211_hdrlen(hdr->frame_control); + head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; + tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; + + if (!is_multicast_ether_addr(hdr->addr1)) + key_idx = peer->ucast_keyidx; + else + key_idx = peer->mcast_keyidx; + + key_conf = peer->keys[key_idx]; + + data = msdu->data + head_len; + data_len = msdu->len - head_len - tail_len; + key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; + + ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, + data_len, mic); + if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) + goto mic_fail; + + return 0; + + mic_fail: + (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true; + (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true; + + ath12k_dp_extract_rx_desc_data(hal, rx_info, rx_desc, rx_desc); + + rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | + RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; + skb_pull(msdu, hal_rx_desc_sz); + + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, rx_desc, msdu, + rx_info))) + return -EINVAL; + + ath12k_dp_rx_h_ppdu(dp_pdev, rx_info); + ath12k_dp_rx_h_undecap(dp_pdev, msdu, rx_desc, + HAL_ENCRYPT_TYPE_TKIP_MIC, true, rx_info); + ieee80211_rx(ath12k_pdev_dp_to_hw(dp_pdev), msdu); + return -EINVAL; + } + + static int ath12k_wifi7_dp_rx_h_defrag(struct ath12k_pdev_dp *dp_pdev, + struct ath12k_dp_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + struct sk_buff 
**defrag_skb, + enum hal_encrypt_type enctype, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_base *ab = dp->ab; + struct sk_buff *skb, *first_frag, *last_frag; + struct ieee80211_hdr *hdr; + bool is_decrypted = false; + int msdu_len = 0; + int extra_space; + u32 flags, hal_rx_desc_sz = ab->hal.hal_desc_sz; + + first_frag = skb_peek(&rx_tid->rx_frags); + last_frag = skb_peek_tail(&rx_tid->rx_frags); + + skb_queue_walk(&rx_tid->rx_frags, skb) { + flags = 0; + hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); + + if (enctype != HAL_ENCRYPT_TYPE_OPEN) + is_decrypted = rx_info->is_decrypted; + + if (is_decrypted) { + if (skb != first_frag) + flags |= RX_FLAG_IV_STRIPPED; + if (skb != last_frag) + flags |= RX_FLAG_ICV_STRIPPED | + RX_FLAG_MIC_STRIPPED; + } + + /* RX fragments are always raw packets */ + if (skb != last_frag) + skb_trim(skb, skb->len - FCS_LEN); + ath12k_dp_rx_h_undecap_frag(dp_pdev, skb, enctype, flags); + + if (skb != first_frag) + skb_pull(skb, hal_rx_desc_sz + + ieee80211_hdrlen(hdr->frame_control)); + msdu_len += skb->len; + } + + extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); + if (extra_space > 0 && + (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) + return -ENOMEM; + + __skb_unlink(first_frag, &rx_tid->rx_frags); + while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { + skb_put_data(first_frag, skb->data, skb->len); + dev_kfree_skb_any(skb); + } + + hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); + ATH12K_SKB_RXCB(first_frag)->is_frag = 1; + + if (ath12k_wifi7_dp_rx_h_verify_tkip_mic(dp_pdev, peer, enctype, first_frag, + rx_info)) + first_frag = NULL; + + *defrag_skb = first_frag; + return 0; + } + + void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, + bool rel_link_desc) + { + enum hal_wbm_rel_bm_act act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE; + struct ath12k_buffer_addr *buf_addr_info; + struct ath12k_dp *dp = rx_tid->dp; + + lockdep_assert_held(&dp->dp_lock); + + if (rx_tid->dst_ring_desc) { + if (rel_link_desc) { + buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info; + ath12k_wifi7_dp_rx_link_desc_return(dp, buf_addr_info, act); + } + kfree(rx_tid->dst_ring_desc); + rx_tid->dst_ring_desc = NULL; + } + + rx_tid->cur_sn = 0; + rx_tid->last_frag_no = 0; + rx_tid->rx_frag_bitmap = 0; + __skb_queue_purge(&rx_tid->rx_frags); + } + + static int ath12k_wifi7_dp_rx_frag_h_mpdu(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_reo_dest_ring *ring_desc, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_hal *hal = dp->hal; + struct ath12k_base *ab = dp->ab; + struct ath12k_dp_peer *peer; + struct ath12k_dp_rx_tid *rx_tid; + struct sk_buff *defrag_skb = NULL; + u32 peer_id = rx_info->peer_id; + u16 seqno, frag_no; + u8 tid = rx_info->tid; + int ret = 0; + bool more_frags; + enum hal_encrypt_type enctype = rx_info->enctype; + + frag_no = ath12k_dp_rx_h_frag_no(hal, msdu); + more_frags = ath12k_dp_rx_h_more_frags(hal, msdu); + seqno = rx_info->seq_no; + + if (!rx_info->seq_ctl_valid || !rx_info->fc_valid || + tid > IEEE80211_NUM_TIDS) + return -EINVAL; + + /* received unfragmented packet in reo + * exception ring, this shouldn't happen + * as these packets typically come from + * reo2sw srngs. 
+ */ + if (WARN_ON_ONCE(!frag_no && !more_frags)) + return -EINVAL; + + spin_lock_bh(&dp->dp_lock); + peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id); + if (!peer) { + ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", + peer_id); + ret = -ENOENT; + goto out_unlock; + } + + if (!peer->dp_setup_done) { + ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", + peer->addr, peer_id); + ret = -ENOENT; + goto out_unlock; + } + + rx_tid = &peer->rx_tid[tid]; + + if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || + skb_queue_empty(&rx_tid->rx_frags)) { + /* Flush stored fragments and start a new sequence */ + ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true); + rx_tid->cur_sn = seqno; + } + + if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { + /* Fragment already present */ + ret = -EINVAL; + goto out_unlock; + } + + if ((!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))) + __skb_queue_tail(&rx_tid->rx_frags, msdu); + else + ath12k_dp_rx_h_sort_frags(hal, &rx_tid->rx_frags, msdu); + + rx_tid->rx_frag_bitmap |= BIT(frag_no); + if (!more_frags) + rx_tid->last_frag_no = frag_no; + + if (frag_no == 0) { + rx_tid->dst_ring_desc = kmemdup(ring_desc, + sizeof(*rx_tid->dst_ring_desc), + GFP_ATOMIC); + if (!rx_tid->dst_ring_desc) { + ret = -ENOMEM; + goto out_unlock; + } + } else { + ath12k_wifi7_dp_rx_link_desc_return(dp, &ring_desc->buf_addr_info, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } + + if (!rx_tid->last_frag_no || + rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { + mod_timer(&rx_tid->frag_timer, jiffies + + ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS); + goto out_unlock; + } + + spin_unlock_bh(&dp->dp_lock); + timer_delete_sync(&rx_tid->frag_timer); + spin_lock_bh(&dp->dp_lock); + + peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id); + if (!peer) + goto err_frags_cleanup; + + if (!ath12k_wifi7_dp_rx_h_defrag_validate_incr_pn(dp_pdev, rx_tid, enctype)) + goto err_frags_cleanup; + + if (ath12k_wifi7_dp_rx_h_defrag(dp_pdev, peer, rx_tid, &defrag_skb, + enctype, rx_info)) + goto err_frags_cleanup; + + if (!defrag_skb) + goto err_frags_cleanup; + + if (ath12k_wifi7_dp_rx_h_defrag_reo_reinject(dp, rx_tid, defrag_skb)) + goto err_frags_cleanup; + + ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, false); + goto out_unlock; + + err_frags_cleanup: + dev_kfree_skb_any(defrag_skb); + ath12k_wifi7_dp_rx_frags_cleanup(rx_tid, true); + out_unlock: + spin_unlock_bh(&dp->dp_lock); + return ret; + } + + static int + ath12k_wifi7_dp_process_rx_err_buf(struct ath12k_pdev_dp *dp_pdev, + struct hal_reo_dest_ring *desc, + struct list_head *used_list, + bool drop, u32 cookie) + { + struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev); + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_hal *hal = dp->hal; + struct sk_buff *msdu; + struct ath12k_skb_rxcb *rxcb; + struct hal_rx_desc_data rx_info; + struct hal_rx_desc *rx_desc; + u16 msdu_len; + u32 hal_rx_desc_sz = hal->hal_desc_sz; + struct ath12k_rx_desc_info *desc_info; + u64 desc_va; + + desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | + le32_to_cpu(desc->buf_va_lo)); + desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); + + /* retry manual desc retrieval */ + if (!desc_info) { + desc_info = ath12k_dp_get_rx_desc(dp, cookie); + if (!desc_info) { + ath12k_warn(dp->ab, + "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", + cookie); + return -EINVAL; + } + } + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) + ath12k_warn(dp->ab, "RX Exception, Check HW CC 
implementation"); + + msdu = desc_info->skb; + desc_info->skb = NULL; + + list_add_tail(&desc_info->list, used_list); + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(dp->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + if (drop) { + dev_kfree_skb_any(msdu); + return 0; + } + + rcu_read_lock(); + if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { + dev_kfree_skb_any(msdu); + goto exit; + } + + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + goto exit; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + ath12k_dp_extract_rx_desc_data(hal, &rx_info, rx_desc, rx_desc); + + msdu_len = rx_info.msdu_len; + if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { + ath12k_warn(dp->ab, "invalid msdu leng %u", msdu_len); + ath12k_dbg_dump(dp->ab, ATH12K_DBG_DATA, NULL, "", rx_desc, + sizeof(*rx_desc)); + dev_kfree_skb_any(msdu); + goto exit; + } + + skb_put(msdu, hal_rx_desc_sz + msdu_len); + + if (ath12k_wifi7_dp_rx_frag_h_mpdu(dp_pdev, msdu, desc, &rx_info)) { + dev_kfree_skb_any(msdu); + ath12k_wifi7_dp_rx_link_desc_return(dp, &desc->buf_addr_info, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } + exit: + rcu_read_unlock(); + return 0; + } + ++static int ath12k_dp_h_msdu_buffer_type(struct ath12k_dp *dp, ++ struct list_head *list, ++ struct hal_reo_dest_ring *desc) ++{ ++ struct ath12k_rx_desc_info *desc_info; ++ struct ath12k_skb_rxcb *rxcb; ++ struct sk_buff *msdu; ++ u64 desc_va; ++ ++ dp->device_stats.reo_excep_msdu_buf_type++; ++ ++ desc_va = (u64)le32_to_cpu(desc->buf_va_hi) << 32 | ++ le32_to_cpu(desc->buf_va_lo); ++ desc_info = (struct ath12k_rx_desc_info *)(uintptr_t)desc_va; ++ if (!desc_info) { ++ u32 cookie; ++ ++ cookie = le32_get_bits(desc->buf_addr_info.info1, ++ BUFFER_ADDR_INFO1_SW_COOKIE); ++ desc_info = ath12k_dp_get_rx_desc(dp, cookie); ++ if (!desc_info) { ++ ath12k_warn(dp->ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", ++ cookie); ++ return -EINVAL; ++ } ++ } ++ ++ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) { ++ ath12k_warn(dp->ab, "rx exception, magic check failed with value: %u\n", ++ desc_info->magic); ++ return -EINVAL; ++ } ++ ++ msdu = desc_info->skb; ++ desc_info->skb = NULL; ++ list_add_tail(&desc_info->list, list); ++ rxcb = ATH12K_SKB_RXCB(msdu); ++ dma_unmap_single(dp->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), ++ DMA_FROM_DEVICE); ++ dev_kfree_skb_any(msdu); ++ ++ return 0; ++} ++ + int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi, + int budget) + { + struct ath12k_base *ab = dp->ab; + struct ath12k_hal *hal = dp->hal; + struct ath12k_hw_group *ag = dp->ag; + struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp; + struct ath12k_dp *partner_dp; + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; + u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; + struct dp_link_desc_bank *link_desc_banks; + enum hal_rx_buf_return_buf_manager rbm; + struct hal_rx_msdu_link *link_desc_va; + int tot_n_bufs_reaped, quota, ret, i; + struct hal_reo_dest_ring *reo_desc; + struct dp_rxdma_ring *rx_ring; + struct dp_srng *reo_except; + struct ath12k_hw_link *hw_links = ag->hw_links; + struct ath12k_pdev_dp *dp_pdev; + u8 hw_link_id, device_id; + u32 desc_bank, num_msdus; + struct hal_srng *srng; + dma_addr_t paddr; + bool is_frag; + bool drop; + int pdev_idx; + struct list_head *used_list; + enum hal_wbm_rel_bm_act act; + + tot_n_bufs_reaped = 0; + quota = budget; + + for (device_id = 0; device_id < 
ATH12K_MAX_DEVICES; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + + reo_except = &dp->reo_except_ring; + + srng = &hal->srng_list[reo_except->ring_id]; + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (budget && + (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + drop = false; + dp->device_stats.err_ring_pkts++; + ++ hw_link_id = le32_get_bits(reo_desc->info0, ++ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); ++ device_id = hw_links[hw_link_id].device_id; ++ partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); ++ ++ /* Below case is added to handle data packet from un-associated clients. ++ * As it is expected that AST lookup will fail for ++ * un-associated station's data packets. ++ */ ++ if (le32_get_bits(reo_desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE) == ++ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU) { ++ if (!ath12k_dp_h_msdu_buffer_type(partner_dp, ++ &rx_desc_used_list[device_id], ++ reo_desc)) { ++ num_buffs_reaped[device_id]++; ++ tot_n_bufs_reaped++; ++ } ++ goto next_desc; ++ } ++ + ret = ath12k_wifi7_hal_desc_reo_parse_err(dp, reo_desc, &paddr, + &desc_bank); + if (ret) { + ath12k_warn(ab, "failed to parse error reo desc %d\n", + ret); + continue; + } + - hw_link_id = le32_get_bits(reo_desc->info0, - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); - device_id = hw_links[hw_link_id].device_id; - partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); - + pdev_idx = ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params, + hw_links[hw_link_id].pdev_idx); + + link_desc_banks = partner_dp->link_desc_banks; + link_desc_va = link_desc_banks[desc_bank].vaddr + + (paddr - link_desc_banks[desc_bank].paddr); + ath12k_wifi7_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, + msdu_cookies, &rbm); + if (rbm != partner_dp->idle_link_rbm && + rbm != HAL_RX_BUF_RBM_SW3_BM && + rbm != partner_dp->hal->hal_params->rx_buf_rbm) { + act = HAL_WBM_REL_BM_ACT_REL_MSDU; + dp->device_stats.invalid_rbm++; + ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); + ath12k_wifi7_dp_rx_link_desc_return(partner_dp, + &reo_desc->buf_addr_info, + act); + continue; + } + + is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) & + RX_MPDU_DESC_INFO0_FRAG_FLAG); + + /* Process only rx fragments with one msdu per link desc below, and drop + * msdu's indicated due to error reasons. + * Dynamic fragmentation not supported in Multi-link client, so drop the + * partner device buffers. 
+ */ + if (!is_frag || num_msdus > 1 || + partner_dp->device_id != dp->device_id) { + drop = true; + act = HAL_WBM_REL_BM_ACT_PUT_IN_IDLE; + + /* Return the link desc back to wbm idle list */ + ath12k_wifi7_dp_rx_link_desc_return(partner_dp, + &reo_desc->buf_addr_info, + act); + } + + rcu_read_lock(); + + dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx); + if (!dp_pdev) { + rcu_read_unlock(); + continue; + } + + for (i = 0; i < num_msdus; i++) { + used_list = &rx_desc_used_list[device_id]; + + if (!ath12k_wifi7_dp_process_rx_err_buf(dp_pdev, reo_desc, + used_list, + drop, + msdu_cookies[i])) { + num_buffs_reaped[device_id]++; + tot_n_bufs_reaped++; + } + } + + rcu_read_unlock(); + ++next_desc: + if (tot_n_bufs_reaped >= quota) { + tot_n_bufs_reaped = quota; + goto exit; + } + + budget = quota - tot_n_bufs_reaped; + } + + exit: + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + rx_ring = &partner_dp->rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(partner_dp, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } + + return tot_n_bufs_reaped; + } + + static void + ath12k_wifi7_dp_rx_null_q_desc_sg_drop(struct ath12k_dp *dp, int msdu_len, + struct sk_buff_head *msdu_list) + { + struct sk_buff *skb, *tmp; + struct ath12k_skb_rxcb *rxcb; + int n_buffs; + + n_buffs = DIV_ROUND_UP(msdu_len, + (DP_RX_BUFFER_SIZE - dp->ab->hal.hal_desc_sz)); + + skb_queue_walk_safe(msdu_list, skb, tmp) { + rxcb = ATH12K_SKB_RXCB(skb); + if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && + rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { + if (!n_buffs) + break; + __skb_unlink(skb, msdu_list); + dev_kfree_skb_any(skb); + n_buffs--; + } + } + } + + static int ath12k_wifi7_dp_rx_h_null_q_desc(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info, + struct sk_buff_head *msdu_list) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_base *ab = dp->ab; + u16 msdu_len = rx_info->msdu_len; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes = rx_info->l3_pad_bytes; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz; + + if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { + /* First buffer will be freed by the caller, so deduct it's length */ + msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); + ath12k_wifi7_dp_rx_null_q_desc_sg_drop(dp, msdu_len, msdu_list); + return -EINVAL; + } + + /* Even after cleaning up the sg buffers in the msdu list with above check + * any msdu received with continuation flag needs to be dropped as invalid. + * This protects against some random err frame with continuation flag. + */ + if (rxcb->is_continuation) + return -EINVAL; + + if (!rx_info->msdu_done) { + ath12k_warn(ab, + "msdu_done bit not set in null_q_des processing\n"); + __skb_queue_purge(msdu_list); + return -EIO; + } + + /* Handle NULL queue descriptor violations arising out a missing + * REO queue for a given peer or a given TID. This typically + * may happen if a packet is received on a QOS enabled TID before the + * ADDBA negotiation for that TID, when the TID queue is setup. Or + * it may also happen for MC/BC frames if they are not routed to the + * non-QOS TID queue, in the absence of any other default TID queue. 
+ * This error can show up both in a REO destination or WBM release ring. + */ + + if (rxcb->is_frag) { + skb_pull(msdu, hal_rx_desc_sz); + } else { + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) + return -EINVAL; + + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + } + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info))) + return -EINVAL; + + ath12k_dp_rx_h_ppdu(dp_pdev, rx_info); + ath12k_wifi7_dp_rx_h_mpdu(dp_pdev, msdu, desc, rx_info); + + rxcb->tid = rx_info->tid; + + /* Please note that caller will having the access to msdu and completing + * rx with mac80211. Need not worry about cleaning up amsdu_list. + */ + + return 0; + } + + static bool ath12k_wifi7_dp_rx_h_tkip_mic_err(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_base *ab = dp->ab; + u16 msdu_len = rx_info->msdu_len; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes = rx_info->l3_pad_bytes; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; + + rxcb->is_first_msdu = rx_info->is_first_msdu; + rxcb->is_last_msdu = rx_info->is_last_msdu; + + if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "invalid msdu len in tkip mic err %u\n", msdu_len); + ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc, + sizeof(*desc)); + return true; + } + + skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(dp, desc, msdu, rx_info))) + return true; + + ath12k_dp_rx_h_ppdu(dp_pdev, rx_info); + + rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED); + + ath12k_dp_rx_h_undecap(dp_pdev, msdu, desc, + HAL_ENCRYPT_TYPE_TKIP_MIC, false, rx_info); + return false; + } + + static bool ath12k_wifi7_dp_rx_h_rxdma_err(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + bool drop = false; + + dp->device_stats.rxdma_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR: + case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: + if (rx_info->err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { + drop = ath12k_wifi7_dp_rx_h_tkip_mic_err(dp_pdev, msdu, rx_info); + break; + } + fallthrough; + default: + /* TODO: Review other rxdma error code to check if anything is + * worth reporting to mac80211 + */ + drop = true; + break; + } + + return drop; + } + + static bool ath12k_wifi7_dp_rx_h_reo_err(struct ath12k_pdev_dp *dp_pdev, + struct sk_buff *msdu, + struct hal_rx_desc_data *rx_info, + struct sk_buff_head *msdu_list) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + bool drop = false; + + dp->device_stats.reo_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: + if (ath12k_wifi7_dp_rx_h_null_q_desc(dp_pdev, msdu, rx_info, msdu_list)) + drop = true; + break; + case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: + /* TODO: Do not drop PN failed packets in the driver; + * instead, it is good to drop such packets in mac80211 + * after incrementing the replay counters. 
+ */ + fallthrough; + default: + /* TODO: Review other errors and process them to mac80211 + * as appropriate. + */ + drop = true; + break; + } + + return drop; + } + + static void ath12k_wifi7_dp_rx_wbm_err(struct ath12k_pdev_dp *dp_pdev, + struct napi_struct *napi, + struct sk_buff *msdu, + struct sk_buff_head *msdu_list) + { + struct ath12k_dp *dp = dp_pdev->dp; + struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; + struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); + struct ieee80211_rx_status rxs = {}; + struct hal_rx_desc_data rx_info; + bool drop = true; + + rx_info.addr2_present = false; + rx_info.rx_status = &rxs; + + ath12k_dp_extract_rx_desc_data(dp->hal, &rx_info, rx_desc, rx_desc); + + switch (rxcb->err_rel_src) { + case HAL_WBM_REL_SRC_MODULE_REO: + drop = ath12k_wifi7_dp_rx_h_reo_err(dp_pdev, msdu, &rx_info, msdu_list); + break; + case HAL_WBM_REL_SRC_MODULE_RXDMA: + drop = ath12k_wifi7_dp_rx_h_rxdma_err(dp_pdev, msdu, &rx_info); + break; + default: + /* msdu will get freed */ + break; + } + + if (drop) { + dev_kfree_skb_any(msdu); + return; + } + + rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR; + + ath12k_dp_rx_deliver_msdu(dp_pdev, napi, msdu, &rx_info); + } + + void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd, + struct ath12k_dp_rx_tid *rx_tid, + u32 cipher, enum set_key_cmd key_cmd) + { + cmd->flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd->upd0 = HAL_REO_CMD_UPD0_PN | + HAL_REO_CMD_UPD0_PN_SIZE | + HAL_REO_CMD_UPD0_PN_VALID | + HAL_REO_CMD_UPD0_PN_CHECK | + HAL_REO_CMD_UPD0_SVLD; + + switch (cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (key_cmd == SET_KEY) { + cmd->upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; + cmd->pn_size = 48; + } + break; + default: + break; + } + + cmd->addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned); + cmd->addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned); + } + + int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp, + struct napi_struct *napi, int budget) + { + struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES]; + struct ath12k_base *ab = dp->ab; + struct ath12k_hal *hal = dp->hal; + struct ath12k *ar; + struct ath12k_pdev_dp *dp_pdev; + struct ath12k_hw_group *ag = dp->ag; + struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp; + struct ath12k_dp *partner_dp; + struct dp_rxdma_ring *rx_ring; + struct hal_rx_wbm_rel_info err_info; + struct hal_srng *srng; + struct sk_buff *msdu; + struct sk_buff_head msdu_list, scatter_msdu_list; + struct ath12k_skb_rxcb *rxcb; + void *rx_desc; + int num_buffs_reaped[ATH12K_MAX_DEVICES] = {}; + int total_num_buffs_reaped = 0; + struct ath12k_rx_desc_info *desc_info; + struct ath12k_device_dp_stats *device_stats = &dp->device_stats; + struct ath12k_hw_link *hw_links = ag->hw_links; + u8 hw_link_id, device_id; + int ret, pdev_idx; + struct hal_rx_desc *msdu_data; + + __skb_queue_head_init(&msdu_list); + __skb_queue_head_init(&scatter_msdu_list); + + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); + + srng = &hal->srng_list[dp->rx_rel_ring.ring_id]; + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while (budget) { + rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng); + if (!rx_desc) + break; + + ret = ath12k_wifi7_hal_wbm_desc_parse_err(dp, rx_desc, + &err_info); + if (ret) { + ath12k_warn(ab, "failed to parse rx error in wbm_rel ring desc 
%d\n", + ret); + continue; + } + + desc_info = err_info.rx_desc; + + /* retry manual desc retrieval if hw cc is not done */ + if (!desc_info) { + desc_info = ath12k_dp_get_rx_desc(dp, err_info.cookie); + if (!desc_info) { + ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n", + err_info.cookie); + continue; + } + } + + if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC) + ath12k_warn(ab, "WBM RX err, Check HW CC implementation"); + + msdu = desc_info->skb; + desc_info->skb = NULL; + + device_id = desc_info->device_id; + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + if (unlikely(!partner_dp)) { + dev_kfree_skb_any(msdu); + + /* In any case continuation bit is set + * in the previous record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + continue; + } + + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); + + rxcb = ATH12K_SKB_RXCB(msdu); + dma_unmap_single(partner_dp->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + num_buffs_reaped[device_id]++; + total_num_buffs_reaped++; + + if (!err_info.continuation) + budget--; + + if (err_info.push_reason != + HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { + dev_kfree_skb_any(msdu); + continue; + } + + msdu_data = (struct hal_rx_desc *)msdu->data; + rxcb->err_rel_src = err_info.err_rel_src; + rxcb->err_code = err_info.err_code; + rxcb->is_first_msdu = err_info.first_msdu; + rxcb->is_last_msdu = err_info.last_msdu; + rxcb->is_continuation = err_info.continuation; + rxcb->rx_desc = msdu_data; + rxcb->peer_id = ath12k_wifi7_dp_rx_get_peer_id(dp, dp->peer_metadata_ver, + err_info.peer_metadata); + + if (err_info.continuation) { + __skb_queue_tail(&scatter_msdu_list, msdu); + continue; + } + + hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_dp->hal, + msdu_data); + if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) { + dev_kfree_skb_any(msdu); + + /* In any case continuation bit is set + * in the previous record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + continue; + } + + if (!skb_queue_empty(&scatter_msdu_list)) { + struct sk_buff *msdu; + + skb_queue_walk(&scatter_msdu_list, msdu) { + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->hw_link_id = hw_link_id; + } + + skb_queue_splice_tail_init(&scatter_msdu_list, + &msdu_list); + } + + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->hw_link_id = hw_link_id; + __skb_queue_tail(&msdu_list, msdu); + } + + /* In any case continuation bit is set in the + * last record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (!total_num_buffs_reaped) + goto done; + + for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) { + if (!num_buffs_reaped[device_id]) + continue; + + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + rx_ring = &partner_dp->rx_refill_buf_ring; + + ath12k_dp_rx_bufs_replenish(dp, rx_ring, + &rx_desc_used_list[device_id], + num_buffs_reaped[device_id]); + } + + rcu_read_lock(); + while ((msdu = __skb_dequeue(&msdu_list))) { + rxcb = ATH12K_SKB_RXCB(msdu); + hw_link_id = rxcb->hw_link_id; + + device_id = hw_links[hw_link_id].device_id; + partner_dp = ath12k_dp_hw_grp_to_dp(dp_hw_grp, device_id); + if (unlikely(!partner_dp)) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n", + hw_link_id, device_id); + dev_kfree_skb_any(msdu); + continue; + } + + pdev_idx = 
ath12k_hw_mac_id_to_pdev_id(partner_dp->hw_params, + hw_links[hw_link_id].pdev_idx); + + dp_pdev = ath12k_dp_to_pdev_dp(partner_dp, pdev_idx); + if (!dp_pdev) { + dev_kfree_skb_any(msdu); + continue; + } + ar = ath12k_pdev_dp_to_ar(dp_pdev); + + if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_idx])) { + dev_kfree_skb_any(msdu); + continue; + } + + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + continue; + } + + if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) { + device_id = dp_pdev->dp->device_id; + device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++; + } + + ath12k_wifi7_dp_rx_wbm_err(dp_pdev, napi, msdu, &msdu_list); + } + rcu_read_unlock(); + done: + return total_num_buffs_reaped; + } + + int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab) + { + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + struct htt_rx_ring_tlv_filter tlv_filter = {}; + u32 ring_id; + int ret; + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; + + ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + + tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; + tlv_filter.offset_valid = true; + tlv_filter.rx_packet_offset = hal_rx_desc_sz; + + tlv_filter.rx_mpdu_start_offset = + ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274(); + tlv_filter.rx_msdu_end_offset = + ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274(); + + tlv_filter.rx_mpdu_start_wmask = ath12k_hal_rx_mpdu_start_wmask_get_qcn9274(); + tlv_filter.rx_msdu_end_wmask = ath12k_hal_rx_msdu_end_wmask_get_qcn9274(); + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n", + tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask); + + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0, + HAL_RXDMA_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + + return ret; + } + + int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) + { + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); + struct htt_rx_ring_tlv_filter tlv_filter = {}; + u32 ring_id; + int ret = 0; + u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; + int i; + + ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + + tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING; + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR; + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST | + HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA; + tlv_filter.offset_valid = true; + tlv_filter.rx_packet_offset = hal_rx_desc_sz; + + tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv); + + tlv_filter.rx_mpdu_start_offset = + ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850(); + tlv_filter.rx_msdu_end_offset = + ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850(); + + /* TODO: Selectively subscribe to required qwords within msdu_end + * and mpdu_start and setup the mask in below msg + * and modify the rx_desc struct + */ + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = dp->rx_mac_buf_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, + HAL_RXDMA_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + } + + return ret; + } + + void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp 
*dp) + { + struct ath12k_base *ab = dp->ab; + struct ath12k_hal *hal = dp->hal; + struct hal_tlv_64_hdr *hdr; + struct hal_srng *srng; + struct ath12k_dp_rx_reo_cmd *cmd, *tmp; + bool found = false; + u16 tag; + struct hal_reo_status reo_status; + + srng = &hal->srng_list[dp->reo_status_ring.ring_id]; + + memset(&reo_status, 0, sizeof(reo_status)); + + spin_lock_bh(&srng->lock); + + ath12k_hal_srng_access_begin(ab, srng); + + while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); + + switch (tag) { + case HAL_REO_GET_QUEUE_STATS_STATUS: + ath12k_wifi7_hal_reo_status_queue_stats(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_QUEUE_STATUS: + ath12k_wifi7_hal_reo_flush_queue_status(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_CACHE_STATUS: + ath12k_wifi7_hal_reo_flush_cache_status(ab, hdr, + &reo_status); + break; + case HAL_REO_UNBLOCK_CACHE_STATUS: + ath12k_wifi7_hal_reo_unblk_cache_status(ab, hdr, + &reo_status); + break; + case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: + ath12k_wifi7_hal_reo_flush_timeout_list_status(ab, hdr, + &reo_status); + break; + case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: + ath12k_wifi7_hal_reo_desc_thresh_reached_status(ab, hdr, + &reo_status); + break; + case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: + ath12k_wifi7_hal_reo_update_rx_reo_queue_status(ab, hdr, + &reo_status); + break; + default: + ath12k_warn(ab, "Unknown reo status type %d\n", tag); + continue; + } + + spin_lock_bh(&dp->reo_cmd_lock); + list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { + if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { + found = true; + list_del(&cmd->list); + break; + } + } + spin_unlock_bh(&dp->reo_cmd_lock); + + if (found) { + cmd->handler(dp, (void *)&cmd->data, + reo_status.uniform_hdr.cmd_status); + kfree(cmd); + } + + found = false; + } + + ath12k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + } + + bool + ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc) + { + u32 tlv_tag; + + tlv_tag = ab->hal.ops->rx_desc_get_mpdu_start_tag(rx_desc); + + return tlv_tag == HAL_RX_MPDU_START; + } diff --cc drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h index 0000000000000,d15bffe223c7d..a98836b83f482 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/wifi7/dp_rx.h @@@ -1,0 -1,55 +1,59 @@@ + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + #ifndef ATH12K_DP_RX_WIFI7_H + #define ATH12K_DP_RX_WIFI7_H + + #include "../core.h" + #include "../dp_rx.h" + #include "hal_rx_desc.h" + + struct ath12k_hal_reo_cmd; + + int ath12k_wifi7_dp_rx_process_wbm_err(struct ath12k_dp *dp, + struct napi_struct *napi, int budget); + int ath12k_wifi7_dp_rx_process_err(struct ath12k_dp *dp, struct napi_struct *napi, + int budget); + int ath12k_wifi7_dp_rx_process(struct ath12k_dp *dp, int mac_id, + struct napi_struct *napi, + int budget); + void ath12k_wifi7_dp_rx_process_reo_status(struct ath12k_dp *dp); + int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); + int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); + void ath12k_wifi7_dp_setup_pn_check_reo_cmd(struct ath12k_hal_reo_cmd *cmd, + struct ath12k_dp_rx_tid *rx_tid, + u32 cipher, enum set_key_cmd key_cmd); + int ath12k_wifi7_dp_rx_assign_reoq(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer, + struct ath12k_dp_rx_tid *rx_tid, + u16 ssn, enum hal_pn_type pn_type); + int ath12k_wifi7_dp_rx_link_desc_return(struct ath12k_dp *dp, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action); + void ath12k_wifi7_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid, + bool rel_link_desc); + void ath12k_wifi7_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid, + dma_addr_t paddr); + void ath12k_wifi7_dp_rx_peer_tid_delete(struct ath12k_base *ab, + struct ath12k_dp_link_peer *peer, u8 tid); -int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid, ++int ath12k_wifi7_dp_reo_cmd_send(struct ath12k_base *ab, ++ struct ath12k_dp_rx_tid_rxq *rx_tid, + enum hal_reo_cmd_type type, + struct ath12k_hal_reo_cmd *cmd, + void (*cb)(struct ath12k_dp *dp, void *ctx, + enum hal_reo_cmd_status status)); -void ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab, - struct ath12k_dp_rx_tid *rx_tid); ++int ath12k_wifi7_dp_reo_cache_flush(struct ath12k_base *ab, ++ struct ath12k_dp_rx_tid_rxq *rx_tid); + int ath12k_wifi7_peer_rx_tid_reo_update(struct ath12k_dp *dp, + struct ath12k_dp_link_peer *peer, + struct ath12k_dp_rx_tid *rx_tid, + u32 ba_win_sz, u16 ssn, + bool update_ssn); ++void ath12k_wifi7_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid); + bool + ath12k_wifi7_dp_rxdesc_mpdu_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc); ++int ath12k_wifi7_dp_rx_tid_delete_handler(struct ath12k_base *ab, ++ struct ath12k_dp_rx_tid_rxq *rx_tid); + #endif diff --cc drivers/net/wireless/ath/ath12k/wifi7/hal.h index 0000000000000,0a39862d07c45..7d65b82c61f25 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/hal.h +++ b/drivers/net/wireless/ath/ath12k/wifi7/hal.h @@@ -1,0 -1,563 +1,564 @@@ + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + + #ifndef ATH12K_HAL_WIFI7_H + #define ATH12K_HAL_WIFI7_H + + #include "../core.h" + #include "../hal.h" + #include "hal_desc.h" + #include "hal_tx.h" + #include "hal_rx.h" + #include "hal_rx_desc.h" + + /* calculate the register address from bar0 of shadow register x */ + #define HAL_SHADOW_BASE_ADDR 0x000008fc + #define HAL_SHADOW_NUM_REGS 40 + #define HAL_HP_OFFSET_IN_REG_START 1 + #define HAL_OFFSET_FROM_HP_TO_TP 4 + + #define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) + #define HAL_REO_QDESC_MAX_PEERID 8191 + + /* WCSS Relative address */ + #define HAL_SEQ_WCSS_CMEM_OFFSET 0x00100000 + #define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 + #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 + #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 + #define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) \ + ((hal)->regs->umac_ce0_src_reg_base) + #define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) \ + ((hal)->regs->umac_ce0_dest_reg_base) + #define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) \ + ((hal)->regs->umac_ce1_src_reg_base) + #define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) \ + ((hal)->regs->umac_ce1_dest_reg_base) + #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 + + #define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 + + #define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c + + /* SW2TCL(x) R0 ring configuration address */ + #define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020 + #define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240 + + #define HAL_TCL1_RING_BASE_LSB(hal) \ + ((hal)->regs->tcl1_ring_base_lsb) + #define HAL_TCL1_RING_BASE_MSB(hal) \ + ((hal)->regs->tcl1_ring_base_msb) + #define HAL_TCL1_RING_ID(hal) ((hal)->regs->tcl1_ring_id) + #define HAL_TCL1_RING_MISC(hal) \ + ((hal)->regs->tcl1_ring_misc) + #define HAL_TCL1_RING_TP_ADDR_LSB(hal) \ + ((hal)->regs->tcl1_ring_tp_addr_lsb) + #define HAL_TCL1_RING_TP_ADDR_MSB(hal) \ + ((hal)->regs->tcl1_ring_tp_addr_msb) + #define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(hal) \ + ((hal)->regs->tcl1_ring_consumer_int_setup_ix0) + #define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(hal) \ + ((hal)->regs->tcl1_ring_consumer_int_setup_ix1) + #define HAL_TCL1_RING_MSI1_BASE_LSB(hal) \ + ((hal)->regs->tcl1_ring_msi1_base_lsb) + #define HAL_TCL1_RING_MSI1_BASE_MSB(hal) \ + ((hal)->regs->tcl1_ring_msi1_base_msb) + #define HAL_TCL1_RING_MSI1_DATA(hal) \ + ((hal)->regs->tcl1_ring_msi1_data) + #define HAL_TCL2_RING_BASE_LSB(hal) \ + ((hal)->regs->tcl2_ring_base_lsb) + #define HAL_TCL_RING_BASE_LSB(hal) \ + ((hal)->regs->tcl_ring_base_lsb) + + #define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_MSI1_BASE_LSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_MSI1_BASE_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_MSI1_DATA_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_MSI1_DATA(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_BASE_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_BASE_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_ID_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_ID(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define 
HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_TP_ADDR_LSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_TP_ADDR_MSB(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + #define HAL_TCL1_RING_MISC_OFFSET(hal) ({ typeof(hal) _hal = (hal); \ + (HAL_TCL1_RING_MISC(_hal) - HAL_TCL1_RING_BASE_LSB(_hal)); }) + + /* SW2TCL(x) R2 ring pointers (head/tail) address */ + #define HAL_TCL1_RING_HP 0x00002000 + #define HAL_TCL1_RING_TP 0x00002004 + #define HAL_TCL2_RING_HP 0x00002008 + #define HAL_TCL_RING_HP 0x00002028 + + #define HAL_TCL1_RING_TP_OFFSET \ + (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP) + + /* TCL STATUS ring address */ + #define HAL_TCL_STATUS_RING_BASE_LSB(hal) \ + ((hal)->regs->tcl_status_ring_base_lsb) + #define HAL_TCL_STATUS_RING_HP 0x00002048 + + /* PPE2TCL1 Ring address */ + #define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48 + #define HAL_TCL_PPE2TCL1_RING_HP 0x00002038 + + /* WBM PPE Release Ring address */ + #define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(hal) \ + ((hal)->regs->ppe_rel_ring_base) + #define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020 + + /* REO2SW(x) R0 ring configuration address */ + #define HAL_REO1_GEN_ENABLE 0x00000000 + #define HAL_REO1_MISC_CTRL_ADDR(hal) \ + ((hal)->regs->reo1_misc_ctrl_addr) + #define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004 + #define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008 + #define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c + #define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010 + #define HAL_REO1_QDESC_ADDR(hal) ((hal)->regs->reo1_qdesc_addr) + #define HAL_REO1_QDESC_MAX_PEERID(hal) ((hal)->regs->reo1_qdesc_max_peerid) + #define HAL_REO1_SW_COOKIE_CFG0(hal) ((hal)->regs->reo1_sw_cookie_cfg0) + #define HAL_REO1_SW_COOKIE_CFG1(hal) ((hal)->regs->reo1_sw_cookie_cfg1) + #define HAL_REO1_QDESC_LUT_BASE0(hal) ((hal)->regs->reo1_qdesc_lut_base0) + #define HAL_REO1_QDESC_LUT_BASE1(hal) ((hal)->regs->reo1_qdesc_lut_base1) + #define HAL_REO1_RING_BASE_LSB(hal) ((hal)->regs->reo1_ring_base_lsb) + #define HAL_REO1_RING_BASE_MSB(hal) ((hal)->regs->reo1_ring_base_msb) + #define HAL_REO1_RING_ID(hal) ((hal)->regs->reo1_ring_id) + #define HAL_REO1_RING_MISC(hal) ((hal)->regs->reo1_ring_misc) + #define HAL_REO1_RING_HP_ADDR_LSB(hal) ((hal)->regs->reo1_ring_hp_addr_lsb) + #define HAL_REO1_RING_HP_ADDR_MSB(hal) ((hal)->regs->reo1_ring_hp_addr_msb) + #define HAL_REO1_RING_PRODUCER_INT_SETUP(hal) \ + ((hal)->regs->reo1_ring_producer_int_setup) + #define HAL_REO1_RING_MSI1_BASE_LSB(hal) \ + ((hal)->regs->reo1_ring_msi1_base_lsb) + #define HAL_REO1_RING_MSI1_BASE_MSB(hal) \ + ((hal)->regs->reo1_ring_msi1_base_msb) + #define HAL_REO1_RING_MSI1_DATA(hal) ((hal)->regs->reo1_ring_msi1_data) + #define HAL_REO2_RING_BASE_LSB(hal) ((hal)->regs->reo2_ring_base) + #define HAL_REO1_AGING_THRESH_IX_0(hal) ((hal)->regs->reo1_aging_thres_ix0) + #define HAL_REO1_AGING_THRESH_IX_1(hal) ((hal)->regs->reo1_aging_thres_ix1) + #define HAL_REO1_AGING_THRESH_IX_2(hal) ((hal)->regs->reo1_aging_thres_ix2) + #define HAL_REO1_AGING_THRESH_IX_3(hal) ((hal)->regs->reo1_aging_thres_ix3) + + /* REO2SW(x) R2 ring pointers (head/tail) address */ + #define HAL_REO1_RING_HP 0x00003048 + #define HAL_REO1_RING_TP 0x0000304c + #define HAL_REO2_RING_HP 0x00003050 + + #define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP) + + /* REO2SW0 ring configuration address */ + #define HAL_REO_SW0_RING_BASE_LSB(hal) \ + ((hal)->regs->reo2_sw0_ring_base) + + /* REO2SW0 R2 ring pointer (head/tail) 
address */ + #define HAL_REO_SW0_RING_HP 0x00003088 + + /* REO CMD R0 address */ + #define HAL_REO_CMD_RING_BASE_LSB(hal) \ + ((hal)->regs->reo_cmd_ring_base) + + /* REO CMD R2 address */ + #define HAL_REO_CMD_HP 0x00003020 + + /* SW2REO R0 address */ + #define HAL_SW2REO_RING_BASE_LSB(hal) \ + ((hal)->regs->sw2reo_ring_base) + #define HAL_SW2REO1_RING_BASE_LSB(hal) \ + ((hal)->regs->sw2reo1_ring_base) + + /* SW2REO R2 address */ + #define HAL_SW2REO_RING_HP 0x00003028 + #define HAL_SW2REO1_RING_HP 0x00003030 + + /* CE ring R0 address */ + #define HAL_CE_SRC_RING_BASE_LSB 0x00000000 + #define HAL_CE_DST_RING_BASE_LSB 0x00000000 + #define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058 + #define HAL_CE_DST_RING_CTRL 0x000000b0 + + /* CE ring R2 address */ + #define HAL_CE_DST_RING_HP 0x00000400 + #define HAL_CE_DST_STATUS_RING_HP 0x00000408 + + /* REO status address */ + #define HAL_REO_STATUS_RING_BASE_LSB(hal) \ + ((hal)->regs->reo_status_ring_base) + #define HAL_REO_STATUS_HP 0x000030a8 + + /* WBM Idle R0 address */ + #define HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm_idle_ring_base_lsb) + #define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(hal) \ + ((hal)->regs->wbm_idle_ring_misc_addr) + #define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(hal) \ + ((hal)->regs->wbm_r0_idle_list_cntl_addr) + #define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(hal) \ + ((hal)->regs->wbm_r0_idle_list_size_addr) + #define HAL_WBM_SCATTERED_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm_scattered_ring_base_lsb) + #define HAL_WBM_SCATTERED_RING_BASE_MSB(hal) \ + ((hal)->regs->wbm_scattered_ring_base_msb) + #define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(hal) \ + ((hal)->regs->wbm_scattered_desc_head_info_ix0) + #define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(hal) \ + ((hal)->regs->wbm_scattered_desc_head_info_ix1) + #define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(hal) \ + ((hal)->regs->wbm_scattered_desc_tail_info_ix0) + #define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(hal) \ + ((hal)->regs->wbm_scattered_desc_tail_info_ix1) + #define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(hal) \ + ((hal)->regs->wbm_scattered_desc_ptr_hp_addr) + + /* WBM Idle R2 address */ + #define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8 + + /* SW2WBM R0 release address */ + #define HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm_sw_release_ring_base_lsb) + #define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm_sw1_release_ring_base_lsb) + + /* SW2WBM R2 release address */ + #define HAL_WBM_SW_RELEASE_RING_HP 0x00003010 + #define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018 + + /* WBM2SW R0 release address */ + #define HAL_WBM0_RELEASE_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm0_release_ring_base_lsb) + + #define HAL_WBM1_RELEASE_RING_BASE_LSB(hal) \ + ((hal)->regs->wbm1_release_ring_base_lsb) + + /* WBM2SW R2 release address */ + #define HAL_WBM0_RELEASE_RING_HP 0x000030c8 + #define HAL_WBM1_RELEASE_RING_HP 0x000030d0 + + /* WBM cookie config address and mask */ + #define HAL_WBM_SW_COOKIE_CFG0 0x00000040 + #define HAL_WBM_SW_COOKIE_CFG1 0x00000044 + #define HAL_WBM_SW_COOKIE_CFG2 0x00000090 + #define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094 + + #define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0) + #define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8) + #define HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13) + #define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18) + #define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0) + #define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1) + #define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3) + + #define 
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1) + #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2) + #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3) + #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4) + #define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5) + #define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8) + + /* TCL ring field mask and offset */ + #define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8) + #define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0) + #define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0) + #define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0) + #define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1) + #define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3) + #define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4) + #define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5) + #define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6) + #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16) + #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0) + #define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0) + #define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8) + #define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0) + #define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18) + #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21) + + /* REO ring field mask and offset */ + #define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8) + #define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0) + #define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8) + #define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0) + #define HAL_REO1_RING_MISC_MSI_SWAP BIT(3) + #define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4) + #define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5) + #define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6) + #define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16) + #define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0) + #define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8) + #define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0) + #define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17) + #define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21) + #define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2) + #define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3) + #define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0) + #define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8) + #define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13) + #define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18) + #define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19) + #define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20) + #define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE BIT(7) + #define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY BIT(6) + + /* CE ring bit field mask and shift */ + #define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0) + + #define HAL_ADDR_LSB_REG_MASK 0xffffffff + + #define HAL_ADDR_MSB_REG_SHIFT 32 + + /* WBM ring bit field mask and shift */ + #define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1) + #define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2) + #define 
HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16) + #define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0) + #define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8) + + #define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8) + #define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8) + + #define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6) + #define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0) + + #define BASE_ADDR_MATCH_TAG_VAL 0x5 + + #define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff + #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff + #define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff + #define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff + #define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff + + #define HAL_WBM2SW_REL_ERR_RING_NUM 3 + /* Add any other errors here and return them in + * ath12k_hal_rx_desc_get_err(). + */ + + #define HAL_IPQ5332_CE_WFSS_REG_BASE 0x740000 + #define HAL_IPQ5332_CE_SIZE 0x100000 + + #define HAL_RX_MAX_BA_WINDOW 256 + + #define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000) + #define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000) + + #define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) + #define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10) + + #define HAL_SRNG_DESC_LOOP_CNT 0xf0000000 + + #define HAL_REO_CMD_FLG_NEED_STATUS BIT(0) + #define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1) + #define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2) + #define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3) + #define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4) + #define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5) + #define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6) + #define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7) + #define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8) ++#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9) + + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */ + #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8) + #define HAL_REO_CMD_UPD0_VLD BIT(9) + #define HAL_REO_CMD_UPD0_ALDC BIT(10) + #define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11) + #define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12) + #define HAL_REO_CMD_UPD0_AC BIT(13) + #define HAL_REO_CMD_UPD0_BAR BIT(14) + #define HAL_REO_CMD_UPD0_RETRY BIT(15) + #define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16) + #define HAL_REO_CMD_UPD0_OOR_MODE BIT(17) + #define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18) + #define HAL_REO_CMD_UPD0_PN_CHECK BIT(19) + #define HAL_REO_CMD_UPD0_EVEN_PN BIT(20) + #define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21) + #define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22) + #define HAL_REO_CMD_UPD0_PN_SIZE BIT(23) + #define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24) + #define HAL_REO_CMD_UPD0_SVLD BIT(25) + #define HAL_REO_CMD_UPD0_SSN BIT(26) + #define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27) + #define HAL_REO_CMD_UPD0_PN_ERR BIT(28) + #define HAL_REO_CMD_UPD0_PN_VALID 
BIT(29) + #define HAL_REO_CMD_UPD0_PN BIT(30) + + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */ + #define HAL_REO_CMD_UPD1_VLD BIT(16) + #define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17) + #define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19) + #define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20) + #define HAL_REO_CMD_UPD1_AC GENMASK(22, 21) + #define HAL_REO_CMD_UPD1_BAR BIT(23) + #define HAL_REO_CMD_UPD1_RETRY BIT(24) + #define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25) + #define HAL_REO_CMD_UPD1_OOR_MODE BIT(26) + #define HAL_REO_CMD_UPD1_PN_CHECK BIT(27) + #define HAL_REO_CMD_UPD1_EVEN_PN BIT(28) + #define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29) + #define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30) + #define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31) + + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */ + #define HAL_REO_CMD_UPD2_SVLD BIT(10) + #define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11) + #define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23) + #define HAL_REO_CMD_UPD2_PN_ERR BIT(24) + + struct hal_reo_status_queue_stats { + u16 ssn; + u16 curr_idx; + u32 pn[4]; + u32 last_rx_queue_ts; + u32 last_rx_dequeue_ts; + u32 rx_bitmap[8]; /* Bitmap from 0-255 */ + u32 curr_mpdu_cnt; + u32 curr_msdu_cnt; + u16 fwd_due_to_bar_cnt; + u16 dup_cnt; + u32 frames_in_order_cnt; + u32 num_mpdu_processed_cnt; + u32 num_msdu_processed_cnt; + u32 total_num_processed_byte_cnt; + u32 late_rx_mpdu_cnt; + u32 reorder_hole_cnt; + u8 timeout_cnt; + u8 bar_rx_cnt; + u8 num_window_2k_jump_cnt; + }; + + struct hal_reo_status_flush_queue { + bool err_detected; + }; + + enum hal_reo_status_flush_cache_err_code { + HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS, + HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE, + HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND, + }; + + struct hal_reo_status_flush_cache { + bool err_detected; + enum hal_reo_status_flush_cache_err_code err_code; + bool cache_controller_flush_status_hit; + u8 cache_controller_flush_status_desc_type; + u8 cache_controller_flush_status_client_id; + u8 cache_controller_flush_status_err; + u8 cache_controller_flush_status_cnt; + }; + + enum hal_reo_status_unblock_cache_type { + HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE, + HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE, + }; + + struct hal_reo_status_unblock_cache { + bool err_detected; + enum hal_reo_status_unblock_cache_type unblock_type; + }; + + struct hal_reo_status_flush_timeout_list { + bool err_detected; + bool list_empty; + u16 release_desc_cnt; + u16 fwd_buf_cnt; + }; + + enum hal_reo_threshold_idx { + HAL_REO_THRESHOLD_IDX_DESC_COUNTER0, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER1, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER2, + HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM, + }; + + struct hal_reo_status_desc_thresh_reached { + enum hal_reo_threshold_idx threshold_idx; + u32 link_desc_counter0; + u32 link_desc_counter1; + u32 link_desc_counter2; + u32 link_desc_counter_sum; + }; + + struct hal_reo_status { + struct hal_reo_status_header uniform_hdr; + u8 loop_cnt; + union { + struct hal_reo_status_queue_stats queue_stats; + struct hal_reo_status_flush_queue flush_queue; + struct hal_reo_status_flush_cache flush_cache; + struct hal_reo_status_unblock_cache unblock_cache; + struct hal_reo_status_flush_timeout_list timeout_list; + struct hal_reo_status_desc_thresh_reached desc_thresh_reached; + } u; + }; + + int ath12k_wifi7_hal_init(struct ath12k_base *ab); + void ath12k_wifi7_hal_ce_dst_setup(struct ath12k_base *ab, + struct hal_srng *srng, int ring_num); + void ath12k_wifi7_hal_srng_dst_hw_init(struct ath12k_base *ab, + 
struct hal_srng *srng); + void ath12k_wifi7_hal_srng_src_hw_init(struct ath12k_base *ab, + struct hal_srng *srng); + void ath12k_wifi7_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab, + struct hal_srng *srng); + int ath12k_wifi7_hal_srng_update_shadow_config(struct ath12k_base *ab, + enum hal_ring_type ring_type, + int ring_num); + int ath12k_wifi7_hal_srng_get_ring_id(struct ath12k_hal *hal, + enum hal_ring_type type, + int ring_num, int mac_id); + u32 ath12k_wifi7_hal_ce_get_desc_size(enum hal_ce_desc type); + void ath12k_wifi7_hal_cc_config(struct ath12k_base *ab); + enum hal_rx_buf_return_buf_manager + ath12k_wifi7_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id); + void ath12k_wifi7_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, + dma_addr_t paddr, + u32 len, u32 id, u8 byte_swap_data); + void ath12k_wifi7_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, + dma_addr_t paddr); + void + ath12k_wifi7_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, + u32 cookie, dma_addr_t paddr, + enum hal_rx_buf_return_buf_manager rbm); + u32 + ath12k_wifi7_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc); + void + ath12k_wifi7_hal_setup_link_idle_list(struct ath12k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset); + void ath12k_wifi7_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab); + void ath12k_wifi7_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab); + void ath12k_wifi7_hal_write_reoq_lut_addr(struct ath12k_base *ab, + dma_addr_t paddr); + void ath12k_wifi7_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab, + dma_addr_t paddr); + u32 ath12k_wifi7_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); + #endif diff --cc drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c index 0000000000000,4b6f43389b2d9..c129e937132b4 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_qcn9274.c @@@ -1,0 -1,1031 +1,1032 @@@ + // SPDX-License-Identifier: BSD-3-Clause-Clear + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + #include "hal_desc.h" + #include "hal_qcn9274.h" + #include "hw.h" + #include "hal.h" + #include "hal_tx.h" + + static const struct hal_srng_config hw_srng_config_template[] = { + /* TODO: max_rings can populated by querying HW capabilities */ + [HAL_REO_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, + .max_rings = 8, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_EXCEPTION] = { + /* Designating REO2SW0 ring as exception ring. + * Any of theREO2SW rings can be used as exception ring. 
+ */ + .start_ring_id = HAL_SRNG_RING_ID_REO2SW0, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_REINJECT] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2REO, + .max_rings = 4, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_DATA] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, + .max_rings = 6, + .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, + .max_rings = 1, + .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_status_ring)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_SRC] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM_IDLE_LINK] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, + }, + [HAL_SW2WBM_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE, + .max_rings = 2, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM2SW_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, + .max_rings = 8, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = 
ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_RXDMA_BUF] = { + .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_DMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, + .max_rings = 0, + .entry_size = 0, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_BUF] = { + .start_ring_id = HAL_SRNG_SW2RXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_DESC] = { 0, }, + [HAL_RXDMA_DIR_BUF] = { + .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, + .max_rings = 2, + .entry_size = 8 >> 2, /* TODO: Define the struct */ + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_PPE2TCL] = { + .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1, + .max_rings = 1, + .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_PPE_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TX_MONITOR_BUF] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_TX_MONITOR_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_mon_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + } + }; + + const struct ath12k_hw_regs qcn9274_v1_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .tcl1_ring_id = 0x00000908, + .tcl1_ring_misc = 0x00000910, + .tcl1_ring_tp_addr_lsb = 0x0000091c, + .tcl1_ring_tp_addr_msb = 0x00000920, + .tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .tcl1_ring_msi1_base_lsb = 0x00000948, + .tcl1_ring_msi1_base_msb = 0x0000094c, + .tcl1_ring_msi1_data = 0x00000950, + .tcl_ring_base_lsb = 0x00000b58, + .tcl1_ring_base_lsb = 0x00000900, + .tcl1_ring_base_msb = 0x00000904, + .tcl2_ring_base_lsb = 0x00000978, + + /* TCL STATUS ring address */ + .tcl_status_ring_base_lsb = 0x00000d38, + + .wbm_idle_ring_base_lsb = 0x00000d0c, + 
.wbm_idle_ring_misc_addr = 0x00000d1c, + .wbm_r0_idle_list_cntl_addr = 0x00000210, + .wbm_r0_idle_list_size_addr = 0x00000214, + .wbm_scattered_ring_base_lsb = 0x00000220, + .wbm_scattered_ring_base_msb = 0x00000224, + .wbm_scattered_desc_head_info_ix0 = 0x00000230, + .wbm_scattered_desc_head_info_ix1 = 0x00000234, + .wbm_scattered_desc_tail_info_ix0 = 0x00000240, + .wbm_scattered_desc_tail_info_ix1 = 0x00000244, + .wbm_scattered_desc_ptr_hp_addr = 0x0000024c, + + .wbm_sw_release_ring_base_lsb = 0x0000034c, + .wbm_sw1_release_ring_base_lsb = 0x000003c4, + .wbm0_release_ring_base_lsb = 0x00000dd8, + .wbm1_release_ring_base_lsb = 0x00000e50, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0d45c, + + /* PPE release ring address */ + .ppe_rel_ring_base = 0x0000043c, + + /* REO DEST ring address */ + .reo2_ring_base = 0x0000055c, + .reo1_misc_ctrl_addr = 0x00000b7c, + .reo1_sw_cookie_cfg0 = 0x00000050, + .reo1_sw_cookie_cfg1 = 0x00000054, + .reo1_qdesc_lut_base0 = 0x00000058, + .reo1_qdesc_lut_base1 = 0x0000005c, + .reo1_ring_base_lsb = 0x000004e4, + .reo1_ring_base_msb = 0x000004e8, + .reo1_ring_id = 0x000004ec, + .reo1_ring_misc = 0x000004f4, + .reo1_ring_hp_addr_lsb = 0x000004f8, + .reo1_ring_hp_addr_msb = 0x000004fc, + .reo1_ring_producer_int_setup = 0x00000508, + .reo1_ring_msi1_base_lsb = 0x0000052C, + .reo1_ring_msi1_base_msb = 0x00000530, + .reo1_ring_msi1_data = 0x00000534, + .reo1_aging_thres_ix0 = 0x00000b08, + .reo1_aging_thres_ix1 = 0x00000b0c, + .reo1_aging_thres_ix2 = 0x00000b10, + .reo1_aging_thres_ix3 = 0x00000b14, + + /* REO Exception ring address */ + .reo2_sw0_ring_base = 0x000008a4, + + /* REO Reinject ring address */ + .sw2reo_ring_base = 0x00000304, + .sw2reo1_ring_base = 0x0000037c, + + /* REO cmd ring address */ + .reo_cmd_ring_base = 0x0000028c, + + /* REO status ring address */ + .reo_status_ring_base = 0x00000a84, + + /* CE base address */ + .umac_ce0_src_reg_base = 0x01b80000, + .umac_ce0_dest_reg_base = 0x01b81000, + .umac_ce1_src_reg_base = 0x01b82000, + .umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e38338, + }; + + const struct ath12k_hw_regs qcn9274_v2_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .tcl1_ring_id = 0x00000908, + .tcl1_ring_misc = 0x00000910, + .tcl1_ring_tp_addr_lsb = 0x0000091c, + .tcl1_ring_tp_addr_msb = 0x00000920, + .tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .tcl1_ring_msi1_base_lsb = 0x00000948, + .tcl1_ring_msi1_base_msb = 0x0000094c, + .tcl1_ring_msi1_data = 0x00000950, + .tcl_ring_base_lsb = 0x00000b58, + .tcl1_ring_base_lsb = 0x00000900, + .tcl1_ring_base_msb = 0x00000904, + .tcl2_ring_base_lsb = 0x00000978, + + /* TCL STATUS ring address */ + .tcl_status_ring_base_lsb = 0x00000d38, + + /* WBM idle link ring address */ + .wbm_idle_ring_base_lsb = 0x00000d3c, + .wbm_idle_ring_misc_addr = 0x00000d4c, + .wbm_r0_idle_list_cntl_addr = 0x00000240, + .wbm_r0_idle_list_size_addr = 0x00000244, + .wbm_scattered_ring_base_lsb = 0x00000250, + .wbm_scattered_ring_base_msb = 0x00000254, + .wbm_scattered_desc_head_info_ix0 = 0x00000260, + .wbm_scattered_desc_head_info_ix1 = 0x00000264, + .wbm_scattered_desc_tail_info_ix0 = 0x00000270, + .wbm_scattered_desc_tail_info_ix1 = 0x00000274, + .wbm_scattered_desc_ptr_hp_addr = 0x0000027c, + + /* SW2WBM release ring address */ + .wbm_sw_release_ring_base_lsb = 0x0000037c, + .wbm_sw1_release_ring_base_lsb = 0x000003f4, + + /* WBM2SW release ring address */ + 
.wbm0_release_ring_base_lsb = 0x00000e08, + .wbm1_release_ring_base_lsb = 0x00000e80, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0d45c, + + /* PPE release ring address */ + .ppe_rel_ring_base = 0x0000046c, + + /* REO DEST ring address */ + .reo2_ring_base = 0x00000578, + .reo1_misc_ctrl_addr = 0x00000b9c, + .reo1_sw_cookie_cfg0 = 0x0000006c, + .reo1_sw_cookie_cfg1 = 0x00000070, + .reo1_qdesc_lut_base0 = 0x00000074, + .reo1_qdesc_lut_base1 = 0x00000078, + .reo1_qdesc_addr = 0x0000007c, + .reo1_qdesc_max_peerid = 0x00000088, + .reo1_ring_base_lsb = 0x00000500, + .reo1_ring_base_msb = 0x00000504, + .reo1_ring_id = 0x00000508, + .reo1_ring_misc = 0x00000510, + .reo1_ring_hp_addr_lsb = 0x00000514, + .reo1_ring_hp_addr_msb = 0x00000518, + .reo1_ring_producer_int_setup = 0x00000524, + .reo1_ring_msi1_base_lsb = 0x00000548, + .reo1_ring_msi1_base_msb = 0x0000054C, + .reo1_ring_msi1_data = 0x00000550, + .reo1_aging_thres_ix0 = 0x00000B28, + .reo1_aging_thres_ix1 = 0x00000B2C, + .reo1_aging_thres_ix2 = 0x00000B30, + .reo1_aging_thres_ix3 = 0x00000B34, + + /* REO Exception ring address */ + .reo2_sw0_ring_base = 0x000008c0, + + /* REO Reinject ring address */ + .sw2reo_ring_base = 0x00000320, + .sw2reo1_ring_base = 0x00000398, + + /* REO cmd ring address */ + .reo_cmd_ring_base = 0x000002A8, + + /* REO status ring address */ + .reo_status_ring_base = 0x00000aa0, + + /* CE base address */ + .umac_ce0_src_reg_base = 0x01b80000, + .umac_ce0_dest_reg_base = 0x01b81000, + .umac_ce1_src_reg_base = 0x01b82000, + .umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e38338, + }; + + const struct ath12k_hw_regs ipq5332_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .tcl1_ring_id = 0x00000918, + .tcl1_ring_misc = 0x00000920, + .tcl1_ring_tp_addr_lsb = 0x0000092c, + .tcl1_ring_tp_addr_msb = 0x00000930, + .tcl1_ring_consumer_int_setup_ix0 = 0x00000940, + .tcl1_ring_consumer_int_setup_ix1 = 0x00000944, + .tcl1_ring_msi1_base_lsb = 0x00000958, + .tcl1_ring_msi1_base_msb = 0x0000095c, + .tcl1_ring_base_lsb = 0x00000910, + .tcl1_ring_base_msb = 0x00000914, + .tcl1_ring_msi1_data = 0x00000960, + .tcl2_ring_base_lsb = 0x00000988, + .tcl_ring_base_lsb = 0x00000b68, + + /* TCL STATUS ring address */ + .tcl_status_ring_base_lsb = 0x00000d48, + + /* REO DEST ring address */ + .reo2_ring_base = 0x00000578, + .reo1_misc_ctrl_addr = 0x00000b9c, + .reo1_sw_cookie_cfg0 = 0x0000006c, + .reo1_sw_cookie_cfg1 = 0x00000070, + .reo1_qdesc_lut_base0 = 0x00000074, + .reo1_qdesc_lut_base1 = 0x00000078, + .reo1_ring_base_lsb = 0x00000500, + .reo1_ring_base_msb = 0x00000504, + .reo1_ring_id = 0x00000508, + .reo1_ring_misc = 0x00000510, + .reo1_ring_hp_addr_lsb = 0x00000514, + .reo1_ring_hp_addr_msb = 0x00000518, + .reo1_ring_producer_int_setup = 0x00000524, + .reo1_ring_msi1_base_lsb = 0x00000548, + .reo1_ring_msi1_base_msb = 0x0000054C, + .reo1_ring_msi1_data = 0x00000550, + .reo1_aging_thres_ix0 = 0x00000B28, + .reo1_aging_thres_ix1 = 0x00000B2C, + .reo1_aging_thres_ix2 = 0x00000B30, + .reo1_aging_thres_ix3 = 0x00000B34, + + /* REO Exception ring address */ + .reo2_sw0_ring_base = 0x000008c0, + + /* REO Reinject ring address */ + .sw2reo_ring_base = 0x00000320, + .sw2reo1_ring_base = 0x00000398, + + /* REO cmd ring address */ + .reo_cmd_ring_base = 0x000002A8, + + /* REO status ring address */ + .reo_status_ring_base = 0x00000aa0, + + /* WBM idle link ring address */ + .wbm_idle_ring_base_lsb = 0x00000d3c, + .wbm_idle_ring_misc_addr = 
0x00000d4c, + .wbm_r0_idle_list_cntl_addr = 0x00000240, + .wbm_r0_idle_list_size_addr = 0x00000244, + .wbm_scattered_ring_base_lsb = 0x00000250, + .wbm_scattered_ring_base_msb = 0x00000254, + .wbm_scattered_desc_head_info_ix0 = 0x00000260, + .wbm_scattered_desc_head_info_ix1 = 0x00000264, + .wbm_scattered_desc_tail_info_ix0 = 0x00000270, + .wbm_scattered_desc_tail_info_ix1 = 0x00000274, + .wbm_scattered_desc_ptr_hp_addr = 0x0000027c, + + /* SW2WBM release ring address */ + .wbm_sw_release_ring_base_lsb = 0x0000037c, + + /* WBM2SW release ring address */ + .wbm0_release_ring_base_lsb = 0x00000e08, + .wbm1_release_ring_base_lsb = 0x00000e80, + + /* PPE release ring address */ + .ppe_rel_ring_base = 0x0000046c, + + /* CE address */ + .umac_ce0_src_reg_base = 0x00740000 - + HAL_IPQ5332_CE_WFSS_REG_BASE, + .umac_ce0_dest_reg_base = 0x00741000 - + HAL_IPQ5332_CE_WFSS_REG_BASE, + .umac_ce1_src_reg_base = 0x00742000 - + HAL_IPQ5332_CE_WFSS_REG_BASE, + .umac_ce1_dest_reg_base = 0x00743000 - + HAL_IPQ5332_CE_WFSS_REG_BASE, + }; + + static inline + bool ath12k_hal_rx_desc_get_first_msdu_qcn9274(struct hal_rx_desc *desc) + { + return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_FIRST_MSDU); + } + + static inline + bool ath12k_hal_rx_desc_get_last_msdu_qcn9274(struct hal_rx_desc *desc) + { + return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_LAST_MSDU); + } + + u8 ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274(struct hal_rx_desc *desc) + { + return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_L3_HDR_PADDING); + } + + static inline + bool ath12k_hal_rx_desc_encrypt_valid_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); + } + + static inline + u32 ath12k_hal_rx_desc_get_encrypt_type_qcn9274(struct hal_rx_desc *desc) + { + if (!ath12k_hal_rx_desc_encrypt_valid_qcn9274(desc)) + return HAL_ENCRYPT_TYPE_OPEN; + + return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2, + RX_MPDU_START_INFO2_ENC_TYPE); + } + + static inline + u8 ath12k_hal_rx_desc_get_decap_type_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11, + RX_MSDU_END_INFO11_DECAP_FORMAT); + } + + static inline + u8 ath12k_hal_rx_desc_get_mesh_ctl_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11, + RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); + } + + static inline + bool ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); + } + + static inline + bool ath12k_hal_rx_desc_get_mpdu_fc_valid_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); + } + + static inline + u16 ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_NUM); + } + + static inline + u16 ath12k_hal_rx_desc_get_msdu_len_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10, + RX_MSDU_END_INFO10_MSDU_LENGTH); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_sgi_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_SGI); + } + + static 
inline + u8 ath12k_hal_rx_desc_get_msdu_rate_mcs_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_RATE_MCS); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_rx_bw_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_RECV_BW); + } + + static inline + u32 ath12k_hal_rx_desc_get_msdu_freq_qcn9274(struct hal_rx_desc *desc) + { + return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_pkt_type_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_PKT_TYPE); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_nss_qcn9274(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + } + + static inline + u8 ath12k_hal_rx_desc_get_mpdu_tid_qcn9274(struct hal_rx_desc *desc) + { + return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, + RX_MSDU_END_INFO5_TID); + } + + static inline + u16 ath12k_hal_rx_desc_get_mpdu_peer_id_qcn9274(struct hal_rx_desc *desc) + { + return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id); + } + + void ath12k_hal_rx_desc_copy_end_tlv_qcn9274(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) + { + fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end; + } + + u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcn9274(struct hal_rx_desc *desc) + { + return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id); + } + + void ath12k_hal_rx_desc_set_msdu_len_qcn9274(struct hal_rx_desc *desc, u16 len) + { + u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10); + + info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH); + desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info); + } + + u8 *ath12k_hal_rx_desc_get_msdu_payload_qcn9274(struct hal_rx_desc *desc) + { + return &desc->u.qcn9274_compact.msdu_payload[0]; + } + + u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcn9274(void) + { + return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start); + } + + u32 ath12k_hal_rx_desc_get_msdu_end_offset_qcn9274(void) + { + return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end); + } + + static inline + bool ath12k_hal_rx_desc_mac_addr2_valid_qcn9274(struct hal_rx_desc *desc) + { + return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR2_VALID; + } + + static inline + u8 *ath12k_hal_rx_desc_mpdu_start_addr2_qcn9274(struct hal_rx_desc *desc) + { + return desc->u.qcn9274_compact.mpdu_start.addr2; + } + + static inline + bool ath12k_hal_rx_desc_is_da_mcbc_qcn9274(struct hal_rx_desc *desc) + { + return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) & + RX_MSDU_END_INFO5_DA_IS_MCBC; + } + + static inline + bool ath12k_hal_rx_h_msdu_done_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, + RX_MSDU_END_INFO14_MSDU_DONE); + } + + static inline + bool ath12k_hal_rx_h_l4_cksum_fail_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, + RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); + } + + static inline + bool ath12k_hal_rx_h_ip_cksum_fail_qcn9274(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13, + RX_MSDU_END_INFO13_IP_CKSUM_FAIL); + } + + static inline + bool 
ath12k_hal_rx_h_is_decrypted_qcn9274(struct hal_rx_desc *desc) + { + return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14, + RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == + RX_DESC_DECRYPT_STATUS_CODE_OK); + } + + u32 ath12k_hal_get_rx_desc_size_qcn9274(void) + { + return sizeof(struct hal_rx_desc_qcn9274_compact); + } + + u8 ath12k_hal_rx_desc_get_msdu_src_link_qcn9274(struct hal_rx_desc *desc) + { + return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag, + RX_MSDU_END_64_TLV_SRC_LINK_ID); + } + + u16 ath12k_hal_rx_mpdu_start_wmask_get_qcn9274(void) + { + return QCN9274_MPDU_START_WMASK; + } + + u32 ath12k_hal_rx_msdu_end_wmask_get_qcn9274(void) + { + return QCN9274_MSDU_END_WMASK; + } + + static u32 ath12k_hal_rx_h_mpdu_err_qcn9274(struct hal_rx_desc *desc) + { + u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13); + u32 errmap = 0; + + if (info & RX_MSDU_END_INFO13_FCS_ERR) + errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + + return errmap; + } + + void ath12k_hal_rx_desc_get_crypto_hdr_qcn9274(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) + { + unsigned int key_id; + + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[1] = 0; + crypto_hdr[2] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); + break; + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[1] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[2] = 0; + break; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + return; + } + key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5, + RX_MPDU_START_INFO5_KEY_ID); + crypto_hdr[3] = 0x20 | (key_id << 6); + crypto_hdr[4] = + HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[5] = + HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]); + crypto_hdr[6] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]); + crypto_hdr[7] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]); + } + + void ath12k_hal_rx_desc_get_dot11_hdr_qcn9274(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) + { + hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl; + hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration; + ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1); + ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2); + ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3); + if 
(__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { + ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4); + } + hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl; + } + + void ath12k_hal_extract_rx_desc_data_qcn9274(struct hal_rx_desc_data *rx_desc_data, + struct hal_rx_desc *rx_desc, + struct hal_rx_desc *ldesc) + { + rx_desc_data->is_first_msdu = ath12k_hal_rx_desc_get_first_msdu_qcn9274(ldesc); + rx_desc_data->is_last_msdu = ath12k_hal_rx_desc_get_last_msdu_qcn9274(ldesc); + rx_desc_data->l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274(ldesc); + rx_desc_data->enctype = ath12k_hal_rx_desc_get_encrypt_type_qcn9274(rx_desc); + rx_desc_data->decap_type = ath12k_hal_rx_desc_get_decap_type_qcn9274(rx_desc); + rx_desc_data->mesh_ctrl_present = + ath12k_hal_rx_desc_get_mesh_ctl_qcn9274(rx_desc); + rx_desc_data->seq_ctl_valid = + ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_qcn9274(rx_desc); + rx_desc_data->fc_valid = ath12k_hal_rx_desc_get_mpdu_fc_valid_qcn9274(rx_desc); + rx_desc_data->seq_no = ath12k_hal_rx_desc_get_mpdu_start_seq_no_qcn9274(rx_desc); + rx_desc_data->msdu_len = ath12k_hal_rx_desc_get_msdu_len_qcn9274(ldesc); + rx_desc_data->sgi = ath12k_hal_rx_desc_get_msdu_sgi_qcn9274(rx_desc); + rx_desc_data->rate_mcs = ath12k_hal_rx_desc_get_msdu_rate_mcs_qcn9274(rx_desc); + rx_desc_data->bw = ath12k_hal_rx_desc_get_msdu_rx_bw_qcn9274(rx_desc); + rx_desc_data->phy_meta_data = ath12k_hal_rx_desc_get_msdu_freq_qcn9274(rx_desc); + rx_desc_data->pkt_type = ath12k_hal_rx_desc_get_msdu_pkt_type_qcn9274(rx_desc); + rx_desc_data->nss = hweight8(ath12k_hal_rx_desc_get_msdu_nss_qcn9274(rx_desc)); + rx_desc_data->tid = ath12k_hal_rx_desc_get_mpdu_tid_qcn9274(rx_desc); + rx_desc_data->peer_id = ath12k_hal_rx_desc_get_mpdu_peer_id_qcn9274(rx_desc); + rx_desc_data->addr2_present = ath12k_hal_rx_desc_mac_addr2_valid_qcn9274(rx_desc); + rx_desc_data->addr2 = ath12k_hal_rx_desc_mpdu_start_addr2_qcn9274(rx_desc); + rx_desc_data->is_mcbc = ath12k_hal_rx_desc_is_da_mcbc_qcn9274(rx_desc); + rx_desc_data->msdu_done = ath12k_hal_rx_h_msdu_done_qcn9274(ldesc); + rx_desc_data->l4_csum_fail = ath12k_hal_rx_h_l4_cksum_fail_qcn9274(rx_desc); + rx_desc_data->ip_csum_fail = ath12k_hal_rx_h_ip_cksum_fail_qcn9274(rx_desc); + rx_desc_data->is_decrypted = ath12k_hal_rx_h_is_decrypted_qcn9274(rx_desc); + rx_desc_data->err_bitmap = ath12k_hal_rx_h_mpdu_err_qcn9274(rx_desc); + } + + const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM, + .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN, + }; + + const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM, + .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN, + }; + + static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_hal *hal) + { + struct hal_srng_config *s; + + hal->srng_config = kmemdup(hw_srng_config_template, + sizeof(hw_srng_config_template), + GFP_KERNEL); + if (!hal->srng_config) + return -ENOMEM; + + s = &hal->srng_config[HAL_REO_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(hal); + s->reg_start[1] = 
HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; + s->reg_size[0] = HAL_REO2_RING_BASE_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; + + s = &hal->srng_config[HAL_REO_EXCEPTION]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; + + s = &hal->srng_config[HAL_REO_REINJECT]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; + s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(hal) - HAL_SW2REO_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP; + + s = &hal->srng_config[HAL_REO_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; + + s = &hal->srng_config[HAL_REO_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; + + s = &hal->srng_config[HAL_TCL_DATA]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(hal) - HAL_TCL1_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; + + s = &hal->srng_config[HAL_TCL_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; + + s = &hal->srng_config[HAL_TCL_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + + s = &hal->srng_config[HAL_CE_SRC]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal); + + s = &hal->srng_config[HAL_CE_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = + HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(hal) - + HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - 
HAL_WBM_SW_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(hal) - + HAL_WBM0_RELEASE_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + + /* Some LMAC rings are not accessed from the host: + * RXDMA_BUG, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS, + * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC, + * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA + */ + s = &hal->srng_config[HAL_PPE2TCL]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP; + + s = &hal->srng_config[HAL_PPE_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_PPE_RELEASE_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP; + + return 0; + } + + const struct ath12k_hal_tcl_to_wbm_rbm_map + ath12k_hal_tcl_to_wbm_rbm_map_qcn9274[DP_TCL_NUM_RING_MAX] = { + { + .wbm_ring_num = 0, + .rbm_id = HAL_RX_BUF_RBM_SW0_BM, + }, + { + .wbm_ring_num = 1, + .rbm_id = HAL_RX_BUF_RBM_SW1_BM, + }, + { + .wbm_ring_num = 2, + .rbm_id = HAL_RX_BUF_RBM_SW2_BM, + }, + { + .wbm_ring_num = 4, + .rbm_id = HAL_RX_BUF_RBM_SW4_BM, + }, + }; + + const struct hal_ops hal_qcn9274_ops = { + .create_srng_config = ath12k_hal_srng_create_config_qcn9274, + .rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_qcn9274, + .rx_desc_get_dot11_hdr = ath12k_hal_rx_desc_get_dot11_hdr_qcn9274, + .rx_desc_get_crypto_header = ath12k_hal_rx_desc_get_crypto_hdr_qcn9274, + .rx_desc_copy_end_tlv = ath12k_hal_rx_desc_copy_end_tlv_qcn9274, + .rx_desc_get_msdu_src_link_id = ath12k_hal_rx_desc_get_msdu_src_link_qcn9274, + .extract_rx_desc_data = ath12k_hal_extract_rx_desc_data_qcn9274, + .rx_desc_get_l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_qcn9274, + .rx_desc_get_mpdu_ppdu_id = ath12k_hal_rx_desc_get_mpdu_ppdu_id_qcn9274, + .rx_desc_get_msdu_payload = ath12k_hal_rx_desc_get_msdu_payload_qcn9274, + .ce_dst_setup = ath12k_wifi7_hal_ce_dst_setup, + .srng_src_hw_init = ath12k_wifi7_hal_srng_src_hw_init, + .srng_dst_hw_init = ath12k_wifi7_hal_srng_dst_hw_init, + .set_umac_srng_ptr_addr = ath12k_wifi7_hal_set_umac_srng_ptr_addr, + .srng_update_shadow_config = ath12k_wifi7_hal_srng_update_shadow_config, + .srng_get_ring_id = ath12k_wifi7_hal_srng_get_ring_id, + .ce_get_desc_size = ath12k_wifi7_hal_ce_get_desc_size, + .ce_src_set_desc = ath12k_wifi7_hal_ce_src_set_desc, + .ce_dst_set_desc = ath12k_wifi7_hal_ce_dst_set_desc, + .ce_dst_status_get_length = ath12k_wifi7_hal_ce_dst_status_get_length, + .set_link_desc_addr = ath12k_wifi7_hal_set_link_desc_addr, + .tx_set_dscp_tid_map = ath12k_wifi7_hal_tx_set_dscp_tid_map, + .tx_configure_bank_register = + ath12k_wifi7_hal_tx_configure_bank_register, + .reoq_lut_addr_read_enable = ath12k_wifi7_hal_reoq_lut_addr_read_enable, + .reoq_lut_set_max_peerid = ath12k_wifi7_hal_reoq_lut_set_max_peerid, + .write_reoq_lut_addr = ath12k_wifi7_hal_write_reoq_lut_addr, + .write_ml_reoq_lut_addr = ath12k_wifi7_hal_write_ml_reoq_lut_addr, + .setup_link_idle_list = ath12k_wifi7_hal_setup_link_idle_list, + .reo_init_cmd_ring = ath12k_wifi7_hal_reo_init_cmd_ring, + .reo_hw_setup = ath12k_wifi7_hal_reo_hw_setup, ++ .reo_shared_qaddr_cache_clear = ath12k_wifi7_hal_reo_shared_qaddr_cache_clear, + 
.rx_buf_addr_info_set = ath12k_wifi7_hal_rx_buf_addr_info_set, + .rx_buf_addr_info_get = ath12k_wifi7_hal_rx_buf_addr_info_get, + .cc_config = ath12k_wifi7_hal_cc_config, + .get_idle_link_rbm = ath12k_wifi7_hal_get_idle_link_rbm, + .rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get, + .rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get, + }; diff --cc drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c index c4443ca05cd65,9ae6f52249d9a..903fb52a03bfd --- a/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.c @@@ -317,13 -320,14 +323,14 @@@ ath12k_wifi7_hal_rx_msdu_link_info_get( } } - int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, - struct hal_reo_dest_ring *desc, - dma_addr_t *paddr, u32 *desc_bank) + int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp, + struct hal_reo_dest_ring *desc, + dma_addr_t *paddr, u32 *desc_bank) { + struct ath12k_base *ab = dp->ab; enum hal_reo_dest_ring_push_reason push_reason; enum hal_reo_dest_ring_error_code err_code; - u32 cookie, val; + u32 cookie; push_reason = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_PUSH_REASON); @@@ -338,7 -342,14 +345,8 @@@ return -EINVAL; } - ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie); - val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE); - if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) { - ath12k_warn(ab, "expected buffer type link_desc"); - return -EINVAL; - } - + ath12k_wifi7_hal_rx_reo_ent_paddr_get(&desc->buf_addr_info, paddr, + &cookie); *desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK); return 0; @@@ -929,13 -957,15 +954,15 @@@ void ath12k_wifi7_hal_reo_hw_setup(stru ring_hash_map); } --void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab) ++void ath12k_wifi7_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab) { u32 val; + struct ath12k_hal *hal = &ab->hal; + struct ath12k_dp *dp = ath12k_ab_to_dp(ab); - lockdep_assert_held(&ab->base_lock); + lockdep_assert_held(&dp->dp_lock); val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + - HAL_REO1_QDESC_ADDR(ab)); + HAL_REO1_QDESC_ADDR(hal)); val |= u32_encode_bits(1, HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY); ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + diff --cc drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h index d1ad7747b82c4,926fbcd2d8b8e..8a0f4a781d8a2 --- a/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h +++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_rx.h @@@ -1044,128 -810,59 +812,60 @@@ enum hal_mon_reception_type #define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \ (HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz)) - #define RU_INVALID 0 - #define RU_26 1 - #define RU_52 2 - #define RU_106 4 - #define RU_242 9 - #define RU_484 18 - #define RU_996 37 - #define RU_2X996 74 - #define RU_3X996 111 - #define RU_4X996 148 - #define RU_52_26 (RU_52 + RU_26) - #define RU_106_26 (RU_106 + RU_26) - #define RU_484_242 (RU_484 + RU_242) - #define RU_996_484 (RU_996 + RU_484) - #define RU_996_484_242 (RU_996 + RU_484_242) - #define RU_2X996_484 (RU_2X996 + RU_484) - #define RU_3X996_484 (RU_3X996 + RU_484) - - enum ath12k_eht_ru_size { - ATH12K_EHT_RU_26, - ATH12K_EHT_RU_52, - ATH12K_EHT_RU_106, - ATH12K_EHT_RU_242, - ATH12K_EHT_RU_484, - ATH12K_EHT_RU_996, - ATH12K_EHT_RU_996x2, - ATH12K_EHT_RU_996x4, - ATH12K_EHT_RU_52_26, - ATH12K_EHT_RU_106_26, - ATH12K_EHT_RU_484_242, - ATH12K_EHT_RU_996_484, - ATH12K_EHT_RU_996_484_242, - ATH12K_EHT_RU_996x2_484, - ATH12K_EHT_RU_996x3, - 
ATH12K_EHT_RU_996x3_484, - - /* Keep last */ - ATH12K_EHT_RU_INVALID, - }; - - #define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID - - static inline - enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) - { - enum nl80211_he_ru_alloc ret; - - switch (ru_tones) { - case RU_52: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; - break; - case RU_106: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; - break; - case RU_242: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; - break; - case RU_484: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; - break; - case RU_996: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - case RU_2X996: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; - break; - case RU_26: - fallthrough; - default: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; - break; - } - return ret; - } - - void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab, - struct hal_tlv_64_hdr *tlv, - struct hal_reo_status *status); - void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus, - u32 *msdu_cookies, - enum hal_rx_buf_return_buf_manager *rbm); - void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, - struct hal_wbm_release_ring *desc, - struct ath12k_buffer_addr *buf_addr_info, - enum hal_wbm_rel_bm_act action); - void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo, - dma_addr_t paddr, u32 cookie, u8 manager); - void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo, - dma_addr_t *paddr, - u32 *cookie, u8 *rbm); - int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab, - struct hal_reo_dest_ring *desc, - dma_addr_t *paddr, u32 *desc_bank); - int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc, - struct hal_rx_wbm_rel_info *rel_info); - void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab, - struct ath12k_buffer_addr *buff_addr, - dma_addr_t *paddr, u32 *cookie); - void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, - struct ath12k_buffer_addr **pp_buf_addr, - u8 *rbm, u32 *msdu_cnt); - void ath12k_hal_rx_msdu_list_get(struct ath12k *ar, - struct hal_rx_msdu_link *link_desc, - struct hal_rx_msdu_list *msdu_list, - u16 *num_msdus); + void ath12k_wifi7_hal_reo_status_queue_stats(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_reo_flush_queue_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_reo_flush_cache_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_reo_unblk_cache_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void 
ath12k_wifi7_hal_reo_flush_timeout_list_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab, + struct hal_tlv_64_hdr *tlv, + struct hal_reo_status *status); + void ath12k_wifi7_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus, + u32 *msdu_cookies, + enum hal_rx_buf_return_buf_manager *rbm); + void ath12k_wifi7_hal_rx_msdu_link_desc_set(struct ath12k_base *ab, + struct hal_wbm_release_ring *desc, + struct ath12k_buffer_addr *buf_addr_info, + enum hal_wbm_rel_bm_act action); + void ath12k_wifi7_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo, + dma_addr_t paddr, u32 cookie, u8 manager); + void ath12k_wifi7_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo, + dma_addr_t *paddr, + u32 *cookie, u8 *rbm); + int ath12k_wifi7_hal_desc_reo_parse_err(struct ath12k_dp *dp, + struct hal_reo_dest_ring *desc, + dma_addr_t *paddr, u32 *desc_bank); + int ath12k_wifi7_hal_wbm_desc_parse_err(struct ath12k_dp *dp, void *desc, + struct hal_rx_wbm_rel_info *rel_info); + void ath12k_wifi7_hal_rx_reo_ent_paddr_get(struct ath12k_buffer_addr *buff_addr, + dma_addr_t *paddr, u32 *cookie); + void ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, + u32 *sw_cookie, + struct ath12k_buffer_addr **pp_buf_addr, + u8 *rbm, u32 *msdu_cnt); + void ath12k_wifi7_hal_rx_msdu_list_get(struct ath12k *ar, + void *link_desc, + void *msdu_list_opaque, + u16 *num_msdus); + void ath12k_wifi7_hal_reo_init_cmd_ring(struct ath12k_base *ab, + struct hal_srng *srng); ++void ath12k_wifi7_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab); + void ath12k_wifi7_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map); + void ath12k_wifi7_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc, + int tid, u32 ba_window_size, + u32 start_seq, enum hal_pn_type type); #endif diff --cc drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c index 0000000000000,f243bc3ab4097..7108cc41536d1 mode 000000,100644..100644 --- a/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c +++ b/drivers/net/wireless/ath/ath12k/wifi7/hal_wcn7850.c @@@ -1,0 -1,804 +1,805 @@@ + // SPDX-License-Identifier: BSD-3-Clause-Clear + /* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + + #include "hal_desc.h" + #include "hal_wcn7850.h" + #include "hw.h" + #include "hal.h" + #include "hal_tx.h" + + static const struct hal_srng_config hw_srng_config_template[] = { + /* TODO: max_rings can be populated by querying HW capabilities */ + [HAL_REO_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, + .max_rings = 8, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_EXCEPTION] = { + /* Designating REO2SW0 ring as exception ring. + * Any of the REO2SW rings can be used as exception ring.
+ */ + .start_ring_id = HAL_SRNG_RING_ID_REO2SW0, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_REINJECT] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2REO, + .max_rings = 4, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_REO_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_64_hdr) + + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_DATA] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, + .max_rings = 6, + .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_CMD] = { + .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, + .max_rings = 1, + .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, + }, + [HAL_TCL_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_status_ring)) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_SRC] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, + }, + [HAL_CE_DST_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, + .max_rings = 16, + .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM_IDLE_LINK] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, + }, + [HAL_SW2WBM_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE, + .max_rings = 2, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_WBM2SW_RELEASE] = { + .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, + .max_rings = 8, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .mac_type = 
ATH12K_HAL_SRNG_UMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, + }, + [HAL_RXDMA_BUF] = { + .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_DMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_DST] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, + .max_rings = 0, + .entry_size = 0, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_DST, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_BUF] = {}, + [HAL_RXDMA_MONITOR_STATUS] = { + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_RXDMA_MONITOR_DESC] = { 0, }, + [HAL_RXDMA_DIR_BUF] = { + .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, + .max_rings = 2, + .entry_size = 8 >> 2, /* TODO: Define the struct */ + .mac_type = ATH12K_HAL_SRNG_PMAC, + .ring_dir = HAL_SRNG_DIR_SRC, + .max_size = HAL_RXDMA_RING_MAX_SIZE_BE, + }, + [HAL_PPE2TCL] = {}, + [HAL_PPE_RELEASE] = {}, + [HAL_TX_MONITOR_BUF] = {}, + [HAL_RXDMA_MONITOR_DST] = {}, + [HAL_TX_MONITOR_DST] = {} + }; + + const struct ath12k_hw_regs wcn7850_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .tcl1_ring_id = 0x00000908, + .tcl1_ring_misc = 0x00000910, + .tcl1_ring_tp_addr_lsb = 0x0000091c, + .tcl1_ring_tp_addr_msb = 0x00000920, + .tcl1_ring_consumer_int_setup_ix0 = 0x00000930, + .tcl1_ring_consumer_int_setup_ix1 = 0x00000934, + .tcl1_ring_msi1_base_lsb = 0x00000948, + .tcl1_ring_msi1_base_msb = 0x0000094c, + .tcl1_ring_msi1_data = 0x00000950, + .tcl_ring_base_lsb = 0x00000b58, + .tcl1_ring_base_lsb = 0x00000900, + .tcl1_ring_base_msb = 0x00000904, + .tcl2_ring_base_lsb = 0x00000978, + + /* TCL STATUS ring address */ + .tcl_status_ring_base_lsb = 0x00000d38, + + .wbm_idle_ring_base_lsb = 0x00000d3c, + .wbm_idle_ring_misc_addr = 0x00000d4c, + .wbm_r0_idle_list_cntl_addr = 0x00000240, + .wbm_r0_idle_list_size_addr = 0x00000244, + .wbm_scattered_ring_base_lsb = 0x00000250, + .wbm_scattered_ring_base_msb = 0x00000254, + .wbm_scattered_desc_head_info_ix0 = 0x00000260, + .wbm_scattered_desc_head_info_ix1 = 0x00000264, + .wbm_scattered_desc_tail_info_ix0 = 0x00000270, + .wbm_scattered_desc_tail_info_ix1 = 0x00000274, + .wbm_scattered_desc_ptr_hp_addr = 0x00000027c, + + .wbm_sw_release_ring_base_lsb = 0x0000037c, + .wbm_sw1_release_ring_base_lsb = 0x00000284, + .wbm0_release_ring_base_lsb = 0x00000e08, + .wbm1_release_ring_base_lsb = 0x00000e80, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8, + .pcie_pcs_osc_dtct_config_base = 0x01e0f45c, + + /* PPE release ring address */ + .ppe_rel_ring_base = 0x0000043c, + + /* REO DEST ring address */ + .reo2_ring_base = 0x0000055c, + .reo1_misc_ctrl_addr = 0x00000b7c, + .reo1_sw_cookie_cfg0 = 0x00000050, + .reo1_sw_cookie_cfg1 = 0x00000054, + .reo1_qdesc_lut_base0 = 0x00000058, + .reo1_qdesc_lut_base1 = 0x0000005c, + .reo1_ring_base_lsb = 0x000004e4, + .reo1_ring_base_msb = 0x000004e8, + .reo1_ring_id = 0x000004ec, + .reo1_ring_misc = 0x000004f4, + .reo1_ring_hp_addr_lsb = 0x000004f8, + .reo1_ring_hp_addr_msb = 0x000004fc, + .reo1_ring_producer_int_setup = 0x00000508, + .reo1_ring_msi1_base_lsb = 0x0000052C, + .reo1_ring_msi1_base_msb = 0x00000530, + .reo1_ring_msi1_data = 0x00000534, + 
.reo1_aging_thres_ix0 = 0x00000b08, + .reo1_aging_thres_ix1 = 0x00000b0c, + .reo1_aging_thres_ix2 = 0x00000b10, + .reo1_aging_thres_ix3 = 0x00000b14, + + /* REO Exception ring address */ + .reo2_sw0_ring_base = 0x000008a4, + + /* REO Reinject ring address */ + .sw2reo_ring_base = 0x00000304, + .sw2reo1_ring_base = 0x0000037c, + + /* REO cmd ring address */ + .reo_cmd_ring_base = 0x0000028c, + + /* REO status ring address */ + .reo_status_ring_base = 0x00000a84, + + /* CE base address */ + .umac_ce0_src_reg_base = 0x01b80000, + .umac_ce0_dest_reg_base = 0x01b81000, + .umac_ce1_src_reg_base = 0x01b82000, + .umac_ce1_dest_reg_base = 0x01b83000, + + .gcc_gcc_pcie_hot_rst = 0x1e40304, + }; + + static inline + bool ath12k_hal_rx_desc_get_first_msdu_wcn7850(struct hal_rx_desc *desc) + { + return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_FIRST_MSDU); + } + + static inline + bool ath12k_hal_rx_desc_get_last_msdu_wcn7850(struct hal_rx_desc *desc) + { + return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_LAST_MSDU); + } + + u8 ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850(struct hal_rx_desc *desc) + { + return le16_get_bits(desc->u.wcn7850.msdu_end.info5, + RX_MSDU_END_INFO5_L3_HDR_PADDING); + } + + static inline + bool ath12k_hal_rx_desc_encrypt_valid_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID); + } + + static inline + u32 ath12k_hal_rx_desc_get_encrypt_type_wcn7850(struct hal_rx_desc *desc) + { + if (!ath12k_hal_rx_desc_encrypt_valid_wcn7850(desc)) + return HAL_ENCRYPT_TYPE_OPEN; + + return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_ENC_TYPE); + } + + static inline + u8 ath12k_hal_rx_desc_get_decap_type_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info11, + RX_MSDU_END_INFO11_DECAP_FORMAT); + } + + static inline + u8 ath12k_hal_rx_desc_get_mesh_ctl_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info11, + RX_MSDU_END_INFO11_MESH_CTRL_PRESENT); + } + + static inline + bool ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID); + } + + static inline + bool ath12k_hal_rx_desc_get_mpdu_fc_valid_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_FCTRL_VALID); + } + + static inline + u16 ath12k_hal_rx_desc_get_mpdu_start_seq_no_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.mpdu_start.info4, + RX_MPDU_START_INFO4_MPDU_SEQ_NUM); + } + + static inline + u16 ath12k_hal_rx_desc_get_msdu_len_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info10, + RX_MSDU_END_INFO10_MSDU_LENGTH); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_sgi_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_SGI); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_rate_mcs_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_RATE_MCS); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_rx_bw_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_RECV_BW); + } + + static inline + u32 ath12k_hal_rx_desc_get_msdu_freq_wcn7850(struct 
hal_rx_desc *desc) + { + return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_pkt_type_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_PKT_TYPE); + } + + static inline + u8 ath12k_hal_rx_desc_get_msdu_nss_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.msdu_end.info12, + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); + } + + static inline + u8 ath12k_hal_rx_desc_get_mpdu_tid_wcn7850(struct hal_rx_desc *desc) + { + return le32_get_bits(desc->u.wcn7850.mpdu_start.info2, + RX_MPDU_START_INFO2_TID); + } + + static inline + u16 ath12k_hal_rx_desc_get_mpdu_peer_id_wcn7850(struct hal_rx_desc *desc) + { + return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id); + } + + void ath12k_hal_rx_desc_copy_end_tlv_wcn7850(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) + { + memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end, + sizeof(struct rx_msdu_end_qcn9274)); + } + + u32 ath12k_hal_rx_desc_get_mpdu_start_tag_wcn7850(struct hal_rx_desc *desc) + { + return le64_get_bits(desc->u.wcn7850.mpdu_start_tag, + HAL_TLV_HDR_TAG); + } + + u32 ath12k_hal_rx_desc_get_mpdu_ppdu_id_wcn7850(struct hal_rx_desc *desc) + { + return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id); + } + + void ath12k_hal_rx_desc_set_msdu_len_wcn7850(struct hal_rx_desc *desc, u16 len) + { + u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10); + + info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH; + info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH); + + desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info); + } + + u8 *ath12k_hal_rx_desc_get_msdu_payload_wcn7850(struct hal_rx_desc *desc) + { + return &desc->u.wcn7850.msdu_payload[0]; + } + + u32 ath12k_hal_rx_desc_get_mpdu_start_offset_wcn7850(void) + { + return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag); + } + + u32 ath12k_hal_rx_desc_get_msdu_end_offset_wcn7850(void) + { + return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag); + } + + static inline + bool ath12k_hal_rx_desc_mac_addr2_valid_wcn7850(struct hal_rx_desc *desc) + { + return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR2_VALID; + } + + static inline + u8 *ath12k_hal_rx_desc_mpdu_start_addr2_wcn7850(struct hal_rx_desc *desc) + { + return desc->u.wcn7850.mpdu_start.addr2; + } + + static inline + bool ath12k_hal_rx_desc_is_da_mcbc_wcn7850(struct hal_rx_desc *desc) + { + return __le32_to_cpu(desc->u.wcn7850.msdu_end.info13) & + RX_MSDU_END_INFO13_MCAST_BCAST; + } + + static inline + bool ath12k_hal_rx_h_msdu_done_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14, + RX_MSDU_END_INFO14_MSDU_DONE); + } + + static inline + bool ath12k_hal_rx_h_l4_cksum_fail_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, + RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL); + } + + static inline + bool ath12k_hal_rx_h_ip_cksum_fail_wcn7850(struct hal_rx_desc *desc) + { + return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13, + RX_MSDU_END_INFO13_IP_CKSUM_FAIL); + } + + static inline + bool ath12k_hal_rx_h_is_decrypted_wcn7850(struct hal_rx_desc *desc) + { + return (le32_get_bits(desc->u.wcn7850.msdu_end.info14, + RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) == + RX_DESC_DECRYPT_STATUS_CODE_OK); + } + + u32 ath12k_hal_get_rx_desc_size_wcn7850(void) + { + return sizeof(struct hal_rx_desc_wcn7850); + } + + u8 
ath12k_hal_rx_desc_get_msdu_src_link_wcn7850(struct hal_rx_desc *desc) + { + return 0; + } + + static u32 ath12k_hal_rx_h_mpdu_err_wcn7850(struct hal_rx_desc *desc) + { + u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13); + u32 errmap = 0; + + if (info & RX_MSDU_END_INFO13_FCS_ERR) + errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; + + return errmap; + } + + void ath12k_hal_rx_desc_get_crypto_hdr_wcn7850(struct hal_rx_desc *desc, + u8 *crypto_hdr, + enum hal_encrypt_type enctype) + { + unsigned int key_id; + + switch (enctype) { + case HAL_ENCRYPT_TYPE_OPEN: + return; + case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: + case HAL_ENCRYPT_TYPE_TKIP_MIC: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[1] = 0; + crypto_hdr[2] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); + break; + case HAL_ENCRYPT_TYPE_CCMP_128: + case HAL_ENCRYPT_TYPE_CCMP_256: + case HAL_ENCRYPT_TYPE_GCMP_128: + case HAL_ENCRYPT_TYPE_AES_GCMP_256: + crypto_hdr[0] = + HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[1] = + HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[2] = 0; + break; + case HAL_ENCRYPT_TYPE_WEP_40: + case HAL_ENCRYPT_TYPE_WEP_104: + case HAL_ENCRYPT_TYPE_WEP_128: + case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: + case HAL_ENCRYPT_TYPE_WAPI: + return; + } + key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5), + RX_MPDU_START_INFO5_KEY_ID); + crypto_hdr[3] = 0x20 | (key_id << 6); + crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]); + crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]); + crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]); + } + + void ath12k_hal_rx_desc_get_dot11_hdr_wcn7850(struct hal_rx_desc *desc, + struct ieee80211_hdr *hdr) + { + hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl; + hdr->duration_id = desc->u.wcn7850.mpdu_start.duration; + ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1); + ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2); + ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3); + if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) & + RX_MPDU_START_INFO4_MAC_ADDR4_VALID) { + ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4); + } + hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl; + } + + void ath12k_hal_extract_rx_desc_data_wcn7850(struct hal_rx_desc_data *rx_desc_data, + struct hal_rx_desc *rx_desc, + struct hal_rx_desc *ldesc) + { + rx_desc_data->is_first_msdu = ath12k_hal_rx_desc_get_first_msdu_wcn7850(ldesc); + rx_desc_data->is_last_msdu = ath12k_hal_rx_desc_get_last_msdu_wcn7850(ldesc); + rx_desc_data->l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850(ldesc); + rx_desc_data->enctype = ath12k_hal_rx_desc_get_encrypt_type_wcn7850(rx_desc); + rx_desc_data->decap_type = 
ath12k_hal_rx_desc_get_decap_type_wcn7850(rx_desc); + rx_desc_data->mesh_ctrl_present = + ath12k_hal_rx_desc_get_mesh_ctl_wcn7850(rx_desc); + rx_desc_data->seq_ctl_valid = + ath12k_hal_rx_desc_get_mpdu_seq_ctl_vld_wcn7850(rx_desc); + rx_desc_data->fc_valid = ath12k_hal_rx_desc_get_mpdu_fc_valid_wcn7850(rx_desc); + rx_desc_data->seq_no = ath12k_hal_rx_desc_get_mpdu_start_seq_no_wcn7850(rx_desc); + rx_desc_data->msdu_len = ath12k_hal_rx_desc_get_msdu_len_wcn7850(ldesc); + rx_desc_data->sgi = ath12k_hal_rx_desc_get_msdu_sgi_wcn7850(rx_desc); + rx_desc_data->rate_mcs = ath12k_hal_rx_desc_get_msdu_rate_mcs_wcn7850(rx_desc); + rx_desc_data->bw = ath12k_hal_rx_desc_get_msdu_rx_bw_wcn7850(rx_desc); + rx_desc_data->phy_meta_data = ath12k_hal_rx_desc_get_msdu_freq_wcn7850(rx_desc); + rx_desc_data->pkt_type = ath12k_hal_rx_desc_get_msdu_pkt_type_wcn7850(rx_desc); + rx_desc_data->nss = hweight8(ath12k_hal_rx_desc_get_msdu_nss_wcn7850(rx_desc)); + rx_desc_data->tid = ath12k_hal_rx_desc_get_mpdu_tid_wcn7850(rx_desc); + rx_desc_data->peer_id = ath12k_hal_rx_desc_get_mpdu_peer_id_wcn7850(rx_desc); + rx_desc_data->addr2_present = ath12k_hal_rx_desc_mac_addr2_valid_wcn7850(rx_desc); + rx_desc_data->addr2 = ath12k_hal_rx_desc_mpdu_start_addr2_wcn7850(rx_desc); + rx_desc_data->is_mcbc = ath12k_hal_rx_desc_is_da_mcbc_wcn7850(rx_desc); + rx_desc_data->msdu_done = ath12k_hal_rx_h_msdu_done_wcn7850(ldesc); + rx_desc_data->l4_csum_fail = ath12k_hal_rx_h_l4_cksum_fail_wcn7850(rx_desc); + rx_desc_data->ip_csum_fail = ath12k_hal_rx_h_ip_cksum_fail_wcn7850(rx_desc); + rx_desc_data->is_decrypted = ath12k_hal_rx_h_is_decrypted_wcn7850(rx_desc); + rx_desc_data->err_bitmap = ath12k_hal_rx_h_mpdu_err_wcn7850(rx_desc); + } + + static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_hal *hal) + { + struct hal_srng_config *s; + + hal->srng_config = kmemdup(hw_srng_config_template, + sizeof(hw_srng_config_template), + GFP_KERNEL); + if (!hal->srng_config) + return -ENOMEM; + + s = &hal->srng_config[HAL_REO_DST]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP; + s->reg_size[0] = HAL_REO2_RING_BASE_LSB(hal) - HAL_REO1_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP; + + s = &hal->srng_config[HAL_REO_EXCEPTION]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP; + + s = &hal->srng_config[HAL_REO_REINJECT]; + s->max_rings = 1; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; + + s = &hal->srng_config[HAL_REO_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; + + s = &hal->srng_config[HAL_REO_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP; + + s = &hal->srng_config[HAL_TCL_DATA]; + s->max_rings = 5; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; + s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(hal) - HAL_TCL1_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; + + s = &hal->srng_config[HAL_TCL_CMD]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(hal); + s->reg_start[1] = 
HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; + + s = &hal->srng_config[HAL_TCL_STATUS]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; + + s = &hal->srng_config[HAL_CE_SRC]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(hal); + + s = &hal->srng_config[HAL_CE_DST]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + + s = &hal->srng_config[HAL_CE_DST_STATUS]; + s->max_rings = 12; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + + HAL_CE_DST_STATUS_RING_BASE_LSB; + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal) + HAL_CE_DST_STATUS_RING_HP; + s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(hal) - + HAL_SEQ_WCSS_UMAC_CE0_DST_REG(hal); + + s = &hal->srng_config[HAL_WBM_IDLE_LINK]; + s->reg_start[0] = + HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; + + s = &hal->srng_config[HAL_SW2WBM_RELEASE]; + s->max_rings = 1; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + + HAL_WBM_SW_RELEASE_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_WBM2SW_RELEASE]; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(hal); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; + s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(hal) - + HAL_WBM0_RELEASE_RING_BASE_LSB(hal); + s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; + + s = &hal->srng_config[HAL_RXDMA_BUF]; + s->max_rings = 2; + s->mac_type = ATH12K_HAL_SRNG_PMAC; + + s = &hal->srng_config[HAL_RXDMA_DST]; + s->max_rings = 1; + s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2; + + /* below rings are not used */ + s = &hal->srng_config[HAL_RXDMA_DIR_BUF]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_PPE2TCL]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_PPE_RELEASE]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_TX_MONITOR_BUF]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_TX_MONITOR_DST]; + s->max_rings = 0; + + s = &hal->srng_config[HAL_PPE2TCL]; + s->max_rings = 0; + + return 0; + } + + const struct ath12k_hal_tcl_to_wbm_rbm_map + ath12k_hal_tcl_to_wbm_rbm_map_wcn7850[DP_TCL_NUM_RING_MAX] = { + { + .wbm_ring_num = 0, + .rbm_id = HAL_RX_BUF_RBM_SW0_BM, + }, + { + .wbm_ring_num = 2, + .rbm_id = HAL_RX_BUF_RBM_SW2_BM, + }, + { + .wbm_ring_num = 4, + .rbm_id = HAL_RX_BUF_RBM_SW4_BM, + }, + }; + + const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = { + .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM, + .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN | + HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN | + 
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN, + }; + + const struct hal_ops hal_wcn7850_ops = { + .create_srng_config = ath12k_hal_srng_create_config_wcn7850, + .rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_wcn7850, + .rx_desc_get_dot11_hdr = ath12k_hal_rx_desc_get_dot11_hdr_wcn7850, + .rx_desc_get_crypto_header = ath12k_hal_rx_desc_get_crypto_hdr_wcn7850, + .rx_desc_copy_end_tlv = ath12k_hal_rx_desc_copy_end_tlv_wcn7850, + .rx_desc_get_msdu_src_link_id = ath12k_hal_rx_desc_get_msdu_src_link_wcn7850, + .extract_rx_desc_data = ath12k_hal_extract_rx_desc_data_wcn7850, + .rx_desc_get_l3_pad_bytes = ath12k_hal_rx_desc_get_l3_pad_bytes_wcn7850, + .rx_desc_get_mpdu_start_tag = ath12k_hal_rx_desc_get_mpdu_start_tag_wcn7850, + .rx_desc_get_mpdu_ppdu_id = ath12k_hal_rx_desc_get_mpdu_ppdu_id_wcn7850, + .rx_desc_get_msdu_payload = ath12k_hal_rx_desc_get_msdu_payload_wcn7850, + .ce_dst_setup = ath12k_wifi7_hal_ce_dst_setup, + .srng_src_hw_init = ath12k_wifi7_hal_srng_src_hw_init, + .srng_dst_hw_init = ath12k_wifi7_hal_srng_dst_hw_init, + .set_umac_srng_ptr_addr = ath12k_wifi7_hal_set_umac_srng_ptr_addr, + .srng_update_shadow_config = ath12k_wifi7_hal_srng_update_shadow_config, + .srng_get_ring_id = ath12k_wifi7_hal_srng_get_ring_id, + .ce_get_desc_size = ath12k_wifi7_hal_ce_get_desc_size, + .ce_src_set_desc = ath12k_wifi7_hal_ce_src_set_desc, + .ce_dst_set_desc = ath12k_wifi7_hal_ce_dst_set_desc, + .ce_dst_status_get_length = ath12k_wifi7_hal_ce_dst_status_get_length, + .set_link_desc_addr = ath12k_wifi7_hal_set_link_desc_addr, + .tx_set_dscp_tid_map = ath12k_wifi7_hal_tx_set_dscp_tid_map, + .tx_configure_bank_register = + ath12k_wifi7_hal_tx_configure_bank_register, + .reoq_lut_addr_read_enable = ath12k_wifi7_hal_reoq_lut_addr_read_enable, + .reoq_lut_set_max_peerid = ath12k_wifi7_hal_reoq_lut_set_max_peerid, + .write_reoq_lut_addr = ath12k_wifi7_hal_write_reoq_lut_addr, + .write_ml_reoq_lut_addr = ath12k_wifi7_hal_write_ml_reoq_lut_addr, + .setup_link_idle_list = ath12k_wifi7_hal_setup_link_idle_list, + .reo_init_cmd_ring = ath12k_wifi7_hal_reo_init_cmd_ring, ++ .reo_shared_qaddr_cache_clear = ath12k_wifi7_hal_reo_shared_qaddr_cache_clear, + .reo_hw_setup = ath12k_wifi7_hal_reo_hw_setup, + .rx_buf_addr_info_set = ath12k_wifi7_hal_rx_buf_addr_info_set, + .rx_buf_addr_info_get = ath12k_wifi7_hal_rx_buf_addr_info_get, + .cc_config = ath12k_wifi7_hal_cc_config, + .get_idle_link_rbm = ath12k_wifi7_hal_get_idle_link_rbm, + .rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get, + .rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get, + }; diff --cc drivers/net/wireless/ath/ath12k/wmi.c index be8b2943094f8,0d4a83b8005a0..150b04d0a21cd --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@@ -7377,10 -7201,8 +7281,10 @@@ static void ath12k_scan_event(struct at static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_peer_sta_kickout_arg arg = {}; + struct ath12k_link_vif *arvif; struct ieee80211_sta *sta; - struct ath12k_peer *peer; - unsigned int link_id; ++ struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; struct ath12k *ar; if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { @@@ -7400,49 -7222,25 +7304,31 @@@ goto exit; } - arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id); - ar = arsta->arvif->ar; - if (!ar) { - ath12k_warn(ab, "invalid ar in peer sta kickout ev for STA %pM\n", ++ arvif = arsta->arvif; + if (!arvif) { - ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d", - 
peer->vdev_id); ++ ath12k_warn(ab, "invalid arvif in peer sta kickout ev for STA %pM", + arg.mac_addr); goto exit; } - sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), - arg.mac_addr, NULL); - if (!sta) { - ath12k_warn(ab, "Spurious quick kickout for STA %pM\n", - arg.mac_addr); - goto exit; - } + ar = arvif->ar; - - if (peer->mlo) { - sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar), - arg.mac_addr, - NULL, &link_id); - if (peer->link_id != link_id) { - ath12k_warn(ab, - "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n", - arg.mac_addr, peer->link_id, link_id); - goto exit; - } - } else { - sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), - arg.mac_addr, NULL); - } - if (!sta) { - ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n", - peer->mlo ? "MLO " : "", arg.mac_addr); - goto exit; - } ++ ahsta = arsta->ahsta; ++ sta = ath12k_ahsta_to_sta(ahsta); - ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM", - arg.mac_addr); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "peer sta kickout event %pM reason: %d rssi: %d\n", + arg.mac_addr, arg.reason, arg.rssi); - ieee80211_report_low_ack(sta, 10); + switch (arg.reason) { + case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY: + if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) { + ath12k_mac_handle_beacon_miss(ar, arvif); + break; + } + fallthrough; + default: + ieee80211_report_low_ack(sta, 10); + } exit: spin_unlock_bh(&ab->base_lock);
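
For reference, the resolved kickout handler above now dispatches on the WMI kickout reason: WMI_PEER_STA_KICKOUT_REASON_INACTIVITY on a station interface is handled as a beacon miss, while any other reason, or any other interface type, falls through to ieee80211_report_low_ack(). A minimal standalone sketch of that dispatch, using hypothetical demo_* names rather than the real driver types:

/* Illustrative only: demo_* names are hypothetical and not part of ath12k. */
#include <stdio.h>

enum demo_kickout_reason {
	DEMO_KICKOUT_REASON_UNSPECIFIED,
	DEMO_KICKOUT_REASON_XRETRY,
	DEMO_KICKOUT_REASON_INACTIVITY,
};

enum demo_iftype {
	DEMO_IFTYPE_AP,
	DEMO_IFTYPE_STATION,
};

static void demo_handle_kickout(enum demo_kickout_reason reason,
				enum demo_iftype iftype)
{
	switch (reason) {
	case DEMO_KICKOUT_REASON_INACTIVITY:
		if (iftype == DEMO_IFTYPE_STATION) {
			/* a station kicked for inactivity lost its AP:
			 * treat it like a beacon miss
			 */
			printf("handle beacon miss\n");
			break;
		}
		/* non-station interfaces fall through to the default path */
	default:
		/* legacy behaviour: report that the peer stopped acking */
		printf("report low ack\n");
	}
}

int main(void)
{
	demo_handle_kickout(DEMO_KICKOUT_REASON_INACTIVITY, DEMO_IFTYPE_STATION);
	demo_handle_kickout(DEMO_KICKOUT_REASON_XRETRY, DEMO_IFTYPE_AP);
	return 0;
}

Only station interfaces take the beacon-miss path; every other interface type and kickout reason still ends up in the low-ack report.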
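
The new wifi7 HAL files above (for example wifi7/hal_wcn7850.c) each end in a per-chip hal_ops table (hal_qcn9274_ops, hal_wcn7850_ops), so common HAL code reaches chip-specific behaviour only through function pointers, presumably selected once per device. A minimal standalone sketch of that ops-table pattern, again with hypothetical demo_* names that are not the driver's real types:

/* Illustrative only: a stripped-down ops-table dispatch in plain C. */
#include <stdio.h>

struct demo_hal;

struct demo_hal_ops {
	int (*create_srng_config)(struct demo_hal *hal);
	unsigned int (*get_rx_desc_size)(void);
};

struct demo_hal {
	const struct demo_hal_ops *ops;
};

/* chip-specific implementations live in their own file */
static int demo_qcn9274_create_srng_config(struct demo_hal *hal)
{
	(void)hal;
	printf("qcn9274 srng config\n");
	return 0;
}

static unsigned int demo_qcn9274_get_rx_desc_size(void)
{
	return 128;	/* placeholder size, not a real descriptor length */
}

static const struct demo_hal_ops demo_qcn9274_ops = {
	.create_srng_config = demo_qcn9274_create_srng_config,
	.get_rx_desc_size = demo_qcn9274_get_rx_desc_size,
};

/* common code: pick one ops table at init and call through it afterwards */
static int demo_hal_init(struct demo_hal *hal, const struct demo_hal_ops *ops)
{
	hal->ops = ops;
	return hal->ops->create_srng_config(hal);
}

int main(void)
{
	struct demo_hal hal;

	demo_hal_init(&hal, &demo_qcn9274_ops);
	printf("rx desc size: %u\n", hal.ops->get_rx_desc_size());
	return 0;
}

Because callers only ever see the ops pointer, supporting another chip means adding another table rather than another chain of per-chip conditionals.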