return tag;
}
EXPORT_SYMBOL(ath12k_hal_decode_tlv64_hdr);
+
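+/* Decode a 32-bit TLV header: return the tag field and point *desc at the
+ * value that immediately follows the header.
+ */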
+u16 ath12k_hal_decode_tlv32_hdr(void *tlv, void **desc)
+{
+ struct hal_tlv_hdr *tlv32 = tlv;
+ u16 tag;
+
+ tag = le32_get_bits(tlv32->tl, HAL_SRNG_TLV_HDR_TAG);
+ *desc = tlv32->value;
+
+ return tag;
+}
+EXPORT_SYMBOL(ath12k_hal_decode_tlv32_hdr);
void *ath12k_hal_encode_tlv64_hdr(void *tlv, u64 tag, u64 len);
void *ath12k_hal_encode_tlv32_hdr(void *tlv, u64 tag, u64 len);
u16 ath12k_hal_decode_tlv64_hdr(void *tlv, void **desc);
+u16 ath12k_hal_decode_tlv32_hdr(void *tlv, void **desc);
#endif
* entries into this Ring has looped around the ring.
*/
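+
+/* QCC2072 REO status TLV payload: a padding dword followed by the common
+ * status descriptor; ath12k_hal_reo_status_dec_tlv_hdr_qcc2072() skips
+ * tlv32_padding to return the status.
+ */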
+struct hal_reo_get_queue_stats_status_qcc2072 {
+ __le32 tlv32_padding;
+ struct hal_reo_get_queue_stats_status status;
+} __packed;
+
#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
s->entry_size = (sizeof(struct hal_tlv_hdr) +
sizeof(struct hal_reo_get_queue_stats_qcc2072)) >> 2;
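+
+ /* REO status entries on QCC2072 include the extra tlv32_padding dword */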
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status_qcc2072)) >> 2;
+
return 0;
}
+
+static u16 ath12k_hal_reo_status_dec_tlv_hdr_qcc2072(void *tlv, void **desc)
+{
+ struct hal_reo_get_queue_stats_status_qcc2072 *status_tlv;
+ u16 tag;
+
+ tag = ath12k_hal_decode_tlv32_hdr(tlv, (void **)&status_tlv);
+ /* The actual REO status descriptor starts after tlv32_padding;
+ * see struct hal_reo_get_queue_stats_status_qcc2072.
+ */
+ *desc = &status_tlv->status;
+
+ return tag;
+}
+
const struct hal_ops hal_qcc2072_ops = {
.create_srng_config = ath12k_hal_srng_create_config_qcc2072,
.rx_desc_set_msdu_len = ath12k_hal_rx_desc_set_msdu_len_qcc2072,
.rx_msdu_list_get = ath12k_wifi7_hal_rx_msdu_list_get,
.rx_reo_ent_buf_paddr_get = ath12k_wifi7_hal_rx_reo_ent_buf_paddr_get,
.reo_cmd_enc_tlv_hdr = ath12k_hal_encode_tlv32_hdr,
- .reo_status_dec_tlv_hdr = ath12k_hal_decode_tlv64_hdr,
+ .reo_status_dec_tlv_hdr = ath12k_hal_reo_status_dec_tlv_hdr_qcc2072,
};
u32 ath12k_hal_rx_desc_get_mpdu_start_offset_qcc2072(void)