From: Bhargava Marreddy Date: Wed, 28 Jan 2026 18:56:22 +0000 (+0530) Subject: bng_en: Add TPA related functions X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=58165c99324e0590c7eb0e3addffd3bd5ca18780;p=thirdparty%2Fkernel%2Flinux.git bng_en: Add TPA related functions Add the functions to handle TPA events in RX path. This helps the next patch enable TPA functionality. Signed-off-by: Bhargava Marreddy Reviewed-by: Vikas Gupta Reviewed-by: Rajashekar Hudumula Link: https://patch.msgid.link/20260128185623.26559-8-bhargava.marreddy@broadcom.com Signed-off-by: Jakub Kicinski --- diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h index 7ca273f2e54f..49828dc05514 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h @@ -206,4 +206,241 @@ struct rx_agg_cmp { #define RX_CMP_HASH_VALID(rxcmp) \ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) + +#define TPA_AGG_AGG_ID(rx_agg) \ + ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \ + RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT) + +#define RX_TPA_START_CMP_TYPE GENMASK(5, 0) +#define RX_TPA_START_CMP_FLAGS GENMASK(15, 6) +#define RX_TPA_START_CMP_FLAGS_SHIFT 6 +#define RX_TPA_START_CMP_FLAGS_ERROR BIT(6) +#define RX_TPA_START_CMP_FLAGS_PLACEMENT GENMASK(9, 7) +#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 +#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO BIT(7) +#define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) +#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) +#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) +#define RX_TPA_START_CMP_FLAGS_RSS_VALID BIT(10) +#define RX_TPA_START_CMP_FLAGS_TIMESTAMP BIT(11) +#define RX_TPA_START_CMP_FLAGS_ITYPES GENMASK(15, 12) +#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 +#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) +#define RX_TPA_START_CMP_LEN GENMASK(31, 16) +#define RX_TPA_START_CMP_LEN_SHIFT 16 +#define RX_TPA_START_CMP_V1 BIT(0) +#define RX_TPA_START_CMP_RSS_HASH_TYPE GENMASK(15, 9) +#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 +#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE GENMASK(15, 7) +#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7 +#define RX_TPA_START_CMP_AGG_ID GENMASK(25, 16) +#define RX_TPA_START_CMP_AGG_ID_SHIFT 16 +#define RX_TPA_START_CMP_METADATA1 GENMASK(31, 28) +#define RX_TPA_START_CMP_METADATA1_SHIFT 28 +#define RX_TPA_START_METADATA1_TPID_SEL GENMASK(30, 28) +#define RX_TPA_START_METADATA1_TPID_8021Q BIT(28) +#define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28) +#define RX_TPA_START_METADATA1_VALID BIT(31) + +struct rx_tpa_start_cmp { + __le32 rx_tpa_start_cmp_len_flags_type; + u32 rx_tpa_start_cmp_opaque; + __le32 rx_tpa_start_cmp_misc_v1; + __le32 rx_tpa_start_cmp_rss_hash; +}; + +#define TPA_START_HASH_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID)) + +#define TPA_START_HASH_TYPE(rx_tpa_start) \ + (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) + +#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \ + (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) + +#define TPA_START_AGG_ID(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID) >> 
RX_TPA_START_CMP_AGG_ID_SHIFT) + +#define TPA_START_ERROR(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR)) + +#define TPA_START_VLAN_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \ + cpu_to_le32(RX_TPA_START_METADATA1_VALID)) + +#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \ + (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_METADATA1_TPID_SEL) + +#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC BIT(0) +#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC BIT(1) +#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC BIT(2) +#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC BIT(3) +#define RX_TPA_START_CMP_FLAGS2_IP_TYPE BIT(8) +#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID BIT(9) +#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT GENMASK(11, 10) +#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10 +#define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE BIT(10) +#define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO BIT(11) +#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL GENMASK(31, 16) +#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16 +#define RX_TPA_START_CMP_V2 BIT(0) +#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK GENMASK(3, 1) +#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1 +#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) +#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) +#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) +#define RX_TPA_START_CMP_CFA_CODE GENMASK(31, 16) +#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 +#define RX_TPA_START_CMP_METADATA0_TCI_MASK GENMASK(31, 16) +#define RX_TPA_START_CMP_METADATA0_VID_MASK GENMASK(27, 16) +#define RX_TPA_START_CMP_METADATA0_SFT 16 + +struct rx_tpa_start_cmp_ext { + __le32 rx_tpa_start_cmp_flags2; + __le32 rx_tpa_start_cmp_metadata; + __le32 rx_tpa_start_cmp_cfa_code_v2; + __le32 rx_tpa_start_cmp_hdr_info; +}; + +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + +#define TPA_START_IS_IPV6(rx_tpa_start) \ + (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) + +#define TPA_START_ERROR_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT) + +#define TPA_START_METADATA0_TCI(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \ + RX_TPA_START_CMP_METADATA0_SFT) + +#define RX_TPA_END_CMP_TYPE GENMASK(5, 0) +#define RX_TPA_END_CMP_FLAGS GENMASK(15, 6) +#define RX_TPA_END_CMP_FLAGS_SHIFT 6 +#define RX_TPA_END_CMP_FLAGS_PLACEMENT GENMASK(9, 7) +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7 +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO BIT(7) +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) +#define RX_TPA_END_CMP_FLAGS_RSS_VALID BIT(10) +#define RX_TPA_END_CMP_FLAGS_ITYPES GENMASK(15, 12) +#define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12 +#define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12) +#define RX_TPA_END_CMP_LEN GENMASK(31, 16) +#define RX_TPA_END_CMP_LEN_SHIFT 16 +#define RX_TPA_END_CMP_V1 BIT(0) +#define RX_TPA_END_CMP_TPA_SEGS GENMASK(15, 8) +#define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8 +#define RX_TPA_END_CMP_AGG_ID GENMASK(25, 16) 
+#define RX_TPA_END_CMP_AGG_ID_SHIFT 16 +#define RX_TPA_END_GRO_TS BIT(31) + +struct rx_tpa_end_cmp { + __le32 rx_tpa_end_cmp_len_flags_type; + u32 rx_tpa_end_cmp_opaque; + __le32 rx_tpa_end_cmp_misc_v1; + __le32 rx_tpa_end_cmp_tsdelta; +}; + +#define TPA_END_AGG_ID(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) + +#define TPA_END_TPA_SEGS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) + +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \ + cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS) + +#define TPA_END_GRO(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) + +#define TPA_END_GRO_TS(rx_tpa_end) \ + (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \ + cpu_to_le32(RX_TPA_END_GRO_TS))) + +#define RX_TPA_END_CMP_TPA_DUP_ACKS GENMASK(3, 0) +#define RX_TPA_END_CMP_PAYLOAD_OFFSET GENMASK(23, 16) +#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 +#define RX_TPA_END_CMP_AGG_BUFS GENMASK(31, 24) +#define RX_TPA_END_CMP_AGG_BUFS_SHIFT 24 +#define RX_TPA_END_CMP_TPA_SEG_LEN GENMASK(15, 0) +#define RX_TPA_END_CMP_V2 BIT(0) +#define RX_TPA_END_CMP_ERRORS GENMASK(2, 1) +#define RX_TPA_END_CMPL_ERRORS_SHIFT 1 +#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) +#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) +#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) +#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1) +#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) + +struct rx_tpa_end_cmp_ext { + __le32 rx_tpa_end_cmp_dup_acks; + __le32 rx_tpa_end_cmp_seg_len; + __le32 rx_tpa_end_cmp_errors_v2; + u32 rx_tpa_end_cmp_start_opaque; +}; + +#define TPA_END_ERRORS(rx_tpa_end_ext) \ + ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ + cpu_to_le32(RX_TPA_END_CMP_ERRORS)) + +#define TPA_END_PAYLOAD_OFF(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT) + +#define TPA_END_AGG_BUFS(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT) + +#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) + +#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION) + +#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK) + +#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \ + (!!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)) + +#define EVENT_DATA1_RECOVERY_ENABLED(data1) \ + (!!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)) + +#define BNGE_EVENT_ERROR_REPORT_TYPE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT) + +#define BNGE_EVENT_INVALID_SIGNAL_DATA(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\ + 
ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT) #endif /* _BNGE_HW_DEF_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c index a8e036d666df..9717065a2f20 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c @@ -377,11 +377,37 @@ static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn, } } +static void bnge_free_one_tpa_info_data(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + int i; + + for (i = 0; i < bn->max_tpa; i++) { + struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i]; + u8 *data = tpa_info->data; + + if (!data) + continue; + + tpa_info->data = NULL; + page_pool_free_va(rxr->head_pool, data, false); + } +} + static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn, struct bnge_rx_ring_info *rxr) { + struct bnge_tpa_idx_map *map; + + if (rxr->rx_tpa) + bnge_free_one_tpa_info_data(bn, rxr); + bnge_free_one_rx_ring_bufs(bn, rxr); bnge_free_one_agg_ring_bufs(bn, rxr); + + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); } static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn) @@ -452,11 +478,70 @@ static void bnge_free_all_rings_bufs(struct bnge_net *bn) bnge_free_tx_skbs(bn); } +static void bnge_free_tpa_info(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j; + + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + for (j = 0; j < bn->max_tpa; j++) { + kfree(rxr->rx_tpa[j].agg_arr); + rxr->rx_tpa[j].agg_arr = NULL; + } + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnge_alloc_tpa_info(struct bnge_net *bn) +{ + struct bnge_dev *bd = bn->bd; + int i, j; + + if (!bd->max_tpa_v2) + return 0; + + bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA); + for (i = 0; i < bd->rx_nr_rings; i++) { + struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; + + rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + goto err_free_tpa_info; + + for (j = 0; j < bn->max_tpa; j++) { + struct rx_agg_cmp *agg; + + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + goto err_free_tpa_info; + rxr->rx_tpa[j].agg_arr = agg; + } + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + goto err_free_tpa_info; + } + return 0; + +err_free_tpa_info: + bnge_free_tpa_info(bn); + return -ENOMEM; +} + static void bnge_free_rx_rings(struct bnge_net *bn) { struct bnge_dev *bd = bn->bd; int i; + bnge_free_tpa_info(bn); for (i = 0; i < bd->rx_nr_rings; i++) { struct bnge_rx_ring_info *rxr = &bn->rx_ring[i]; struct bnge_ring_struct *ring; @@ -581,6 +666,12 @@ static int bnge_alloc_rx_rings(struct bnge_net *bn) goto err_free_rx_rings; } } + + if (bn->priv_flags & BNGE_NET_EN_TPA) { + rc = bnge_alloc_tpa_info(bn); + if (rc) + goto err_free_rx_rings; + } return rc; err_free_rx_rings: @@ -1126,6 +1217,30 @@ err_free_one_agg_ring_bufs: return -ENOMEM; } +static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn, + struct bnge_rx_ring_info *rxr) +{ + dma_addr_t mapping; + u8 *data; + int i; + + for (i = 0; i < bn->max_tpa; i++) { + data = __bnge_alloc_rx_frag(bn, &mapping, rxr, + GFP_KERNEL); + if (!data) + goto err_free_tpa_info_data; + + rxr->rx_tpa[i].data = data; + rxr->rx_tpa[i].data_ptr = data + bn->rx_offset; + rxr->rx_tpa[i].mapping = 
mapping; + } + return 0; + +err_free_tpa_info_data: + bnge_free_one_tpa_info_data(bn, rxr); + return -ENOMEM; +} + static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr) { struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr]; @@ -1140,8 +1255,17 @@ static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr) if (rc) goto err_free_one_rx_ring_bufs; } + + if (rxr->rx_tpa) { + rc = bnge_alloc_one_tpa_info_data(bn, rxr); + if (rc) + goto err_free_one_agg_ring_bufs; + } + return 0; +err_free_one_agg_ring_bufs: + bnge_free_one_agg_ring_bufs(bn, rxr); err_free_one_rx_ring_bufs: bnge_free_one_rx_ring_bufs(bn, rxr); return rc; diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h index 69cc0442b456..1ab89febbef5 100644 --- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h @@ -153,6 +153,45 @@ enum { #define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO) +#define BNGE_NO_FW_ACCESS(bd) (pci_channel_offline((bd)->pdev)) + +#define MAX_TPA 256 +#define MAX_TPA_MASK (MAX_TPA - 1) +#define MAX_TPA_SEGS 0x3f + +#define BNGE_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNGE_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNGE_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) + +struct bnge_tpa_idx_map { + u16 agg_id_tbl[1024]; + DECLARE_BITMAP(agg_idx_bmap, MAX_TPA); +}; + +struct bnge_tpa_info { + void *data; + u8 *data_ptr; + dma_addr_t mapping; + u16 len; + unsigned short gso_type; + u32 flags2; + u32 metadata; + enum pkt_hash_types hash_type; + u32 rss_hash; + u32 hdr_info; + + u16 cfa_code; /* cfa_code in TPA start compl */ + u8 agg_count; + bool vlan_valid; + bool cfa_code_valid; + struct rx_agg_cmp *agg_arr; +}; + /* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra * BD because the first TX BD is always a long BD. */ @@ -245,6 +284,10 @@ struct bnge_net { #define BNGE_STATE_NAPI_DISABLED 0 u32 msg_enable; + u16 max_tpa; + __be16 vxlan_port; + __be16 nge_port; + __be16 vxlan_gpe_port; }; #define BNGE_DEFAULT_RX_RING_SIZE 511 @@ -390,6 +433,9 @@ struct bnge_rx_ring_info { dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; + struct bnge_tpa_info *rx_tpa; + struct bnge_tpa_idx_map *rx_tpa_idx_map; + struct bnge_ring_struct rx_ring_struct; struct bnge_ring_struct rx_agg_ring_struct; struct page_pool *page_pool;
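
For reference, a minimal sketch of how the TPA start accessors defined above in bnge_hw_def.h could be consumed by an RX completion handler. This is not part of the patch: the function name bnge_tpa_start_sketch() and its calling context are assumptions, and the real consumer is introduced by the next patch in the series, which enables TPA.

/* Illustrative only: assumes the RX completion loop (added later) hands us
 * the ring info plus the two TPA start completion descriptors.
 */
static void bnge_tpa_start_sketch(struct bnge_rx_ring_info *rxr,
				  struct rx_tpa_start_cmp *tpa_start,
				  struct rx_tpa_start_cmp_ext *tpa_start1)
{
	/* AGG ID from the first descriptor indexes the per-ring TPA table. */
	u16 agg_id = TPA_START_AGG_ID(tpa_start);
	struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	/* Length lives in the upper 16 bits of len_flags_type. */
	tpa_info->len = le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			RX_TPA_START_CMP_LEN_SHIFT;

	/* Header offsets and checksum/GRO flags come from the second descriptor. */
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);

	if (TPA_START_HASH_VALID(tpa_start))
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);

	/* Aggregation buffers will be accumulated in agg_arr as they arrive. */
	tpa_info->agg_count = 0;
}

The corresponding TPA end path would look up the same bnge_tpa_info entry via TPA_END_AGG_ID() and use TPA_END_TPA_SEGS()/TPA_END_AGG_BUFS() to build the coalesced SKB; that logic is deferred to the follow-up patch.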