bng_en: Add TPA related functions
Author:     Bhargava Marreddy <bhargava.marreddy@broadcom.com>
AuthorDate: Wed, 28 Jan 2026 18:56:22 +0000 (00:26 +0530)
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: Fri, 30 Jan 2026 03:49:56 +0000 (19:49 -0800)
Add the functions to handle TPA events in the RX path.
This prepares for the next patch, which enables TPA functionality.
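
As a rough illustration only (not part of this series; bnge_tpa_start() and
its argument list are hypothetical), the new completion layouts and accessor
macros are expected to be consumed along these lines:

	/* Illustrative sketch only -- not from this patch. */
	static void bnge_tpa_start(struct bnge_rx_ring_info *rxr,
				   struct rx_tpa_start_cmp *tpa_start,
				   struct rx_tpa_start_cmp_ext *tpa_start1)
	{
		u16 agg_id = TPA_START_AGG_ID(tpa_start);
		struct bnge_tpa_info *tpa_info;

		/* In practice the hardware agg_id is expected to be remapped
		 * to a free rx_tpa[] slot via rx_tpa_idx_map; the raw value
		 * is used here only to keep the sketch short.
		 */
		tpa_info = &rxr->rx_tpa[agg_id];

		if (TPA_START_ERROR(tpa_start)) {
			/* abort the aggregation and account the error */
			return;
		}

		tpa_info->len =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			RX_TPA_START_CMP_LEN_SHIFT;
		tpa_info->flags2 =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
		tpa_info->hdr_info =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
		tpa_info->agg_count = 0;
	}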

Signed-off-by: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
Link: https://patch.msgid.link/20260128185623.26559-8-bhargava.marreddy@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h

diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h b/drivers/net/ethernet/broadcom/bnge/bnge_hw_def.h
index 7ca273f2e54faecd5a094b90b6d750f0f6be85f3..49828dc055140f22eeffeca94bdcb8bb8b099175 100644
@@ -206,4 +206,241 @@ struct rx_agg_cmp {
 
 #define RX_CMP_HASH_VALID(rxcmp)                               \
        ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define TPA_AGG_AGG_ID(rx_agg)                         \
+       ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) &         \
+        RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
+#define RX_TPA_START_CMP_TYPE                          GENMASK(5, 0)
+#define RX_TPA_START_CMP_FLAGS                         GENMASK(15, 6)
+#define RX_TPA_START_CMP_FLAGS_SHIFT                   6
+#define RX_TPA_START_CMP_FLAGS_ERROR                   BIT(6)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT               GENMASK(9, 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT         7
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO         BIT(7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS           (0x2 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO     (0x5 << 7)
+#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS       (0x6 << 7)
+#define RX_TPA_START_CMP_FLAGS_RSS_VALID               BIT(10)
+#define RX_TPA_START_CMP_FLAGS_TIMESTAMP               BIT(11)
+#define RX_TPA_START_CMP_FLAGS_ITYPES                  GENMASK(15, 12)
+#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT            12
+#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP               (0x2 << 12)
+#define RX_TPA_START_CMP_LEN                           GENMASK(31, 16)
+#define RX_TPA_START_CMP_LEN_SHIFT                     16
+#define RX_TPA_START_CMP_V1                            BIT(0)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE                 GENMASK(15, 9)
+#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT           9
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE              GENMASK(15, 7)
+#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT                7
+#define RX_TPA_START_CMP_AGG_ID                                GENMASK(25, 16)
+#define RX_TPA_START_CMP_AGG_ID_SHIFT                  16
+#define RX_TPA_START_CMP_METADATA1                     GENMASK(31, 28)
+#define RX_TPA_START_CMP_METADATA1_SHIFT               28
+#define RX_TPA_START_METADATA1_TPID_SEL                        GENMASK(30, 28)
+#define RX_TPA_START_METADATA1_TPID_8021Q              BIT(28)
+#define RX_TPA_START_METADATA1_TPID_8021AD             (0x0 << 28)
+#define RX_TPA_START_METADATA1_VALID                   BIT(31)
+
+struct rx_tpa_start_cmp {
+       __le32 rx_tpa_start_cmp_len_flags_type;
+       u32 rx_tpa_start_cmp_opaque;
+       __le32 rx_tpa_start_cmp_misc_v1;
+       __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start)                             \
+       ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &              \
+        cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start)                              \
+       (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &      \
+          RX_TPA_START_CMP_RSS_HASH_TYPE) >>                           \
+         RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start)                           \
+       (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &      \
+          RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >>                        \
+         RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
+#define TPA_START_AGG_ID(rx_tpa_start)                         \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &       \
+        RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+#define TPA_START_ERROR(rx_tpa_start)                                  \
+       ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &              \
+        cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
+#define TPA_START_VLAN_VALID(rx_tpa_start)                             \
+       ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 &                     \
+        cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start)                          \
+       (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &        \
+        RX_TPA_START_METADATA1_TPID_SEL)
+
+#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC             BIT(0)
+#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC             BIT(1)
+#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC           BIT(2)
+#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC           BIT(3)
+#define RX_TPA_START_CMP_FLAGS2_IP_TYPE                        BIT(8)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID                BIT(9)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT                GENMASK(11, 10)
+#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT  10
+#define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE           BIT(10)
+#define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO             BIT(11)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL              GENMASK(31, 16)
+#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT                16
+#define RX_TPA_START_CMP_V2                            BIT(0)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK      GENMASK(3, 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT     1
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT        (0x3 << 1)
+#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH     (0x5 << 1)
+#define RX_TPA_START_CMP_CFA_CODE                      GENMASK(31, 16)
+#define RX_TPA_START_CMPL_CFA_CODE_SHIFT               16
+#define RX_TPA_START_CMP_METADATA0_TCI_MASK            GENMASK(31, 16)
+#define RX_TPA_START_CMP_METADATA0_VID_MASK            GENMASK(27, 16)
+#define RX_TPA_START_CMP_METADATA0_SFT                 16
+
+struct rx_tpa_start_cmp_ext {
+       __le32 rx_tpa_start_cmp_flags2;
+       __le32 rx_tpa_start_cmp_metadata;
+       __le32 rx_tpa_start_cmp_cfa_code_v2;
+       __le32 rx_tpa_start_cmp_hdr_info;
+};
+
+#define TPA_START_CFA_CODE(rx_tpa_start)                               \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &   \
+        RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+
+#define TPA_START_IS_IPV6(rx_tpa_start)                                \
+       (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &           \
+           cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+
+#define TPA_START_ERROR_CODE(rx_tpa_start)                             \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &   \
+         RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >>                 \
+        RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
+#define TPA_START_METADATA0_TCI(rx_tpa_start)                          \
+       ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &   \
+         RX_TPA_START_CMP_METADATA0_TCI_MASK) >>                       \
+        RX_TPA_START_CMP_METADATA0_SFT)
+
+#define RX_TPA_END_CMP_TYPE                            GENMASK(5, 0)
+#define RX_TPA_END_CMP_FLAGS                           GENMASK(15, 6)
+#define RX_TPA_END_CMP_FLAGS_SHIFT                     6
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT                 GENMASK(9, 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT           7
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO           BIT(7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS             (0x2 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO       (0x5 << 7)
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS         (0x6 << 7)
+#define RX_TPA_END_CMP_FLAGS_RSS_VALID                 BIT(10)
+#define RX_TPA_END_CMP_FLAGS_ITYPES                    GENMASK(15, 12)
+#define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT              12
+#define RX_TPA_END_CMP_FLAGS_ITYPE_TCP                 (0x2 << 12)
+#define RX_TPA_END_CMP_LEN                             GENMASK(31, 16)
+#define RX_TPA_END_CMP_LEN_SHIFT                       16
+#define RX_TPA_END_CMP_V1                              BIT(0)
+#define RX_TPA_END_CMP_TPA_SEGS                                GENMASK(15, 8)
+#define RX_TPA_END_CMP_TPA_SEGS_SHIFT                  8
+#define RX_TPA_END_CMP_AGG_ID                          GENMASK(25, 16)
+#define RX_TPA_END_CMP_AGG_ID_SHIFT                    16
+#define RX_TPA_END_GRO_TS                              BIT(31)
+
+struct rx_tpa_end_cmp {
+       __le32 rx_tpa_end_cmp_len_flags_type;
+       u32 rx_tpa_end_cmp_opaque;
+       __le32 rx_tpa_end_cmp_misc_v1;
+       __le32 rx_tpa_end_cmp_tsdelta;
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end)                                     \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end)                                   \
+       ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &           \
+        RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO                         \
+       cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &          \
+                   RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end)                                                \
+       ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &                  \
+        RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end)                                     \
+       (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &                      \
+           cpu_to_le32(RX_TPA_END_GRO_TS)))
+
+#define RX_TPA_END_CMP_TPA_DUP_ACKS                    GENMASK(3, 0)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET                  GENMASK(23, 16)
+#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT            16
+#define RX_TPA_END_CMP_AGG_BUFS                                GENMASK(31, 24)
+#define RX_TPA_END_CMP_AGG_BUFS_SHIFT                  24
+#define RX_TPA_END_CMP_TPA_SEG_LEN                     GENMASK(15, 0)
+#define RX_TPA_END_CMP_V2                              BIT(0)
+#define RX_TPA_END_CMP_ERRORS                          GENMASK(2, 1)
+#define RX_TPA_END_CMPL_ERRORS_SHIFT                   1
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER   (0x0 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT  (0x3 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR   (0x4 << 1)
+#define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH       (0x5 << 1)
+
+struct rx_tpa_end_cmp_ext {
+       __le32 rx_tpa_end_cmp_dup_acks;
+       __le32 rx_tpa_end_cmp_seg_len;
+       __le32 rx_tpa_end_cmp_errors_v2;
+       u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define TPA_END_ERRORS(rx_tpa_end_ext)                                 \
+       ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &                   \
+        cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end_ext)                            \
+       ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &      \
+        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>                              \
+       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end_ext)                               \
+       ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &      \
+        RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1)                          \
+       (((data1) &                                                     \
+         ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+        ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)                  \
+       (((data1) &                                                     \
+         ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+       ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)                 \
+       ((data2) &                                                      \
+       ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1)                                \
+       (!!((data1) &                                                   \
+          ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC))
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1)                            \
+       (!!((data1) &                                                   \
+          ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED))
+
+#define BNGE_EVENT_ERROR_REPORT_TYPE(data1)                            \
+       (((data1) &                                                     \
+         ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
+        ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNGE_EVENT_INVALID_SIGNAL_DATA(data2)                          \
+       (((data2) &                                                     \
+         ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
+        ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
 #endif /* _BNGE_HW_DEF_H_ */
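
For context, the TPA end accessors above are expected to combine roughly as
follows (illustrative fragment only, assuming tpa_end/tpa_end1 point at the
rx_tpa_end_cmp/rx_tpa_end_cmp_ext pair and rxr at the RX ring; none of this
is part of the patch):

	u16 agg_id   = TPA_END_AGG_ID(tpa_end);
	u8  segs     = TPA_END_TPA_SEGS(tpa_end);
	u8  agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
	bool gro     = !!TPA_END_GRO(tpa_end);
	struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	/* tpa_info->data/data_ptr hold the header buffer captured at TPA
	 * start, tpa_info->agg_arr holds the agg_bufs aggregation
	 * completions that supply the payload pages, and segs/gro decide
	 * how the GSO/GRO metadata of the resulting skb is filled in.
	 */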
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index a8e036d666df2c8bb8c57cd3b5ea97badf713b5f..9717065a2f20fcb8862e6a7a4f48bb1fd080cc79 100644
@@ -377,11 +377,37 @@ static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
        }
 }
 
+static void bnge_free_one_tpa_info_data(struct bnge_net *bn,
+                                       struct bnge_rx_ring_info *rxr)
+{
+       int i;
+
+       for (i = 0; i < bn->max_tpa; i++) {
+               struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[i];
+               u8 *data = tpa_info->data;
+
+               if (!data)
+                       continue;
+
+               tpa_info->data = NULL;
+               page_pool_free_va(rxr->head_pool, data, false);
+       }
+}
+
 static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
                                            struct bnge_rx_ring_info *rxr)
 {
+       struct bnge_tpa_idx_map *map;
+
+       if (rxr->rx_tpa)
+               bnge_free_one_tpa_info_data(bn, rxr);
+
        bnge_free_one_rx_ring_bufs(bn, rxr);
        bnge_free_one_agg_ring_bufs(bn, rxr);
+
+       map = rxr->rx_tpa_idx_map;
+       if (map)
+               memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
 }
 
 static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
@@ -452,11 +478,70 @@ static void bnge_free_all_rings_bufs(struct bnge_net *bn)
        bnge_free_tx_skbs(bn);
 }
 
+static void bnge_free_tpa_info(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i, j;
+
+       for (i = 0; i < bd->rx_nr_rings; i++) {
+               struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+               kfree(rxr->rx_tpa_idx_map);
+               rxr->rx_tpa_idx_map = NULL;
+               if (rxr->rx_tpa) {
+                       for (j = 0; j < bn->max_tpa; j++) {
+                               kfree(rxr->rx_tpa[j].agg_arr);
+                               rxr->rx_tpa[j].agg_arr = NULL;
+                       }
+               }
+               kfree(rxr->rx_tpa);
+               rxr->rx_tpa = NULL;
+       }
+}
+
+static int bnge_alloc_tpa_info(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i, j;
+
+       if (!bd->max_tpa_v2)
+               return 0;
+
+       bn->max_tpa = max_t(u16, bd->max_tpa_v2, MAX_TPA);
+       for (i = 0; i < bd->rx_nr_rings; i++) {
+               struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+               rxr->rx_tpa = kcalloc(bn->max_tpa, sizeof(struct bnge_tpa_info),
+                                     GFP_KERNEL);
+               if (!rxr->rx_tpa)
+                       goto err_free_tpa_info;
+
+               for (j = 0; j < bn->max_tpa; j++) {
+                       struct rx_agg_cmp *agg;
+
+                       agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+                       if (!agg)
+                               goto err_free_tpa_info;
+                       rxr->rx_tpa[j].agg_arr = agg;
+               }
+               rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+                                             GFP_KERNEL);
+               if (!rxr->rx_tpa_idx_map)
+                       goto err_free_tpa_info;
+       }
+       return 0;
+
+err_free_tpa_info:
+       bnge_free_tpa_info(bn);
+       return -ENOMEM;
+}
+
 static void bnge_free_rx_rings(struct bnge_net *bn)
 {
        struct bnge_dev *bd = bn->bd;
        int i;
 
+       bnge_free_tpa_info(bn);
        for (i = 0; i < bd->rx_nr_rings; i++) {
                struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
                struct bnge_ring_struct *ring;
@@ -581,6 +666,12 @@ static int bnge_alloc_rx_rings(struct bnge_net *bn)
                                goto err_free_rx_rings;
                }
        }
+
+       if (bn->priv_flags & BNGE_NET_EN_TPA) {
+               rc = bnge_alloc_tpa_info(bn);
+               if (rc)
+                       goto err_free_rx_rings;
+       }
        return rc;
 
 err_free_rx_rings:
@@ -1126,6 +1217,30 @@ err_free_one_agg_ring_bufs:
        return -ENOMEM;
 }
 
+static int bnge_alloc_one_tpa_info_data(struct bnge_net *bn,
+                                       struct bnge_rx_ring_info *rxr)
+{
+       dma_addr_t mapping;
+       u8 *data;
+       int i;
+
+       for (i = 0; i < bn->max_tpa; i++) {
+               data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
+                                           GFP_KERNEL);
+               if (!data)
+                       goto err_free_tpa_info_data;
+
+               rxr->rx_tpa[i].data = data;
+               rxr->rx_tpa[i].data_ptr = data + bn->rx_offset;
+               rxr->rx_tpa[i].mapping = mapping;
+       }
+       return 0;
+
+err_free_tpa_info_data:
+       bnge_free_one_tpa_info_data(bn, rxr);
+       return -ENOMEM;
+}
+
 static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
 {
        struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
@@ -1140,8 +1255,17 @@ static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
                if (rc)
                        goto err_free_one_rx_ring_bufs;
        }
+
+       if (rxr->rx_tpa) {
+               rc = bnge_alloc_one_tpa_info_data(bn, rxr);
+               if (rc)
+                       goto err_free_one_agg_ring_bufs;
+       }
+
        return 0;
 
+err_free_one_agg_ring_bufs:
+       bnge_free_one_agg_ring_bufs(bn, rxr);
 err_free_one_rx_ring_bufs:
        bnge_free_one_rx_ring_bufs(bn, rxr);
        return rc;
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index 69cc0442b45691186b1bbe5e1c8ddf1a6e7e68ac..1ab89febbef54692da9f914f33fea0d1a7c28330 100644
@@ -153,6 +153,45 @@ enum {
 
 #define BNGE_NET_EN_TPA                (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
 
+#define BNGE_NO_FW_ACCESS(bd)  (pci_channel_offline((bd)->pdev))
+
+#define MAX_TPA                256
+#define MAX_TPA_MASK   (MAX_TPA - 1)
+#define MAX_TPA_SEGS   0x3f
+
+#define BNGE_TPA_INNER_L3_OFF(hdr_info)        \
+       (((hdr_info) >> 18) & 0x1ff)
+
+#define BNGE_TPA_INNER_L2_OFF(hdr_info)        \
+       (((hdr_info) >> 9) & 0x1ff)
+
+#define BNGE_TPA_OUTER_L3_OFF(hdr_info)        \
+       ((hdr_info) & 0x1ff)
+
+struct bnge_tpa_idx_map {
+       u16             agg_id_tbl[1024];
+       DECLARE_BITMAP(agg_idx_bmap, MAX_TPA);
+};
+
+struct bnge_tpa_info {
+       void                    *data;
+       u8                      *data_ptr;
+       dma_addr_t              mapping;
+       u16                     len;
+       unsigned short          gso_type;
+       u32                     flags2;
+       u32                     metadata;
+       enum pkt_hash_types     hash_type;
+       u32                     rss_hash;
+       u32                     hdr_info;
+
+       u16                     cfa_code; /* cfa_code in TPA start compl */
+       u8                      agg_count;
+       bool                    vlan_valid;
+       bool                    cfa_code_valid;
+       struct rx_agg_cmp       *agg_arr;
+};
+
 /* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
  * BD because the first TX BD is always a long BD.
  */
@@ -245,6 +284,10 @@ struct bnge_net {
 #define BNGE_STATE_NAPI_DISABLED       0
 
        u32                     msg_enable;
+       u16                     max_tpa;
+       __be16                  vxlan_port;
+       __be16                  nge_port;
+       __be16                  vxlan_gpe_port;
 };
 
 #define BNGE_DEFAULT_RX_RING_SIZE      511
@@ -390,6 +433,9 @@ struct bnge_rx_ring_info {
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
+       struct bnge_tpa_info    *rx_tpa;
+       struct bnge_tpa_idx_map *rx_tpa_idx_map;
+
        struct bnge_ring_struct rx_ring_struct;
        struct bnge_ring_struct rx_agg_ring_struct;
        struct page_pool        *page_pool;
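
The bnge_tpa_idx_map added in bnge_netdev.h pairs a 1024-entry agg_id_tbl
with a MAX_TPA-sized bitmap, so the 10-bit hardware aggregation ID can be
mapped onto one of the MAX_TPA rx_tpa[] slots. A minimal sketch of that
mapping, assuming the same scheme as the existing bnxt_en driver
(bnge_alloc_agg_idx() is a hypothetical name, not part of this patch):

	static u16 bnge_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
	{
		struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
		u16 idx = agg_id & MAX_TPA_MASK;

		/* Prefer the low bits of the hardware ID; fall back to any
		 * free slot if that index is already in use.
		 */
		if (test_bit(idx, map->agg_idx_bmap))
			idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
		__set_bit(idx, map->agg_idx_bmap);
		map->agg_id_tbl[agg_id] = idx;
		return idx;
	}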