#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
+#include <linux/sizes.h>
+#include <net/netdev_queues.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
ring_size = bn->rx_ring_size;
bn->rx_agg_ring_size = 0;
bn->rx_agg_nr_pages = 0;
bn->rx_agg_ring_size = agg_ring_size;
bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
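+	/* Size the rx buffer for the largest of the default copybreak,
+	 * the configured copybreak, and the pending HDS threshold.
+	 */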
+ rx_size = max3(BNGE_DEFAULT_RX_COPYBREAK,
+ bn->rx_copybreak,
+ bn->netdev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
bn->cp_ring_mask = bn->cp_bit - 1;
}
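+/* Set the default rx copybreak length and the header-data-split (HDS)
+ * threshold. Called from bnge_netdev_alloc() before
+ * bnge_set_ring_params().
+ */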
+static void bnge_init_ring_params(struct bnge_net *bn)
+{
+ u32 rx_size;
+
+ bn->rx_copybreak = BNGE_DEFAULT_RX_COPYBREAK;
+	/* Try to fit 4 chunks into a 4k page: give each chunk SZ_1K,
+	 * less the skb headroom and the shared_info overhead.
+	 */
+	rx_size = SZ_1K -
+		NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
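+	/* Keep the threshold at or above the default copybreak length */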
+ bn->netdev->cfg->hds_thresh = max(BNGE_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
{
struct net_device *netdev;
bn->rx_dir = DMA_FROM_DEVICE;
bnge_set_tpa_flags(bd);
+ bnge_init_ring_params(bn);
bnge_set_ring_params(bd);
bnge_init_l2_fltr_tbl(bn);
u16 nq_fw_ring_id;
};
-#define BNGE_RX_COPY_THRESH 256
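+/* Default and upper limit for the rx copybreak length */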
+#define BNGE_DEFAULT_RX_COPYBREAK 256
+#define BNGE_MAX_RX_COPYBREAK 1024
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
u32 rx_buf_size;
u32 rx_buf_use_size; /* usable size */
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+	u32 rx_copybreak;	/* copy rx packets smaller than this */
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
u16 rx_nr_pages;