#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <linux/if_vlan.h>
return IRQ_HANDLED;
}
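+/* Return the aggregation completion located "curr" entries past cp_cons
+ * in the completion ring, handling ring wrap.
+ */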
+static struct rx_agg_cmp *bnge_get_agg(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(bn, ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
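+/* Recycle the agg buffers of an errored or discarded packet: move each
+ * buffer from its completion's cons slot to the next free producer slot
+ * and repost its DMA address so the NIC can fill it again.
+ */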
+static void bnge_reuse_rx_agg_bufs(struct bnge_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_net *bn = bnapi->bn;
+ struct bnge_rx_ring_info *rxr;
+ u16 prod, sw_prod;
+ u32 i;
+
+ rxr = bnapi->rx_ring;
+ sw_prod = rxr->rx_sw_agg_prod;
+ prod = rxr->rx_agg_prod;
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
+ struct rx_bd *prod_bd;
+ netmem_ref netmem;
+ u16 cons;
+
+ agg = bnge_get_agg(bn, cpr, idx, start + i);
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
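+ /* Skip producer slots that are still in use */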
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+
+ /* It is possible for sw_prod to equal cons, in which case
+ * prod_rx_buf and cons_rx_buf are the same slot, so clear
+ * cons_rx_buf->netmem before setting prod_rx_buf->netmem.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
+ prod_rx_buf->offset = cons_rx_buf->offset;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)]
+ [RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
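+/* Advance *raw_cons past the packet's agg completions and test the
+ * valid bit of the last one; if it is set, all agg completions for
+ * this packet have been posted by the NIC.
+ */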
+static int bnge_agg_bufs_valid(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ struct rx_agg_cmp *agg;
+ u16 last;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(bn, *raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(bn, agg, *raw_cons);
+}
+
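+/* Discard a packet by advancing *raw_cons past its agg completions
+ * (if any) without touching the buffers.  Returns -EBUSY if those
+ * completions have not all arrived yet.
+ */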
+static int bnge_discard_rx(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
+ u32 *raw_cons, void *cmp)
+{
+ u32 tmp_raw_cons = *raw_cons;
+ struct rx_cmp *rxcmp = cmp;
+ u8 cmp_type, agg_bufs = 0;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+ RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+ }
+
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+ }
+ *raw_cons = tmp_raw_cons;
+ return 0;
+}
+
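+/* Attach the packet's agg buffers to the skb as page frags and post
+ * replacement buffers to the agg ring.  Returns the total frag length
+ * added, or 0 if a replacement allocation fails, in which case the
+ * current frag is unwound and the unused agg buffers are recycled.
+ */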
+static u32 __bnge_rx_agg_netmems(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs,
+ struct sk_buff *skb)
+{
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct skb_shared_info *shinfo;
+ struct bnge_rx_ring_info *rxr;
+ u32 i, total_frag_len = 0;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
+ shinfo = skb_shinfo(skb);
+
+ for (i = 0; i < agg_bufs; i++) {
+ struct bnge_sw_rx_agg_bd *cons_rx_buf;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
+
+ agg = bnge_get_agg(bn, cpr, idx, i);
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNGE_RX_PAGE_SIZE);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnge_alloc_rx_netmem() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+
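+ /* Refill the slot just consumed; on failure, undo this frag
+ * and recycle the agg buffers that have not been attached yet.
+ */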
+ if (bnge_alloc_rx_netmem(bn, rxr, prod, GFP_ATOMIC) != 0) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNGE_RX_PAGE_SIZE;
+
+ --shinfo->nr_frags;
+ cons_rx_buf->netmem = netmem;
+
+ /* Update rx_agg_prod since netmems may already have been
+ * allocated for earlier entries in this batch.
+ */
+ rxr->rx_agg_prod = prod;
+ bnge_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i);
+ return 0;
+ }
+
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNGE_RX_PAGE_SIZE);
+
+ total_frag_len += frag_len;
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
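+/* Complete a multi-buffer skb by attaching its agg buffers.  On
+ * failure the skb is freed, with its head page recycled to the page
+ * pool, and NULL is returned.
+ */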
+static struct sk_buff *bnge_rx_agg_netmems_skb(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs)
+{
+ u32 total_frag_len;
+
+ total_frag_len = __bnge_rx_agg_netmems(bn, cpr, idx, agg_bufs, skb);
+ if (!total_frag_len) {
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ return skb;
+}
+
static void bnge_sched_reset_rxr(struct bnge_net *bn,
struct bnge_rx_ring_info *rxr)
{
struct bnge_napi *bnapi = cpr->bnapi;
struct net_device *dev = bn->netdev;
struct bnge_rx_ring_info *rxr;
+ u32 tmp_raw_cons, flags, misc;
struct bnge_sw_rx_bd *rx_buf;
struct rx_cmp_ext *rxcmp1;
u16 cons, prod, cp_cons;
- u32 tmp_raw_cons, flags;
u8 *data_ptr, cmp_type;
struct rx_cmp *rxcmp;
dma_addr_t dma_addr;
struct sk_buff *skb;
unsigned int len;
+ u8 agg_bufs;
void *data;
int rc = 0;
cons = rxcmp->rx_cmp_opaque;
if (unlikely(cons != rxr->rx_next_cons)) {
+ int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);
+
/* 0xffff is forced error, don't print it */
if (rxr->rx_next_cons != 0xffff)
netdev_warn(bn->netdev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnge_sched_reset_rxr(bn, rxr);
+ if (rc1)
+ return rc1;
goto next_rx_no_prod_no_len;
}
rx_buf = &rxr->rx_buf_ring[cons];
data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
+
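+ /* All agg completions must have arrived before the packet
+ * can be processed; otherwise bail out with -EBUSY and let
+ * the next poll retry.
+ */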
+ if (agg_bufs) {
+ if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(bn, cp_cons);
+ *event |= BNGE_AGG_EVENT;
+ }
*event |= BNGE_RX_EVENT;
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
bnge_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs);
rc = -EIO;
goto next_rx_no_len;
}
if (len <= bn->rx_copybreak) {
skb = bnge_copy_skb(bnapi, data_ptr, len, dma_addr);
bnge_reuse_rx_data(rxr, cons, data);
- if (!skb)
- goto oom_next_rx;
} else {
skb = bnge_rx_skb(bn, rxr, cons, data, data_ptr, dma_addr, len);
+ }
+
+ if (!skb) {
+ if (agg_bufs)
+ bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs);
+ goto oom_next_rx;
+ }
+
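+ /* Attach the agg buffers to complete a multi-buffer packet */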
+ if (agg_bufs) {
+ skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, cp_cons,
+ agg_bufs);
if (!skb)
goto oom_next_rx;
}
bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
bnapi->events &= ~BNGE_RX_EVENT;
}
+
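+ /* Publish the new agg ring producer index to the NIC */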
+ if (bnapi->events & BNGE_AGG_EVENT) {
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNGE_AGG_EVENT;
+ }
}
static void