u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct skb_shared_info *sinfo;
+ struct bnxt_xdp_buff bnxt_xdp;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
- struct xdp_buff xdp;
u32 flags, misc;
u32 cmpl_ts;
void *data;
dma_addr = rx_buf->mapping;
if (bnxt_xdp_attached(bp, rxr)) {
- bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
+ bnxt_xdp.rxcmp = rxcmp;
+ bnxt_xdp.rxcmp1 = rxcmp1;
+ bnxt_xdp.cmp_type = cmp_type;
+
+ bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &bnxt_xdp.xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr,
+ &bnxt_xdp.xdp,
cp_cons,
agg_bufs,
false);
if (!frag_len)
goto oom_next_rx;
}
xdp_active = true;
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &bnxt_xdp.xdp, data, &data_ptr,
+ &len, event)) {
rc = 1;
goto next_rx;
}
- if (xdp_buff_has_frags(&xdp)) {
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (xdp_buff_has_frags(&bnxt_xdp.xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&bnxt_xdp.xdp);
agg_bufs = sinfo->nr_frags;
} else {
agg_bufs = 0;
}
}

if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
- skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
+ skb = bnxt_copy_xdp(bnapi, &bnxt_xdp.xdp, len,
+ dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
if (!xdp_active)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
agg_bufs, false);
else
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr,
+ &bnxt_xdp.xdp);
}
goto oom_next_rx;
}
} else {
u32 payload;

if (rx_buf->data_ptr == data_ptr)
payload = misc & RX_CMP_PAYLOAD_OFFSET;
else
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
if (!skb)
goto oom_next_rx;
}

if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr, &bnxt_xdp.xdp);
if (!skb) {
/* we should be able to free the old skb here */
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, &bnxt_xdp.xdp);
goto oom_next_rx;
}
}
}
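
For reference, the excerpt never shows the wrapper type it starts using. Below is a minimal sketch of what the patch presumably adds to the driver headers, with field types inferred from the surrounding code (rxcmp/rxcmp1 are the RX completion records bnxt_rx_pkt() already parses; cmp_type is the u8 declared above):

/* Sketch only -- not taken from the patch. Embedding struct xdp_buff as the
 * first member lets later code recover the wrapper from an xdp_buff pointer,
 * the same pattern other drivers (mlx5e_xdp_buff, veth_xdp_buff) use to give
 * XDP metadata kfuncs access to ring-specific state.
 */
struct bnxt_xdp_buff {
	struct xdp_buff xdp;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u8 cmp_type;
};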