return new_pkts;
}
+/**
+ * octep_vf_oq_next_idx() - Advance an Rx descriptor ring index with wrap.
+ * @oq: Output (Rx) queue whose ring size (oq->max_count) bounds the index.
+ * @idx: Current descriptor index.
+ *
+ * Return: idx + 1, wrapping to 0 when the end of the ring is reached.
+ */
+static inline u32 octep_vf_oq_next_idx(struct octep_vf_oq *oq, u32 idx)
+{
+ return (idx + 1 == oq->max_count) ? 0 : idx + 1;
+}
+
/**
* __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
*
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
} else {
struct skb_shared_info *shinfo;
u16 data_len;
* subsequent fragments contains only data.
*/
skb_put(skb, oq->max_single_buffer_size);
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
buff_info->len,
buff_info->len);
buff_info->page = NULL;
- read_idx++;
desc_used++;
- if (read_idx == oq->max_count)
- read_idx = 0;
+ read_idx = octep_vf_oq_next_idx(oq, read_idx);
}
}