gro->rx_count = 0;
}
+static inline void gro_flush_normal(struct gro_node *gro, bool flush_old)
+{
+	gro_flush(gro, flush_old);
+	gro_normal_list(gro);
+}
+
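
Side note on the new helper: gro_flush() completes whatever is still held in the per-bucket GRO hash (in current trees that lands the resulting skbs on gro->rx_list), and gro_normal_list() then hands the batched list to the stack, so the wrapper preserves the flush-before-deliver ordering every converted call site already used. A minimal userspace sketch of that ordering, with hypothetical toy_* names rather than the kernel internals:

/* Illustrative model only; toy_* names are not the kernel API. */
#include <stdio.h>

struct toy_gro {
	int held;	/* packets still held in the GRO buckets  */
	int batched;	/* packets already queued on the rx_list  */
};

static void toy_flush(struct toy_gro *gro)
{
	/* completing a held packet moves it onto the batched list */
	gro->batched += gro->held;
	gro->held = 0;
}

static void toy_normal_list(struct toy_gro *gro)
{
	printf("delivering %d packets to the stack\n", gro->batched);
	gro->batched = 0;
}

static void toy_flush_normal(struct toy_gro *gro)
{
	toy_flush(gro);		/* flush first ...                     */
	toy_normal_list(gro);	/* ... so one pass delivers everything */
}

int main(void)
{
	struct toy_gro gro = { .held = 2, .batched = 3 };

	toy_flush_normal(&gro);	/* prints "delivering 5 packets ..." */
	return 0;
}
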
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
* This is equivalent to how NAPI decides whether to perform a full
* flush.
*/
- gro_flush(&rcpu->gro, !empty && HZ >= 1000);
- gro_normal_list(&rcpu->gro);
+ gro_flush_normal(&rcpu->gro, !empty && HZ >= 1000);
}
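
On the flush_old argument kept by the conversion: the cpumap comment above says this mirrors how NAPI decides on a full flush. The reading here (an assumption, since the age check sits inside the gro_flush() path and is not part of this diff) is that flush_old == true keeps packets coalesced within the current jiffy and flushes only older ones, which is only a useful distinction when a jiffy is about 1 ms, hence HZ >= 1000; when the ring is empty, everything is flushed so nothing is left stranded. A small self-contained sketch of that decision, illustrative names only:

/* Illustrative only: models the assumed age gate behind flush_old. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_HZ 1000	/* stand-in for CONFIG_HZ */

/* keep a held packet for a later pass instead of flushing it now? */
static bool toy_keep_held(bool flush_old, unsigned long pkt_age,
			  unsigned long now)
{
	return flush_old && pkt_age == now;	/* coalesced this jiffy */
}

int main(void)
{
	bool empty = false;
	bool flush_old = !empty && TOY_HZ >= 1000;
	unsigned long now = 100;

	printf("fresh packet kept: %d\n", toy_keep_held(flush_old, now, now));
	printf("older packet kept: %d\n", toy_keep_held(flush_old, now - 1, now));
	return 0;
}

The same HZ >= 1000 reasoning applies to the unchanged "Flush too old packets" conditions in the net/core/dev.c hunks below.
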
static int cpu_map_kthread_run(void *data)
* it, we need to bound somehow the time packets are kept in
* the GRO layer.
*/
- gro_flush(&n->gro, !!timeout);
- gro_normal_list(&n->gro);
+ gro_flush_normal(&n->gro, !!timeout);
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
}
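
On !!timeout at this call site: the partially quoted comment refers to a NAPI instance that keeps postponing its timeout. The assumed meaning (not spelled out in this hunk) is that a nonzero gro_flush_timeout lets fresh packets stay held, since the deferred flush will collect them later, while with no timeout configured everything must be flushed now because nothing else would deliver the held packets. Restated as a hypothetical predicate, not kernel code:

/* Hypothetical helper restating the call-site condition; not kernel code. */
#include <stdbool.h>

static inline bool toy_flush_only_old(unsigned long gro_flush_timeout_ns)
{
	/* !!timeout: old-only flush iff a deferred flush is armed */
	return gro_flush_timeout_ns != 0;
}
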
/* Flush too old packets. If HZ < 1000, flush all packets */
- gro_flush(&napi->gro, HZ >= 1000);
- gro_normal_list(&napi->gro);
+ gro_flush_normal(&napi->gro, HZ >= 1000);
clear_bit(NAPI_STATE_SCHED, &napi->state);
}
}
/* Flush too old packets. If HZ < 1000, flush all packets */
- gro_flush(&n->gro, HZ >= 1000);
- gro_normal_list(&n->gro);
+ gro_flush_normal(&n->gro, HZ >= 1000);
/* Some drivers may have called napi_schedule
* prior to exhausting their budget.