/* Another possibly contended cache line */
spinlock_t defer_lock ____cacheline_aligned_in_smp;
- int defer_count;
+ atomic_t defer_count;
int defer_ipi_scheduled;
struct sk_buff *defer_list;
call_single_data_t defer_csd;
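
/* The fields above live in struct softnet_data. Below, the consumer side
 * (skb_defer_free_flush()) detaches the pending list under defer_lock.
 */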
spin_lock(&sd->defer_lock);
skb = sd->defer_list;
sd->defer_list = NULL;
- sd->defer_count = 0;
+ atomic_set(&sd->defer_count, 0);
spin_unlock(&sd->defer_lock);
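
/* The detached skbs are then walked and freed without holding defer_lock. */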
while (skb != NULL) {
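
/* Producer side, skb_attempt_defer_free(): queue the skb on a remote CPU's
 * defer_list and kick that CPU with an IPI once the queue reaches half capacity.
 */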
sd = &per_cpu(softnet_data, cpu);
defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
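/* Lockless capacity check: skip deferral entirely if the remote queue is already full. */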
- if (READ_ONCE(sd->defer_count) >= defer_max)
+ if (atomic_read(&sd->defer_count) >= defer_max)
goto nodefer;
spin_lock_bh(&sd->defer_lock);
/* Send an IPI every time queue reaches half capacity. */
- kick = sd->defer_count == (defer_max >> 1);
- /* Paired with the READ_ONCE() few lines above */
- WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
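+ /* atomic_inc_return() returns the new value, so subtract 1 to test the old count */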
+ kick = (atomic_inc_return(&sd->defer_count) - 1) == (defer_max >> 1);
skb->next = sd->defer_list;
/* Paired with READ_ONCE() in skb_defer_free_flush() */