bool unaligned;
bool tx_sw_csum;
void *addrs;
- /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
- * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
- * sockets share a single cq when the same netdev and queue id is shared.
+ /* Mutual exclusion of the completion ring in the SKB mode.
+ * Protects against races between the NAPI TX thread and the
+ * sendmsg error paths in the SKB destructor callback.
*/
- spinlock_t cq_lock;
+ spinlock_t cq_prod_lock;
+ /* Mutual exclusion of the completion ring in the SKB mode.
+ * Serializes updates of the cached producer index when sockets
+ * share a single cq, i.e. when the same netdev and queue id are
+ * shared.
+ */
+ spinlock_t cq_cached_prod_lock;
struct xdp_buff_xsk *free_heads[];
};
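
For context, here is a minimal userspace sketch of the two-index ring that these locks guard: the cached producer index counts reserved-but-unpublished slots and is only touched by the reserve/cancel helpers (the switch to a plain spin_lock in this patch suggests those paths do not need to disable interrupts), while the real producer index is advanced when completions are submitted from the SKB destructor, which may run in softirq context and therefore keeps spin_lock_irqsave. All names in the sketch (toy_cq, toy_cq_reserve, ...) are hypothetical illustrations, not the kernel's xsk_queue API.

/* Hypothetical userspace model of the completion ring and the lock
 * split above; not the kernel's xsk_queue implementation.
 */
#include <pthread.h>
#include <stdint.h>

struct toy_cq {
	uint32_t size;		/* ring size, power of two */
	uint32_t prod;		/* slots visible to the consumer */
	uint32_t cached_prod;	/* slots reserved by producers */
	uint32_t cons;		/* consumer index */
	uint64_t *addrs;	/* completed buffer addresses */

	/* Mirrors cq_cached_prod_lock: only reserve/cancel touch
	 * cached_prod, and in the patch they take a plain spin_lock.
	 */
	pthread_mutex_t cached_prod_lock;

	/* Mirrors cq_prod_lock: submission runs from the SKB
	 * destructor, which may execute in softirq context, hence
	 * spin_lock_irqsave in the patch.
	 */
	pthread_mutex_t prod_lock;
};

/* Reserve one completion slot; fails when the ring is full. */
static int toy_cq_reserve(struct toy_cq *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->cached_prod_lock);
	if (q->cached_prod - q->cons == q->size)
		ret = -1;		/* no free slots */
	else
		q->cached_prod++;
	pthread_mutex_unlock(&q->cached_prod_lock);

	return ret;
}

/* Return n previously reserved slots (TX error path). */
static void toy_cq_cancel(struct toy_cq *q, uint32_t n)
{
	pthread_mutex_lock(&q->cached_prod_lock);
	q->cached_prod -= n;
	pthread_mutex_unlock(&q->cached_prod_lock);
}

/* Publish one completed address; the "destructor" side of the model. */
static void toy_cq_submit(struct toy_cq *q, uint64_t addr)
{
	pthread_mutex_lock(&q->prod_lock);
	q->addrs[q->prod & (q->size - 1)] = addr;
	q->prod++;
	pthread_mutex_unlock(&q->prod_lock);
}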
static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock(&pool->cq_cached_prod_lock);
ret = xskq_prod_reserve(pool->cq);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock(&pool->cq_cached_prod_lock);
return ret;
}
unsigned long flags;
u32 idx;
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock_irqsave(&pool->cq_prod_lock, flags);
idx = xskq_get_prod(pool->cq);
xskq_prod_write_addr(pool->cq, idx,
}
}
xskq_prod_submit_n(pool->cq, descs_processed);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
}
static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock(&pool->cq_cached_prod_lock);
xskq_prod_cancel_n(pool->cq, n);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock(&pool->cq_cached_prod_lock);
}
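
As a usage illustration, the sketch below drives the toy_cq helpers from the previous sketch (same includes and types) in the order the kernel helpers above are used on the generic TX path: a completion slot is reserved before the frame is sent, given back with cancel if sending fails, and published with submit when the transmission completes. tx_one_frame() and main() are made-up illustrations, not kernel code.

#include <stdio.h>
#include <stdlib.h>

static int tx_one_frame(struct toy_cq *cq, uint64_t addr)
{
	/* Reserve a completion slot up front, as xsk_cq_reserve_locked()
	 * does before the frame is handed to the driver.
	 */
	if (toy_cq_reserve(cq))
		return -1;	/* completion ring full, try again later */

	if (0 /* pretend: building or sending the frame failed */) {
		toy_cq_cancel(cq, 1);	/* mirrors xsk_cq_cancel_locked() */
		return -1;
	}

	/* On completion the destructor-side path publishes the address,
	 * mirroring the xskq_prod_submit_n() path under cq_prod_lock.
	 */
	toy_cq_submit(cq, addr);
	return 0;
}

int main(void)
{
	struct toy_cq cq = {
		.size = 8,
		.addrs = calloc(8, sizeof(uint64_t)),
	};

	/* Parallels the two spin_lock_init() calls in the patch. */
	pthread_mutex_init(&cq.cached_prod_lock, NULL);
	pthread_mutex_init(&cq.prod_lock, NULL);

	if (!tx_one_frame(&cq, 0x1000))
		printf("completed addr 0x%llx\n",
		       (unsigned long long)cq.addrs[0]);

	free(cq.addrs);
	return 0;
}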
static void xsk_inc_num_desc(struct sk_buff *skb)
INIT_LIST_HEAD(&pool->xskb_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
spin_lock_init(&pool->xsk_tx_list_lock);
- spin_lock_init(&pool->cq_lock);
+ spin_lock_init(&pool->cq_prod_lock);
+ spin_lock_init(&pool->cq_cached_prod_lock);
refcount_set(&pool->users, 1);
pool->fq = xs->fq_tmp;