struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
+	struct sk_buff *skb = NULL;
	struct kmem_cache *cache;
-	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;
-	cache = (flags & SKB_ALLOC_FCLONE)
-		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
-
	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;
-	/* Get the HEAD */
-	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
-	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
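+	/* Select the slab cache and allocation path up front. fclone
+	 * requests and allocations pinned to a remote NUMA node cannot be
+	 * served by the per-CPU NAPI cache, which only recycles plain
+	 * skbuff_cache objects, so both go straight to the slab fallback.
+	 */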
+	if (flags & SKB_ALLOC_FCLONE) {
+		cache = net_hotdata.skbuff_fclone_cache;
+		goto fallback;
+	}
+	cache = net_hotdata.skbuff_cache;
+	if (unlikely(node != NUMA_NO_NODE && node != numa_mem_id()))
+		goto fallback;
+
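+	/* Fast path: grab a recycled skb from the per-CPU NAPI cache.
+	 * napi_skb_cache_get() refills itself from the slab when empty
+	 * (the bool argument is assumed here to request that refill), so
+	 * a NULL return is final and retrying the slab fallback below
+	 * would be pointless.
+	 */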
+	if (flags & SKB_ALLOC_NAPI) {
		skb = napi_skb_cache_get(true);
-	else
+		if (unlikely(!skb))
+			return NULL;
+	}
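+	/* Slab fallback: entered via the label below for fclone and
+	 * remote-node requests, or by falling through when the caller
+	 * did not set SKB_ALLOC_NAPI and skb is therefore still NULL.
+	 */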
+	if (!skb) {
+fallback:
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
-	if (unlikely(!skb))
-		return NULL;
+		if (unlikely(!skb))
+			return NULL;
+	}
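+	/* The head of the skb is zeroed and initialized next; prefetch
+	 * it for write before touching it.
+	 */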
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache