return skb;
}
+/*
+ * skbuff_clear - zero every sk_buff field up to (but not including) tail
+ *
+ * Only clear those fields we need to clear, not those that we will
+ * actually initialise later. Hence, don't put any more fields after
+ * the tail pointer in struct sk_buff!
+ */
+static inline void skbuff_clear(struct sk_buff *skb)
+{
+ /* Replace memset(skb, 0, offsetof(struct sk_buff, tail))
+ * with two smaller memset(), with a barrier() between them.
+ * This forces the compiler to inline both calls.
+ */
+ /* Guard the "- 128" arithmetic below: tail must sit strictly past the
+ * first 128 bytes, otherwise the second memset() length would
+ * underflow (size_t wrap) at compile time.
+ */
+ BUILD_BUG_ON(offsetof(struct sk_buff, tail) <= 128);
+ memset(skb, 0, 128);
+ /* Compiler barrier only (no CPU ordering implied): prevents the
+ * optimiser from re-merging the two memset() into the single large
+ * out-of-line call this helper exists to avoid.
+ */
+ barrier();
+ memset((void *)skb + 128, 0, offsetof(struct sk_buff, tail) - 128);
+}
+
/**
* napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
* @skbs: pointer to an at least @n-sized array to fill with skb pointers
skbs[i] = nc->skb_cache[base + i];
kasan_mempool_unpoison_object(skbs[i], skbuff_cache_size);
- memset(skbs[i], 0, offsetof(struct sk_buff, tail));
+ skbuff_clear(skbs[i]);
}
nc->skb_count -= n;
if (unlikely(!skb))
return NULL;
- memset(skb, 0, offsetof(struct sk_buff, tail));
+ skbuff_clear(skb);
data = __slab_build_skb(data, &size);
__finalize_skb_around(skb, data, size);
if (unlikely(!skb))
return NULL;
- memset(skb, 0, offsetof(struct sk_buff, tail));
+ skbuff_clear(skb);
__build_skb_around(skb, data, frag_size);
return skb;
if (unlikely(!skb))
return NULL;
- memset(skb, 0, offsetof(struct sk_buff, tail));
+ skbuff_clear(skb);
__build_skb_around(skb, data, frag_size);
return skb;
*/
prefetchw(data + SKB_WITH_OVERHEAD(size));
- /*
- * Only clear those fields we need to clear, not those that we will
- * actually initialise below. Hence, don't put any more fields after
- * the tail pointer in struct sk_buff!
- */
- memset(skb, 0, offsetof(struct sk_buff, tail));
+ skbuff_clear(skb);
__build_skb_around(skb, data, size);
skb->pfmemalloc = pfmemalloc;