static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
+static struct kmem_cache *flow_offload_cachep __read_mostly;
static void
flow_offload_fill_dir(struct flow_offload *flow,
if (unlikely(nf_ct_is_dying(ct)))
return NULL;
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kmem_cache_zalloc(flow_offload_cachep, GFP_ATOMIC);
if (!flow)
return NULL;
{
int ret;
+ flow_offload_cachep = KMEM_CACHE(flow_offload, SLAB_HWCACHE_ALIGN);
+ if (!flow_offload_cachep)
+ return -ENOMEM;
+
ret = register_pernet_subsys(&nf_flow_table_net_ops);
if (ret < 0)
- return ret;
+ goto out_pernet;
ret = nf_flow_table_offload_init();
if (ret)
goto out_offload;

return 0;

out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
+out_pernet:
+ kmem_cache_destroy(flow_offload_cachep);
return ret;
}
{
nf_flow_table_offload_exit();
unregister_pernet_subsys(&nf_flow_table_net_ops);
+ kmem_cache_destroy(flow_offload_cachep);
}
module_init(nf_flow_table_module_init);
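
The hunks above only cover the allocation side; objects taken from flow_offload_cachep also have to be returned to that cache on release. The matching change to flow_offload_free() is not shown here, so the following is only a minimal sketch of what that counterpart could look like, assuming struct flow_offload embeds an rcu_head and is currently released with kfree_rcu(), and assuming a hypothetical helper name flow_offload_free_rcu; other teardown in flow_offload_free() (conntrack put, route release) is kept as in the existing code.

static void flow_offload_free_rcu(struct rcu_head *head)
{
	struct flow_offload *flow = container_of(head, struct flow_offload, rcu_head);

	/* Return the object to the dedicated cache rather than the
	 * generic kmalloc pools it used to come from. */
	kmem_cache_free(flow_offload_cachep, flow);
}

void flow_offload_free(struct flow_offload *flow)
{
	/* Existing teardown (route release, nf_ct_put()) unchanged. */
	nf_ct_put(flow->ct);

	/* Was kfree_rcu(flow, rcu_head); objects from a dedicated cache
	 * are conventionally freed with kmem_cache_free(), so the RCU
	 * deferral goes through an explicit call_rcu() callback. */
	call_rcu(&flow->rcu_head, flow_offload_free_rcu);
}

As for the cache itself, KMEM_CACHE(flow_offload, SLAB_HWCACHE_ALIGN) creates a slab cache named after the struct with its natural size and alignment; SLAB_HWCACHE_ALIGN additionally rounds each object up to a cache line, trading a little memory per flow for better cache behaviour on the per-flow state the datapath touches.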