arp/ip(6)t_register_table() add the table to the per-netns list via
xt_register_table() before allocating the per-netns hook ops copy
via kmemdup_array(). This leaves a window where the table is
visible in the list with ops=NULL.
If pernet exit runs concurrently, the pre_exit callback finds
the table via xt_find_table() and passes the NULL ops pointer to
nf_unregister_net_hooks(), causing a NULL dereference:
general protection fault in nf_unregister_net_hooks+0xbc/0x150
RIP: nf_unregister_net_hooks (net/netfilter/core.c:613)
Call Trace:
ipt_unregister_table_pre_exit
iptable_mangle_net_pre_exit
ops_pre_exit_list
cleanup_net
Fix by moving the ops allocation into the xtables core so the table is
never in the list without valid ops. Also ensure the table is no longer
processing packets before it is torn down on error unwind.
nf_register_net_hooks might have published at least one hook; call
synchronize_rcu() if there was an error.
The audit log register message is deferred until all operations have
succeeded; this avoids the need to emit another unregister message in
case of error unwinding.
Based on earlier patch by Tristan Madani.
Fixes: f9006acc8dfe5 ("netfilter: arp_tables: pass table pointer via nf_hook_ops")
Fixes: ee177a54413a ("netfilter: ip6_tables: pass table pointer via nf_hook_ops")
Fixes: ae689334225f ("netfilter: ip_tables: pass table pointer via nf_hook_ops")
Link: https://lore.kernel.org/netfilter-devel/20260429175613.1459342-1-tristmd@gmail.com/
Signed-off-by: Tristan Madani <tristan@talencesecurity.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
+ const struct nf_hook_ops *template_ops,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);
const struct arpt_replace *repl,
const struct nf_hook_ops *template_ops)
{
- struct nf_hook_ops *ops;
- unsigned int num_ops;
- int ret, i;
- struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
- void *loc_cpu_entry;
+ struct xt_table_info *newinfo;
struct xt_table *new_table;
+ void *loc_cpu_entry;
+ int ret;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return ret;
}
- new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct arpt_entry *iter;
return PTR_ERR(new_table);
}
- num_ops = hweight32(table->valid_hooks);
- if (num_ops == 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- for (i = 0; i < num_ops; i++)
- ops[i].priv = new_table;
-
- new_table->ops = ops;
-
- ret = nf_register_net_hooks(net, ops, num_ops);
- if (ret != 0)
- goto out_free;
-
- return ret;
-
-out_free:
- __arpt_unregister_table(net, new_table);
return ret;
}
const struct ipt_replace *repl,
const struct nf_hook_ops *template_ops)
{
- struct nf_hook_ops *ops;
- unsigned int num_ops;
- int ret, i;
- struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
- void *loc_cpu_entry;
+ struct xt_table_info *newinfo;
struct xt_table *new_table;
+ void *loc_cpu_entry;
+ int ret;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return ret;
}
- new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct ipt_entry *iter;
return PTR_ERR(new_table);
}
- /* No template? No need to do anything. This is used by 'nat' table, it registers
- * with the nat core instead of the netfilter core.
- */
- if (!template_ops)
- return 0;
-
- num_ops = hweight32(table->valid_hooks);
- if (num_ops == 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- for (i = 0; i < num_ops; i++)
- ops[i].priv = new_table;
-
- new_table->ops = ops;
-
- ret = nf_register_net_hooks(net, ops, num_ops);
- if (ret != 0)
- goto out_free;
-
- return ret;
-
-out_free:
- __ipt_unregister_table(net, new_table);
return ret;
}
const struct ip6t_replace *repl,
const struct nf_hook_ops *template_ops)
{
- struct nf_hook_ops *ops;
- unsigned int num_ops;
- int ret, i;
- struct xt_table_info *newinfo;
struct xt_table_info bootstrap = {0};
- void *loc_cpu_entry;
+ struct xt_table_info *newinfo;
struct xt_table *new_table;
+ void *loc_cpu_entry;
+ int ret;
newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return ret;
}
- new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ new_table = xt_register_table(net, table, template_ops, &bootstrap, newinfo);
if (IS_ERR(new_table)) {
struct ip6t_entry *iter;
return PTR_ERR(new_table);
}
- if (!template_ops)
- return 0;
-
- num_ops = hweight32(table->valid_hooks);
- if (num_ops == 0) {
- ret = -EINVAL;
- goto out_free;
- }
-
- ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops) {
- ret = -ENOMEM;
- goto out_free;
- }
-
- for (i = 0; i < num_ops; i++)
- ops[i].priv = new_table;
-
- new_table->ops = ops;
-
- ret = nf_register_net_hooks(net, ops, num_ops);
- if (ret != 0)
- goto out_free;
-
- return ret;
-
-out_free:
- __ip6t_unregister_table(net, new_table);
return ret;
}
private = do_replace_table(table, num_counters, newinfo, error);
if (private)
audit_log_nfcfg(table->name, table->af, private->number,
- !private->number ? AUDIT_XT_OP_REGISTER :
AUDIT_XT_OP_REPLACE,
GFP_KERNEL);
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *input_table,
+ const struct nf_hook_ops *template_ops,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo)
{
struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
+ struct xt_table *t, *table = NULL;
+ struct nf_hook_ops *ops = NULL;
struct xt_table_info *private;
- struct xt_table *t, *table;
- int ret;
+ unsigned int num_ops;
+ int ret = -EINVAL;
+
+ num_ops = hweight32(input_table->valid_hooks);
+ if (num_ops == 0)
+ goto out;
+
+ ret = -ENOMEM;
+ if (template_ops) {
+ ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ goto out;
+ }
/* Don't add one object to multiple lists. */
table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
- if (!table) {
- ret = -ENOMEM;
+ if (!table)
goto out;
- }
mutex_lock(&xt[table->af].mutex);
/* Don't autoload: we'd eat our tail... */
/* Simplifies replace_table code. */
table->private = bootstrap;
- if (!xt_replace_table(table, 0, newinfo, &ret))
+ if (!do_replace_table(table, 0, newinfo, &ret))
goto unlock;
private = table->private;
/* save number of initial entries */
private->initial_entries = private->number;
+ if (ops) {
+ int i;
+
+ for (i = 0; i < num_ops; i++)
+ ops[i].priv = table;
+
+ ret = nf_register_net_hooks(net, ops, num_ops);
+ if (ret != 0) {
+ mutex_unlock(&xt[table->af].mutex);
+ /* nf_register_net_hooks() might have published a
+ * base chain before internal error unwind.
+ */
+ synchronize_rcu();
+ goto out;
+ }
+
+ table->ops = ops;
+ }
+
+ audit_log_nfcfg(table->name, table->af, private->number,
+ AUDIT_XT_OP_REGISTER, GFP_KERNEL);
+
list_add(&table->list, &xt_net->tables[table->af]);
mutex_unlock(&xt[table->af].mutex);
return table;
unlock:
mutex_unlock(&xt[table->af].mutex);
- kfree(table);
out:
+ kfree(table);
+ kfree(ops);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);