From 30d0c965fa8046ef2eb719eb790e42c6e731905f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 16 Jul 2023 21:30:55 +0200 Subject: [PATCH] 5.15-stable patches added patches: netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch wireguard-queueing-use-saner-cpu-selection-wrapping.patch --- ...id-nf_ct_helper_hash-uses-after-free.patch | 51 +++++ ...-genmask-when-looking-up-chain-by-id.patch | 120 ++++++++++ ...ent-oob-access-in-nft_byteorder_eval.patch | 211 ++++++++++++++++++ queue-5.15/series | 6 + ...rt-add-earlycon-for-imx8ulp-platform.patch | 29 +++ ...ets-when-setting-initial-private-key.patch | 118 ++++++++++ ...ing-use-saner-cpu-selection-wrapping.patch | 111 +++++++++ 7 files changed, 646 insertions(+) create mode 100644 queue-5.15/netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch create mode 100644 queue-5.15/netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch create mode 100644 queue-5.15/netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch create mode 100644 queue-5.15/tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch create mode 100644 queue-5.15/wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch create mode 100644 queue-5.15/wireguard-queueing-use-saner-cpu-selection-wrapping.patch diff --git a/queue-5.15/netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch b/queue-5.15/netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch new file mode 100644 index 00000000000..354be87f38b --- /dev/null +++ b/queue-5.15/netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch @@ -0,0 +1,51 @@ +From 6eef7a2b933885a17679eb8ed0796ddf0ee5309b Mon Sep 17 00:00:00 2001 +From: Florent Revest +Date: Mon, 3 Jul 2023 16:52:16 +0200 +Subject: netfilter: conntrack: Avoid nf_ct_helper_hash uses after free + +From: Florent Revest + +commit 6eef7a2b933885a17679eb8ed0796ddf0ee5309b upstream. + +If nf_conntrack_init_start() fails (for example due to a +register_nf_conntrack_bpf() failure), the nf_conntrack_helper_fini() +clean-up path frees the nf_ct_helper_hash map. + +When built with NF_CONNTRACK=y, further netfilter modules (e.g: +netfilter_conntrack_ftp) can still be loaded and call +nf_conntrack_helpers_register(), independently of whether nf_conntrack +initialized correctly. This accesses the nf_ct_helper_hash dangling +pointer and causes a uaf, possibly leading to random memory corruption. + +This patch guards nf_conntrack_helper_register() from accessing a freed +or uninitialized nf_ct_helper_hash pointer and fixes possible +uses-after-free when loading a conntrack module. 
+ +Cc: stable@vger.kernel.org +Fixes: 12f7a505331e ("netfilter: add user-space connection tracking helper infrastructure") +Signed-off-by: Florent Revest +Reviewed-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + net/netfilter/nf_conntrack_helper.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/net/netfilter/nf_conntrack_helper.c ++++ b/net/netfilter/nf_conntrack_helper.c +@@ -405,6 +405,9 @@ int nf_conntrack_helper_register(struct + BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); + BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); + ++ if (!nf_ct_helper_hash) ++ return -ENOENT; ++ + if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) + return -EINVAL; + +@@ -595,4 +598,5 @@ void nf_conntrack_helper_fini(void) + { + nf_ct_extend_unregister(&helper_extend); + kvfree(nf_ct_helper_hash); ++ nf_ct_helper_hash = NULL; + } diff --git a/queue-5.15/netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch b/queue-5.15/netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch new file mode 100644 index 00000000000..3150e4ee6e0 --- /dev/null +++ b/queue-5.15/netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch @@ -0,0 +1,120 @@ +From 515ad530795c118f012539ed76d02bacfd426d89 Mon Sep 17 00:00:00 2001 +From: Thadeu Lima de Souza Cascardo +Date: Wed, 5 Jul 2023 09:12:55 -0300 +Subject: netfilter: nf_tables: do not ignore genmask when looking up chain by id + +From: Thadeu Lima de Souza Cascardo + +commit 515ad530795c118f012539ed76d02bacfd426d89 upstream. + +When adding a rule to a chain referring to its ID, if that chain had been +deleted on the same batch, the rule might end up referring to a deleted +chain. + +This will lead to a WARNING like following: + +[ 33.098431] ------------[ cut here ]------------ +[ 33.098678] WARNING: CPU: 5 PID: 69 at net/netfilter/nf_tables_api.c:2037 nf_tables_chain_destroy+0x23d/0x260 +[ 33.099217] Modules linked in: +[ 33.099388] CPU: 5 PID: 69 Comm: kworker/5:1 Not tainted 6.4.0+ #409 +[ 33.099726] Workqueue: events nf_tables_trans_destroy_work +[ 33.100018] RIP: 0010:nf_tables_chain_destroy+0x23d/0x260 +[ 33.100306] Code: 8b 7c 24 68 e8 64 9c ed fe 4c 89 e7 e8 5c 9c ed fe 48 83 c4 08 5b 41 5c 41 5d 41 5e 41 5f 5d 31 c0 89 c6 89 c7 c3 cc cc cc cc <0f> 0b 48 83 c4 08 5b 41 5c 41 5d 41 5e 41 5f 5d 31 c0 89 c6 89 c7 +[ 33.101271] RSP: 0018:ffffc900004ffc48 EFLAGS: 00010202 +[ 33.101546] RAX: 0000000000000001 RBX: ffff888006fc0a28 RCX: 0000000000000000 +[ 33.101920] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 +[ 33.102649] RBP: ffffc900004ffc78 R08: 0000000000000000 R09: 0000000000000000 +[ 33.103018] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8880135ef500 +[ 33.103385] R13: 0000000000000000 R14: dead000000000122 R15: ffff888006fc0a10 +[ 33.103762] FS: 0000000000000000(0000) GS:ffff888024c80000(0000) knlGS:0000000000000000 +[ 33.104184] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 33.104493] CR2: 00007fe863b56a50 CR3: 00000000124b0001 CR4: 0000000000770ee0 +[ 33.104872] PKRU: 55555554 +[ 33.104999] Call Trace: +[ 33.105113] +[ 33.105214] ? show_regs+0x72/0x90 +[ 33.105371] ? __warn+0xa5/0x210 +[ 33.105520] ? nf_tables_chain_destroy+0x23d/0x260 +[ 33.105732] ? report_bug+0x1f2/0x200 +[ 33.105902] ? handle_bug+0x46/0x90 +[ 33.106546] ? exc_invalid_op+0x19/0x50 +[ 33.106762] ? asm_exc_invalid_op+0x1b/0x20 +[ 33.106995] ? nf_tables_chain_destroy+0x23d/0x260 +[ 33.107249] ? 
nf_tables_chain_destroy+0x30/0x260 +[ 33.107506] nf_tables_trans_destroy_work+0x669/0x680 +[ 33.107782] ? mark_held_locks+0x28/0xa0 +[ 33.107996] ? __pfx_nf_tables_trans_destroy_work+0x10/0x10 +[ 33.108294] ? _raw_spin_unlock_irq+0x28/0x70 +[ 33.108538] process_one_work+0x68c/0xb70 +[ 33.108755] ? lock_acquire+0x17f/0x420 +[ 33.108977] ? __pfx_process_one_work+0x10/0x10 +[ 33.109218] ? do_raw_spin_lock+0x128/0x1d0 +[ 33.109435] ? _raw_spin_lock_irq+0x71/0x80 +[ 33.109634] worker_thread+0x2bd/0x700 +[ 33.109817] ? __pfx_worker_thread+0x10/0x10 +[ 33.110254] kthread+0x18b/0x1d0 +[ 33.110410] ? __pfx_kthread+0x10/0x10 +[ 33.110581] ret_from_fork+0x29/0x50 +[ 33.110757] +[ 33.110866] irq event stamp: 1651 +[ 33.111017] hardirqs last enabled at (1659): [] __up_console_sem+0x79/0xa0 +[ 33.111379] hardirqs last disabled at (1666): [] __up_console_sem+0x5e/0xa0 +[ 33.111740] softirqs last enabled at (1616): [] __irq_exit_rcu+0x9e/0xe0 +[ 33.112094] softirqs last disabled at (1367): [] __irq_exit_rcu+0x9e/0xe0 +[ 33.112453] ---[ end trace 0000000000000000 ]--- + +This is due to the nft_chain_lookup_byid ignoring the genmask. After this +change, adding the new rule will fail as it will not find the chain. + +Fixes: 837830a4b439 ("netfilter: nf_tables: add NFTA_RULE_CHAIN_ID attribute") +Cc: stable@vger.kernel.org +Reported-by: Mingi Cho of Theori working with ZDI +Signed-off-by: Thadeu Lima de Souza Cascardo +Reviewed-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + net/netfilter/nf_tables_api.c | 11 +++++++---- + 1 file changed, 7 insertions(+), 4 deletions(-) + +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -2533,7 +2533,7 @@ err: + + static struct nft_chain *nft_chain_lookup_byid(const struct net *net, + const struct nft_table *table, +- const struct nlattr *nla) ++ const struct nlattr *nla, u8 genmask) + { + struct nftables_pernet *nft_net = nft_pernet(net); + u32 id = ntohl(nla_get_be32(nla)); +@@ -2544,7 +2544,8 @@ static struct nft_chain *nft_chain_looku + + if (trans->msg_type == NFT_MSG_NEWCHAIN && + chain->table == table && +- id == nft_trans_chain_id(trans)) ++ id == nft_trans_chain_id(trans) && ++ nft_active_genmask(chain, genmask)) + return chain; + } + return ERR_PTR(-ENOENT); +@@ -3532,7 +3533,8 @@ static int nf_tables_newrule(struct sk_b + return -EOPNOTSUPP; + + } else if (nla[NFTA_RULE_CHAIN_ID]) { +- chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID]); ++ chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID], ++ genmask); + if (IS_ERR(chain)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]); + return PTR_ERR(chain); +@@ -9931,7 +9933,8 @@ static int nft_verdict_init(const struct + genmask); + } else if (tb[NFTA_VERDICT_CHAIN_ID]) { + chain = nft_chain_lookup_byid(ctx->net, ctx->table, +- tb[NFTA_VERDICT_CHAIN_ID]); ++ tb[NFTA_VERDICT_CHAIN_ID], ++ genmask); + if (IS_ERR(chain)) + return PTR_ERR(chain); + } else { diff --git a/queue-5.15/netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch b/queue-5.15/netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch new file mode 100644 index 00000000000..4651546ad4a --- /dev/null +++ b/queue-5.15/netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch @@ -0,0 +1,211 @@ +From caf3ef7468f7534771b5c44cd8dbd6f7f87c2cbd Mon Sep 17 00:00:00 2001 +From: Thadeu Lima de Souza Cascardo +Date: Wed, 5 Jul 2023 18:05:35 -0300 +Subject: netfilter: nf_tables: prevent OOB access in nft_byteorder_eval + +From: 
Thadeu Lima de Souza Cascardo + +commit caf3ef7468f7534771b5c44cd8dbd6f7f87c2cbd upstream. + +When evaluating byteorder expressions with size 2, a union with 32-bit and +16-bit members is used. Since the 16-bit members are aligned to 32-bit, +the array accesses will be out-of-bounds. + +It may lead to a stack-out-of-bounds access like the one below: + +[ 23.095215] ================================================================== +[ 23.095625] BUG: KASAN: stack-out-of-bounds in nft_byteorder_eval+0x13c/0x320 +[ 23.096020] Read of size 2 at addr ffffc90000007948 by task ping/115 +[ 23.096358] +[ 23.096456] CPU: 0 PID: 115 Comm: ping Not tainted 6.4.0+ #413 +[ 23.096770] Call Trace: +[ 23.096910] +[ 23.097030] dump_stack_lvl+0x60/0xc0 +[ 23.097218] print_report+0xcf/0x630 +[ 23.097388] ? nft_byteorder_eval+0x13c/0x320 +[ 23.097577] ? kasan_addr_to_slab+0xd/0xc0 +[ 23.097760] ? nft_byteorder_eval+0x13c/0x320 +[ 23.097949] kasan_report+0xc9/0x110 +[ 23.098106] ? nft_byteorder_eval+0x13c/0x320 +[ 23.098298] __asan_load2+0x83/0xd0 +[ 23.098453] nft_byteorder_eval+0x13c/0x320 +[ 23.098659] nft_do_chain+0x1c8/0xc50 +[ 23.098852] ? __pfx_nft_do_chain+0x10/0x10 +[ 23.099078] ? __kasan_check_read+0x11/0x20 +[ 23.099295] ? __pfx___lock_acquire+0x10/0x10 +[ 23.099535] ? __pfx___lock_acquire+0x10/0x10 +[ 23.099745] ? __kasan_check_read+0x11/0x20 +[ 23.099929] nft_do_chain_ipv4+0xfe/0x140 +[ 23.100105] ? __pfx_nft_do_chain_ipv4+0x10/0x10 +[ 23.100327] ? lock_release+0x204/0x400 +[ 23.100515] ? nf_hook.constprop.0+0x340/0x550 +[ 23.100779] nf_hook_slow+0x6c/0x100 +[ 23.100977] ? __pfx_nft_do_chain_ipv4+0x10/0x10 +[ 23.101223] nf_hook.constprop.0+0x334/0x550 +[ 23.101443] ? __pfx_ip_local_deliver_finish+0x10/0x10 +[ 23.101677] ? __pfx_nf_hook.constprop.0+0x10/0x10 +[ 23.101882] ? __pfx_ip_rcv_finish+0x10/0x10 +[ 23.102071] ? __pfx_ip_local_deliver_finish+0x10/0x10 +[ 23.102291] ? rcu_read_lock_held+0x4b/0x70 +[ 23.102481] ip_local_deliver+0xbb/0x110 +[ 23.102665] ? __pfx_ip_rcv+0x10/0x10 +[ 23.102839] ip_rcv+0x199/0x2a0 +[ 23.102980] ? __pfx_ip_rcv+0x10/0x10 +[ 23.103140] __netif_receive_skb_one_core+0x13e/0x150 +[ 23.103362] ? __pfx___netif_receive_skb_one_core+0x10/0x10 +[ 23.103647] ? mark_held_locks+0x48/0xa0 +[ 23.103819] ? process_backlog+0x36c/0x380 +[ 23.103999] __netif_receive_skb+0x23/0xc0 +[ 23.104179] process_backlog+0x91/0x380 +[ 23.104350] __napi_poll.constprop.0+0x66/0x360 +[ 23.104589] ? net_rx_action+0x1cb/0x610 +[ 23.104811] net_rx_action+0x33e/0x610 +[ 23.105024] ? _raw_spin_unlock+0x23/0x50 +[ 23.105257] ? __pfx_net_rx_action+0x10/0x10 +[ 23.105485] ? mark_held_locks+0x48/0xa0 +[ 23.105741] __do_softirq+0xfa/0x5ab +[ 23.105956] ? __dev_queue_xmit+0x765/0x1c00 +[ 23.106193] do_softirq.part.0+0x49/0xc0 +[ 23.106423] +[ 23.106547] +[ 23.106670] __local_bh_enable_ip+0xf5/0x120 +[ 23.106903] __dev_queue_xmit+0x789/0x1c00 +[ 23.107131] ? __pfx___dev_queue_xmit+0x10/0x10 +[ 23.107381] ? find_held_lock+0x8e/0xb0 +[ 23.107585] ? lock_release+0x204/0x400 +[ 23.107798] ? neigh_resolve_output+0x185/0x350 +[ 23.108049] ? mark_held_locks+0x48/0xa0 +[ 23.108265] ? neigh_resolve_output+0x185/0x350 +[ 23.108514] neigh_resolve_output+0x246/0x350 +[ 23.108753] ? neigh_resolve_output+0x246/0x350 +[ 23.109003] ip_finish_output2+0x3c3/0x10b0 +[ 23.109250] ? __pfx_ip_finish_output2+0x10/0x10 +[ 23.109510] ? 
__pfx_nf_hook+0x10/0x10 +[ 23.109732] __ip_finish_output+0x217/0x390 +[ 23.109978] ip_finish_output+0x2f/0x130 +[ 23.110207] ip_output+0xc9/0x170 +[ 23.110404] ip_push_pending_frames+0x1a0/0x240 +[ 23.110652] raw_sendmsg+0x102e/0x19e0 +[ 23.110871] ? __pfx_raw_sendmsg+0x10/0x10 +[ 23.111093] ? lock_release+0x204/0x400 +[ 23.111304] ? __mod_lruvec_page_state+0x148/0x330 +[ 23.111567] ? find_held_lock+0x8e/0xb0 +[ 23.111777] ? find_held_lock+0x8e/0xb0 +[ 23.111993] ? __rcu_read_unlock+0x7c/0x2f0 +[ 23.112225] ? aa_sk_perm+0x18a/0x550 +[ 23.112431] ? filemap_map_pages+0x4f1/0x900 +[ 23.112665] ? __pfx_aa_sk_perm+0x10/0x10 +[ 23.112880] ? find_held_lock+0x8e/0xb0 +[ 23.113098] inet_sendmsg+0xa0/0xb0 +[ 23.113297] ? inet_sendmsg+0xa0/0xb0 +[ 23.113500] ? __pfx_inet_sendmsg+0x10/0x10 +[ 23.113727] sock_sendmsg+0xf4/0x100 +[ 23.113924] ? move_addr_to_kernel.part.0+0x4f/0xa0 +[ 23.114190] __sys_sendto+0x1d4/0x290 +[ 23.114391] ? __pfx___sys_sendto+0x10/0x10 +[ 23.114621] ? __pfx_mark_lock.part.0+0x10/0x10 +[ 23.114869] ? lock_release+0x204/0x400 +[ 23.115076] ? find_held_lock+0x8e/0xb0 +[ 23.115287] ? rcu_is_watching+0x23/0x60 +[ 23.115503] ? __rseq_handle_notify_resume+0x6e2/0x860 +[ 23.115778] ? __kasan_check_write+0x14/0x30 +[ 23.116008] ? blkcg_maybe_throttle_current+0x8d/0x770 +[ 23.116285] ? mark_held_locks+0x28/0xa0 +[ 23.116503] ? do_syscall_64+0x37/0x90 +[ 23.116713] __x64_sys_sendto+0x7f/0xb0 +[ 23.116924] do_syscall_64+0x59/0x90 +[ 23.117123] ? irqentry_exit_to_user_mode+0x25/0x30 +[ 23.117387] ? irqentry_exit+0x77/0xb0 +[ 23.117593] ? exc_page_fault+0x92/0x140 +[ 23.117806] entry_SYSCALL_64_after_hwframe+0x6e/0xd8 +[ 23.118081] RIP: 0033:0x7f744aee2bba +[ 23.118282] Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb b8 0f 1f 00 f3 0f 1e fa 41 89 ca 64 8b 04 25 18 00 00 00 85 c0 75 15 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 7e c3 0f 1f 44 00 00 41 54 48 83 ec 30 44 89 +[ 23.119237] RSP: 002b:00007ffd04a7c9f8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c +[ 23.119644] RAX: ffffffffffffffda RBX: 00007ffd04a7e0a0 RCX: 00007f744aee2bba +[ 23.120023] RDX: 0000000000000040 RSI: 000056488e9e6300 RDI: 0000000000000003 +[ 23.120413] RBP: 000056488e9e6300 R08: 00007ffd04a80320 R09: 0000000000000010 +[ 23.120809] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000040 +[ 23.121219] R13: 00007ffd04a7dc38 R14: 00007ffd04a7ca00 R15: 00007ffd04a7e0a0 +[ 23.121617] +[ 23.121749] +[ 23.121845] The buggy address belongs to the virtual mapping at +[ 23.121845] [ffffc90000000000, ffffc90000009000) created by: +[ 23.121845] irq_init_percpu_irqstack+0x1cf/0x270 +[ 23.122707] +[ 23.122803] The buggy address belongs to the physical page: +[ 23.123104] page:0000000072ac19f0 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x24a09 +[ 23.123609] flags: 0xfffffc0001000(reserved|node=0|zone=1|lastcpupid=0x1fffff) +[ 23.123998] page_type: 0xffffffff() +[ 23.124194] raw: 000fffffc0001000 ffffea0000928248 ffffea0000928248 0000000000000000 +[ 23.124610] raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000 +[ 23.125023] page dumped because: kasan: bad access detected +[ 23.125326] +[ 23.125421] Memory state around the buggy address: +[ 23.125682] ffffc90000007800: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 +[ 23.126072] ffffc90000007880: 00 00 00 00 00 f1 f1 f1 f1 f1 f1 00 00 f2 f2 00 +[ 23.126455] >ffffc90000007900: 00 00 00 00 00 00 00 00 00 f2 f2 f2 f2 00 00 00 +[ 23.126840] ^ +[ 23.127138] ffffc90000007980: 00 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 +[ 23.127522] 
ffffc90000007a00: f3 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 +[ 23.127906] ================================================================== +[ 23.128324] Disabling lock debugging due to kernel taint + +Using simple s16 pointers for the 16-bit accesses fixes the problem. For +the 32-bit accesses, src and dst can be used directly. + +Fixes: 96518518cc41 ("netfilter: add nftables") +Cc: stable@vger.kernel.org +Reported-by: Tanguy DUBROCA (@SidewayRE) from @Synacktiv working with ZDI +Signed-off-by: Thadeu Lima de Souza Cascardo +Reviewed-by: Florian Westphal +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Greg Kroah-Hartman +--- + net/netfilter/nft_byteorder.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +--- a/net/netfilter/nft_byteorder.c ++++ b/net/netfilter/nft_byteorder.c +@@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft + const struct nft_byteorder *priv = nft_expr_priv(expr); + u32 *src = ®s->data[priv->sreg]; + u32 *dst = ®s->data[priv->dreg]; +- union { u32 u32; u16 u16; } *s, *d; ++ u16 *s16, *d16; + unsigned int i; + +- s = (void *)src; +- d = (void *)dst; ++ s16 = (void *)src; ++ d16 = (void *)dst; + + switch (priv->size) { + case 8: { +@@ -61,11 +61,11 @@ void nft_byteorder_eval(const struct nft + switch (priv->op) { + case NFT_BYTEORDER_NTOH: + for (i = 0; i < priv->len / 4; i++) +- d[i].u32 = ntohl((__force __be32)s[i].u32); ++ dst[i] = ntohl((__force __be32)src[i]); + break; + case NFT_BYTEORDER_HTON: + for (i = 0; i < priv->len / 4; i++) +- d[i].u32 = (__force __u32)htonl(s[i].u32); ++ dst[i] = (__force __u32)htonl(src[i]); + break; + } + break; +@@ -73,11 +73,11 @@ void nft_byteorder_eval(const struct nft + switch (priv->op) { + case NFT_BYTEORDER_NTOH: + for (i = 0; i < priv->len / 2; i++) +- d[i].u16 = ntohs((__force __be16)s[i].u16); ++ d16[i] = ntohs((__force __be16)s16[i]); + break; + case NFT_BYTEORDER_HTON: + for (i = 0; i < priv->len / 2; i++) +- d[i].u16 = (__force __u16)htons(s[i].u16); ++ d16[i] = (__force __u16)htons(s16[i]); + break; + } + break; diff --git a/queue-5.15/series b/queue-5.15/series index 8728718c635..f6fbbda872e 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -382,3 +382,9 @@ fanotify-disallow-mount-sb-marks-on-kernel-internal-pseudo-fs.patch netfilter-nf_tables-unbind-non-anonymous-set-if-rule-construction-fails.patch mips-dts-ci20-raise-vddcore-voltage-to-1.125-volts.patch io_uring-use-io_schedule-in-cqring-wait.patch +netfilter-conntrack-avoid-nf_ct_helper_hash-uses-after-free.patch +netfilter-nf_tables-do-not-ignore-genmask-when-looking-up-chain-by-id.patch +netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch +wireguard-queueing-use-saner-cpu-selection-wrapping.patch +wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch +tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch diff --git a/queue-5.15/tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch b/queue-5.15/tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch new file mode 100644 index 00000000000..bb80aa8d096 --- /dev/null +++ b/queue-5.15/tty-serial-fsl_lpuart-add-earlycon-for-imx8ulp-platform.patch @@ -0,0 +1,29 @@ +From e0edfdc15863ec80a1d9ac6e174dbccc00206dd0 Mon Sep 17 00:00:00 2001 +From: Sherry Sun +Date: Mon, 19 Jun 2023 16:06:13 +0800 +Subject: tty: serial: fsl_lpuart: add earlycon for imx8ulp platform + +From: Sherry Sun + +commit e0edfdc15863ec80a1d9ac6e174dbccc00206dd0 upstream. + +Add earlycon support for imx8ulp platform. 
+ +Signed-off-by: Sherry Sun +Cc: stable +Link: https://lore.kernel.org/r/20230619080613.16522-1-sherry.sun@nxp.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/fsl_lpuart.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -2632,6 +2632,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-l + OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup); + OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup); + OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); ++OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup); + OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); + EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); + EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); diff --git a/queue-5.15/wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch b/queue-5.15/wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch new file mode 100644 index 00000000000..160e7f5bbab --- /dev/null +++ b/queue-5.15/wireguard-netlink-send-staged-packets-when-setting-initial-private-key.patch @@ -0,0 +1,118 @@ +From f58d0a9b4c6a7a5199c3af967e43cc8b654604d4 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 3 Jul 2023 03:27:05 +0200 +Subject: wireguard: netlink: send staged packets when setting initial private key + +From: Jason A. Donenfeld + +commit f58d0a9b4c6a7a5199c3af967e43cc8b654604d4 upstream. + +Packets bound for peers can queue up prior to the device private key +being set. For example, if persistent keepalive is set, a packet is +queued up to be sent as soon as the device comes up. However, if the +private key hasn't been set yet, the handshake message never sends, and +no timer is armed to retry, since that would be pointless. + +But, if a user later sets a private key, the expectation is that those +queued packets, such as a persistent keepalive, are actually sent. So +adjust the configuration logic to account for this edge case, and add a +test case to make sure this works. + +Maxim noticed this with a wg-quick(8) config to the tune of: + + [Interface] + PostUp = wg set %i private-key somefile + + [Peer] + PublicKey = ... + Endpoint = ... + PersistentKeepalive = 25 + +Here, the private key gets set after the device comes up using a PostUp +script, triggering the bug. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Cc: stable@vger.kernel.org +Reported-by: Maxim Cournoyer +Tested-by: Maxim Cournoyer +Link: https://lore.kernel.org/wireguard/87fs7xtqrv.fsf@gmail.com/ +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireguard/netlink.c | 14 ++++++++----- + tools/testing/selftests/wireguard/netns.sh | 30 +++++++++++++++++++++++++---- + 2 files changed, 35 insertions(+), 9 deletions(-) + +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -546,6 +546,7 @@ static int wg_set_device(struct sk_buff + u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); + u8 public_key[NOISE_PUBLIC_KEY_LEN]; + struct wg_peer *peer, *temp; ++ bool send_staged_packets; + + if (!crypto_memneq(wg->static_identity.static_private, + private_key, NOISE_PUBLIC_KEY_LEN)) +@@ -564,14 +565,17 @@ static int wg_set_device(struct sk_buff + } + + down_write(&wg->static_identity.lock); +- wg_noise_set_static_identity_private_key(&wg->static_identity, +- private_key); +- list_for_each_entry_safe(peer, temp, &wg->peer_list, +- peer_list) { ++ send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev); ++ wg_noise_set_static_identity_private_key(&wg->static_identity, private_key); ++ send_staged_packets = send_staged_packets && wg->static_identity.has_identity; ++ ++ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); ++ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { + wg_noise_precompute_static_static(peer); + wg_noise_expire_current_peer_keypairs(peer); ++ if (send_staged_packets) ++ wg_packet_send_staged_packets(peer); + } +- wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); + up_write(&wg->static_identity.lock); + } + skip_set_private_key: +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -502,10 +502,32 @@ n2 bash -c 'printf 0 > /proc/sys/net/ipv + n1 ping -W 1 -c 1 192.168.241.2 + [[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]] + +-ip1 link del veth1 +-ip1 link del veth3 +-ip1 link del wg0 +-ip2 link del wg0 ++ip1 link del dev veth3 ++ip1 link del dev wg0 ++ip2 link del dev wg0 ++ ++# Make sure persistent keep alives are sent when an adapter comes up ++ip1 link add dev wg0 type wireguard ++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1 ++read _ _ tx_bytes < <(n1 wg show wg0 transfer) ++[[ $tx_bytes -eq 0 ]] ++ip1 link set dev wg0 up ++read _ _ tx_bytes < <(n1 wg show wg0 transfer) ++[[ $tx_bytes -gt 0 ]] ++ip1 link del dev wg0 ++# This should also happen even if the private key is set later ++ip1 link add dev wg0 type wireguard ++n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1 ++read _ _ tx_bytes < <(n1 wg show wg0 transfer) ++[[ $tx_bytes -eq 0 ]] ++ip1 link set dev wg0 up ++read _ _ tx_bytes < <(n1 wg show wg0 transfer) ++[[ $tx_bytes -eq 0 ]] ++n1 wg set wg0 private-key <(echo "$key1") ++read _ _ tx_bytes < <(n1 wg show wg0 transfer) ++[[ $tx_bytes -gt 0 ]] ++ip1 link del dev veth1 ++ip1 link del dev wg0 + + # We test that Netlink/IPC is working properly by doing things that usually cause split responses + ip0 link add dev wg0 type wireguard diff --git a/queue-5.15/wireguard-queueing-use-saner-cpu-selection-wrapping.patch b/queue-5.15/wireguard-queueing-use-saner-cpu-selection-wrapping.patch new file mode 100644 index 00000000000..ef84e36881a --- /dev/null +++ b/queue-5.15/wireguard-queueing-use-saner-cpu-selection-wrapping.patch @@ -0,0 +1,111 @@ +From 7387943fa35516f6f8017a3b0e9ce48a3bef9faa Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Mon, 3 Jul 2023 03:27:04 +0200 +Subject: wireguard: queueing: use saner cpu selection wrapping + +From: Jason A. Donenfeld + +commit 7387943fa35516f6f8017a3b0e9ce48a3bef9faa upstream. + +Using `% nr_cpumask_bits` is slow and complicated, and not totally +robust toward dynamic changes to CPU topologies. Rather than storing the +next CPU in the round-robin, just store the last one, and also return +that value. This simplifies the loop drastically into a much more common +pattern. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Cc: stable@vger.kernel.org +Reported-by: Linus Torvalds +Tested-by: Manuel Leiner +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireguard/queueing.c | 1 + + drivers/net/wireguard/queueing.h | 25 +++++++++++-------------- + drivers/net/wireguard/receive.c | 2 +- + drivers/net/wireguard/send.c | 2 +- + 4 files changed, 14 insertions(+), 16 deletions(-) + +--- a/drivers/net/wireguard/queueing.c ++++ b/drivers/net/wireguard/queueing.c +@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_qu + int ret; + + memset(queue, 0, sizeof(*queue)); ++ queue->last_cpu = -1; + ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); + if (ret) + return ret; +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -119,20 +119,17 @@ static inline int wg_cpumask_choose_onli + return cpu; + } + +-/* This function is racy, in the sense that next is unlocked, so it could return +- * the same CPU twice. A race-free version of this would be to instead store an +- * atomic sequence number, do an increment-and-return, and then iterate through +- * every possible CPU until we get to that index -- choose_cpu. However that's +- * a bit slower, and it doesn't seem like this potential race actually +- * introduces any performance loss, so we live with it. ++/* This function is racy, in the sense that it's called while last_cpu is ++ * unlocked, so it could return the same CPU twice. Adding locking or using ++ * atomic sequence numbers is slower though, and the consequences of racing are ++ * harmless, so live with it. + */ +-static inline int wg_cpumask_next_online(int *next) ++static inline int wg_cpumask_next_online(int *last_cpu) + { +- int cpu = *next; +- +- while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) +- cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; +- *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; ++ int cpu = cpumask_next(*last_cpu, cpu_online_mask); ++ if (cpu >= nr_cpu_ids) ++ cpu = cpumask_first(cpu_online_mask); ++ *last_cpu = cpu; + return cpu; + } + +@@ -161,7 +158,7 @@ static inline void wg_prev_queue_drop_pe + + static inline int wg_queue_enqueue_per_device_and_peer( + struct crypt_queue *device_queue, struct prev_queue *peer_queue, +- struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) ++ struct sk_buff *skb, struct workqueue_struct *wq) + { + int cpu; + +@@ -175,7 +172,7 @@ static inline int wg_queue_enqueue_per_d + /* Then we queue it up in the device queue, which consumes the + * packet as soon as it can. 
+ */ +- cpu = wg_cpumask_next_online(next_cpu); ++ cpu = wg_cpumask_next_online(&device_queue->last_cpu); + if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) + return -EPIPE; + queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -531,7 +531,7 @@ static void wg_packet_consume_data(struc + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, +- wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); ++ wg->packet_crypt_wq); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); + if (likely(!ret || ret == -EPIPE)) { +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct + goto err; + + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, +- wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); ++ wg->packet_crypt_wq); + if (unlikely(ret == -EPIPE)) + wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); + err: -- 2.47.3
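
As a quick illustration of the layout issue described in
netfilter-nf_tables-prevent-oob-access-in-nft_byteorder_eval.patch above:
the old code indexed an array of union { u32; u16; } as 16-bit entries, so
each step advanced by the 4-byte union size rather than 2 bytes. The
following is a minimal user-space sketch, not kernel code; the names
reg_union and priv_len are made up for the example, and it only prints the
offsets each variant would touch.

/*
 * Sketch of the nft_byteorder 16-bit indexing problem (illustrative only).
 * Old code: d[i].u16 strides sizeof(union) = 4 bytes per element, so
 * converting priv->len bytes walks ~2 * priv->len bytes of register data.
 * Fixed code: d16[i] strides 2 bytes and stays within priv->len bytes.
 */
#include <stdio.h>
#include <stdint.h>

union reg_union { uint32_t u32; uint16_t u16; };  /* stand-in for the old union */

int main(void)
{
	unsigned int priv_len = 16;             /* bytes to byte-swap, 16-bit case */
	unsigned int last = priv_len / 2 - 1;   /* last loop index, i = 0..7 */

	/* old: d[last].u16 touches bytes [last * 4, last * 4 + 2) */
	printf("union stride %zu, last access ends at byte %zu (buffer is %u bytes)\n",
	       sizeof(union reg_union),
	       last * sizeof(union reg_union) + sizeof(uint16_t), priv_len);  /* 30 > 16 */

	/* fixed: d16[last] touches bytes [last * 2, last * 2 + 2) */
	printf("u16 stride %zu, last access ends at byte %zu (buffer is %u bytes)\n",
	       sizeof(uint16_t),
	       last * sizeof(uint16_t) + sizeof(uint16_t), priv_len);         /* 16 == 16 */
	return 0;
}

Compiled and run, the first line reports the final union-based access ending
at byte 30 of a 16-byte conversion (out of bounds), while the plain u16
pointer used by the fix ends exactly at byte 16.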