--- /dev/null
+From c2aba69d0c36a496ab4f2e81e9c2b271f2693fd7 Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Mon, 19 May 2025 14:50:26 +0200
+Subject: can: bcm: add locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit c2aba69d0c36a496ab4f2e81e9c2b271f2693fd7 upstream.
+
+The CAN broadcast manager (CAN BCM) can send a sequence of CAN frames via
+hrtimer. The content and also the length of the sequence can be changed
+or reduced at runtime, in which case the 'currframe' counter is reset to
+zero.
+
+Although this appeared to be a safe operation, the updates of 'currframe'
+can be triggered concurrently from user space and from hrtimer context in
+bcm_can_tx().
+Anderson Nascimento created a proof of concept that triggered a KASAN
+slab-out-of-bounds read access which can be prevented with a spin_lock_bh.
+
+During the rework of bcm_can_tx(), the 'count' variable has been moved
+into the protected section as well, since this variable can also be
+modified from both contexts.
+
+Fixes: ffd980f976e7 ("[CAN]: Add broadcast manager (bcm) protocol")
+Reported-by: Anderson Nascimento <anderson@allelesecurity.com>
+Tested-by: Anderson Nascimento <anderson@allelesecurity.com>
+Reviewed-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250519125027.11900-1-socketcan@hartkopp.net
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/bcm.c | 66 +++++++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 45 insertions(+), 21 deletions(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -58,6 +58,7 @@
+ #include <linux/can/skb.h>
+ #include <linux/can/bcm.h>
+ #include <linux/slab.h>
++#include <linux/spinlock.h>
+ #include <net/sock.h>
+ #include <net/net_namespace.h>
+
+@@ -120,6 +121,7 @@ struct bcm_op {
+ struct canfd_frame last_sframe;
+ struct sock *sk;
+ struct net_device *rx_reg_dev;
++ spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
+ };
+
+ struct bcm_sock {
+@@ -273,13 +275,18 @@ static void bcm_can_tx(struct bcm_op *op
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+- struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
++ struct canfd_frame *cf;
+ int err;
+
+ /* no target device? => exit */
+ if (!op->ifindex)
+ return;
+
++ /* read currframe under lock protection */
++ spin_lock_bh(&op->bcm_tx_lock);
++ cf = op->frames + op->cfsiz * op->currframe;
++ spin_unlock_bh(&op->bcm_tx_lock);
++
+ dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
+ if (!dev) {
+ /* RFC: should this bcm_op remove itself here? */
+@@ -300,6 +307,10 @@ static void bcm_can_tx(struct bcm_op *op
+ skb->dev = dev;
+ can_skb_set_owner(skb, op->sk);
+ err = can_send(skb, 1);
++
++ /* update currframe and count under lock protection */
++ spin_lock_bh(&op->bcm_tx_lock);
++
+ if (!err)
+ op->frames_abs++;
+
+@@ -308,6 +319,11 @@ static void bcm_can_tx(struct bcm_op *op
+ /* reached last frame? */
+ if (op->currframe >= op->nframes)
+ op->currframe = 0;
++
++ if (op->count > 0)
++ op->count--;
++
++ spin_unlock_bh(&op->bcm_tx_lock);
+ out:
+ dev_put(dev);
+ }
+@@ -404,7 +420,7 @@ static enum hrtimer_restart bcm_tx_timeo
+ struct bcm_msg_head msg_head;
+
+ if (op->kt_ival1 && (op->count > 0)) {
+- op->count--;
++ bcm_can_tx(op);
+ if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+ /* create notification to user */
+@@ -419,7 +435,6 @@ static enum hrtimer_restart bcm_tx_timeo
+
+ bcm_send_to_user(op, &msg_head, NULL, 0);
+ }
+- bcm_can_tx(op);
+
+ } else if (op->kt_ival2) {
+ bcm_can_tx(op);
+@@ -909,6 +924,27 @@ static int bcm_tx_setup(struct bcm_msg_h
+ }
+ op->flags = msg_head->flags;
+
++ /* only lock for unlikely count/nframes/currframe changes */
++ if (op->nframes != msg_head->nframes ||
++ op->flags & TX_RESET_MULTI_IDX ||
++ op->flags & SETTIMER) {
++
++ spin_lock_bh(&op->bcm_tx_lock);
++
++ if (op->nframes != msg_head->nframes ||
++ op->flags & TX_RESET_MULTI_IDX) {
++ /* potentially update changed nframes */
++ op->nframes = msg_head->nframes;
++ /* restart multiple frame transmission */
++ op->currframe = 0;
++ }
++
++ if (op->flags & SETTIMER)
++ op->count = msg_head->count;
++
++ spin_unlock_bh(&op->bcm_tx_lock);
++ }
++
+ } else {
+ /* insert new BCM operation for the given can_id */
+
+@@ -916,9 +952,14 @@ static int bcm_tx_setup(struct bcm_msg_h
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->cfsiz = CFSIZ(msg_head->flags);
+ op->flags = msg_head->flags;
++ op->nframes = msg_head->nframes;
++
++ if (op->flags & SETTIMER)
++ op->count = msg_head->count;
+
+ /* create array for CAN frames and copy the data */
+ if (msg_head->nframes > 1) {
+@@ -977,22 +1018,8 @@ static int bcm_tx_setup(struct bcm_msg_h
+
+ } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
+
+- if (op->nframes != msg_head->nframes) {
+- op->nframes = msg_head->nframes;
+- /* start multiple frame transmission with index 0 */
+- op->currframe = 0;
+- }
+-
+- /* check flags */
+-
+- if (op->flags & TX_RESET_MULTI_IDX) {
+- /* start multiple frame transmission with index 0 */
+- op->currframe = 0;
+- }
+-
+ if (op->flags & SETTIMER) {
+ /* set timer values */
+- op->count = msg_head->count;
+ op->ival1 = msg_head->ival1;
+ op->ival2 = msg_head->ival2;
+ op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+@@ -1009,11 +1036,8 @@ static int bcm_tx_setup(struct bcm_msg_h
+ op->flags |= TX_ANNOUNCE;
+ }
+
+- if (op->flags & TX_ANNOUNCE) {
++ if (op->flags & TX_ANNOUNCE)
+ bcm_can_tx(op);
+- if (op->count)
+- op->count--;
+- }
+
+ if (op->flags & STARTTIMER)
+ bcm_tx_start_timer(op);
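
[ A minimal standalone sketch of the locking pattern above: 'currframe'
  and 'count' are written from process context (bcm_tx_setup() reached
  via sendmsg) and from softirq context (the hrtimer callback invoking
  bcm_can_tx()), so both sides take a BH-disabling spinlock. The names
  below (struct seq_tx, seq_tx_advance, seq_tx_reset) are illustrations,
  not kernel APIs. ]

    #include <linux/spinlock.h>

    struct seq_tx {
            spinlock_t lock;        /* protects curr, count, nframes */
            unsigned int curr;      /* index of next frame to send */
            unsigned int count;     /* remaining transmissions */
            unsigned int nframes;   /* length of the frame sequence */
    };

    /* softirq context (hrtimer callback): pick the frame and advance */
    static unsigned int seq_tx_advance(struct seq_tx *s)
    {
            unsigned int idx;

            spin_lock_bh(&s->lock);
            idx = s->curr;                  /* read index under the lock */
            if (++s->curr >= s->nframes)    /* wrap at (possibly new) end */
                    s->curr = 0;
            if (s->count > 0)
                    s->count--;
            spin_unlock_bh(&s->lock);

            return idx;
    }

    /* process context (sendmsg): shrink the sequence and restart it */
    static void seq_tx_reset(struct seq_tx *s, unsigned int nframes)
    {
            spin_lock_bh(&s->lock);
            s->nframes = nframes;   /* may reduce the sequence length */
            s->curr = 0;            /* restart so curr cannot overrun it */
            spin_unlock_bh(&s->lock);
    }

spin_lock_bh() rather than spin_lock() matters on the process-context
side: it disables bottom halves, so the softirq-based hrtimer callback
cannot preempt the lock holder on the same CPU and deadlock.
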
--- /dev/null
+From dac5e6249159ac255dad9781793dbe5908ac9ddb Mon Sep 17 00:00:00 2001
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+Date: Mon, 19 May 2025 14:50:27 +0200
+Subject: can: bcm: add missing rcu read protection for procfs content
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+commit dac5e6249159ac255dad9781793dbe5908ac9ddb upstream.
+
+When the procfs content is generated for a bcm_op which is in the
+process of being removed, the procfs output might show unreliable data
+(UAF).
+
+As the removal of bcm_op entries is already implemented with RCU
+handling, this patch adds the missing rcu_read_lock() and makes sure the
+list entries are properly removed under RCU protection.
+
+Fixes: f1b4e32aca08 ("can: bcm: use call_rcu() instead of costly synchronize_rcu()")
+Reported-by: Anderson Nascimento <anderson@allelesecurity.com>
+Suggested-by: Anderson Nascimento <anderson@allelesecurity.com>
+Tested-by: Anderson Nascimento <anderson@allelesecurity.com>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20250519125027.11900-2-socketcan@hartkopp.net
+Cc: stable@vger.kernel.org # >= 5.4
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/can/bcm.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -207,7 +207,9 @@ static int bcm_proc_show(struct seq_file
+ seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
+ seq_printf(m, " <<<\n");
+
+- list_for_each_entry(op, &bo->rx_ops, list) {
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(op, &bo->rx_ops, list) {
+
+ unsigned long reduction;
+
+@@ -263,6 +265,9 @@ static int bcm_proc_show(struct seq_file
+ seq_printf(m, "# sent %ld\n", op->frames_abs);
+ }
+ seq_putc(m, '\n');
++
++ rcu_read_unlock();
++
+ return 0;
+ }
+ #endif /* CONFIG_PROC_FS */
+@@ -811,7 +816,7 @@ static int bcm_delete_rx_op(struct list_
+ REGMASK(op->can_id),
+ bcm_rx_handler, op);
+
+- list_del(&op->list);
++ list_del_rcu(&op->list);
+ bcm_remove_op(op);
+ return 1; /* done */
+ }
+@@ -831,7 +836,7 @@ static int bcm_delete_tx_op(struct list_
+ list_for_each_entry_safe(op, n, ops, list) {
+ if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
+ (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
+- list_del(&op->list);
++ list_del_rcu(&op->list);
+ bcm_remove_op(op);
+ return 1; /* done */
+ }
+@@ -1253,7 +1258,7 @@ static int bcm_rx_setup(struct bcm_msg_h
+ bcm_rx_handler, op, "bcm", sk);
+ if (err) {
+ /* this bcm rx op is broken -> remove it */
+- list_del(&op->list);
++ list_del_rcu(&op->list);
+ bcm_remove_op(op);
+ return err;
+ }
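
[ A minimal standalone sketch of the RCU pairing above: a lockless
  procfs-style reader versus RCU-deferred removal. The names (struct
  item, item_show, item_delete) are illustrations; in bcm the removal
  side is additionally serialized by the socket lock. ]

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/seq_file.h>
    #include <linux/slab.h>

    struct item {
            struct list_head list;
            struct rcu_head rcu;
            unsigned long frames_abs;
    };

    static LIST_HEAD(items);

    /* lockless reader (e.g. a seq_file show callback) */
    static void item_show(struct seq_file *m)
    {
            struct item *it;

            rcu_read_lock();
            list_for_each_entry_rcu(it, &items, list)
                    seq_printf(m, "# sent %lu\n", it->frames_abs);
            rcu_read_unlock();
    }

    /* writer: unlink with list_del_rcu(), free after a grace period */
    static void item_delete(struct item *it)
    {
            list_del_rcu(&it->list);  /* readers may still hold 'it' */
            kfree_rcu(it, rcu);       /* freed once all readers are done */
    }

Plain list_del() poisons the entry's pointers while a concurrent reader
may still be walking it; list_del_rcu() leaves the forward pointer
intact, so a reader either sees the entry or skips it, but never follows
a dangling link.
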
--- /dev/null
+From b2df03ed4052e97126267e8c13ad4204ea6ba9b6 Mon Sep 17 00:00:00 2001
+From: Ivan Pravdin <ipravdin.official@gmail.com>
+Date: Sun, 18 May 2025 18:41:02 -0400
+Subject: crypto: algif_hash - fix double free in hash_accept
+
+From: Ivan Pravdin <ipravdin.official@gmail.com>
+
+commit b2df03ed4052e97126267e8c13ad4204ea6ba9b6 upstream.
+
+If accept(2) is called on a socket of type algif_hash with the
+MSG_MORE flag set and crypto_ahash_import fails, sk2 is freed.
+However, it is also freed in af_alg_release, leading to a
+slab-use-after-free error.
+
+Fixes: fe869cdb89c9 ("crypto: algif_hash - User-space interface for hash operations")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Ivan Pravdin <ipravdin.official@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_hash.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -262,10 +262,6 @@ static int hash_accept(struct socket *so
+ return err;
+
+ err = crypto_ahash_import(&ctx2->req, state);
+- if (err) {
+- sock_orphan(sk2);
+- sock_put(sk2);
+- }
+
+ return err;
+ }
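
[ The underlying rule: after af_alg_accept() has grafted sk2 onto the
  new socket, af_alg_release() tears it down exactly once, so the error
  path must not drop it again. A generic userspace C sketch of that
  single-owner error handling; struct parent, obj_attach and friends are
  hypothetical names, not the algif_hash API. ]

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int state; };

    struct parent {
            struct obj *priv;   /* owned: freed by parent_release() only */
    };

    /* hypothetical import step that can fail */
    static int obj_import_state(struct obj *obj)
    {
            return obj->state ? 0 : -1;
    }

    /* the single point of release: frees what the parent owns, once */
    static void parent_release(struct parent *parent)
    {
            free(parent->priv);
            parent->priv = NULL;
    }

    static int obj_attach(struct parent *parent, struct obj *obj)
    {
            parent->priv = obj;   /* ownership transferred here */

            if (obj_import_state(obj))
                    return -1;    /* report the error, but do NOT free
                                     obj: parent_release() drops it */
            return 0;
    }

    int main(void)
    {
            struct parent p = { 0 };
            struct obj *o = calloc(1, sizeof(*o)); /* state 0: import fails */

            if (obj_attach(&p, o))
                    fprintf(stderr, "import failed, obj kept by parent\n");
            parent_release(&p);   /* the one and only free of o */
            return 0;
    }
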
--- /dev/null
+From d6ebcde6d4ecf34f8495fb30516645db3aea8993 Mon Sep 17 00:00:00 2001
+From: Dominik Grzegorzek <dominik.grzegorzek@oracle.com>
+Date: Sun, 18 May 2025 19:45:31 +0200
+Subject: padata: do not leak refcount in reorder_work
+
+From: Dominik Grzegorzek <dominik.grzegorzek@oracle.com>
+
+commit d6ebcde6d4ecf34f8495fb30516645db3aea8993 upstream.
+
+A recent patch that addressed a UAF introduced a reference count leak:
+the parallel_data refcount is incremented unconditionally, regardless
+of the return value of queue_work(). If the work item is already queued,
+the incremented refcount is never decremented.
+
+Fix this by checking the return value of queue_work() and decrementing
+the refcount when necessary.
+
+Resolves:
+
+Unreferenced object 0xffff9d9f421e3d80 (size 192):
+ comm "cryptomgr_probe", pid 157, jiffies 4294694003
+ hex dump (first 32 bytes):
+ 80 8b cf 41 9f 9d ff ff b8 97 e0 89 ff ff ff ff ...A............
+ d0 97 e0 89 ff ff ff ff 19 00 00 00 1f 88 23 00 ..............#.
+ backtrace (crc 838fb36):
+ __kmalloc_cache_noprof+0x284/0x320
+ padata_alloc_pd+0x20/0x1e0
+ padata_alloc_shell+0x3b/0xa0
+ 0xffffffffc040a54d
+ cryptomgr_probe+0x43/0xc0
+ kthread+0xf6/0x1f0
+ ret_from_fork+0x2f/0x50
+ ret_from_fork_asm+0x1a/0x30
+
+Fixes: dd7d37ccf6b1 ("padata: avoid UAF for reorder_work")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek@oracle.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/padata.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -363,7 +363,8 @@ static void padata_reorder(struct parall
+ * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+ */
+ padata_get_pd(pd);
+- queue_work(pinst->serial_wq, &pd->reorder_work);
++ if (!queue_work(pinst->serial_wq, &pd->reorder_work))
++ padata_put_pd(pd);
+ }
+ }
+
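
[ The contract behind the fix above: queue_work() returns false when the
  work item is already pending, and a pending item runs its callback only
  once, so only one callback-side put will happen. The reference taken
  for a queueing attempt that queued nothing must be given back. A
  minimal sketch; struct pd, pd_put and kick_reorder are hypothetical
  names standing in for parallel_data, padata_put_pd() and the caller in
  padata_reorder(). ]

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct pd {
            refcount_t refcnt;
            struct work_struct reorder_work; /* callback drops one ref */
    };

    static void pd_put(struct pd *pd)
    {
            if (refcount_dec_and_test(&pd->refcnt))
                    kfree(pd);
    }

    static void kick_reorder(struct workqueue_struct *wq, struct pd *pd)
    {
            /* take a reference for the work callback to drop */
            refcount_inc(&pd->refcnt);

            /*
             * If the work was already pending, no new callback run was
             * added, so nobody will drop the reference just taken:
             * give it back immediately.
             */
            if (!queue_work(wq, &pd->reorder_work))
                    pd_put(pd);
    }
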
net-dwmac-sun8i-use-parsed-internal-phy-address-inst.patch
sch_hfsc-fix-qlen-accounting-bug-when-using-peek-in-.patch
net-tipc-fix-slab-use-after-free-read-in-tipc_aead_e.patch
+crypto-algif_hash-fix-double-free-in-hash_accept.patch
+padata-do-not-leak-refcount-in-reorder_work.patch
+can-bcm-add-locking-for-bcm_op-runtime-updates.patch
+can-bcm-add-missing-rcu-read-protection-for-procfs-content.patch