--- /dev/null
+From 93ec4a3b76404bce01bd5c9032bef5df6feb1d62 Mon Sep 17 00:00:00 2001
+From: Jing Xia <jing.xia@unisoc.com>
+Date: Wed, 20 Dec 2023 10:46:03 +0800
+Subject: class: fix use-after-free in class_register()
+
+From: Jing Xia <jing.xia@unisoc.com>
+
+commit 93ec4a3b76404bce01bd5c9032bef5df6feb1d62 upstream.
+
+The lock_class_key is still registered and can be found in
+lock_keys_hash hlist after subsys_private is freed in the error
+handler path. A task that iterates over the lock_keys_hash
+later may cause a use-after-free. So fix that up and unregister
+the lock_class_key before kfree(cp).
+
+On our platform, a driver fails in kset_register because of
+creating a duplicate filename '/class/xxx'. With KASAN enabled,
+it prints an invalid-access bug report.
+
+KASAN bug report:
+
+BUG: KASAN: invalid-access in lockdep_register_key+0x19c/0x1bc
+Write of size 8 at addr 15ffff808b8c0368 by task modprobe/252
+Pointer tag: [15], memory tag: [fe]
+
+CPU: 7 PID: 252 Comm: modprobe Tainted: G W
+ 6.6.0-mainline-maybe-dirty #1
+
+Call trace:
+dump_backtrace+0x1b0/0x1e4
+show_stack+0x2c/0x40
+dump_stack_lvl+0xac/0xe0
+print_report+0x18c/0x4d8
+kasan_report+0xe8/0x148
+__hwasan_store8_noabort+0x88/0x98
+lockdep_register_key+0x19c/0x1bc
+class_register+0x94/0x1ec
+init_module+0xbc/0xf48 [rfkill]
+do_one_initcall+0x17c/0x72c
+do_init_module+0x19c/0x3f8
+...
+Memory state around the buggy address:
+ffffff808b8c0100: 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a 8a
+ffffff808b8c0200: 8a 8a 8a 8a 8a 8a 8a 8a fe fe fe fe fe fe fe fe
+>ffffff808b8c0300: fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe
+ ^
+ffffff808b8c0400: 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03
+
+As CONFIG_KASAN_GENERIC is not set, KASAN reports invalid-access
+not use-after-free here. In this case, modprobe is manipulating
+the corrupted lock_keys_hash hlist where the lock_class_key was
+already freed before.
+
+It's worth noting that this can only happen if lockdep is enabled,
+which is not true for a normal system.
+
+Fixes: dcfbb67e48a2 ("driver core: class: use lock_class_key already present in struct subsys_private")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Jing Xia <jing.xia@unisoc.com>
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Link: https://lore.kernel.org/r/20231220024603.186078-1-jing.xia@unisoc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/class.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -215,6 +215,7 @@ int class_register(const struct class *c
+ return 0;
+
+ err_out:
++ lockdep_unregister_key(key);
+ kfree(cp);
+ return error;
+ }
--- /dev/null
+From c312828c37a72fe2d033a961c47c227b0767e9f8 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Fri, 29 Dec 2023 08:49:16 +0100
+Subject: kernfs: convert kernfs_idr_lock to an irq safe raw spinlock
+
+From: Andrea Righi <andrea.righi@canonical.com>
+
+commit c312828c37a72fe2d033a961c47c227b0767e9f8 upstream.
+
+bpf_cgroup_from_id() is basically a wrapper to cgroup_get_from_id(),
+that is relying on kernfs to determine the right cgroup associated to
+the target id.
+
+As a kfunc, it has the potential to be attached to any function through
+BPF, particularly in contexts where certain locks are held.
+
+However, kernfs is not using an irq safe spinlock for kernfs_idr_lock,
+that means any kernfs function that is acquiring this lock can be
+interrupted and potentially hit bpf_cgroup_from_id() in the process,
+triggering a deadlock.
+
+For example, it is really easy to trigger a lockdep splat between
+kernfs_idr_lock and rq->_lock, attaching a small BPF program to
+__set_cpus_allowed_ptr_locked() that just calls bpf_cgroup_from_id():
+
+ =====================================================
+ WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
+ 6.7.0-rc7-virtme #5 Not tainted
+ -----------------------------------------------------
+ repro/131 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
+ ffffffffb2dc4578 (kernfs_idr_lock){+.+.}-{2:2}, at: kernfs_find_and_get_node_by_id+0x1d/0x80
+
+ and this task is already holding:
+ ffff911cbecaf218 (&rq->__lock){-.-.}-{2:2}, at: task_rq_lock+0x50/0xc0
+ which would create a new lock dependency:
+ (&rq->__lock){-.-.}-{2:2} -> (kernfs_idr_lock){+.+.}-{2:2}
+
+ but this new dependency connects a HARDIRQ-irq-safe lock:
+ (&rq->__lock){-.-.}-{2:2}
+
+ ... which became HARDIRQ-irq-safe at:
+ lock_acquire+0xbf/0x2b0
+ _raw_spin_lock_nested+0x2e/0x40
+ scheduler_tick+0x5d/0x170
+ update_process_times+0x9c/0xb0
+ tick_periodic+0x27/0xe0
+ tick_handle_periodic+0x24/0x70
+ __sysvec_apic_timer_interrupt+0x64/0x1a0
+ sysvec_apic_timer_interrupt+0x6f/0x80
+ asm_sysvec_apic_timer_interrupt+0x1a/0x20
+ memcpy+0xc/0x20
+ arch_dup_task_struct+0x15/0x30
+ copy_process+0x1ce/0x1eb0
+ kernel_clone+0xac/0x390
+ kernel_thread+0x6f/0xa0
+ kthreadd+0x199/0x230
+ ret_from_fork+0x31/0x50
+ ret_from_fork_asm+0x1b/0x30
+
+ to a HARDIRQ-irq-unsafe lock:
+ (kernfs_idr_lock){+.+.}-{2:2}
+
+ ... which became HARDIRQ-irq-unsafe at:
+ ...
+ lock_acquire+0xbf/0x2b0
+ _raw_spin_lock+0x30/0x40
+ __kernfs_new_node.isra.0+0x83/0x280
+ kernfs_create_root+0xf6/0x1d0
+ sysfs_init+0x1b/0x70
+ mnt_init+0xd9/0x2a0
+ vfs_caches_init+0xcf/0xe0
+ start_kernel+0x58a/0x6a0
+ x86_64_start_reservations+0x18/0x30
+ x86_64_start_kernel+0xc5/0xe0
+ secondary_startup_64_no_verify+0x178/0x17b
+
+ other info that might help us debug this:
+
+ Possible interrupt unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(kernfs_idr_lock);
+ local_irq_disable();
+ lock(&rq->__lock);
+ lock(kernfs_idr_lock);
+ <Interrupt>
+ lock(&rq->__lock);
+
+ *** DEADLOCK ***
+
+Prevent this deadlock condition by converting kernfs_idr_lock to a
+raw irq-safe spinlock.
+
+The performance impact of this change should be negligible and it also
+helps to prevent similar deadlock conditions with any other subsystems
+that may depend on kernfs.
+
+Fixes: 332ea1f697be ("bpf: Add bpf_cgroup_from_id() kfunc")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/20231229074916.53547-1-andrea.righi@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/kernfs/dir.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -27,7 +27,7 @@ static DEFINE_RWLOCK(kernfs_rename_lock)
+ */
+ static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
+ static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
+-static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
++static DEFINE_RAW_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
+
+ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
+
+@@ -539,6 +539,7 @@ void kernfs_put(struct kernfs_node *kn)
+ {
+ struct kernfs_node *parent;
+ struct kernfs_root *root;
++ unsigned long flags;
+
+ if (!kn || !atomic_dec_and_test(&kn->count))
+ return;
+@@ -563,9 +564,9 @@ void kernfs_put(struct kernfs_node *kn)
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
+ }
+- spin_lock(&kernfs_idr_lock);
++ raw_spin_lock_irqsave(&kernfs_idr_lock, flags);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+- spin_unlock(&kernfs_idr_lock);
++ raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
+ kmem_cache_free(kernfs_node_cache, kn);
+
+ kn = parent;
+@@ -607,6 +608,7 @@ static struct kernfs_node *__kernfs_new_
+ struct kernfs_node *kn;
+ u32 id_highbits;
+ int ret;
++ unsigned long irqflags;
+
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+@@ -617,13 +619,13 @@ static struct kernfs_node *__kernfs_new_
+ goto err_out1;
+
+ idr_preload(GFP_KERNEL);
+- spin_lock(&kernfs_idr_lock);
++ raw_spin_lock_irqsave(&kernfs_idr_lock, irqflags);
+ ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
+ if (ret >= 0 && ret < root->last_id_lowbits)
+ root->id_highbits++;
+ id_highbits = root->id_highbits;
+ root->last_id_lowbits = ret;
+- spin_unlock(&kernfs_idr_lock);
++ raw_spin_unlock_irqrestore(&kernfs_idr_lock, irqflags);
+ idr_preload_end();
+ if (ret < 0)
+ goto err_out2;
+@@ -659,9 +661,9 @@ static struct kernfs_node *__kernfs_new_
+ return kn;
+
+ err_out3:
+- spin_lock(&kernfs_idr_lock);
++ raw_spin_lock_irqsave(&kernfs_idr_lock, irqflags);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+- spin_unlock(&kernfs_idr_lock);
++ raw_spin_unlock_irqrestore(&kernfs_idr_lock, irqflags);
+ err_out2:
+ kmem_cache_free(kernfs_node_cache, kn);
+ err_out1:
+@@ -702,8 +704,9 @@ struct kernfs_node *kernfs_find_and_get_
+ struct kernfs_node *kn;
+ ino_t ino = kernfs_id_ino(id);
+ u32 gen = kernfs_id_gen(id);
++ unsigned long flags;
+
+- spin_lock(&kernfs_idr_lock);
++ raw_spin_lock_irqsave(&kernfs_idr_lock, flags);
+
+ kn = idr_find(&root->ino_idr, (u32)ino);
+ if (!kn)
+@@ -727,10 +730,10 @@ struct kernfs_node *kernfs_find_and_get_
+ if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
+ goto err_unlock;
+
+- spin_unlock(&kernfs_idr_lock);
++ raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
+ return kn;
+ err_unlock:
+- spin_unlock(&kernfs_idr_lock);
++ raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
+ return NULL;
+ }
+
--- /dev/null
+From e3977e0609a07d86406029fceea0fd40d7849368 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 9 Jan 2024 11:48:02 -1000
+Subject: Revert "kernfs: convert kernfs_idr_lock to an irq safe raw spinlock"
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e3977e0609a07d86406029fceea0fd40d7849368 upstream.
+
+This reverts commit dad3fb67ca1cbef87ce700e83a55835e5921ce8a.
+
+The commit converted kernfs_idr_lock to an IRQ-safe raw_spinlock because it
+could be acquired while holding an rq lock through bpf_cgroup_from_id().
+However, kernfs_idr_lock is held while doing GFP_NOWAIT allocations, which
+involves acquiring a non-IRQ-safe and non-raw lock, leading to the following
+lockdep warning:
+
+ =============================
+ [ BUG: Invalid wait context ]
+ 6.7.0-rc5-kzm9g-00251-g655022a45b1c #578 Not tainted
+ -----------------------------
+ swapper/0/0 is trying to lock:
+ dfbcd488 (&c->lock){....}-{3:3}, at: local_lock_acquire+0x0/0xa4
+ other info that might help us debug this:
+ context-{5:5}
+ 2 locks held by swapper/0/0:
+ #0: dfbc9c60 (lock){+.+.}-{3:3}, at: local_lock_acquire+0x0/0xa4
+ #1: c0c012a8 (kernfs_idr_lock){....}-{2:2}, at: __kernfs_new_node.constprop.0+0x68/0x258
+ stack backtrace:
+ CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.7.0-rc5-kzm9g-00251-g655022a45b1c #578
+ Hardware name: Generic SH73A0 (Flattened Device Tree)
+ unwind_backtrace from show_stack+0x10/0x14
+ show_stack from dump_stack_lvl+0x68/0x90
+ dump_stack_lvl from __lock_acquire+0x3cc/0x168c
+ __lock_acquire from lock_acquire+0x274/0x30c
+ lock_acquire from local_lock_acquire+0x28/0xa4
+ local_lock_acquire from ___slab_alloc+0x234/0x8a8
+ ___slab_alloc from __slab_alloc.constprop.0+0x30/0x44
+ __slab_alloc.constprop.0 from kmem_cache_alloc+0x7c/0x148
+ kmem_cache_alloc from radix_tree_node_alloc.constprop.0+0x44/0xdc
+ radix_tree_node_alloc.constprop.0 from idr_get_free+0x110/0x2b8
+ idr_get_free from idr_alloc_u32+0x9c/0x108
+ idr_alloc_u32 from idr_alloc_cyclic+0x50/0xb8
+ idr_alloc_cyclic from __kernfs_new_node.constprop.0+0x88/0x258
+ __kernfs_new_node.constprop.0 from kernfs_create_root+0xbc/0x154
+ kernfs_create_root from sysfs_init+0x18/0x5c
+ sysfs_init from mnt_init+0xc4/0x220
+ mnt_init from vfs_caches_init+0x6c/0x88
+ vfs_caches_init from start_kernel+0x474/0x528
+ start_kernel from 0x0
+
+Let's revert the commit. It's undesirable to spread out raw spinlock usage
+anyway and the problem can be solved by protecting the lookup path with RCU
+instead.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Andrea Righi <andrea.righi@canonical.com>
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Link: http://lkml.kernel.org/r/CAMuHMdV=AKt+mwY7svEq5gFPx41LoSQZ_USME5_MEdWQze13ww@mail.gmail.com
+Link: https://lore.kernel.org/r/20240109214828.252092-2-tj@kernel.org
+Tested-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/kernfs/dir.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -27,7 +27,7 @@ static DEFINE_RWLOCK(kernfs_rename_lock)
+ */
+ static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
+ static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */
+-static DEFINE_RAW_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
++static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
+
+ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
+
+@@ -539,7 +539,6 @@ void kernfs_put(struct kernfs_node *kn)
+ {
+ struct kernfs_node *parent;
+ struct kernfs_root *root;
+- unsigned long flags;
+
+ if (!kn || !atomic_dec_and_test(&kn->count))
+ return;
+@@ -564,9 +563,9 @@ void kernfs_put(struct kernfs_node *kn)
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
+ }
+- raw_spin_lock_irqsave(&kernfs_idr_lock, flags);
++ spin_lock(&kernfs_idr_lock);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+- raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
++ spin_unlock(&kernfs_idr_lock);
+ kmem_cache_free(kernfs_node_cache, kn);
+
+ kn = parent;
+@@ -608,7 +607,6 @@ static struct kernfs_node *__kernfs_new_
+ struct kernfs_node *kn;
+ u32 id_highbits;
+ int ret;
+- unsigned long irqflags;
+
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+@@ -619,13 +617,13 @@ static struct kernfs_node *__kernfs_new_
+ goto err_out1;
+
+ idr_preload(GFP_KERNEL);
+- raw_spin_lock_irqsave(&kernfs_idr_lock, irqflags);
++ spin_lock(&kernfs_idr_lock);
+ ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
+ if (ret >= 0 && ret < root->last_id_lowbits)
+ root->id_highbits++;
+ id_highbits = root->id_highbits;
+ root->last_id_lowbits = ret;
+- raw_spin_unlock_irqrestore(&kernfs_idr_lock, irqflags);
++ spin_unlock(&kernfs_idr_lock);
+ idr_preload_end();
+ if (ret < 0)
+ goto err_out2;
+@@ -661,9 +659,9 @@ static struct kernfs_node *__kernfs_new_
+ return kn;
+
+ err_out3:
+- raw_spin_lock_irqsave(&kernfs_idr_lock, irqflags);
++ spin_lock(&kernfs_idr_lock);
+ idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
+- raw_spin_unlock_irqrestore(&kernfs_idr_lock, irqflags);
++ spin_unlock(&kernfs_idr_lock);
+ err_out2:
+ kmem_cache_free(kernfs_node_cache, kn);
+ err_out1:
+@@ -704,9 +702,8 @@ struct kernfs_node *kernfs_find_and_get_
+ struct kernfs_node *kn;
+ ino_t ino = kernfs_id_ino(id);
+ u32 gen = kernfs_id_gen(id);
+- unsigned long flags;
+
+- raw_spin_lock_irqsave(&kernfs_idr_lock, flags);
++ spin_lock(&kernfs_idr_lock);
+
+ kn = idr_find(&root->ino_idr, (u32)ino);
+ if (!kn)
+@@ -730,10 +727,10 @@ struct kernfs_node *kernfs_find_and_get_
+ if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
+ goto err_unlock;
+
+- raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
++ spin_unlock(&kernfs_idr_lock);
+ return kn;
+ err_unlock:
+- raw_spin_unlock_irqrestore(&kernfs_idr_lock, flags);
++ spin_unlock(&kernfs_idr_lock);
+ return NULL;
+ }
+
of-fix-double-free-in-of_parse_phandle_with_args_map.patch
fbdev-imxfb-fix-left-margin-setting.patch
of-unittest-fix-of_count_phandle_with_args-expected-.patch
+class-fix-use-after-free-in-class_register.patch
+kernfs-convert-kernfs_idr_lock-to-an-irq-safe-raw-spinlock.patch
+revert-kernfs-convert-kernfs_idr_lock-to-an-irq-safe-raw-spinlock.patch