--- /dev/null
+From 252ce073c79006ca4713d9d789b5c7dd343a42a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Sep 2023 16:16:07 +0300
+Subject: cgroup: Fix suspicious rcu_dereference_check() usage warning
+
+From: Chengming Zhou <zhouchengming@bytedance.com>
+
+commit f2aa197e4794bf4c2c0c9570684f86e6fa103e8b upstream.
+
+task_css_set_check() uses rcu_dereference_check() to check for
+rcu_read_lock_held() on the read side, which no longer holds after commit
+dc6e0818bc9a ("sched/cpuacct: Optimize away RCU read lock"): that commit
+dropped the explicit rcu_read_lock() and switched to an RCU-sched
+read-side critical section. So fix the RCU warning by also checking
+rcu_read_lock_sched_held().
+
+Fixes: dc6e0818bc9a ("sched/cpuacct: Optimize away RCU read lock")
+Reported-by: Linux Kernel Functional Testing <lkft@linaro.org>
+Reported-by: syzbot+16e3f2c77e7c5a0113f9@syzkaller.appspotmail.com
+Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Tested-by: Zhouyi Zhou <zhouzhouyi@gmail.com>
+Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Link: https://lore.kernel.org/r/20220305034103.57123-1-zhouchengming@bytedance.com
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/cgroup.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index f425389ce4bb2..0d97d1cf660f7 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -451,6 +451,7 @@ extern struct mutex cgroup_mutex;
+ extern spinlock_t css_set_lock;
+ #define task_css_set_check(task, __c) \
+ rcu_dereference_check((task)->cgroups, \
++ rcu_read_lock_sched_held() || \
+ lockdep_is_held(&cgroup_mutex) || \
+ lockdep_is_held(&css_set_lock) || \
+ ((task)->flags & PF_EXITING) || (__c))
+--
+2.40.1
+
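A minimal sketch of the resulting check, not part of the patch; the helper
name example_task_css_set() is made up for illustration, and the
caller-supplied extra condition of the real macro is omitted. It shows
roughly what task_css_set_check() expands to once rcu_read_lock_sched_held()
is added, the term that covers callers relying on the rq lock (preemption
disabled) rather than an explicit rcu_read_lock().

	/* Illustrative only: approximate expansion of task_css_set_check()
	 * after this fix.  The rcu_read_lock_sched_held() term satisfies
	 * lockdep for RCU-sched read-side callers such as cpuacct_charge()
	 * after commit dc6e0818bc9a.
	 */
	#include <linux/cgroup.h>
	#include <linux/rcupdate.h>

	static inline struct css_set *example_task_css_set(struct task_struct *task)
	{
		return rcu_dereference_check(task->cgroups,
					     rcu_read_lock_sched_held() ||
					     lockdep_is_held(&cgroup_mutex) ||
					     lockdep_is_held(&css_set_lock) ||
					     (task->flags & PF_EXITING));
	}
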
--- /dev/null
+From d77bea4b24c17e826c412d44d957fa663ff2e01e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Jul 2023 17:26:54 -0300
+Subject: perf build: Define YYNOMEM as YYNOABORT for bison < 3.81
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit 88cc47e24597971b05b6e94c28a2fc81d2a8d61a ]
+
+YYNOMEM was introduced in bison 3.81, so define it as YYABORT for older
+versions, which should provide the previous perf behaviour.
+
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/Build | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index f2914d5bed6e8..7d085927da413 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -263,6 +263,12 @@ ifeq ($(BISON_GE_35),1)
+ else
+ bison_flags += -w
+ endif
++
++BISON_LT_381 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 381)
++ifeq ($(BISON_LT_381),1)
++ bison_flags += -DYYNOMEM=YYABORT
++endif
++
+ CFLAGS_parse-events-bison.o += $(bison_flags)
+ CFLAGS_pmu-bison.o += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+ CFLAGS_expr-bison.o += -DYYLTYPE_IS_TRIVIAL=0 $(bison_flags)
+--
+2.40.1
+
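A minimal sketch of what the new define means at the source level, assuming
bison's documented action macros (YYABORT aborts the parse, YYNOMEM reports
memory exhaustion); the #ifndef fallback below only illustrates the effect,
not the mechanism the patch uses, which is the -DYYNOMEM=YYABORT flag added
above for pre-3.81 bison.

	/* Illustrative only: parsers generated by older bison do not
	 * provide YYNOMEM, so map it to YYABORT.  A grammar action such as
	 *
	 *	if (!ptr)
	 *		YYNOMEM;
	 *
	 * then simply aborts the parse, which matches the previous perf
	 * behaviour.
	 */
	#ifndef YYNOMEM
	#define YYNOMEM YYABORT
	#endif
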
--- /dev/null
+From feaafda418f917b009ba2ab363faaa1954f8a855 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Sep 2023 16:16:06 +0300
+Subject: sched/cpuacct: Optimize away RCU read lock
+
+From: Chengming Zhou <zhouchengming@bytedance.com>
+
+commit dc6e0818bc9a0336d9accf3ea35d146d72aa7a18 upstream.
+
+Since cpuacct_charge() is called from the scheduler's update_curr(),
+the rq lock must already be held there, so the RCU read lock can
+be optimized away.
+
+Do the same thing in its wrapper cgroup_account_cputime(), but we
+can't use lockdep_assert_rq_held() there, since it is defined in
+kernel/sched/sched.h.
+
+Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220220051426.5274-2-zhouchengming@bytedance.com
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/cgroup.h | 2 --
+ kernel/sched/cpuacct.c | 4 +---
+ 2 files changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 45cdb12243e3f..f425389ce4bb2 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -792,11 +792,9 @@ static inline void cgroup_account_cputime(struct task_struct *task,
+
+ cpuacct_charge(task, delta_exec);
+
+- rcu_read_lock();
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime(cgrp, delta_exec);
+- rcu_read_unlock();
+ }
+
+ static inline void cgroup_account_cputime_field(struct task_struct *task,
+diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
+index cacc2076ad214..f0af0fecde9d9 100644
+--- a/kernel/sched/cpuacct.c
++++ b/kernel/sched/cpuacct.c
+@@ -331,12 +331,10 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+ unsigned int cpu = task_cpu(tsk);
+ struct cpuacct *ca;
+
+- rcu_read_lock();
++ lockdep_assert_rq_held(cpu_rq(cpu));
+
+ for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
+ *per_cpu_ptr(ca->cpuusage, cpu) += cputime;
+-
+- rcu_read_unlock();
+ }
+
+ /*
+--
+2.40.1
+
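A minimal sketch of the locking argument, not part of the patch: holding the
raw rq spinlock disables preemption, and a preemption-disabled region is an
RCU-sched read-side critical section, so the explicit
rcu_read_lock()/rcu_read_unlock() pair around the cgroup lookups is
redundant on this path.  The function name below is made up for
illustration.

	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/bug.h>

	/* Illustrative only: the property cpuacct_charge() now relies on.
	 * Callers holding the rq lock already run with preemption disabled,
	 * so rcu_read_lock_sched_held() is true and RCU-protected cgroup
	 * pointers stay valid without an explicit rcu_read_lock().
	 */
	static void example_rcu_sched_section(void)
	{
		preempt_disable();
		WARN_ON_ONCE(!rcu_read_lock_sched_held());
		/* ... dereference RCU-protected pointers, e.g. task_ca() ... */
		preempt_enable();
	}
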
powerpc-watchpoints-annotate-atomic-context-in-more-.patch
ncsi-propagate-carrier-gain-loss-events-to-the-ncsi-.patch
fbdev-sh7760fb-depend-on-fb-y.patch
+perf-build-define-yynomem-as-yynoabort-for-bison-3.8.patch
+sched-cpuacct-optimize-away-rcu-read-lock.patch
+cgroup-fix-suspicious-rcu_dereference_check-usage-wa.patch