--- /dev/null
+From eaa03d34535872d29004cb5cf77dc9dec1ba9a25 Mon Sep 17 00:00:00 2001
+From: "Andrea Parri (Microsoft)" <parri.andrea@gmail.com>
+Date: Mon, 28 Mar 2022 17:44:57 +0200
+Subject: Drivers: hv: vmbus: Replace smp_store_mb() with virt_store_mb()
+
+From: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
+
+commit eaa03d34535872d29004cb5cf77dc9dec1ba9a25 upstream.
+
+Following the recommendation in Documentation/memory-barriers.txt for
+virtual machine guests.
+
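+For context, the virt_* barriers always expand to the real SMP
+variants, whereas smp_store_mb() degrades to a compiler-only barrier
+on !CONFIG_SMP builds (abridged from include/asm-generic/barrier.h;
+exact guards vary by tree):
+
+  #ifdef CONFIG_SMP
+  #define smp_store_mb(var, value)  __smp_store_mb(var, value)
+  #else
+  #define smp_store_mb(var, value) \
+          do { WRITE_ONCE(var, value); barrier(); } while (0)
+  #endif
+
+  /* for guests talking to an SMP host: always a real barrier */
+  #define virt_store_mb(var, value) __smp_store_mb(var, value)
+
+A guest built without CONFIG_SMP still shares memory with the
+hypervisor, so barrier() alone is not sufficient there.
+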
+Fixes: 8b6a877c060ed ("Drivers: hv: vmbus: Replace the per-CPU channel lists with a global array of channels")
+Signed-off-by: Andrea Parri (Microsoft) <parri.andrea@gmail.com>
+Link: https://lore.kernel.org/r/20220328154457.100872-1-parri.andrea@gmail.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hv/channel_mgmt.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbu
+ * execute:
+ *
+ * (a) In the "normal (i.e., not resuming from hibernation)" path,
+- * the full barrier in smp_store_mb() guarantees that the store
++ * the full barrier in virt_store_mb() guarantees that the store
+ * is propagated to all CPUs before the add_channel_work work
+ * is queued. In turn, add_channel_work is queued before the
+ * channel's ring buffer is allocated/initialized and the
+@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbu
+ * recv_int_page before retrieving the channel pointer from the
+ * array of channels.
+ *
+- * (b) In the "resuming from hibernation" path, the smp_store_mb()
++ * (b) In the "resuming from hibernation" path, the virt_store_mb()
+ * guarantees that the store is propagated to all CPUs before
+ * the VMBus connection is marked as ready for the resume event
+ * (cf. check_ready_for_resume_event()). The interrupt handler
+ * of the VMBus driver and vmbus_chan_sched() can not run before
+ * vmbus_bus_resume() has completed execution (cf. resume_noirq).
+ */
+- smp_store_mb(
++ virt_store_mb(
+ vmbus_connection.channels[channel->offermsg.child_relid],
+ channel);
+ }
--- /dev/null
+From 544808f7e21cb9ccdb8f3aa7de594c05b1419061 Mon Sep 17 00:00:00 2001
+From: Andre Przywara <andre.przywara@arm.com>
+Date: Mon, 4 Apr 2022 12:08:42 +0100
+Subject: irqchip/gic, gic-v3: Prevent GSI to SGI translations
+
+From: Andre Przywara <andre.przywara@arm.com>
+
+commit 544808f7e21cb9ccdb8f3aa7de594c05b1419061 upstream.
+
+At the moment the GIC IRQ domain translation routine happily converts
+ACPI table GSI numbers below 16 to GIC SGIs (Software Generated
+Interrupts aka IPIs). On the Devicetree side we explicitly forbid this
+translation; in fact, the function will never return HWIRQs below 16
+when using a DT-based domain translation.
+
+We expect SGIs to be handled in the first part of the function, and any
+further occurrence should be treated as a firmware bug, so add a check
+and print to report this explicitly and avoid lengthy debug sessions.
+
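+For reference, the GIC architecture partitions the interrupt ID space
+as follows:
+
+  INTID  0..15    SGIs (Software Generated Interrupts, used as IPIs)
+  INTID 16..31    PPIs (Private Peripheral Interrupts)
+  INTID 32..      SPIs (Shared Peripheral Interrupts)
+
+so an ACPI GSI below 16 can never be a legitimate translation target.
+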
+Fixes: 64b499d8df40 ("irqchip/gic-v3: Configure SGIs as standard interrupts")
+Signed-off-by: Andre Przywara <andre.przywara@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20220404110842.2882446-1-andre.przywara@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-gic-v3.c | 6 ++++++
+ drivers/irqchip/irq-gic.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(stru
+ if(fwspec->param_count != 2)
+ return -EINVAL;
+
++ if (fwspec->param[0] < 16) {
++ pr_err(FW_BUG "Illegal GSI%d translation request\n",
++ fwspec->param[0]);
++ return -EINVAL;
++ }
++
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -1085,6 +1085,12 @@ static int gic_irq_domain_translate(stru
+ if(fwspec->param_count != 2)
+ return -EINVAL;
+
++ if (fwspec->param[0] < 16) {
++ pr_err(FW_BUG "Illegal GSI%d translation request\n",
++ fwspec->param[0]);
++ return -EINVAL;
++ }
++
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
--- /dev/null
+From af27e41612ec7e5b4783f589b753a7c31a37aac8 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 17 Mar 2022 09:49:02 +0000
+Subject: irqchip/gic-v4: Wait for GICR_VPENDBASER.Dirty to clear before descheduling
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit af27e41612ec7e5b4783f589b753a7c31a37aac8 upstream.
+
+The way KVM drives GICv4.{0,1} is as follows:
+- vcpu_load() makes the VPE resident, instructing the RD to start
+ scanning for interrupts
+- just before entering the guest, we check that the RD has finished
+ scanning and that we can start running the vcpu
+- on preemption, we deschedule the VPE by making it invalid on
+ the RD
+
+However, we are preemptible between the first two steps. If preemption
+occurs *and* the RD is still scanning, we nonetheless write
+to the GICR_VPENDBASER register while Dirty is set, and bad things
+happen (we're in UNPRED land).
+
+This affects both the 4.0 and 4.1 implementations.
+
+Make sure Dirty is cleared before performing the deschedule,
+meaning that its_clear_vpend_valid() becomes a sort of full VPE
+residency barrier.
+
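+In other words, the deschedule path now polls Dirty on both sides of
+the GICR_VPENDBASER write (a sketch of the hunk below):
+
+  /* wait out any scan still running from vcpu_load() */
+  val = read_vpend_dirty_clear(vlpi_base);
+  val &= ~(GICR_VPENDBASER_Valid | clr);
+  val |= set;
+  gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+  /* ... then wait for the deschedule itself to complete */
+  val = read_vpend_dirty_clear(vlpi_base);
+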
+Reported-by: Jingyi Wang <wangjingyi11@huawei.com>
+Tested-by: Nianyao Tang <tangnianyao@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Fixes: 57e3cebd022f ("KVM: arm64: Delay the polling of the GICR_VPENDBASER.Dirty bit")
+Link: https://lore.kernel.org/r/4aae10ba-b39a-5f84-754b-69c2eb0a2c03@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 28 +++++++++++++++++++---------
+ 1 file changed, 19 insertions(+), 9 deletions(-)
+
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3007,18 +3007,12 @@ static int __init allocate_lpi_tables(vo
+ return 0;
+ }
+
+-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
++static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
+ {
+ u32 count = 1000000; /* 1s! */
+ bool clean;
+ u64 val;
+
+- val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+- val &= ~GICR_VPENDBASER_Valid;
+- val &= ~clr;
+- val |= set;
+- gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+-
+ do {
+ val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ clean = !(val & GICR_VPENDBASER_Dirty);
+@@ -3029,10 +3023,26 @@ static u64 its_clear_vpend_valid(void __
+ }
+ } while (!clean && count);
+
+- if (unlikely(val & GICR_VPENDBASER_Dirty)) {
++ if (unlikely(!clean))
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
++
++ return val;
++}
++
++static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
++{
++ u64 val;
++
++ /* Make sure we wait until the RD is done with the initial scan */
++ val = read_vpend_dirty_clear(vlpi_base);
++ val &= ~GICR_VPENDBASER_Valid;
++ val &= ~clr;
++ val |= set;
++ gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
++
++ val = read_vpend_dirty_clear(vlpi_base);
++ if (unlikely(val & GICR_VPENDBASER_Dirty))
+ val |= GICR_VPENDBASER_PendingLast;
+- }
+
+ return val;
+ }
--- /dev/null
+From a431dbbc540532b7465eae4fc8b56a85a9fc7d17 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Fri, 8 Apr 2022 13:09:01 -0700
+Subject: mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
+
+From: Waiman Long <longman@redhat.com>
+
+commit a431dbbc540532b7465eae4fc8b56a85a9fc7d17 upstream.
+
+The gcc 12 compiler reports a "'mem_section' will never be NULL" warning
+on the following code:
+
+  static inline struct mem_section *__nr_to_section(unsigned long nr)
+  {
+  #ifdef CONFIG_SPARSEMEM_EXTREME
+          if (!mem_section)
+                  return NULL;
+  #endif
+          if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+                  return NULL;
+          :
+
+It happens with CONFIG_SPARSEMEM_EXTREME off. The mem_section definition
+is
+
+ #ifdef CONFIG_SPARSEMEM_EXTREME
+ extern struct mem_section **mem_section;
+ #else
+ extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+ #endif
+
+In the !CONFIG_SPARSEMEM_EXTREME case, mem_section is a static
+2-dimensional array and so the check "!mem_section[SECTION_NR_TO_ROOT(nr)]"
+doesn't make sense.
+
+Fix this warning by moving the "!mem_section[SECTION_NR_TO_ROOT(nr)]"
+check up inside the CONFIG_SPARSEMEM_EXTREME block and adding an
+explicit NR_SECTION_ROOTS check to make sure that there is no
+out-of-bound array access.
+
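+For reference, the root arithmetic involved (abridged from
+include/linux/mmzone.h):
+
+  #ifdef CONFIG_SPARSEMEM_EXTREME
+  #define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
+  #else
+  #define SECTIONS_PER_ROOT       1
+  #endif
+  #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
+  #define NR_SECTION_ROOTS        DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
+
+With SECTIONS_PER_ROOT == 1, the new bound check against
+NR_SECTION_ROOTS is the only test that is meaningful for the static
+array, while the CONFIG_SPARSEMEM_EXTREME case keeps its NULL checks.
+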
+Link: https://lkml.kernel.org/r/20220331180246.2746210-1-longman@redhat.com
+Fixes: 3e347261a80b ("sparsemem extreme implementation")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reported-by: Justin Forbes <jforbes@redhat.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Rafael Aquini <aquini@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mmzone.h | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1351,13 +1351,16 @@ static inline unsigned long *section_to_
+
+ static inline struct mem_section *__nr_to_section(unsigned long nr)
+ {
++ unsigned long root = SECTION_NR_TO_ROOT(nr);
++
++ if (unlikely(root >= NR_SECTION_ROOTS))
++ return NULL;
++
+ #ifdef CONFIG_SPARSEMEM_EXTREME
+- if (!mem_section)
++ if (!mem_section || !mem_section[root])
+ return NULL;
+ #endif
+- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+- return NULL;
+- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
++ return &mem_section[root][nr & SECTION_ROOT_MASK];
+ }
+ extern size_t mem_section_usage_size(void);
+
--- /dev/null
+From af41d2866f7d75bbb38d487f6ec7770425d70e45 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Sun, 27 Mar 2022 09:32:26 +0200
+Subject: powerpc/64: Fix build failure with allyesconfig in book3s_64_entry.S
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit af41d2866f7d75bbb38d487f6ec7770425d70e45 upstream.
+
+Using conditional branches between two files is hazardous; they may
+get linked too far from each other.
+
+ arch/powerpc/kvm/book3s_64_entry.o:(.text+0x3ec): relocation truncated
+ to fit: R_PPC64_REL14 (stub) against symbol `system_reset_common'
+ defined in .text section in arch/powerpc/kernel/head_64.o
+
+Reorganise the code to use unconditional branches.
+
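+For reference, the reach of the two branch forms (Power ISA
+encodings):
+
+  bc (conditional, R_PPC64_REL14):   14-bit displacement << 2 -> +/- 32 KB
+  b  (unconditional, R_PPC64_REL24): 24-bit displacement << 2 -> +/- 32 MB
+
+A conditional branch to a nearby local trampoline that then branches
+unconditionally, as done below, stays within both limits.
+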
+Fixes: 89d35b239101 ("KVM: PPC: Book3S HV P9: Implement the rest of the P9 path in C")
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+[mpe: Avoid odd-looking bne ., use named local labels]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/89cf27bf43ee07a0b2879b9e8e2f5cd6386a3645.1648366338.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_64_entry.S | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_64_entry.S
++++ b/arch/powerpc/kvm/book3s_64_entry.S
+@@ -407,10 +407,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+ */
+ ld r10,HSTATE_SCRATCH0(r13)
+ cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+- beq machine_check_common
++ beq .Lcall_machine_check_common
+
+ cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+- beq system_reset_common
++ beq .Lcall_system_reset_common
+
+ b .
++
++.Lcall_machine_check_common:
++ b machine_check_common
++
++.Lcall_system_reset_common:
++ b system_reset_common
+ #endif
--- /dev/null
+From 5b6547ed97f4f5dfc23f8e3970af6d11d7b7ed7e Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 16 Mar 2022 22:03:41 +0100
+Subject: sched/core: Fix forceidle balancing
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 5b6547ed97f4f5dfc23f8e3970af6d11d7b7ed7e upstream.
+
+Steve reported that ChromeOS encounters the forceidle balancer being
+run from rt_mutex_setprio()'s balance_callback() invocation and
+explodes.
+
+Now, the forceidle balancer gets queued every time the idle task gets
+selected, i.e. from set_next_task(), which is strictly too often.
+rt_mutex_setprio() also uses set_next_task() in the 'change' pattern:
+
+  queued = task_on_rq_queued(p); /* p->on_rq == TASK_ON_RQ_QUEUED */
+  running = task_current(rq, p); /* rq->curr == p */
+
+  if (queued)
+          dequeue_task(...);
+  if (running)
+          put_prev_task(...);
+
+  /* change task properties */
+
+  if (queued)
+          enqueue_task(...);
+  if (running)
+          set_next_task(...);
+
+However, rt_mutex_setprio() will explicitly not run this pattern on
+the idle task (since priority boosting the idle task is quite insane).
+Most other 'change' pattern users are pidhash based and would also not
+apply to idle.
+
+Also, the change pattern doesn't contain a __balance_callback()
+invocation and hence we could have an out-of-band balance-callback,
+which *should* trigger the WARN in rq_pin_lock() (which guards against
+this exact anti-pattern).
+
+So while none of that explains how this happens, it does indicate that
+having it in set_next_task() might not be the most robust option.
+
+Instead, explicitly queue the forceidle balancer from pick_next_task()
+when it does indeed result in forceidle selection. Having it here
+ensures it can only be triggered under the __schedule() rq->lock
+instance, and hence must be run from that context.
+
+This also happens to clean up the code a little, so win-win.
+
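+For context, balance callbacks are queued under rq->lock and run from
+__schedule()'s unlock path; roughly (a sketch, not verbatim):
+
+  static inline void
+  queue_balance_callback(struct rq *rq, struct callback_head *head,
+                         void (*func)(struct rq *rq))
+  {
+          lockdep_assert_rq_held(rq);
+
+          head->func = (void (*)(struct callback_head *))func;
+          head->next = rq->balance_callback;
+          rq->balance_callback = head;
+  }
+
+Queueing from pick_next_task() therefore ties the callback to the
+__schedule() rq->lock instance, so it can never fire out of band.
+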
+Fixes: d2dfa17bc7de ("sched: Trivial forced-newidle balancer")
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: T.J. Alumbaugh <talumbau@chromium.org>
+Link: https://lkml.kernel.org/r/20220330160535.GN8939@worktop.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 14 ++++++++++----
+ kernel/sched/idle.c | 1 -
+ kernel/sched/sched.h | 6 ------
+ 3 files changed, 10 insertions(+), 11 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5669,6 +5669,8 @@ pick_task(struct rq *rq, const struct sc
+
+ extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
+
++static void queue_core_balance(struct rq *rq);
++
+ static struct task_struct *
+ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ {
+@@ -5716,7 +5718,7 @@ pick_next_task(struct rq *rq, struct tas
+ }
+
+ rq->core_pick = NULL;
+- return next;
++ goto out;
+ }
+
+ put_prev_task_balance(rq, prev, rf);
+@@ -5763,7 +5765,7 @@ pick_next_task(struct rq *rq, struct tas
+ */
+ WARN_ON_ONCE(fi_before);
+ task_vruntime_update(rq, next, false);
+- goto done;
++ goto out_set_next;
+ }
+ }
+
+@@ -5897,8 +5899,12 @@ again:
+ resched_curr(rq_i);
+ }
+
+-done:
++out_set_next:
+ set_next_task(rq, next);
++out:
++ if (rq->core->core_forceidle_count && next == rq->idle)
++ queue_core_balance(rq);
++
+ return next;
+ }
+
+@@ -5993,7 +5999,7 @@ static void sched_core_balance(struct rq
+
+ static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+
+-void queue_core_balance(struct rq *rq)
++static void queue_core_balance(struct rq *rq)
+ {
+ if (!sched_core_enabled(rq))
+ return;
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -437,7 +437,6 @@ static void set_next_task_idle(struct rq
+ {
+ update_idle_core(rq);
+ schedstat_inc(rq->sched_goidle);
+- queue_core_balance(rq);
+ }
+
+ #ifdef CONFIG_SMP
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1242,8 +1242,6 @@ static inline bool sched_group_cookie_ma
+ return false;
+ }
+
+-extern void queue_core_balance(struct rq *rq);
+-
+ static inline bool sched_core_enqueued(struct task_struct *p)
+ {
+ return !RB_EMPTY_NODE(&p->core_node);
+@@ -1282,10 +1280,6 @@ static inline raw_spinlock_t *__rq_lockp
+ return &rq->__lock;
+ }
+
+-static inline void queue_core_balance(struct rq *rq)
+-{
+-}
+-
+ static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+ {
+ return true;
--- /dev/null
+From 386ef214c3c6ab111d05e1790e79475363abaa05 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 17 Mar 2022 15:51:32 +0100
+Subject: sched: Teach the forced-newidle balancer about CPU affinity limitation.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 386ef214c3c6ab111d05e1790e79475363abaa05 upstream.
+
+try_steal_cookie() looks at task_struct::cpus_mask to decide if the
+task could be moved to `this' CPU. It ignores that the task might be in
+a migration-disabled section while not on the CPU. In this case the task
+must not be moved, otherwise per-CPU assumptions are broken.
+
+Use is_cpu_allowed(), as suggested by Peter Zijlstra, to decide if a
+task can be moved.
+
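+For context, is_cpu_allowed() first tests p->cpus_ptr, which is
+narrowed to the task's current CPU while it is migration-disabled and
+scheduled out; an abridged sketch (not verbatim):
+
+  static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+  {
+          if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+                  return false;
+
+          /* migration disabled: don't allow migration */
+          if (is_migration_disabled(p))
+                  return cpu_online(cpu);
+          ...
+  }
+
+task_struct::cpus_mask, by contrast, keeps the full affinity mask
+while migration is disabled, which is why testing it is not enough.
+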
+Fixes: d2dfa17bc7de6 ("sched: Trivial forced-newidle balancer")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/YjNK9El+3fzGmswf@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5933,7 +5933,7 @@ static bool try_steal_cookie(int this, i
+ if (p == src->core_pick || p == src->curr)
+ goto next;
+
+- if (!cpumask_test_cpu(this, &p->cpus_mask))
++ if (!is_cpu_allowed(p, this))
+ goto next;
+
+ if (p->core_occupation > dst->idle->core_occupation)
selftests-cgroup-test-open-time-credential-usage-for-migration-checks.patch
selftests-cgroup-test-open-time-cgroup-namespace-usage-for-migration-checks.patch
mm-don-t-skip-swap-entry-even-if-zap_details-specified.patch
+drivers-hv-vmbus-replace-smp_store_mb-with-virt_store_mb.patch
+x86-bug-prevent-shadowing-in-__warn_flags.patch
+sched-core-fix-forceidle-balancing.patch
+sched-teach-the-forced-newidle-balancer-about-cpu-affinity-limitation.patch
+x86-static_call-fix-__static_call_return0-for-i386.patch
+irqchip-gic-v4-wait-for-gicr_vpendbaser.dirty-to-clear-before-descheduling.patch
+powerpc-64-fix-build-failure-with-allyesconfig-in-book3s_64_entry.s.patch
+irqchip-gic-gic-v3-prevent-gsi-to-sgi-translations.patch
+mm-sparsemem-fix-mem_section-will-never-be-null-gcc-12-warning.patch
--- /dev/null
+From 9ce02f0fc68326dd1f87a0a3a4c6ae7fdd39e6f6 Mon Sep 17 00:00:00 2001
+From: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Date: Thu, 24 Mar 2022 11:37:42 +0900
+Subject: x86/bug: Prevent shadowing in __WARN_FLAGS
+
+From: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+
+commit 9ce02f0fc68326dd1f87a0a3a4c6ae7fdd39e6f6 upstream.
+
+The macro __WARN_FLAGS() uses a local variable named "f". This being a
+common name, there is a risk of shadowing other variables.
+
+For example, GCC would yield:
+
+| In file included from ./include/linux/bug.h:5,
+| from ./include/linux/cpumask.h:14,
+| from ./arch/x86/include/asm/cpumask.h:5,
+| from ./arch/x86/include/asm/msr.h:11,
+| from ./arch/x86/include/asm/processor.h:22,
+| from ./arch/x86/include/asm/timex.h:5,
+| from ./include/linux/timex.h:65,
+| from ./include/linux/time32.h:13,
+| from ./include/linux/time.h:60,
+| from ./include/linux/stat.h:19,
+| from ./include/linux/module.h:13,
+| from virt/lib/irqbypass.mod.c:1:
+| ./include/linux/rcupdate.h: In function 'rcu_head_after_call_rcu':
+| ./arch/x86/include/asm/bug.h:80:21: warning: declaration of 'f' shadows a parameter [-Wshadow]
+| 80 | __auto_type f = BUGFLAG_WARNING|(flags); \
+| | ^
+| ./include/asm-generic/bug.h:106:17: note: in expansion of macro '__WARN_FLAGS'
+| 106 | __WARN_FLAGS(BUGFLAG_ONCE | \
+| | ^~~~~~~~~~~~
+| ./include/linux/rcupdate.h:1007:9: note: in expansion of macro 'WARN_ON_ONCE'
+| 1007 | WARN_ON_ONCE(func != (rcu_callback_t)~0L);
+| | ^~~~~~~~~~~~
+| In file included from ./include/linux/rbtree.h:24,
+| from ./include/linux/mm_types.h:11,
+| from ./include/linux/buildid.h:5,
+| from ./include/linux/module.h:14,
+| from virt/lib/irqbypass.mod.c:1:
+| ./include/linux/rcupdate.h:1001:62: note: shadowed declaration is here
+| 1001 | rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+| | ~~~~~~~~~~~~~~~^
+
+For reference, sparse also warns about it, c.f. [1].
+
+This patch renames the variable from f to __flags (with two underscore
+prefixes as suggested in the Linux kernel coding style [2]) in order
+to prevent collisions.
+
+[1] https://lore.kernel.org/all/CAFGhKbyifH1a+nAMCvWM88TK6fpNPdzFtUXPmRGnnQeePV+1sw@mail.gmail.com/
+
+[2] Linux kernel coding style, section 12) Macros, Enums and RTL,
+paragraph 5) namespace collisions when defining local variables in
+macros resembling functions
+https://www.kernel.org/doc/html/latest/process/coding-style.html#macros-enums-and-rtl
+
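+A minimal illustration of the hazard (hypothetical code, not from the
+kernel):
+
+  #define WARN_LIKE(flags) \
+          do { unsigned int f = (flags); (void)f; } while (0)
+
+  void handler(void *rhp, void (*f)(void *))
+  {
+          WARN_LIKE(1);   /* macro-local 'f' shadows parameter 'f' */
+  }
+
+Building with -Wshadow flags the macro-local declaration exactly as
+in the report above.
+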
+Fixes: bfb1a7c91fb7 ("x86/bug: Merge annotate_reachable() into _BUG_FLAGS() asm")
+Signed-off-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20220324023742.106546-1-mailhol.vincent@wanadoo.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/bug.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -77,9 +77,9 @@ do { \
+ */
+ #define __WARN_FLAGS(flags) \
+ do { \
+- __auto_type f = BUGFLAG_WARNING|(flags); \
++ __auto_type __flags = BUGFLAG_WARNING|(flags); \
+ instrumentation_begin(); \
+- _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
++ _BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE); \
+ instrumentation_end(); \
+ } while (0)
+
--- /dev/null
+From 1cd5f059d956e6f614ba6666ecdbcf95db05d5f5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 18 Mar 2022 21:24:38 +0100
+Subject: x86,static_call: Fix __static_call_return0 for i386
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 1cd5f059d956e6f614ba6666ecdbcf95db05d5f5 upstream.
+
+Paolo reported that the instruction sequence that is used to replace:
+
+ call __static_call_return0
+
+namely:
+
+  66 66 48 31 c0        data16 data16 xor %rax,%rax
+
+decodes to something else on i386, namely:
+
+  66 66 48              data16 dec %ax
+  31 c0                 xor %eax,%eax
+
+Which is a nonsensical sequence that happens to have the same outcome.
+*However*, an important distinction is that it consists of 2
+instructions, which is a problem when the thing needs to be overwritten
+with a regular call instruction again.
+
+As such, replace the instruction with something that decodes the same
+on both i386 and x86_64.
+
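+The replacement chosen below decodes identically on both:
+
+  2e 2e 2e 31 c0        cs cs cs xor %eax,%eax
+
+0x2e is the CS segment-override prefix, which has no effect on a
+register-to-register XOR on either architecture; and since writing
+%eax zero-extends into %rax on x86_64, the single 5-byte instruction
+clears the full register in both cases.
+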
+Fixes: 3f2a8fc4b15d ("static_call/x86: Add __static_call_return0()")
+Reported-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20220318204419.GT8939@worktop.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/static_call.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -12,10 +12,9 @@ enum insn_type {
+ };
+
+ /*
+- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
+- * The REX.W cancels the effect of any data16.
++ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
+ */
+-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
++static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
+
+ static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
+ {