--- /dev/null
+From d16d69bf5a25d91c6d8f3e29711be12551bf56cd Mon Sep 17 00:00:00 2001
+From: Meng Tang <tangmeng@uniontech.com>
+Date: Mon, 11 Jul 2022 18:17:44 +0800
+Subject: ALSA: hda/conexant: Apply quirk for another HP ProDesk 600 G3 model
+
+From: Meng Tang <tangmeng@uniontech.com>
+
+commit d16d69bf5a25d91c6d8f3e29711be12551bf56cd upstream.
+
+There is another HP ProDesk 600 G3 model with the PCI SSID 103c:82b4
+that requires the quirk HP_MIC_NO_PRESENCE. Add the corresponding
+entry to the quirk table.
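+
+For context, entries in this table are matched against the device's PCI
+subsystem IDs at probe time. As a rough illustration (a simplified sketch
+of snd_pci_quirk_lookup()-style matching, with shortened struct names and
+mask handling omitted):
+
+  struct pci_ids { unsigned short subsystem_vendor, subsystem_device; };
+  struct quirk {
+          unsigned short subvendor, subdevice;
+          const char *name;
+          int value;
+  };
+
+  /* walk the table until subvendor/subdevice match (0 = wildcard) */
+  static const struct quirk *lookup(const struct pci_ids *pci,
+                                    const struct quirk *list)
+  {
+          const struct quirk *q;
+
+          for (q = list; q->subvendor || q->subdevice; q++) {
+                  if (q->subvendor == pci->subsystem_vendor &&
+                      (!q->subdevice ||
+                       q->subdevice == pci->subsystem_device))
+                          return q;  /* 103c:82b4 now resolves here */
+          }
+          return NULL;
+  }
+
+With the new entry, machines reporting SSID 103c:82b4 pick up
+CXT_FIXUP_HP_MIC_NO_PRESENCE automatically.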
+
+Signed-off-by: Meng Tang <tangmeng@uniontech.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20220711101744.25189-1-tangmeng@uniontech.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/pci/hda/patch_conexant.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -955,6 +955,7 @@ static const struct snd_pci_quirk cxt506
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x82b4, "HP ProDesk 600 G3", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
--- /dev/null
+From e4ced82deb5fb17222fb82e092c3f8311955b585 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Tue, 28 Jun 2022 08:55:45 +0100
+Subject: ARM: 9213/1: Print message about disabled Spectre workarounds only once
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit e4ced82deb5fb17222fb82e092c3f8311955b585 upstream.
+
+Print the message about disabled Spectre workarounds only once. The
+message is printed each time the CPU comes out of idle on NVIDIA Tegra
+boards, flooding the kernel log and making the system unusable.
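+
+pr_info_once() keeps the diagnostic but bounds it to a single line in the
+log; roughly (a simplified sketch of the printk_once machinery), it
+expands to a static-flag guard:
+
+  #define pr_info_once(fmt, ...)                  \
+  ({                                              \
+          static bool __print_once;               \
+                                                  \
+          if (!__print_once) {                    \
+                  __print_once = true;            \
+                  pr_info(fmt, ##__VA_ARGS__);    \
+          }                                       \
+  })
+
+Note that smp_processor_id() is dropped from the message as well: with
+_once semantics only the first CPU to leave idle would ever be reported,
+so the CPU number carries no useful information.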
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/proc-v7-bugs.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -110,8 +110,7 @@ static unsigned int spectre_v2_install_w
+ #else
+ static unsigned int spectre_v2_install_workaround(unsigned int method)
+ {
+- pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n",
+- smp_processor_id());
++ pr_info_once("Spectre V2: workarounds disabled by configuration\n");
+
+ return SPECTRE_VULNERABLE;
+ }
--- /dev/null
+From e5c46fde75e43c15a29b40e5fc5641727f97ae47 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Thu, 30 Jun 2022 16:46:54 +0100
+Subject: ARM: 9214/1: alignment: advance IT state after emulating Thumb instruction
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit e5c46fde75e43c15a29b40e5fc5641727f97ae47 upstream.
+
+After emulating a misaligned load or store issued in Thumb mode, we have
+to advance the IT state by hand, or it will get out of sync with the
+actual instruction stream, which means we'll end up applying the wrong
+condition code to subsequent instructions. This might corrupt the
+program state rather catastrophically.
+
+So borrow the it_advance() helper from the probing code, and use it on
+CPSR if the emulated instruction is Thumb.
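+
+As a sanity check, the helper's behaviour can be exercised stand-alone;
+the sketch below is a userspace harness that simply mirrors the kernel
+code (PSR_IT_MASK is the union of both CPSR IT fields, 0x0600fc00):
+
+  #include <stdio.h>
+
+  #define PSR_IT_MASK 0x0600fc00UL
+
+  static unsigned long it_advance(unsigned long cpsr)
+  {
+          if ((cpsr & 0x06000400) == 0) {
+                  /* ITSTATE<2:0> == 0: end of IT block */
+                  cpsr &= ~PSR_IT_MASK;
+          } else {
+                  /* shift ITSTATE<4:0> left by one */
+                  const unsigned long mask = 0x06001c00;
+                  unsigned long it = cpsr & mask;
+                  it <<= 1;
+                  it |= it >> (27 - 10);
+                  it &= mask;
+                  cpsr &= ~mask;
+                  cpsr |= it;
+          }
+          return cpsr;
+  }
+
+  int main(void)
+  {
+          /* "ITT EQ": two conditional insns pending, ITSTATE<2> -> CPSR<10> */
+          unsigned long cpsr = 0x00000400;
+
+          cpsr = it_advance(cpsr);   /* prints 0x800: one insn left */
+          printf("after 1st insn: %#lx\n", cpsr & PSR_IT_MASK);
+          cpsr = it_advance(cpsr);   /* prints 0: IT block finished */
+          printf("after 2nd insn: %#lx\n", cpsr & PSR_IT_MASK);
+          return 0;
+  }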
+
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/ptrace.h | 26 ++++++++++++++++++++++++++
+ arch/arm/mm/alignment.c | 3 +++
+ arch/arm/probes/decode.h | 26 +-------------------------
+ 3 files changed, 30 insertions(+), 25 deletions(-)
+
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -167,5 +167,31 @@ static inline unsigned long user_stack_p
+ ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
+ })
+
++
++/*
++ * Update ITSTATE after normal execution of an IT block instruction.
++ *
++ * The 8 IT state bits are split into two parts in CPSR:
++ * ITSTATE<1:0> are in CPSR<26:25>
++ * ITSTATE<7:2> are in CPSR<15:10>
++ */
++static inline unsigned long it_advance(unsigned long cpsr)
++{
++ if ((cpsr & 0x06000400) == 0) {
++ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
++ cpsr &= ~PSR_IT_MASK;
++ } else {
++ /* We need to shift left ITSTATE<4:0> */
++ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
++ unsigned long it = cpsr & mask;
++ it <<= 1;
++ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
++ it &= mask;
++ cpsr &= ~mask;
++ cpsr |= it;
++ }
++ return cpsr;
++}
++
+ #endif /* __ASSEMBLY__ */
+ #endif
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -936,6 +936,9 @@ do_alignment(unsigned long addr, unsigne
+ if (type == TYPE_LDST)
+ do_alignment_finish_ldst(addr, instr, regs, offset);
+
++ if (thumb_mode(regs))
++ regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
++
+ return 0;
+
+ bad_or_fault:
+--- a/arch/arm/probes/decode.h
++++ b/arch/arm/probes/decode.h
+@@ -22,6 +22,7 @@
+ #include <linux/types.h>
+ #include <linux/stddef.h>
+ #include <asm/probes.h>
++#include <asm/ptrace.h>
+ #include <asm/kprobes.h>
+
+ void __init arm_probes_decode_init(void);
+@@ -43,31 +44,6 @@ void __init find_str_pc_offset(void);
+ #endif
+
+
+-/*
+- * Update ITSTATE after normal execution of an IT block instruction.
+- *
+- * The 8 IT state bits are split into two parts in CPSR:
+- * ITSTATE<1:0> are in CPSR<26:25>
+- * ITSTATE<7:2> are in CPSR<15:10>
+- */
+-static inline unsigned long it_advance(unsigned long cpsr)
+- {
+- if ((cpsr & 0x06000400) == 0) {
+- /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+- cpsr &= ~PSR_IT_MASK;
+- } else {
+- /* We need to shift left ITSTATE<4:0> */
+- const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+- unsigned long it = cpsr & mask;
+- it <<= 1;
+- it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+- it &= mask;
+- cpsr &= ~mask;
+- cpsr |= it;
+- }
+- return cpsr;
+-}
+-
+ static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
+ {
+ long cpsr = regs->ARM_cpsr;
--- /dev/null
+From 07fd5b6cdf3cc30bfde8fe0f644771688be04447 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Mon, 13 Jun 2022 12:19:50 -1000
+Subject: cgroup: Use separate src/dst nodes when preloading css_sets for migration
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 07fd5b6cdf3cc30bfde8fe0f644771688be04447 upstream.
+
+Each cset (css_set) is pinned by its tasks. When we're moving tasks around
+across csets for a migration, we need to hold the source and destination
+csets to ensure that they don't go away while we're moving tasks about. This
+is done by linking cset->mg_preload_node on either the
+mgctx->preloaded_src_csets or mgctx->preloaded_dst_csets list. Using the
+same cset->mg_preload_node for both the src and dst lists was deemed okay as
+a cset can't be both the source and destination at the same time.
+
+Unfortunately, this overloading becomes problematic when multiple tasks are
+involved in a migration and some of them are identity noop migrations while
+others are actually moving across cgroups. For example, this can happen with
+the following sequence on cgroup1:
+
+ #1> mkdir -p /sys/fs/cgroup/misc/a/b
+ #2> echo $$ > /sys/fs/cgroup/misc/a/cgroup.procs
+ #3> RUN_A_COMMAND_WHICH_CREATES_MULTIPLE_THREADS &
+ #4> PID=$!
+ #5> echo $PID > /sys/fs/cgroup/misc/a/b/tasks
+ #6> echo $PID > /sys/fs/cgroup/misc/a/cgroup.procs
+
+Here, #5 moves the group leader alone into a/b, and #6 then moves the whole
+process, including the group leader, back into a. In this final migration,
+non-leader threads would be doing identity migrations while the group leader
+is doing an actual one.
+
+After #3, let's say the whole process was in cset A, and that after #5, the
+leader moves to cset B. Then, during #6, the following happens:
+
+ 1. cgroup_migrate_add_src() is called on B for the leader.
+
+ 2. cgroup_migrate_add_src() is called on A for the other threads.
+
+ 3. cgroup_migrate_prepare_dst() is called. It scans the src list.
+
+ 4. It notices that B wants to migrate to A, so it tries to add A to the
+    dst list but realizes that A's ->mg_preload_node is already busy.
+
+ 5. Then it notices that A wants to migrate to A; as this is an identity
+    migration, it culls A by list_del_init()'ing its ->mg_preload_node and
+    putting references accordingly.
+
+ 6. The rest of migration takes place with B on the src list but nothing on
+ the dst list.
+
+This means that A isn't held while migration is in progress. If all tasks
+leave A before the migration finishes and the incoming task pins it, the
+cset will be destroyed, leading to a use-after-free.
+
+This is caused by overloading cset->mg_preload_node for both src and dst
+preload lists. We wanted to exclude the cset from the src list but ended up
+inadvertently excluding it from the dst list too.
+
+This patch fixes the issue by separating out cset->mg_preload_node into
+->mg_src_preload_node and ->mg_dst_preload_node, so that the src and dst
+preloadings don't interfere with each other.
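+
+The underlying conflict is easy to demonstrate with bare list nodes; the
+userspace sketch below (a trimmed copy of the kernel's list API, with a
+stand-in cset structure) shows how a single shared node makes the
+list_empty()-based "already preloaded?" test ambiguous:
+
+  #include <stdio.h>
+
+  struct list_head { struct list_head *next, *prev; };
+
+  #define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+  static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
+  static int list_empty(const struct list_head *h) { return h->next == h; }
+
+  static void list_add_tail(struct list_head *n, struct list_head *h)
+  {
+          n->prev = h->prev;
+          n->next = h;
+          h->prev->next = n;
+          h->prev = n;
+  }
+
+  struct cset { struct list_head mg_preload_node; };
+
+  int main(void)
+  {
+          struct list_head src = LIST_HEAD_INIT(src);
+          struct cset A;
+
+          INIT_LIST_HEAD(&A.mg_preload_node);
+
+          /* step 2: A is preloaded as a migration *source* */
+          list_add_tail(&A.mg_preload_node, &src);
+
+          /* step 4: the dst side asks "is A already dst-preloaded?"
+           * The node is busy -- but on the src list -- so A is
+           * skipped and never pinned as a destination.
+           */
+          if (!list_empty(&A.mg_preload_node))
+                  printf("A looks dst-preloaded already -- skipped\n");
+          return 0;
+  }
+
+With separate mg_src_preload_node/mg_dst_preload_node fields, each test
+only ever sees membership of its own list, so the ambiguity disappears.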
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Mukesh Ojha <quic_mojha@quicinc.com>
+Reported-by: shisiyuan <shisiyuan19870131@gmail.com>
+Link: http://lkml.kernel.org/r/1654187688-27411-1-git-send-email-shisiyuan@xiaomi.com
+Link: https://www.spinics.net/lists/cgroups/msg33313.html
+Fixes: f817de98513d ("cgroup: prepare migration path for unified hierarchy")
+Cc: stable@vger.kernel.org # v3.16+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cgroup-defs.h | 3 ++-
+ kernel/cgroup/cgroup.c | 37 +++++++++++++++++++++++--------------
+ 2 files changed, 25 insertions(+), 15 deletions(-)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -235,7 +235,8 @@ struct css_set {
+ * List of csets participating in the on-going migration either as
+ * source or destination. Protected by cgroup_mutex.
+ */
+- struct list_head mg_preload_node;
++ struct list_head mg_src_preload_node;
++ struct list_head mg_dst_preload_node;
+ struct list_head mg_node;
+
+ /*
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -647,7 +647,8 @@ struct css_set init_css_set = {
+ .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
+ .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
+ .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
+- .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
++ .mg_src_preload_node = LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
++ .mg_dst_preload_node = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
+ .mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
+ };
+
+@@ -1113,7 +1114,8 @@ static struct css_set *find_css_set(stru
+ INIT_LIST_HEAD(&cset->threaded_csets);
+ INIT_HLIST_NODE(&cset->hlist);
+ INIT_LIST_HEAD(&cset->cgrp_links);
+- INIT_LIST_HEAD(&cset->mg_preload_node);
++ INIT_LIST_HEAD(&cset->mg_src_preload_node);
++ INIT_LIST_HEAD(&cset->mg_dst_preload_node);
+ INIT_LIST_HEAD(&cset->mg_node);
+
+ /* Copy the set of subsystem state objects generated in
+@@ -2399,21 +2401,27 @@ int cgroup_migrate_vet_dst(struct cgroup
+ */
+ void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
+ {
+- LIST_HEAD(preloaded);
+ struct css_set *cset, *tmp_cset;
+
+ lockdep_assert_held(&cgroup_mutex);
+
+ spin_lock_irq(&css_set_lock);
+
+- list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
+- list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
++ list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
++ mg_src_preload_node) {
++ cset->mg_src_cgrp = NULL;
++ cset->mg_dst_cgrp = NULL;
++ cset->mg_dst_cset = NULL;
++ list_del_init(&cset->mg_src_preload_node);
++ put_css_set_locked(cset);
++ }
+
+- list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
++ list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
++ mg_dst_preload_node) {
+ cset->mg_src_cgrp = NULL;
+ cset->mg_dst_cgrp = NULL;
+ cset->mg_dst_cset = NULL;
+- list_del_init(&cset->mg_preload_node);
++ list_del_init(&cset->mg_dst_preload_node);
+ put_css_set_locked(cset);
+ }
+
+@@ -2455,7 +2463,7 @@ void cgroup_migrate_add_src(struct css_s
+
+ src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
+
+- if (!list_empty(&src_cset->mg_preload_node))
++ if (!list_empty(&src_cset->mg_src_preload_node))
+ return;
+
+ WARN_ON(src_cset->mg_src_cgrp);
+@@ -2466,7 +2474,7 @@ void cgroup_migrate_add_src(struct css_s
+ src_cset->mg_src_cgrp = src_cgrp;
+ src_cset->mg_dst_cgrp = dst_cgrp;
+ get_css_set(src_cset);
+- list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
++ list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
+ }
+
+ /**
+@@ -2491,7 +2499,7 @@ int cgroup_migrate_prepare_dst(struct cg
+
+ /* look up the dst cset for each src cset and link it to src */
+ list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
+- mg_preload_node) {
++ mg_src_preload_node) {
+ struct css_set *dst_cset;
+ struct cgroup_subsys *ss;
+ int ssid;
+@@ -2510,7 +2518,7 @@ int cgroup_migrate_prepare_dst(struct cg
+ if (src_cset == dst_cset) {
+ src_cset->mg_src_cgrp = NULL;
+ src_cset->mg_dst_cgrp = NULL;
+- list_del_init(&src_cset->mg_preload_node);
++ list_del_init(&src_cset->mg_src_preload_node);
+ put_css_set(src_cset);
+ put_css_set(dst_cset);
+ continue;
+@@ -2518,8 +2526,8 @@ int cgroup_migrate_prepare_dst(struct cg
+
+ src_cset->mg_dst_cset = dst_cset;
+
+- if (list_empty(&dst_cset->mg_preload_node))
+- list_add_tail(&dst_cset->mg_preload_node,
++ if (list_empty(&dst_cset->mg_dst_preload_node))
++ list_add_tail(&dst_cset->mg_dst_preload_node,
+ &mgctx->preloaded_dst_csets);
+ else
+ put_css_set(dst_cset);
+@@ -2753,7 +2761,8 @@ static int cgroup_update_dfl_csses(struc
+ goto out_finish;
+
+ spin_lock_irq(&css_set_lock);
+- list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
++ list_for_each_entry(src_cset, &mgctx.preloaded_src_csets,
++ mg_src_preload_node) {
+ struct task_struct *task, *ntask;
+
+ /* all tasks in src_csets need to be migrated */
--- /dev/null
+From 820b8963adaea34a87abbecb906d1f54c0aabfb7 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 6 Jul 2022 10:50:40 -0400
+Subject: net: sock: tracing: Fix sock_exceed_buf_limit not to dereference stale pointer
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 820b8963adaea34a87abbecb906d1f54c0aabfb7 upstream.
+
+The trace event sock_exceed_buf_limit saves the prot->sysctl_mem pointer
+and then dereferences it in the TP_printk() portion. This is unsafe as the
+TP_printk() portion is executed at the time the buffer is read. That is,
+it can be seconds, minutes, days, months, even years later. If the proto
+is freed, then this dereference can also lead to a kernel crash.
+
+Instead, save the sysctl_mem array into the ring buffer and have
+TP_printk() reference that copy. This is the proper and safe way to
+handle pointer data in trace events.
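+
+To make the timing concrete: TP_fast_assign() runs when the event fires,
+while TP_printk() runs whenever the buffer happens to be consumed. A
+stand-alone analogy of the two approaches (hypothetical record structs,
+not the tracing API itself):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct proto { long sysctl_mem[3]; };        /* stand-in for the net proto */
+
+  struct record_bad  { long *sysctl_mem; };    /* old event: stores a pointer */
+  struct record_good { long sysctl_mem[3]; };  /* fixed event: stores values  */
+
+  int main(void)
+  {
+          struct proto *prot = malloc(sizeof(*prot));
+          struct record_bad bad;
+          struct record_good good;
+
+          prot->sysctl_mem[0] = 1;
+          prot->sysctl_mem[1] = 2;
+          prot->sysctl_mem[2] = 3;
+
+          /* "TP_fast_assign time" */
+          bad.sysctl_mem = prot->sysctl_mem;
+          memcpy(good.sysctl_mem, prot->sysctl_mem, sizeof(good.sysctl_mem));
+
+          free(prot);  /* proto goes away, e.g. on module unload */
+
+          /* "TP_printk time": good still holds 1 2 3; bad.sysctl_mem
+           * now dangles and dereferencing it would be a use-after-free.
+           */
+          printf("%ld %ld %ld\n", good.sysctl_mem[0],
+                 good.sysctl_mem[1], good.sysctl_mem[2]);
+          return 0;
+  }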
+
+Link: https://lore.kernel.org/all/20220706052130.16368-12-kuniyu@amazon.com/
+
+Cc: stable@vger.kernel.org
+Fixes: 3847ce32aea9f ("core: add tracepoints for queueing skb to rcvbuf")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Acked-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/trace/events/sock.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/trace/events/sock.h
++++ b/include/trace/events/sock.h
+@@ -38,7 +38,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+- __field(long *, sysctl_mem)
++ __array(long, sysctl_mem, 3)
+ __field(long, allocated)
+ __field(int, sysctl_rmem)
+ __field(int, rmem_alloc)
+@@ -46,7 +46,9 @@ TRACE_EVENT(sock_exceed_buf_limit,
+
+ TP_fast_assign(
+ strncpy(__entry->name, prot->name, 32);
+- __entry->sysctl_mem = prot->sysctl_mem;
++ __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]);
++ __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]);
++ __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);
+ __entry->allocated = allocated;
+ __entry->sysctl_rmem = prot->sysctl_rmem[0];
+ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
--- /dev/null
+From 5924e6ec1585445f251ea92713eb15beb732622a Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Thu, 23 Jun 2022 17:54:01 +0900
+Subject: nilfs2: fix incorrect masking of permission flags for symlinks
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 5924e6ec1585445f251ea92713eb15beb732622a upstream.
+
+On nilfs2, the permission flags of newly created symlinks are wrongly
+masked with the current umask value, even though symlinks should always
+have 777 (rwxrwxrwx) permissions:
+
+ $ umask
+ 0022
+ $ touch file && ln -s file symlink; ls -l file symlink
+ -rw-r--r--. 1 root root 0 Jun 23 16:29 file
+ lrwxr-xr-x. 1 root root 4 Jun 23 16:29 symlink -> file
+
+This fixes the bug by inserting a missing check that excludes
+symlinks.
+
+Link: https://lkml.kernel.org/r/1655974441-5612-1-git-send-email-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: Tommy Pettersson <ptp@lysator.liu.se>
+Reported-by: Ciprian Craciun <ciprian.craciun@gmail.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/nilfs.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -212,6 +212,9 @@ static inline int nilfs_acl_chmod(struct
+
+ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
+ {
++ if (S_ISLNK(inode->i_mode))
++ return 0;
++
+ inode->i_mode &= ~current_umask();
+ return 0;
+ }
alsa-hda-add-fixup-for-dell-latitidue-e5430.patch
+alsa-hda-conexant-apply-quirk-for-another-hp-prodesk-600-g3-model.patch
+xen-netback-avoid-entering-xenvif_rx_next_skb-with-an-empty-rx-queue.patch
+net-sock-tracing-fix-sock_exceed_buf_limit-not-to-dereference-stale-pointer.patch
+arm-9213-1-print-message-about-disabled-spectre-workarounds-only-once.patch
+arm-9214-1-alignment-advance-it-state-after-emulating-thumb-instruction.patch
+cgroup-use-separate-src-dst-nodes-when-preloading-css_sets-for-migration.patch
+nilfs2-fix-incorrect-masking-of-permission-flags-for-symlinks.patch
--- /dev/null
+From 94e8100678889ab428e68acadf042de723f094b9 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 13 Jul 2022 15:53:22 +0200
+Subject: xen/netback: avoid entering xenvif_rx_next_skb() with an empty rx queue
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 94e8100678889ab428e68acadf042de723f094b9 upstream.
+
+xenvif_rx_next_skb() expects the rx queue not to be empty, but when
+the loop in xenvif_rx_action() does multiple iterations, it never
+re-checks that another skb is actually available in the rx queue.
+
+This can lead to crashes:
+
+[40072.537261] BUG: unable to handle kernel NULL pointer dereference at 0000000000000080
+[40072.537407] IP: xenvif_rx_skb+0x23/0x590 [xen_netback]
+[40072.537534] PGD 0 P4D 0
+[40072.537644] Oops: 0000 [#1] SMP NOPTI
+[40072.537749] CPU: 0 PID: 12505 Comm: v1-c40247-q2-gu Not tainted 4.12.14-122.121-default #1 SLE12-SP5
+[40072.537867] Hardware name: HP ProLiant DL580 Gen9/ProLiant DL580 Gen9, BIOS U17 11/23/2021
+[40072.537999] task: ffff880433b38100 task.stack: ffffc90043d40000
+[40072.538112] RIP: e030:xenvif_rx_skb+0x23/0x590 [xen_netback]
+[40072.538217] RSP: e02b:ffffc90043d43de0 EFLAGS: 00010246
+[40072.538319] RAX: 0000000000000000 RBX: ffffc90043cd7cd0 RCX: 00000000000000f7
+[40072.538430] RDX: 0000000000000000 RSI: 0000000000000006 RDI: ffffc90043d43df8
+[40072.538531] RBP: 000000000000003f R08: 000077ff80000000 R09: 0000000000000008
+[40072.538644] R10: 0000000000007ff0 R11: 00000000000008f6 R12: ffffc90043ce2708
+[40072.538745] R13: 0000000000000000 R14: ffffc90043d43ed0 R15: ffff88043ea748c0
+[40072.538861] FS: 0000000000000000(0000) GS:ffff880484600000(0000) knlGS:0000000000000000
+[40072.538988] CS: e033 DS: 0000 ES: 0000 CR0: 0000000080050033
+[40072.539088] CR2: 0000000000000080 CR3: 0000000407ac8000 CR4: 0000000000040660
+[40072.539211] Call Trace:
+[40072.539319] xenvif_rx_action+0x71/0x90 [xen_netback]
+[40072.539429] xenvif_kthread_guest_rx+0x14a/0x29c [xen_netback]
+
+Fix that by stopping the loop when the rx queue becomes empty.
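+
+The loop shape after the fix follows the general rule that a batching
+consumer must re-check queue emptiness on every iteration; a minimal
+stand-alone model of the pattern (simplified names, not the driver code):
+
+  #include <stdio.h>
+
+  #define RX_BATCH_SIZE 64
+
+  static int rx_queue_len = 1;   /* one pending skb */
+  static int ring_slots   = 8;   /* ring space for several more */
+
+  static void rx_skb(void)      /* models xenvif_rx_skb(): assumes non-empty */
+  {
+          if (rx_queue_len == 0) {
+                  printf("dequeue from empty queue (NULL deref in the kernel)\n");
+                  return;
+          }
+          rx_queue_len--;
+  }
+
+  int main(void)
+  {
+          int work_done = 0;
+
+          while (ring_slots > 0 &&
+                 rx_queue_len > 0 &&          /* the added guard */
+                 work_done < RX_BATCH_SIZE) {
+                  rx_skb();
+                  work_done++;
+          }
+          printf("processed %d skb(s)\n", work_done);
+          return 0;
+  }
+
+Dropping the middle condition reproduces the bug pattern: the second
+iteration calls rx_skb() with an empty queue.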
+
+Cc: stable@vger.kernel.org
+Fixes: 98f6d57ced73 ("xen-netback: process guest rx packets in batches")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul@xen.org>
+Link: https://lore.kernel.org/r/20220713135322.19616-1-jgross@suse.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/rx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -482,6 +482,7 @@ void xenvif_rx_action(struct xenvif_queu
+ queue->rx_copy.completed = &completed_skbs;
+
+ while (xenvif_rx_ring_slots_available(queue) &&
++ !skb_queue_empty(&queue->rx_queue) &&
+ work_done < RX_BATCH_SIZE) {
+ xenvif_rx_skb(queue);
+ work_done++;