--- /dev/null
+From acdab4cb4ba7e5f94d2b422ebd7bf4bf68178fb2 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Sat, 8 Oct 2022 14:02:20 +0300
+Subject: Revert "tty: n_gsm: avoid call of sleeping functions from atomic context"
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit acdab4cb4ba7e5f94d2b422ebd7bf4bf68178fb2 upstream.
+
+This reverts commit 902e02ea9385373ce4b142576eef41c642703955.
+
+The above commit is reverted because using tx_mutex does not solve the
+problem described in 902e02ea9385 ("tty: n_gsm: avoid call of sleeping
+functions from atomic context"); it merely moves the bug to another place.
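+
+For illustration only (not part of the revert, names are hypothetical):
+mutex_lock() may sleep, while some of the call sites touched below,
+e.g. gsmld_write_wakeup(), may run in atomic context where sleeping is
+forbidden. A minimal kernel-style sketch of the two locking patterns:
+
+  #include <linux/mutex.h>
+  #include <linux/spinlock.h>
+
+  static DEFINE_MUTEX(demo_mutex);
+  static DEFINE_SPINLOCK(demo_lock);
+
+  /* Process context only: taking a mutex may sleep, which is fine. */
+  static void demo_process_ctx(void)
+  {
+          mutex_lock(&demo_mutex);
+          /* ... touch shared TX state ... */
+          mutex_unlock(&demo_mutex);
+  }
+
+  /* Reachable from atomic (e.g. IRQ) context: must not sleep, so
+   * take a spinlock and save/restore the interrupt state instead. */
+  static void demo_atomic_ctx(void)
+  {
+          unsigned long flags;
+
+          spin_lock_irqsave(&demo_lock, flags);
+          /* ... touch shared TX state without sleeping ... */
+          spin_unlock_irqrestore(&demo_lock, flags);
+  }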
+
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
+Reviewed-by: Daniel Starke <daniel.starke@siemens.com>
+Link: https://lore.kernel.org/r/20221008110221.13645-2-pchelkin@ispras.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/n_gsm.c | 39 +++++++++++++++++++++------------------
+ 1 file changed, 21 insertions(+), 18 deletions(-)
+
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -235,7 +235,7 @@ struct gsm_mux {
+ int old_c_iflag; /* termios c_iflag value before attach */
+ bool constipated; /* Asked by remote to shut up */
+
+- struct mutex tx_mutex;
++ spinlock_t tx_lock;
+ unsigned int tx_bytes; /* TX data outstanding */
+ #define TX_THRESH_HI 8192
+ #define TX_THRESH_LO 2048
+@@ -820,14 +820,15 @@ static void __gsm_data_queue(struct gsm_
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. Take the
+- * the gsm tx mutex and dlci lock.
++ * the gsm tx lock and dlci lock.
+ */
+
+ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+ {
+- mutex_lock(&dlci->gsm->tx_mutex);
++ unsigned long flags;
++ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ __gsm_data_queue(dlci, msg);
+- mutex_unlock(&dlci->gsm->tx_mutex);
++ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ }
+
+ /**
+@@ -839,7 +840,7 @@ static void gsm_data_queue(struct gsm_dl
+ * is data. Keep to the MRU of the mux. This path handles the usual tty
+ * interface which is a byte stream with optional modem data.
+ *
+- * Caller must hold the tx_mutex of the mux.
++ * Caller must hold the tx_lock of the mux.
+ */
+
+ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+@@ -902,7 +903,7 @@ static int gsm_dlci_data_output(struct g
+ * is data. Keep to the MRU of the mux. This path handles framed data
+ * queued as skbuffs to the DLCI.
+ *
+- * Caller must hold the tx_mutex of the mux.
++ * Caller must hold the tx_lock of the mux.
+ */
+
+ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+@@ -918,7 +919,7 @@ static int gsm_dlci_data_output_framed(s
+ if (dlci->adaption == 4)
+ overhead = 1;
+
+- /* dlci->skb is locked by tx_mutex */
++ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+ dlci->skb = skb_dequeue_tail(&dlci->skb_list);
+ if (dlci->skb == NULL)
+@@ -1018,12 +1019,13 @@ static void gsm_dlci_data_sweep(struct g
+
+ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+ {
++ unsigned long flags;
+ int sweep;
+
+ if (dlci->constipated)
+ return;
+
+- mutex_lock(&dlci->gsm->tx_mutex);
++ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
+ if (dlci->gsm->tx_bytes == 0) {
+@@ -1034,7 +1036,7 @@ static void gsm_dlci_data_kick(struct gs
+ }
+ if (sweep)
+ gsm_dlci_data_sweep(dlci->gsm);
+- mutex_unlock(&dlci->gsm->tx_mutex);
++ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ }
+
+ /*
+@@ -1256,6 +1258,7 @@ static void gsm_control_message(struct g
+ const u8 *data, int clen)
+ {
+ u8 buf[1];
++ unsigned long flags;
+
+ switch (command) {
+ case CMD_CLD: {
+@@ -1277,9 +1280,9 @@ static void gsm_control_message(struct g
+ gsm->constipated = false;
+ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ /* Kick the link in case it is idling */
+- mutex_lock(&gsm->tx_mutex);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm, NULL);
+- mutex_unlock(&gsm->tx_mutex);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ break;
+ case CMD_FCOFF:
+ /* Modem wants us to STFU */
+@@ -2225,7 +2228,6 @@ static void gsm_free_mux(struct gsm_mux
+ break;
+ }
+ }
+- mutex_destroy(&gsm->tx_mutex);
+ mutex_destroy(&gsm->mutex);
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+@@ -2297,12 +2299,12 @@ static struct gsm_mux *gsm_alloc_mux(voi
+ }
+ spin_lock_init(&gsm->lock);
+ mutex_init(&gsm->mutex);
+- mutex_init(&gsm->tx_mutex);
+ kref_init(&gsm->ref);
+ INIT_LIST_HEAD(&gsm->tx_list);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
++ spin_lock_init(&gsm->tx_lock);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+@@ -2327,7 +2329,6 @@ static struct gsm_mux *gsm_alloc_mux(voi
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX) {
+- mutex_destroy(&gsm->tx_mutex);
+ mutex_destroy(&gsm->mutex);
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+@@ -2652,15 +2653,16 @@ static int gsmld_open(struct tty_struct
+ static void gsmld_write_wakeup(struct tty_struct *tty)
+ {
+ struct gsm_mux *gsm = tty->disc_data;
++ unsigned long flags;
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+- mutex_lock(&gsm->tx_mutex);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm, NULL);
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+ gsm_dlci_data_sweep(gsm);
+ }
+- mutex_unlock(&gsm->tx_mutex);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
+
+ /**
+@@ -2703,6 +2705,7 @@ static ssize_t gsmld_write(struct tty_st
+ const unsigned char *buf, size_t nr)
+ {
+ struct gsm_mux *gsm = tty->disc_data;
++ unsigned long flags;
+ int space;
+ int ret;
+
+@@ -2710,13 +2713,13 @@ static ssize_t gsmld_write(struct tty_st
+ return -ENODEV;
+
+ ret = -ENOBUFS;
+- mutex_lock(&gsm->tx_mutex);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ space = tty_write_room(tty);
+ if (space >= nr)
+ ret = tty->ops->write(tty, buf, nr);
+ else
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+- mutex_unlock(&gsm->tx_mutex);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+
+ return ret;
+ }
selftests-net-fix-nexthop-warning-cleanup-double-ip-.patch
ipv4-handle-attempt-to-delete-multipath-route-when-f.patch
ipv4-fix-route-deletion-when-nexthop-info-is-not-spe.patch
+revert-tty-n_gsm-avoid-call-of-sleeping-functions-from-atomic-context.patch
+x86-tsx-add-a-feature-bit-for-tsx-control-msr-support.patch
+x86-pm-add-enumeration-check-before-spec-msrs-save-restore-setup.patch
--- /dev/null
+From foo@baz Sun Dec 4 05:01:14 PM CET 2022
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 1 Dec 2022 12:19:41 -0800
+Subject: x86/pm: Add enumeration check before spec MSRs save/restore setup
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: bp@suse.de, dave.hansen@linux.intel.com, hdegoede@redhat.com, rafael.j.wysocki@intel.com, stable@kernel.org
+Message-ID: <d4a7adae09cd0e6062bc35b268af55dba7b81b8a.1669925645.git.pawan.kumar.gupta@linux.intel.com>
+Content-Disposition: inline
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 50bcceb7724e471d9b591803889df45dcbb584bc upstream.
+
+pm_save_spec_msr() keeps a list of all the MSRs which _might_ need
+to be saved and restored at hibernate and resume. However, it has
+zero awareness of CPU support for these MSRs. It mostly works by
+unconditionally attempting to manipulate these MSRs and relying on
+rdmsrl_safe() being able to handle a #GP on CPUs where the support is
+unavailable.
+
+However, it's possible for reads (RDMSR) to be supported for a given MSR
+while writes (WRMSR) are not. In this case, msr_build_context() sees a
+successful read (RDMSR) and marks the MSR as valid. Later,
+restore_processor_state() tries to restore it, but the write (WRMSR) is
+not allowed on the Intel Atom N2600, producing a nasty (but harmless)
+error message:
+
+ unchecked MSR access error: WRMSR to 0x122 (tried to write 0x0000000000000002) \
+ at rIP: 0xffffffff8b07a574 (native_write_msr+0x4/0x20)
+ Call Trace:
+ <TASK>
+ restore_processor_state
+ x86_acpi_suspend_lowlevel
+ acpi_suspend_enter
+ suspend_devices_and_enter
+ pm_suspend.cold
+ state_store
+ kernfs_fop_write_iter
+ vfs_write
+ ksys_write
+ do_syscall_64
+ ? do_syscall_64
+ ? up_read
+ ? lock_is_held_type
+ ? asm_exc_page_fault
+ ? lockdep_hardirqs_on
+ entry_SYSCALL_64_after_hwframe
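+
+As background (assumed from the pattern described above, helper names
+are hypothetical): msr_build_context() probes with rdmsrl_safe(), which
+absorbs the #GP of an unsupported read, while the restore path writes
+back with an unchecked wrmsrl(). A condensed sketch of that asymmetry:
+
+  #include <asm/msr.h>
+
+  /* Probe: rdmsrl_safe() returns 0 on success, so an MSR that can be
+   * read but not written still gets marked as valid. */
+  static bool demo_probe_msr(u32 msr_no, u64 *val)
+  {
+          return !rdmsrl_safe(msr_no, val);
+  }
+
+  /* Restore: wrmsrl() is unchecked; if WRMSR is not implemented for
+   * msr_no, this is where the splat above comes from. */
+  static void demo_restore_msr(u32 msr_no, u64 val)
+  {
+          wrmsrl(msr_no, val);
+  }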
+
+To fix this, add the corresponding X86_FEATURE bit for each MSR. Avoid
+trying to manipulate the MSR when the feature bit is clear. This
+required adding an X86_FEATURE bit for MSRs that do not have one already,
+but it's a small price to pay.
+
+ [ bp: Move struct msr_enumeration inside the only function that uses it. ]
+ [Pawan: Resolve build issue in backport]
+
+Fixes: 73924ec4d560 ("x86/pm: Save the MSR validity status at context setup")
+Reported-by: Hans de Goede <hdegoede@redhat.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/c24db75d69df6e66c0465e13676ad3f2837a2ed8.1668539735.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/power/cpu.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -516,16 +516,23 @@ static int pm_cpu_check(const struct x86
+
+ static void pm_save_spec_msr(void)
+ {
+- u32 spec_msr_id[] = {
+- MSR_IA32_SPEC_CTRL,
+- MSR_IA32_TSX_CTRL,
+- MSR_TSX_FORCE_ABORT,
+- MSR_IA32_MCU_OPT_CTRL,
+- MSR_AMD64_LS_CFG,
+- MSR_AMD64_DE_CFG,
++ struct msr_enumeration {
++ u32 msr_no;
++ u32 feature;
++ } msr_enum[] = {
++ { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
++ { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
++ { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
++ { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
++ { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
++ { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
+ };
++ int i;
+
+- msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
++ for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
++ if (boot_cpu_has(msr_enum[i].feature))
++ msr_build_context(&msr_enum[i].msr_no, 1);
++ }
+ }
+
+ static int pm_check_save_msr(void)
--- /dev/null
+From foo@baz Sun Dec 4 05:01:14 PM CET 2022
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 1 Dec 2022 12:19:35 -0800
+Subject: x86/tsx: Add a feature bit for TSX control MSR support
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: bp@suse.de, dave.hansen@linux.intel.com, hdegoede@redhat.com, rafael.j.wysocki@intel.com, stable@kernel.org
+Message-ID: <4427d80ee35e1a6f26f7383dcca320d04ce36575.1669925645.git.pawan.kumar.gupta@linux.intel.com>
+Content-Disposition: inline
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit aaa65d17eec372c6a9756833f3964ba05b05ea14 upstream.
+
+Support for the TSX control MSR is enumerated in MSR_IA32_ARCH_CAPABILITIES.
+This is different from how other CPU features are enumerated, i.e. via
+CPUID. Currently, a call to tsx_ctrl_is_supported() is required for
+enumerating the feature. In the absence of a feature bit for TSX control,
+any code that relies on checking feature bits directly will not work.
+
+In preparation for adding a feature bit check in MSR save/restore
+during suspend/resume, set a new feature bit X86_FEATURE_TSX_CTRL when
+MSR_IA32_TSX_CTRL is present.
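+
+The intended flow, condensed from the diff below (the consumer side is
+an illustrative sketch, not part of this patch):
+
+  /* Boot: enumerate the MSR once, expose it as a synthetic bit. */
+  if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR)
+          setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+
+  /* Consumers then gate MSR access on the feature bit: */
+  if (boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL))
+          wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+This lets code such as pm_save_spec_msr() test boot_cpu_has() directly
+instead of re-reading MSR_IA32_ARCH_CAPABILITIES.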
+
+ [ bp: Remove tsx_ctrl_is_supported()]
+
+ [Pawan: Resolved conflicts in backport; Removed parts of commit message
+ referring to removed function tsx_ctrl_is_supported()]
+
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/de619764e1d98afbb7a5fa58424f1278ede37b45.1668539735.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/tsx.c | 33 ++++++++++++++-------------------
+ 2 files changed, 15 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -300,6 +300,7 @@
+ #define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
+ #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+ #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
++#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+--- a/arch/x86/kernel/cpu/tsx.c
++++ b/arch/x86/kernel/cpu/tsx.c
+@@ -58,24 +58,6 @@ void tsx_enable(void)
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+ }
+
+-static bool __init tsx_ctrl_is_supported(void)
+-{
+- u64 ia32_cap = x86_read_arch_cap_msr();
+-
+- /*
+- * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+- * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+- *
+- * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+- * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+- * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+- * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+- * tsx= cmdline requests will do nothing on CPUs without
+- * MSR_IA32_TSX_CTRL support.
+- */
+- return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+-}
+-
+ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+ {
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+@@ -89,9 +71,22 @@ void __init tsx_init(void)
+ char arg[5] = {};
+ int ret;
+
+- if (!tsx_ctrl_is_supported())
++ /*
++ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
++ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
++ *
++ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
++ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
++ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
++ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
++ * tsx= cmdline requests will do nothing on CPUs without
++ * MSR_IA32_TSX_CTRL support.
++ */
++ if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR))
+ return;
+
++ setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
++
+ ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+ if (ret >= 0) {
+ if (!strcmp(arg, "on")) {