--- /dev/null
+From 112e66017bff7f2837030f34c2bc19501e9212d5 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 10 Mar 2023 11:10:56 -0500
+Subject: KVM: nVMX: add missing consistency checks for CR0 and CR4
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 112e66017bff7f2837030f34c2bc19501e9212d5 upstream.
+
+The effective values of the guest CR0 and CR4 registers may differ from
+those included in the VMCS12. In particular, disabling EPT forces
+CR4.PAE=1 and disabling unrestricted guest mode forces CR0.PG=CR0.PE=1.
+
+Therefore, checks on these bits cannot be delegated to the processor
+and must be performed by KVM.
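+
+As a rough sketch (mirroring the checks the hunk below adds, using the same
+vmcs12 fields and the ia32e flag from the patch), the conditions KVM has to
+reject on its own rather than rely on hardware are:
+
+    /* CR0.PG=1 with CR0.PE=0 is never a valid guest state */
+    if ((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)
+        return -EINVAL;
+
+    /* an IA-32e mode guest needs both CR4.PAE=1 and CR0.PG=1 */
+    if (ia32e && (!(vmcs12->guest_cr4 & X86_CR4_PAE) ||
+                  !(vmcs12->guest_cr0 & X86_CR0_PG)))
+        return -EINVAL;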
+
+Reported-by: Reima ISHII <ishiir@g.ecc.u-tokyo.ac.jp>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2779,7 +2779,7 @@ static int nested_vmx_check_guest_state(
+ struct vmcs12 *vmcs12,
+ u32 *exit_qual)
+ {
+- bool ia32e;
++ bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
+
+ *exit_qual = ENTRY_FAIL_DEFAULT;
+
+@@ -2796,6 +2796,13 @@ static int nested_vmx_check_guest_state(
+ return -EINVAL;
+ }
+
++ if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
++ return -EINVAL;
++
++ if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
++ CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
++ return -EINVAL;
++
+ /*
+ * If the load IA32_EFER VM-entry control is 1, the following checks
+ * are performed on the field for the IA32_EFER MSR:
+@@ -2807,7 +2814,6 @@ static int nested_vmx_check_guest_state(
+ */
+ if (to_vmx(vcpu)->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
+- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
+ CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
+ CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
--- /dev/null
+From 9f116f76fa8c04c81aef33ad870dbf9a158e5b70 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 1 Mar 2023 20:00:53 -0500
+Subject: tracing: Check field value in hist_field_name()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 9f116f76fa8c04c81aef33ad870dbf9a158e5b70 upstream.
+
+The function hist_field_name() cannot handle being passed a NULL field
+parameter. It should never be NULL, but due to a previous bug, NULL was
+passed to the function and the kernel crashed due to a NULL dereference.
+Mark Rutland reported this to me on IRC.
+
+The bug was fixed, but to prevent future bugs from crashing the kernel,
+check the field and WARN_ON_ONCE() if it is NULL.
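+
+In other words (a sketch of the guard added by the hunk below), the function
+now bails out early with the default empty name instead of dereferencing NULL:
+
+    if (WARN_ON_ONCE(!field))
+        return field_name;    /* field_name is still "" at this point */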
+
+Link: https://lkml.kernel.org/r/20230302020810.762384440@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Fixes: c6afad49d127f ("tracing: Add hist trigger 'sym' and 'sym-offset' modifiers")
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events_hist.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1996,6 +1996,9 @@ static const char *hist_field_name(struc
+ {
+ const char *field_name = "";
+
++ if (WARN_ON_ONCE(!field))
++ return field_name;
++
+ if (level > 1)
+ return field_name;
+
--- /dev/null
+From c2679254b9c9980d9045f0f722cf093a2b1f7590 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Fri, 10 Mar 2023 17:28:56 -0500
+Subject: tracing: Make tracepoint lockdep check actually test something
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit c2679254b9c9980d9045f0f722cf093a2b1f7590 upstream.
+
+A while ago, the trace events had the following:
+
+ rcu_read_lock_sched_notrace();
+ rcu_dereference_sched(...);
+ rcu_read_unlock_sched_notrace();
+
+If the tracepoint was enabled, this sequence could trigger RCU issues when
+called in the wrong place, and the resulting warning was only produced if
+lockdep was enabled. If the tracepoint was never enabled while lockdep was
+on, the bug would not be caught. To handle this, the above sequence was
+executed whenever lockdep was enabled, regardless of whether the tracepoint
+was enabled or not (the always-executed code did nothing useful by itself,
+but it could still trigger the warning).
+
+But a lot has changed since that lockdep code was added. One change is that
+the sequence no longer triggers any warning. Another is that the tracepoint,
+when enabled, does not even execute that sequence anymore.
+
+The main check we care about today is whether RCU is "watching" or not.
+So if lockdep is enabled, always check rcu_is_watching(), and trigger a
+warning when it is not (tracepoints require RCU to be watching).
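+
+Concretely (a sketch of the change made by the hunk below), the lockdep-only
+block in __DECLARE_TRACE() goes from poking RCU:
+
+    if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {
+        rcu_read_lock_sched_notrace();
+        rcu_dereference_sched(__tracepoint_##name.funcs);
+        rcu_read_unlock_sched_notrace();
+    }
+
+to a direct check that RCU is watching:
+
+    if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {
+        WARN_ON_ONCE(!rcu_is_watching());
+    }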
+
+Note that the old sequence did add a bit of overhead when lockdep was
+enabled, and with the latest kernel updates it would slow the system down
+enough to trigger kernel "stalled" warnings.
+
+Link: http://lore.kernel.org/lkml/20140806181801.GA4605@redhat.com
+Link: http://lore.kernel.org/lkml/20140807175204.C257CAC5@viggo.jf.intel.com
+Link: https://lore.kernel.org/lkml/20230307184645.521db5c9@gandalf.local.home/
+Link: https://lore.kernel.org/linux-trace-kernel/20230310172856.77406446@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "Paul E. McKenney" <paulmck@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Fixes: e6753f23d961 ("tracepoint: Make rcuidle tracepoint callers use SRCU")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/tracepoint.h | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -231,12 +231,11 @@ static inline struct tracepoint *tracepo
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ *
+- * When lockdep is enabled, we make sure to always do the RCU portions of
+- * the tracepoint code, regardless of whether tracing is on. However,
+- * don't check if the condition is false, due to interaction with idle
+- * instrumentation. This lets us find RCU issues triggered with tracepoints
+- * even when this tracepoint is off. This code has no purpose other than
+- * poking RCU a bit.
++ * When lockdep is enabled, we make sure to always test if RCU is
++ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
++ * require RCU to be active, and it should always warn at the tracepoint
++ * site if it is not watching, as it will need to be active when the
++ * tracepoint is enabled.
+ */
+ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+ extern struct tracepoint __tracepoint_##name; \
+@@ -248,9 +247,7 @@ static inline struct tracepoint *tracepo
+ TP_ARGS(data_args), \
+ TP_CONDITION(cond), 0); \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
+- rcu_read_lock_sched_notrace(); \
+- rcu_dereference_sched(__tracepoint_##name.funcs);\
+- rcu_read_unlock_sched_notrace(); \
++ WARN_ON_ONCE(!rcu_is_watching()); \
+ } \
+ } \
+ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \