--- /dev/null
+From 0ce0638edf5ec83343302b884fa208179580700a Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian1@huawei.com>
+Date: Mon, 26 Sep 2022 15:20:08 +0000
+Subject: ftrace: Properly unset FTRACE_HASH_FL_MOD
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+commit 0ce0638edf5ec83343302b884fa208179580700a upstream.
+
+When executing the following commands as the documentation describes, the
+log "#### all functions enabled ####" was not shown as expected:
+ 1. Set a 'mod' filter:
+ $ echo 'write*:mod:ext3' > /sys/kernel/tracing/set_ftrace_filter
+ 2. Invert above filter:
+ $ echo '!write*:mod:ext3' >> /sys/kernel/tracing/set_ftrace_filter
+ 3. Read the file:
+ $ cat /sys/kernel/tracing/set_ftrace_filter
+
+Some debugging showed that the flag FTRACE_HASH_FL_MOD was not unset
+after the inversion in step 2 above, and the result of ftrace_hash_empty()
+was therefore incorrect.
+
+Link: https://lkml.kernel.org/r/20220926152008.2239274-1-zhengyejian1@huawei.com
+
+Cc: <mingo@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 8c08f0d5c6fb ("ftrace: Have cached module filters be an active filter")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5662,8 +5662,12 @@ int ftrace_regex_release(struct inode *i
+
+ if (filter_hash) {
+ orig_hash = &iter->ops->func_hash->filter_hash;
+- if (iter->tr && !list_empty(&iter->tr->mod_trace))
+- iter->hash->flags |= FTRACE_HASH_FL_MOD;
++ if (iter->tr) {
++ if (list_empty(&iter->tr->mod_trace))
++ iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
++ else
++ iter->hash->flags |= FTRACE_HASH_FL_MOD;
++ }
+ } else
+ orig_hash = &iter->ops->func_hash->notrace_hash;
+
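
The reason the stale flag matters is that ftrace_hash_empty() treats a
hash with FTRACE_HASH_FL_MOD set as non-empty even when it contains no
entries, so the "#### all functions enabled ####" banner is never
printed. A minimal userspace model of the fixed bookkeeping (the flag
value, struct layout, and helper names below are illustrative, not the
kernel's):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define HASH_FL_MOD (1u << 0)       /* stands in for FTRACE_HASH_FL_MOD */

  struct hash {
          unsigned int flags;
          unsigned int count;         /* number of filter entries */
          bool mod_list_empty;        /* stands in for list_empty(&tr->mod_trace) */
  };

  /* mirrors the idea of ftrace_hash_empty(): a MOD hash is never "empty" */
  static bool hash_empty(const struct hash *h)
  {
          return !(h->count || (h->flags & HASH_FL_MOD));
  }

  /* the fix: keep the flag in sync with the mod list instead of only setting it */
  static void update_mod_flag(struct hash *h)
  {
          if (h->mod_list_empty)
                  h->flags &= ~HASH_FL_MOD;
          else
                  h->flags |= HASH_FL_MOD;
  }

  int main(void)
  {
          struct hash h = { 0 };

          h.mod_list_empty = false;   /* step 1: 'write*:mod:ext3' adds a mod entry */
          update_mod_flag(&h);

          h.mod_list_empty = true;    /* step 2: '!write*:mod:ext3' removes it again */
          update_mod_flag(&h);

          /* with the flag cleared, the empty hash reads as "all functions enabled" */
          assert(hash_empty(&h));
          printf("#### all functions enabled ####\n");
          return 0;
  }
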
--- /dev/null
+From 747f7a2901174c9afa805dddfb7b24db6f65e985 Mon Sep 17 00:00:00 2001
+From: Rik van Riel <riel@surriel.com>
+Date: Mon, 8 Aug 2022 15:00:19 -0400
+Subject: livepatch: fix race between fork and KLP transition
+
+From: Rik van Riel <riel@surriel.com>
+
+commit 747f7a2901174c9afa805dddfb7b24db6f65e985 upstream.
+
+The KLP transition code depends on the TIF_PATCH_PENDING and
+the task->patch_state to stay in sync. On a normal (forward)
+transition, TIF_PATCH_PENDING will be set on every task in
+the system, while on a reverse transition (after a failed
+forward one) first TIF_PATCH_PENDING will be cleared from
+every task, followed by it being set on tasks that need to
+be transitioned back to the original code.
+
+However, the fork code copies over the TIF_PATCH_PENDING flag
+from the parent to the child early on, in dup_task_struct and
+setup_thread_stack. Much later, klp_copy_process will set
+child->patch_state to match that of the parent.
+
+However, the parent's patch_state may have been changed by KLP loading
+or unloading since the flag was initially copied over into the child.
+
+This results in the KLP code occasionally hitting this warning in
+klp_complete_transition:
+
+ for_each_process_thread(g, task) {
+ WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
+ task->patch_state = KLP_UNDEFINED;
+ }
+
+Set, or clear, the TIF_PATCH_PENDING flag in the child task
+depending on whether or not it is needed at the time
+klp_copy_process is called, at a point in copy_process where the
+tasklist_lock is held exclusively, preventing races with the KLP
+code.
+
+The KLP code does have a few places where the state is changed
+without the tasklist_lock held, but those should not cause
+problems: klp_update_patch_state(current) cannot be called
+while the current task is in the middle of fork,
+klp_check_and_switch_task() is called under the pi_lock, which
+prevents rescheduling, and the only other manipulation is of the
+patch state of idle tasks, which do not fork.
+
+This should prevent this warning from triggering again in the
+future, and close the race for both normal and reverse transitions.
+
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Reported-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Fixes: d83a7cb375ee ("livepatch: change to a per-task consistency model")
+Cc: stable@kernel.org
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20220808150019.03d6a67b@imladris.surriel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/livepatch/transition.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/kernel/livepatch/transition.c
++++ b/kernel/livepatch/transition.c
+@@ -611,9 +611,23 @@ void klp_reverse_transition(void)
+ /* Called from copy_process() during fork */
+ void klp_copy_process(struct task_struct *child)
+ {
+- child->patch_state = current->patch_state;
+
+- /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
++ /*
++ * The parent process may have gone through a KLP transition since
++ * the thread flag was copied in setup_thread_stack earlier. Bring
++ * the task flag up to date with the parent here.
++ *
++ * The operation is serialized against all klp_*_transition()
++ * operations by the tasklist_lock. The only exception is
++ * klp_update_patch_state(current), but we cannot race with
++ * that because we are current.
++ */
++ if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
++ set_tsk_thread_flag(child, TIF_PATCH_PENDING);
++ else
++ clear_tsk_thread_flag(child, TIF_PATCH_PENDING);
++
++ child->patch_state = current->patch_state;
+ }
+
+ /*
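
Stripped of the kernel specifics, the fix follows a simple rule: do not
trust a flag value copied at an earlier point; re-derive the child's
state from the parent at a point that is serialized against the code
that flips the state (in the patch, under the tasklist_lock already
held by copy_process()). A small userspace model of that rule, with a
pthread mutex standing in for the tasklist_lock and all names
illustrative:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* stands in for the tasklist_lock that serializes fork against transitions */
  static pthread_mutex_t transition_lock = PTHREAD_MUTEX_INITIALIZER;

  struct task {
          bool patch_pending;         /* stands in for TIF_PATCH_PENDING */
          int  patch_state;           /* stands in for task->patch_state */
  };

  /* a (forward or reverse) transition updates both fields under the lock */
  static void transition(struct task *t, bool pending, int state)
  {
          pthread_mutex_lock(&transition_lock);
          t->patch_pending = pending;
          t->patch_state = state;
          pthread_mutex_unlock(&transition_lock);
  }

  /*
   * The rule applied: rather than keeping a flag copied earlier (which may
   * be stale by now), re-read the parent's current flag and state while
   * holding the same lock the transition code takes.  The kernel already
   * holds the lock at this point; the model takes it here for brevity.
   */
  static void copy_process(struct task *child, const struct task *parent)
  {
          pthread_mutex_lock(&transition_lock);
          child->patch_pending = parent->patch_pending;
          child->patch_state = parent->patch_state;
          pthread_mutex_unlock(&transition_lock);
  }

  int main(void)
  {
          struct task parent = { .patch_pending = true, .patch_state = 0 };
          struct task child;

          transition(&parent, false, 1);  /* e.g. a transition completed meanwhile */
          copy_process(&child, &parent);  /* the child can never see a mixed state */

          printf("child: pending=%d state=%d\n",
                 child.patch_pending, child.patch_state);
          return 0;
  }
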
--- /dev/null
+From 7e9fbbb1b776d8d7969551565bc246f74ec53b27 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Wed, 28 Sep 2022 13:39:38 -0400
+Subject: ring-buffer: Add ring_buffer_wake_waiters()
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 7e9fbbb1b776d8d7969551565bc246f74ec53b27 upstream.
+
+When a file that represents a ring buffer is closed or flushed, there may
+be waiters on the ring buffer that need to be woken up so that they can
+exit the ring_buffer_wait() function.
+
+Add ring_buffer_wake_waiters() to wake up the waiters on the ring buffer
+and allow them to exit the wait loop.
+
+Link: https://lkml.kernel.org/r/20220928133938.28dc2c27@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: 15693458c4bc0 ("tracing/ring-buffer: Move poll wake ups into ring buffer code")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/ring_buffer.h | 2 +-
+ kernel/trace/ring_buffer.c | 39 +++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 40 insertions(+), 1 deletion(-)
+
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size,
+ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table);
+-
++void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
+
+ #define RING_BUFFER_ALL_CPUS -1
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -414,6 +414,7 @@ struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
+ wait_queue_head_t full_waiters;
++ long wait_index;
+ bool waiters_pending;
+ bool full_waiters_pending;
+ bool wakeup_full;
+@@ -802,6 +803,37 @@ static void rb_wake_up_waiters(struct ir
+ }
+
+ /**
++ * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
++ * @buffer: The ring buffer to wake waiters on
++ *
++ * In the case of a file that represents a ring buffer is closing,
++ * it is prudent to wake up any waiters that are on this.
++ */
++void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
++{
++ struct ring_buffer_per_cpu *cpu_buffer;
++ struct rb_irq_work *rbwork;
++
++ if (cpu == RING_BUFFER_ALL_CPUS) {
++
++ /* Wake up individual ones too. One level recursion */
++ for_each_buffer_cpu(buffer, cpu)
++ ring_buffer_wake_waiters(buffer, cpu);
++
++ rbwork = &buffer->irq_work;
++ } else {
++ cpu_buffer = buffer->buffers[cpu];
++ rbwork = &cpu_buffer->irq_work;
++ }
++
++ rbwork->wait_index++;
++ /* make sure the waiters see the new index */
++ smp_wmb();
++
++ rb_wake_up_waiters(&rbwork->work);
++}
++
++/**
+ * ring_buffer_wait - wait for input to the ring buffer
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+@@ -816,6 +848,7 @@ int ring_buffer_wait(struct trace_buffer
+ struct ring_buffer_per_cpu *cpu_buffer;
+ DEFINE_WAIT(wait);
+ struct rb_irq_work *work;
++ long wait_index;
+ int ret = 0;
+
+ /*
+@@ -834,6 +867,7 @@ int ring_buffer_wait(struct trace_buffer
+ work = &cpu_buffer->irq_work;
+ }
+
++ wait_index = READ_ONCE(work->wait_index);
+
+ while (true) {
+ if (full)
+@@ -898,6 +932,11 @@ int ring_buffer_wait(struct trace_buffer
+ }
+
+ schedule();
++
++ /* Make sure to see the new wait index */
++ smp_rmb();
++ if (wait_index != work->wait_index)
++ break;
+ }
+
+ if (full)
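
The mechanism the patch introduces is a generation counter (the "wait
index"): the closer bumps it and wakes everyone, and each waiter
snapshots it before sleeping and leaves the wait loop if it has moved
after a wakeup. A compact userspace model of the same pattern, using a
condition variable in place of the kernel's irq_work and wait queue
machinery (names illustrative):

  #include <pthread.h>
  #include <sched.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
  static long wait_index;         /* bumped when waiters must give up */
  static int  data_available;     /* the condition readers normally wait for */
  static int  reader_parked;      /* demo-only handshake, not part of the pattern */

  /* stands in for ring_buffer_wait(): wait for data OR a wake_waiters() call */
  static void *reader(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&lock);
          long my_index = wait_index;   /* snapshot, like READ_ONCE(work->wait_index) */

          while (!data_available) {
                  reader_parked = 1;
                  pthread_cond_wait(&waitq, &lock);
                  /* a moved index means "stop waiting", e.g. the file was closed */
                  if (my_index != wait_index)
                          break;
          }
          pthread_mutex_unlock(&lock);
          printf("reader: woken without data, exiting wait loop\n");
          return NULL;
  }

  /* stands in for ring_buffer_wake_waiters(): bump the index, then wake everyone */
  static void wake_waiters(void)
  {
          pthread_mutex_lock(&lock);
          wait_index++;
          pthread_cond_broadcast(&waitq);
          pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, reader, NULL);

          /* demo only: make sure the reader is really parked before "closing" */
          for (;;) {
                  pthread_mutex_lock(&lock);
                  int parked = reader_parked;
                  pthread_mutex_unlock(&lock);
                  if (parked)
                          break;
                  sched_yield();
          }

          wake_waiters();
          pthread_join(t, NULL);
          return 0;
  }
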
--- /dev/null
+From fa8f4a89736b654125fb254b0db753ac68a5fced Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 27 Sep 2022 14:43:17 -0400
+Subject: ring-buffer: Allow splice to read previous partially read pages
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit fa8f4a89736b654125fb254b0db753ac68a5fced upstream.
+
+If a page is partially read, and then the splice system call is run
+against the ring buffer, it will always fail to read, no matter how much
+is in the ring buffer. That's because the code path for a partial read of
+the page will fail if the "full" flag is set.
+
+The splice system call wants full pages, so if the read of the ring buffer
+is not yet full, it should return zero, and the splice will block. But if
+a previous read was done, where the beginning has been consumed, it should
+still be given to the splice caller if the rest of the page has been
+written to.
+
+This caused the splice command to never consume data in this scenario, and
+let the ring buffer just fill up and lose events.
+
+Link: https://lkml.kernel.org/r/20220927144317.46be6b80@gandalf.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: 8789a9e7df6bf ("ring-buffer: read page interface")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5341,7 +5341,15 @@ int ring_buffer_read_page(struct trace_b
+ unsigned int pos = 0;
+ unsigned int size;
+
+- if (full)
++ /*
++ * If a full page is expected, this can still be returned
++ * if there's been a previous partial read and the
++ * rest of the page can be read and the commit page is off
++ * the reader page.
++ */
++ if (full &&
++ (!read || (len < (commit - read)) ||
++ cpu_buffer->reader_page == cpu_buffer->commit_page))
+ goto out_unlock;
+
+ if (len > (commit - read))
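
The patched check can be read as: even when a full page was requested,
hand out the reader page if a previous partial read already consumed its
head, the remaining bytes fit in the request, and the writer has moved
off that page; otherwise keep waiting. A tiny standalone paraphrase of
that predicate, with a few sanity checks (parameter names illustrative):

  #include <assert.h>
  #include <stdbool.h>

  /*
   * full           - caller wants a whole page (the splice path)
   * read           - bytes already consumed by a previous partial read
   * commit         - bytes written to the reader page so far
   * len            - bytes the caller can accept
   * writer_on_page - the commit page is still the reader page
   */
  static bool must_wait(bool full, unsigned int read, unsigned int commit,
                        unsigned int len, bool writer_on_page)
  {
          return full &&
                 (!read || len < (commit - read) || writer_on_page);
  }

  int main(void)
  {
          /* nothing consumed yet and the page is not full: splice keeps waiting */
          assert(must_wait(true, 0, 100, 4096, true));

          /* partially read page, writer moved on, rest fits: hand it out */
          assert(!must_wait(true, 100, 4096, 4096, false));

          /* partially read page but the writer is still on it: keep waiting */
          assert(must_wait(true, 100, 4096, 4096, true));
          return 0;
  }
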
--- /dev/null
+From ec0bbc5ec5664dcee344f79373852117dc672c86 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 27 Sep 2022 19:15:25 -0400
+Subject: ring-buffer: Check pending waiters when doing wake ups as well
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit ec0bbc5ec5664dcee344f79373852117dc672c86 upstream.
+
+The waiter wake-up code only checks the "wakeup_full" variable and not the
+"full_waiters_pending". The full_waiters_pending is set when a waiter is
+added to the wait queue. The wakeup_full is only set when an event is
+triggered, and it clears the full_waiters_pending to avoid multiple calls
+to irq_work_queue().
+
+The irq_work callback really needs to check both wakeup_full and
+full_waiters_pending, so that this code can also be used to wake up
+waiters when a file that represents the ring buffer is closed and its
+waiters need to be woken up.
+
+Link: https://lkml.kernel.org/r/20220927231824.209460321@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: 15693458c4bc0 ("tracing/ring-buffer: Move poll wake ups into ring buffer code")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -794,8 +794,9 @@ static void rb_wake_up_waiters(struct ir
+ struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
+
+ wake_up_all(&rbwork->waiters);
+- if (rbwork->wakeup_full) {
++ if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
+ rbwork->wakeup_full = false;
++ rbwork->full_waiters_pending = false;
+ wake_up_all(&rbwork->full_waiters);
+ }
+ }
--- /dev/null
+From a0fcaaed0c46cf9399d3a2d6e0c87ddb3df0e044 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Thu, 29 Sep 2022 10:49:09 -0400
+Subject: ring-buffer: Fix race between reset page and reading page
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit a0fcaaed0c46cf9399d3a2d6e0c87ddb3df0e044 upstream.
+
+The ring buffer is broken up into sub buffers (currently of page size).
+Each sub buffer has a pointer to its "tail" (the last event written to the
+sub buffer). When a new event is requested, the tail is locally
+incremented to cover the size of the new event. This is done in a way that
+there is no need for locking.
+
+If the tail goes past the end of the sub buffer, the process of moving to
+the next sub buffer takes place. After setting the current sub buffer to
+the next one, the previous one that had the tail go passed the end of the
+sub buffer needs to be reset back to the original tail location (before
+the new event was requested) and the rest of the sub buffer needs to be
+"padded".
+
+The race happens when a reader takes control of the sub buffer. As readers
+do a "swap" of sub buffers from the ring buffer to get exclusive access to
+the sub buffer, it replaces the "head" sub buffer with an empty sub buffer
+that goes back into the writable portion of the ring buffer. This swap can
+happen as soon as the writer moves to the next sub buffer and before it
+updates the last sub buffer with padding.
+
+Because the sub buffer can be released to the reader while the writer is
+still updating the padding, it is possible for the reader to see the event
+that goes past the end of the sub buffer. This can cause obvious issues.
+
+To fix this, add a few memory barriers so that the reader definitely sees
+the updates to the sub buffer, and also waits until the writer has put
+back the "tail" of the sub buffer back to the last event that was written
+on it.
+
+To be paranoid, it will only spin for 1 second, otherwise it will
+warn and shut down the ring buffer code. 1 second should be enough as
+the writer does have preemption disabled. If the writer doesn't move
+within 1 second (with preemption disabled) something is horribly
+wrong. No interrupt should last 1 second!
+
+Link: https://lore.kernel.org/all/20220830120854.7545-1-jiazi.li@transsion.com/
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216369
+Link: https://lkml.kernel.org/r/20220929104909.0650a36c@gandalf.local.home
+
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Fixes: c7b0930857e22 ("ring-buffer: prevent adding write in discarded area")
+Reported-by: Jiazi.Li <jiazi.li@transsion.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2531,6 +2531,9 @@ rb_reset_tail(struct ring_buffer_per_cpu
+ /* Mark the rest of the page with padding */
+ rb_event_set_padding(event);
+
++ /* Make sure the padding is visible before the write update */
++ smp_wmb();
++
+ /* Set the write back to the previous setting */
+ local_sub(length, &tail_page->write);
+ return;
+@@ -2542,6 +2545,9 @@ rb_reset_tail(struct ring_buffer_per_cpu
+ /* time delta must be non zero */
+ event->time_delta = 1;
+
++ /* Make sure the padding is visible before the tail_page->write update */
++ smp_wmb();
++
+ /* Set write to end of buffer */
+ length = (tail + length) - BUF_PAGE_SIZE;
+ local_sub(length, &tail_page->write);
+@@ -4356,6 +4362,33 @@ rb_get_reader_page(struct ring_buffer_pe
+ arch_spin_unlock(&cpu_buffer->lock);
+ local_irq_restore(flags);
+
++ /*
++ * The writer has preempt disable, wait for it. But not forever
++ * Although, 1 second is pretty much "forever"
++ */
++#define USECS_WAIT 1000000
++ for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
++ /* If the write is past the end of page, a writer is still updating it */
++ if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
++ break;
++
++ udelay(1);
++
++ /* Get the latest version of the reader write value */
++ smp_rmb();
++ }
++
++ /* The writer is not moving forward? Something is wrong */
++ if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
++ reader = NULL;
++
++ /*
++ * Make sure we see any padding after the write update
++ * (see rb_reset_tail())
++ */
++ smp_rmb();
++
++
+ return reader;
+ }
+
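
The ordering this depends on is a publish/consume pairing: the writer
pads the overrun area and only then moves the write index back (the
smp_wmb()), while the reader spins, for a bounded time, until the write
index is back within the page and only then trusts the page contents
(the smp_rmb()). A userspace sketch of that pairing, with C11
release/acquire atomics standing in for the kernel barriers and all
sizes and names illustrative:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  #define BUF_PAGE_SIZE 4096
  #define USECS_WAIT    1000000       /* bounded spin, as in the patch */

  struct sub_buffer {
          char data[BUF_PAGE_SIZE];
          atomic_uint write;          /* bytes committed to this sub buffer */
  };

  /* writer side: pad the overrun area, then publish the corrected write value */
  static void reset_tail(struct sub_buffer *b, unsigned int tail)
  {
          memset(b->data + tail, 0, BUF_PAGE_SIZE - tail);    /* "padding" */
          /* release ordering plays the role of smp_wmb(): padding before write */
          atomic_store_explicit(&b->write, tail, memory_order_release);
  }

  /* reader side: wait (bounded) until the writer pulled write back on the page */
  static bool reader_page_ready(struct sub_buffer *b)
  {
          for (unsigned int loops = 0; loops < USECS_WAIT; loops++) {
                  /* acquire ordering plays the role of smp_rmb() */
                  unsigned int w = atomic_load_explicit(&b->write,
                                                        memory_order_acquire);
                  if (w <= BUF_PAGE_SIZE)
                          return true;        /* padding is guaranteed visible */
                  /* the kernel does udelay(1) here */
          }
          return false;                       /* writer stuck: warn and give up */
  }

  int main(void)
  {
          static struct sub_buffer b;

          atomic_store(&b.write, BUF_PAGE_SIZE + 100);    /* tail ran past the page */
          reset_tail(&b, 4000);                           /* the writer fixes it up */
          printf("reader may use the page: %s\n",
                 reader_page_ready(&b) ? "yes" : "no");
          return 0;
  }
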
--- /dev/null
+From 3b19d614b61b93a131f463817e08219c9ce1fee3 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Tue, 27 Sep 2022 19:15:24 -0400
+Subject: ring-buffer: Have the shortest_full queue be the shortest not longest
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 3b19d614b61b93a131f463817e08219c9ce1fee3 upstream.
+
+The logic that decides when the shortest waiters on the ring buffer should
+be woken up uses a less-than instead of a greater-than compare, which
+causes shortest_full to actually track the longest.
+
+Link: https://lkml.kernel.org/r/20220927231823.718039222@goodmis.org
+
+Cc: stable@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Fixes: 2c2b0a78b3739 ("ring-buffer: Add percentage of ring buffer full to wake up reader")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ring_buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -888,7 +888,7 @@ int ring_buffer_wait(struct trace_buffer
+ nr_pages = cpu_buffer->nr_pages;
+ dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+ if (!cpu_buffer->shortest_full ||
+- cpu_buffer->shortest_full < full)
++ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (!pagebusy &&
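
shortest_full is meant to hold the smallest fullness percentage any
waiter has asked for, so that the wakeup fires as soon as the least
demanding waiter can be satisfied; tracking a minimum means overwriting
the stored value only when the new request is smaller. A few standalone
lines make the flipped comparison concrete (names illustrative):

  #include <assert.h>

  /* keep the smallest requested fullness; 0 means "no waiter registered yet" */
  static void track_shortest_full(int *shortest_full, int full)
  {
          if (!*shortest_full || *shortest_full > full)   /* '>' not '<' */
                  *shortest_full = full;
  }

  int main(void)
  {
          int shortest_full = 0;

          track_shortest_full(&shortest_full, 75);    /* waiter wants 75% full */
          track_shortest_full(&shortest_full, 25);    /* waiter wants 25% full */
          track_shortest_full(&shortest_full, 50);    /* waiter wants 50% full */

          /* the 25% waiter must drive the wakeup, so the minimum is kept */
          assert(shortest_full == 25);
          return 0;
  }
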
ext4-fix-potential-memory-leak-in-ext4_fc_record_modified_inode.patch
ext4-fix-potential-memory-leak-in-ext4_fc_record_regions.patch
ext4-update-state-fc_regions_size-after-successful-memory-allocation.patch
+livepatch-fix-race-between-fork-and-klp-transition.patch
+ftrace-properly-unset-ftrace_hash_fl_mod.patch
+ring-buffer-allow-splice-to-read-previous-partially-read-pages.patch
+ring-buffer-have-the-shortest_full-queue-be-the-shortest-not-longest.patch
+ring-buffer-check-pending-waiters-when-doing-wake-ups-as-well.patch
+ring-buffer-add-ring_buffer_wake_waiters.patch
+ring-buffer-fix-race-between-reset-page-and-reading-page.patch
+tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch
+thunderbolt-explicitly-enable-lane-adapter-hotplug-events-at-startup.patch
--- /dev/null
+From 5d2569cb4a65c373896ec0217febdf88739ed295 Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Mon, 26 Sep 2022 09:33:50 -0500
+Subject: thunderbolt: Explicitly enable lane adapter hotplug events at startup
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 5d2569cb4a65c373896ec0217febdf88739ed295 upstream.
+
+Software that has run before the USB4 CM in Linux runs may have disabled
+hotplug events for a given lane adapter.
+
+Other CMs, such as the one distributed with Windows 11, will enable hotplug
+events. Do the same thing in the Linux CM, which fixes hotplug events on
+"AMD Pink Sardine".
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/switch.c | 24 ++++++++++++++++++++++++
+ drivers/thunderbolt/tb.h | 1 +
+ drivers/thunderbolt/tb_regs.h | 1 +
+ drivers/thunderbolt/usb4.c | 20 ++++++++++++++++++++
+ 4 files changed, 46 insertions(+)
+
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2413,6 +2413,26 @@ void tb_switch_unconfigure_link(struct t
+ tb_lc_unconfigure_port(down);
+ }
+
++static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
++{
++ struct tb_port *port;
++
++ if (tb_switch_is_icm(sw))
++ return 0;
++
++ tb_switch_for_each_port(sw, port) {
++ int res;
++
++ if (!port->cap_usb4)
++ continue;
++
++ res = usb4_port_hotplug_enable(port);
++ if (res)
++ return res;
++ }
++ return 0;
++}
++
+ /**
+ * tb_switch_add() - Add a switch to the domain
+ * @sw: Switch to add
+@@ -2480,6 +2500,10 @@ int tb_switch_add(struct tb_switch *sw)
+ return ret;
+ }
+
++ ret = tb_switch_port_hotplug_enable(sw);
++ if (ret)
++ return ret;
++
+ ret = device_add(&sw->dev);
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -979,6 +979,7 @@ struct tb_port *usb4_switch_map_usb3_dow
+ const struct tb_port *port);
+
+ int usb4_port_unlock(struct tb_port *port);
++int usb4_port_hotplug_enable(struct tb_port *port);
+ int usb4_port_configure(struct tb_port *port);
+ void usb4_port_unconfigure(struct tb_port *port);
+ int usb4_port_configure_xdomain(struct tb_port *port);
+--- a/drivers/thunderbolt/tb_regs.h
++++ b/drivers/thunderbolt/tb_regs.h
+@@ -285,6 +285,7 @@ struct tb_regs_port_header {
+ #define ADP_CS_5 0x05
+ #define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+ #define ADP_CS_5_LCA_SHIFT 22
++#define ADP_CS_5_DHP BIT(31)
+
+ /* TMU adapter registers */
+ #define TMU_ADP_CS_3 0x03
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -854,6 +854,26 @@ int usb4_port_unlock(struct tb_port *por
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+ }
+
++/**
++ * usb4_port_hotplug_enable() - Enables hotplug for a port
++ * @port: USB4 port to operate on
++ *
++ * Enables hot plug events on a given port. This is only intended
++ * to be used on lane, DP-IN, and DP-OUT adapters.
++ */
++int usb4_port_hotplug_enable(struct tb_port *port)
++{
++ int ret;
++ u32 val;
++
++ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
++ if (ret)
++ return ret;
++
++ val &= ~ADP_CS_5_DHP;
++ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
++}
++
+ static int usb4_port_set_configured(struct tb_port *port, bool configured)
+ {
+ int ret;
--- /dev/null
+From c0a581d7126c0bbc96163276f585fd7b4e4d8d0e Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Thu, 22 Sep 2022 10:56:22 -0400
+Subject: tracing: Disable interrupt or preemption before acquiring arch_spinlock_t
+
+From: Waiman Long <longman@redhat.com>
+
+commit c0a581d7126c0bbc96163276f585fd7b4e4d8d0e upstream.
+
+It was found that some tracing functions in kernel/trace/trace.c acquire
+an arch_spinlock_t with preemption and irqs enabled. An example is the
+tracing_saved_cmdlines_size_read() function which intermittently causes
+a "BUG: using smp_processor_id() in preemptible" warning when the LTP
+read_all_proc test is run.
+
+That can be problematic if preemption happens after acquiring the
+lock. Add the necessary preemption or interrupt disabling code in the
+appropriate places before acquiring an arch_spinlock_t.
+
+The convention here is to disable preemption for trace_cmdline_lock and
+interrupts for max_lock.
+
+Link: https://lkml.kernel.org/r/20220922145622.1744826-1-longman@redhat.com
+
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: stable@vger.kernel.org
+Fixes: a35873a0993b ("tracing: Add conditional snapshot")
+Fixes: 939c7a4f04fc ("tracing: Introduce saved_cmdlines_size file")
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1197,12 +1197,14 @@ void *tracing_cond_snapshot_data(struct
+ {
+ void *cond_data = NULL;
+
++ local_irq_disable();
+ arch_spin_lock(&tr->max_lock);
+
+ if (tr->cond_snapshot)
+ cond_data = tr->cond_snapshot->cond_data;
+
+ arch_spin_unlock(&tr->max_lock);
++ local_irq_enable();
+
+ return cond_data;
+ }
+@@ -1338,9 +1340,11 @@ int tracing_snapshot_cond_enable(struct
+ goto fail_unlock;
+ }
+
++ local_irq_disable();
+ arch_spin_lock(&tr->max_lock);
+ tr->cond_snapshot = cond_snapshot;
+ arch_spin_unlock(&tr->max_lock);
++ local_irq_enable();
+
+ mutex_unlock(&trace_types_lock);
+
+@@ -1367,6 +1371,7 @@ int tracing_snapshot_cond_disable(struct
+ {
+ int ret = 0;
+
++ local_irq_disable();
+ arch_spin_lock(&tr->max_lock);
+
+ if (!tr->cond_snapshot)
+@@ -1377,6 +1382,7 @@ int tracing_snapshot_cond_disable(struct
+ }
+
+ arch_spin_unlock(&tr->max_lock);
++ local_irq_enable();
+
+ return ret;
+ }
+@@ -2198,6 +2204,11 @@ static size_t tgid_map_max;
+
+ #define SAVED_CMDLINES_DEFAULT 128
+ #define NO_CMDLINE_MAP UINT_MAX
++/*
++ * Preemption must be disabled before acquiring trace_cmdline_lock.
++ * The various trace_arrays' max_lock must be acquired in a context
++ * where interrupt is disabled.
++ */
+ static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ struct saved_cmdlines_buffer {
+ unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+@@ -2410,7 +2421,11 @@ static int trace_save_cmdline(struct tas
+ * the lock, but we also don't want to spin
+ * nor do we want to disable interrupts,
+ * so if we miss here, then better luck next time.
++ *
++ * This is called within the scheduler and wake up, so interrupts
++ * had better been disabled and run queue lock been held.
+ */
++ lockdep_assert_preemption_disabled();
+ if (!arch_spin_trylock(&trace_cmdline_lock))
+ return 0;
+
+@@ -5470,9 +5485,11 @@ tracing_saved_cmdlines_size_read(struct
+ char buf[64];
+ int r;
+
++ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+ r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
+ arch_spin_unlock(&trace_cmdline_lock);
++ preempt_enable();
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ }
+@@ -5497,10 +5514,12 @@ static int tracing_resize_saved_cmdlines
+ return -ENOMEM;
+ }
+
++ preempt_disable();
+ arch_spin_lock(&trace_cmdline_lock);
+ savedcmd_temp = savedcmd;
+ savedcmd = s;
+ arch_spin_unlock(&trace_cmdline_lock);
++ preempt_enable();
+ free_saved_cmdlines_buffer(savedcmd_temp);
+
+ return 0;
+@@ -5953,10 +5972,12 @@ int tracing_set_tracer(struct trace_arra
+
+ #ifdef CONFIG_TRACER_SNAPSHOT
+ if (t->use_max_tr) {
++ local_irq_disable();
+ arch_spin_lock(&tr->max_lock);
+ if (tr->cond_snapshot)
+ ret = -EBUSY;
+ arch_spin_unlock(&tr->max_lock);
++ local_irq_enable();
+ if (ret)
+ goto out;
+ }
+@@ -7030,10 +7051,12 @@ tracing_snapshot_write(struct file *filp
+ goto out;
+ }
+
++ local_irq_disable();
+ arch_spin_lock(&tr->max_lock);
+ if (tr->cond_snapshot)
+ ret = -EBUSY;
+ arch_spin_unlock(&tr->max_lock);
++ local_irq_enable();
+ if (ret)
+ goto out;
+