git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.17-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Oct 2025 08:28:26 +0000 (10:28 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Oct 2025 08:28:26 +0000 (10:28 +0200)
added patches:
asoc-codecs-wcd937x-make-stub-functions-inline.patch
asoc-codecs-wcd937x-set-the-comp-soundwire-port-correctly.patch
asoc-sof-ipc3-topology-fix-multi-core-and-static-pipelines-tear-down.patch
asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
asoc-sof-ipc4-pcm-fix-start-offset-calculation-for-chain-dma.patch
asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
fs-udf-fix-oob-read-in-lengthallocdescs-handling.patch
hisi_acc_vfio_pci-fix-reference-leak-in-hisi_acc_vfio_debug_init.patch
io_uring-waitid-always-prune-wait-queue-entry-in-io_waitid_wait.patch
io_uring-zcrx-fix-overshooting-recv-limit.patch
mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
mfd-rz-mtu3-fix-mtu5-nfcr-register-offset.patch
mfd-vexpress-sysreg-check-the-return-value-of-devm_gpiochip_add_data.patch
net-nfc-nci-add-parameter-validation-for-packet-data.patch
tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch
tracing-fix-lock-imbalance-in-s_start-memory-allocation-failure-path.patch
tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch
tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch
uio_hv_generic-let-userspace-take-care-of-interrupt-mask.patch

21 files changed:
queue-6.17/asoc-codecs-wcd937x-make-stub-functions-inline.patch [new file with mode: 0644]
queue-6.17/asoc-codecs-wcd937x-set-the-comp-soundwire-port-correctly.patch [new file with mode: 0644]
queue-6.17/asoc-sof-ipc3-topology-fix-multi-core-and-static-pipelines-tear-down.patch [new file with mode: 0644]
queue-6.17/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch [new file with mode: 0644]
queue-6.17/asoc-sof-ipc4-pcm-fix-start-offset-calculation-for-chain-dma.patch [new file with mode: 0644]
queue-6.17/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch [new file with mode: 0644]
queue-6.17/fs-udf-fix-oob-read-in-lengthallocdescs-handling.patch [new file with mode: 0644]
queue-6.17/hisi_acc_vfio_pci-fix-reference-leak-in-hisi_acc_vfio_debug_init.patch [new file with mode: 0644]
queue-6.17/io_uring-waitid-always-prune-wait-queue-entry-in-io_waitid_wait.patch [new file with mode: 0644]
queue-6.17/io_uring-zcrx-fix-overshooting-recv-limit.patch [new file with mode: 0644]
queue-6.17/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch [new file with mode: 0644]
queue-6.17/mfd-rz-mtu3-fix-mtu5-nfcr-register-offset.patch [new file with mode: 0644]
queue-6.17/mfd-vexpress-sysreg-check-the-return-value-of-devm_gpiochip_add_data.patch [new file with mode: 0644]
queue-6.17/net-nfc-nci-add-parameter-validation-for-packet-data.patch [new file with mode: 0644]
queue-6.17/series
queue-6.17/tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch [new file with mode: 0644]
queue-6.17/tracing-fix-lock-imbalance-in-s_start-memory-allocation-failure-path.patch [new file with mode: 0644]
queue-6.17/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch [new file with mode: 0644]
queue-6.17/tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch [new file with mode: 0644]
queue-6.17/tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch [new file with mode: 0644]
queue-6.17/uio_hv_generic-let-userspace-take-care-of-interrupt-mask.patch [new file with mode: 0644]

diff --git a/queue-6.17/asoc-codecs-wcd937x-make-stub-functions-inline.patch b/queue-6.17/asoc-codecs-wcd937x-make-stub-functions-inline.patch
new file mode 100644 (file)
index 0000000..a3e90e1
--- /dev/null
@@ -0,0 +1,50 @@
+From c4bb62eb594418a6bd05ff03bb9072ee1fef29c2 Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Tue, 9 Sep 2025 13:19:43 +0100
+Subject: ASoC: codecs: wcd937x: make stub functions inline
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit c4bb62eb594418a6bd05ff03bb9072ee1fef29c2 upstream.
+
+For some reason we ended up with stub functions that are not inline,
+this can result in build error if its included multiple places, as we will
+be redefining the same function
+
+Fixes: c99a515ff153 ("ASoC: codecs: wcd937x-sdw: add SoundWire driver")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250909121954.225833-3-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wcd937x.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/soc/codecs/wcd937x.h
++++ b/sound/soc/codecs/wcd937x.h
+@@ -552,21 +552,21 @@ int wcd937x_sdw_hw_params(struct wcd937x
+ struct device *wcd937x_sdw_device_get(struct device_node *np);
+ #else
+-int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_free(struct wcd937x_sdw_priv *wcd,
+                    struct snd_pcm_substream *substream,
+                    struct snd_soc_dai *dai)
+ {
+       return -EOPNOTSUPP;
+ }
+-int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_set_sdw_stream(struct wcd937x_sdw_priv *wcd,
+                              struct snd_soc_dai *dai,
+                              void *stream, int direction)
+ {
+       return -EOPNOTSUPP;
+ }
+-int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
++static inline int wcd937x_sdw_hw_params(struct wcd937x_sdw_priv *wcd,
+                         struct snd_pcm_substream *substream,
+                         struct snd_pcm_hw_params *params,
+                         struct snd_soc_dai *dai)
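The failure mode is easiest to see outside the kernel tree. A minimal sketch, with hypothetical names not taken from the patch: a stub defined in a header without static inline is emitted as an external symbol in every translation unit that includes the header, so linking two such objects fails with a multiple-definition error; making the stub static inline, as the patch does, gives each translation unit its own local copy (-EOPNOTSUPP is 95 on Linux).

    /* stub.h -- hypothetical header included by more than one .c file */
    int wcd_stub_broken(void)              /* external definition: duplicated in every TU,      */
    {                                      /* so linking a.o and b.o fails with                 */
            return -95;                    /* "multiple definition of `wcd_stub_broken'"        */
    }

    static inline int wcd_stub_fixed(void) /* inline stub: local to each TU, safe to include    */
    {                                      /* from any number of files                          */
            return -95;
    }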
diff --git a/queue-6.17/asoc-codecs-wcd937x-set-the-comp-soundwire-port-correctly.patch b/queue-6.17/asoc-codecs-wcd937x-set-the-comp-soundwire-port-correctly.patch
new file mode 100644 (file)
index 0000000..b946bfe
--- /dev/null
@@ -0,0 +1,39 @@
+From 66a940b1bf48a7095162688332d725ba160154eb Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Tue, 9 Sep 2025 13:19:42 +0100
+Subject: ASoC: codecs: wcd937x: set the comp soundwire port correctly
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit 66a940b1bf48a7095162688332d725ba160154eb upstream.
+
+For some reason we endup with setting soundwire port for
+HPHL_COMP and HPHR_COMP as zero, this can potentially result
+in a memory corruption due to accessing and setting -1 th element of
+port_map array.
+
+Fixes: 82be8c62a38c ("ASoC: codecs: wcd937x: add basic controls")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Reviewed-by: Alexey Klimov <alexey.klimov@linaro.org>
+Link: https://patch.msgid.link/20250909121954.225833-2-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wcd937x.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/wcd937x.c
++++ b/sound/soc/codecs/wcd937x.c
+@@ -2046,9 +2046,9 @@ static const struct snd_kcontrol_new wcd
+       SOC_ENUM_EXT("RX HPH Mode", rx_hph_mode_mux_enum,
+                    wcd937x_rx_hph_mode_get, wcd937x_rx_hph_mode_put),
+-      SOC_SINGLE_EXT("HPHL_COMP Switch", SND_SOC_NOPM, 0, 1, 0,
++      SOC_SINGLE_EXT("HPHL_COMP Switch", WCD937X_COMP_L, 0, 1, 0,
+                      wcd937x_get_compander, wcd937x_set_compander),
+-      SOC_SINGLE_EXT("HPHR_COMP Switch", SND_SOC_NOPM, 1, 1, 0,
++      SOC_SINGLE_EXT("HPHR_COMP Switch", WCD937X_COMP_R, 1, 1, 0,
+                      wcd937x_get_compander, wcd937x_set_compander),
+       SOC_SINGLE_TLV("HPHL Volume", WCD937X_HPH_L_EN, 0, 20, 1, line_gain),
diff --git a/queue-6.17/asoc-sof-ipc3-topology-fix-multi-core-and-static-pipelines-tear-down.patch b/queue-6.17/asoc-sof-ipc3-topology-fix-multi-core-and-static-pipelines-tear-down.patch
new file mode 100644 (file)
index 0000000..4c5ff23
--- /dev/null
@@ -0,0 +1,57 @@
+From 59abe7bc7e7c70e9066b3e46874d1b7e6a13de14 Mon Sep 17 00:00:00 2001
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Date: Thu, 2 Oct 2025 10:31:25 +0300
+Subject: ASoC: SOF: ipc3-topology: Fix multi-core and static pipelines tear down
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+
+commit 59abe7bc7e7c70e9066b3e46874d1b7e6a13de14 upstream.
+
+In the case of static pipelines, freeing the widgets in the pipelines
+that were not suspended after freeing the scheduler widgets results in
+errors because the secondary cores are powered off when the scheduler
+widgets are freed. Fix this by tearing down the leftover pipelines before
+powering off the secondary cores.
+
+Cc: stable@vger.kernel.org
+Fixes: d7332c4a4f1a ("ASoC: SOF: ipc3-topology: Fix pipeline tear down logic")
+Signed-off-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://patch.msgid.link/20251002073125.32471-1-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc3-topology.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/sound/soc/sof/ipc3-topology.c
++++ b/sound/soc/sof/ipc3-topology.c
+@@ -2473,11 +2473,6 @@ static int sof_ipc3_tear_down_all_pipeli
+       if (ret < 0)
+               return ret;
+-      /* free all the scheduler widgets now */
+-      ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
+-      if (ret < 0)
+-              return ret;
+-
+       /*
+        * Tear down all pipelines associated with PCMs that did not get suspended
+        * and unset the prepare flag so that they can be set up again during resume.
+@@ -2493,6 +2488,11 @@ static int sof_ipc3_tear_down_all_pipeli
+               }
+       }
++      /* free all the scheduler widgets now. This will also power down the secondary cores */
++      ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
++      if (ret < 0)
++              return ret;
++
+       list_for_each_entry(sroute, &sdev->route_list, list)
+               sroute->setup = false;
diff --git a/queue-6.17/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch b/queue-6.17/asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
new file mode 100644 (file)
index 0000000..e902dd4
--- /dev/null
@@ -0,0 +1,182 @@
+From bcd1383516bb5a6f72b2d1e7f7ad42c4a14837d1 Mon Sep 17 00:00:00 2001
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Date: Thu, 2 Oct 2025 10:47:15 +0300
+Subject: ASoC: SOF: ipc4-pcm: fix delay calculation when DSP resamples
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+
+commit bcd1383516bb5a6f72b2d1e7f7ad42c4a14837d1 upstream.
+
+When the sampling rates going in (host) and out (dai) from the DSP
+are different, the IPC4 delay reporting does not work correctly.
+Add support for this case by scaling the all raw position values to
+a common timebase before calculating real-time delay for the PCM.
+
+Cc: stable@vger.kernel.org
+Fixes: 0ea06680dfcb ("ASoC: SOF: ipc4-pcm: Correct the delay calculation")
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://patch.msgid.link/20251002074719.2084-2-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-pcm.c |   83 +++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 62 insertions(+), 21 deletions(-)
+
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -19,12 +19,14 @@
+  * struct sof_ipc4_timestamp_info - IPC4 timestamp info
+  * @host_copier: the host copier of the pcm stream
+  * @dai_copier: the dai copier of the pcm stream
+- * @stream_start_offset: reported by fw in memory window (converted to frames)
+- * @stream_end_offset: reported by fw in memory window (converted to frames)
++ * @stream_start_offset: reported by fw in memory window (converted to
++ *                       frames at host_copier sampling rate)
++ * @stream_end_offset: reported by fw in memory window (converted to
++ *                     frames at host_copier sampling rate)
+  * @llp_offset: llp offset in memory window
+- * @boundary: wrap boundary should be used for the LLP frame counter
+  * @delay: Calculated and stored in pointer callback. The stored value is
+- *       returned in the delay callback.
++ *         returned in the delay callback. Expressed in frames at host copier
++ *         sampling rate.
+  */
+ struct sof_ipc4_timestamp_info {
+       struct sof_ipc4_copier *host_copier;
+@@ -33,7 +35,6 @@ struct sof_ipc4_timestamp_info {
+       u64 stream_end_offset;
+       u32 llp_offset;
+-      u64 boundary;
+       snd_pcm_sframes_t delay;
+ };
+@@ -48,6 +49,16 @@ struct sof_ipc4_pcm_stream_priv {
+       bool chain_dma_allocated;
+ };
++/*
++ * Modulus to use to compare host and link position counters. The sampling
++ * rates may be different, so the raw hardware counters will wrap
++ * around at different times. To calculate differences, use
++ * DELAY_BOUNDARY as a common modulus. This value must be smaller than
++ * the wrap-around point of any hardware counter, and larger than any
++ * valid delay measurement.
++ */
++#define DELAY_BOUNDARY                U32_MAX
++
+ static inline struct sof_ipc4_timestamp_info *
+ sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
+ {
+@@ -993,6 +1004,35 @@ static int sof_ipc4_pcm_hw_params(struct
+       return 0;
+ }
++static u64 sof_ipc4_frames_dai_to_host(struct sof_ipc4_timestamp_info *time_info, u64 value)
++{
++      u64 dai_rate, host_rate;
++
++      if (!time_info->dai_copier || !time_info->host_copier)
++              return value;
++
++      /*
++       * copiers do not change sampling rate, so we can use the
++       * out_format independently of stream direction
++       */
++      dai_rate = time_info->dai_copier->data.out_format.sampling_frequency;
++      host_rate = time_info->host_copier->data.out_format.sampling_frequency;
++
++      if (!dai_rate || !host_rate || dai_rate == host_rate)
++              return value;
++
++      /* take care not to overflow u64, rates can be up to 768000 */
++      if (value > U32_MAX) {
++              value = div64_u64(value, dai_rate);
++              value *= host_rate;
++      } else {
++              value *= host_rate;
++              value = div64_u64(value, dai_rate);
++      }
++
++      return value;
++}
++
+ static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
+                                           struct snd_pcm_substream *substream,
+                                           struct snd_sof_pcm_stream *sps,
+@@ -1043,14 +1083,13 @@ static int sof_ipc4_get_stream_start_off
+       time_info->stream_end_offset = ppl_reg.stream_end_offset;
+       do_div(time_info->stream_end_offset, dai_sample_size);
++      /* convert to host frame time */
++      time_info->stream_start_offset =
++              sof_ipc4_frames_dai_to_host(time_info, time_info->stream_start_offset);
++      time_info->stream_end_offset =
++              sof_ipc4_frames_dai_to_host(time_info, time_info->stream_end_offset);
++
+ out:
+-      /*
+-       * Calculate the wrap boundary need to be used for delay calculation
+-       * The host counter is in bytes, it will wrap earlier than the frames
+-       * based link counter.
+-       */
+-      time_info->boundary = div64_u64(~((u64)0),
+-                                      frames_to_bytes(substream->runtime, 1));
+       /* Initialize the delay value to 0 (no delay) */
+       time_info->delay = 0;
+@@ -1093,6 +1132,8 @@ static int sof_ipc4_pcm_pointer(struct s
+       /* For delay calculation we need the host counter */
+       host_cnt = snd_sof_pcm_get_host_byte_counter(sdev, component, substream);
++
++      /* Store the original value to host_ptr */
+       host_ptr = host_cnt;
+       /* convert the host_cnt to frames */
+@@ -1111,6 +1152,8 @@ static int sof_ipc4_pcm_pointer(struct s
+               sof_mailbox_read(sdev, time_info->llp_offset, &llp, sizeof(llp));
+               dai_cnt = ((u64)llp.reading.llp_u << 32) | llp.reading.llp_l;
+       }
++
++      dai_cnt = sof_ipc4_frames_dai_to_host(time_info, dai_cnt);
+       dai_cnt += time_info->stream_end_offset;
+       /* In two cases dai dma counter is not accurate
+@@ -1144,8 +1187,9 @@ static int sof_ipc4_pcm_pointer(struct s
+               dai_cnt -= time_info->stream_start_offset;
+       }
+-      /* Wrap the dai counter at the boundary where the host counter wraps */
+-      div64_u64_rem(dai_cnt, time_info->boundary, &dai_cnt);
++      /* Convert to a common base before comparisons */
++      dai_cnt &= DELAY_BOUNDARY;
++      host_cnt &= DELAY_BOUNDARY;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               head_cnt = host_cnt;
+@@ -1155,14 +1199,11 @@ static int sof_ipc4_pcm_pointer(struct s
+               tail_cnt = host_cnt;
+       }
+-      if (head_cnt < tail_cnt) {
+-              time_info->delay = time_info->boundary - tail_cnt + head_cnt;
+-              goto out;
+-      }
+-
+-      time_info->delay =  head_cnt - tail_cnt;
++      if (unlikely(head_cnt < tail_cnt))
++              time_info->delay = DELAY_BOUNDARY - tail_cnt + head_cnt;
++      else
++              time_info->delay = head_cnt - tail_cnt;
+-out:
+       /*
+        * Convert the host byte counter to PCM pointer which wraps in buffer
+        * and it is in frames
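The core of the fix is the rate conversion; a standalone sketch of the same arithmetic (plain C, not the kernel helper) shows why the multiply and divide are ordered differently for large counters: sampling rates can reach 768000, so multiplying a counter above U32_MAX first could overflow 64 bits.

    #include <stdint.h>
    #include <stdio.h>

    /* Convert a frame count measured at dai_rate into frames at host_rate. */
    static uint64_t frames_dai_to_host(uint64_t value, uint64_t dai_rate, uint64_t host_rate)
    {
            if (!dai_rate || !host_rate || dai_rate == host_rate)
                    return value;
            if (value > UINT32_MAX) {
                    value /= dai_rate;      /* divide first so the multiply stays within u64 */
                    value *= host_rate;
            } else {
                    value *= host_rate;     /* multiply first for full precision on small counts */
                    value /= dai_rate;
            }
            return value;
    }

    int main(void)
    {
            /* 48000 frames at a 16 kHz dai correspond to 144000 frames at a 48 kHz host copier */
            printf("%llu\n", (unsigned long long)frames_dai_to_host(48000, 16000, 48000));
            return 0;
    }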
diff --git a/queue-6.17/asoc-sof-ipc4-pcm-fix-start-offset-calculation-for-chain-dma.patch b/queue-6.17/asoc-sof-ipc4-pcm-fix-start-offset-calculation-for-chain-dma.patch
new file mode 100644 (file)
index 0000000..8f67043
--- /dev/null
@@ -0,0 +1,93 @@
+From bace10b59624e6bd8d68bc9304357f292f1b3dcf Mon Sep 17 00:00:00 2001
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Date: Thu, 2 Oct 2025 10:47:16 +0300
+Subject: ASoC: SOF: ipc4-pcm: fix start offset calculation for chain DMA
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+
+commit bace10b59624e6bd8d68bc9304357f292f1b3dcf upstream.
+
+Assumption that chain DMA module starts the link DMA when 1ms of
+data is available from host is not correct. Instead the firmware
+chain DMA module fills the link DMA with initial buffer of zeroes
+and the host and link DMAs are started at the same time.
+
+This results in a small error in delay calculation. This can become a
+more severe problem if host DMA has delays that exceed 1ms. This results
+in negative delay to be calculated and bogus values reported to
+applications. This can confuse some applications like
+alsa_conformance_test.
+
+Fix the issue by correctly calculating the firmware chain DMA
+preamble size and initializing the start offset to this value.
+
+Cc: stable@vger.kernel.org
+Fixes: a1d203d390e0 ("ASoC: SOF: ipc4-pcm: Enable delay reporting for ChainDMA streams")
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://patch.msgid.link/20251002074719.2084-3-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/sof/ipc4-pcm.c      |   14 ++++++++++----
+ sound/soc/sof/ipc4-topology.c |    1 -
+ sound/soc/sof/ipc4-topology.h |    2 ++
+ 3 files changed, 12 insertions(+), 5 deletions(-)
+
+--- a/sound/soc/sof/ipc4-pcm.c
++++ b/sound/soc/sof/ipc4-pcm.c
+@@ -1052,7 +1052,7 @@ static int sof_ipc4_get_stream_start_off
+               return -EINVAL;
+       } else if (host_copier->data.gtw_cfg.node_id == SOF_IPC4_CHAIN_DMA_NODE_ID) {
+               /*
+-               * While the firmware does not supports time_info reporting for
++               * While the firmware does not support time_info reporting for
+                * streams using ChainDMA, it is granted that ChainDMA can only
+                * be used on Host+Link pairs where the link position is
+                * accessible from the host side.
+@@ -1060,10 +1060,16 @@ static int sof_ipc4_get_stream_start_off
+                * Enable delay calculation in case of ChainDMA via host
+                * accessible registers.
+                *
+-               * The ChainDMA uses 2x 1ms ping-pong buffer, dai side starts
+-               * when 1ms data is available
++               * The ChainDMA prefills the link DMA with a preamble
++               * of zero samples. Set the stream start offset based
++               * on size of the preamble (driver provided fifo size
++               * multiplied by 2.5). We add 1ms of margin as the FW
++               * will align the buffer size to DMA hardware
++               * alignment that is not known to host.
+                */
+-              time_info->stream_start_offset = substream->runtime->rate / MSEC_PER_SEC;
++              int pre_ms = SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS * 5 / 2 + 1;
++
++              time_info->stream_start_offset = pre_ms * substream->runtime->rate / MSEC_PER_SEC;
+               goto out;
+       }
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -33,7 +33,6 @@ MODULE_PARM_DESC(ipc4_ignore_cpc,
+ #define SOF_IPC4_GAIN_PARAM_ID  0
+ #define SOF_IPC4_TPLG_ABI_SIZE 6
+-#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
+ static DEFINE_IDA(alh_group_ida);
+ static DEFINE_IDA(pipeline_ida);
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -247,6 +247,8 @@ struct sof_ipc4_dma_stream_ch_map {
+ #define SOF_IPC4_DMA_METHOD_HDA   1
+ #define SOF_IPC4_DMA_METHOD_GPDMA 2 /* defined for consistency but not used */
++#define SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS 2
++
+ /**
+  * struct sof_ipc4_dma_config: DMA configuration
+  * @dma_method: HDAudio or GPDMA
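A quick sanity check of the new start-offset formula under assumed stream parameters (a 48 kHz stream; SOF_IPC4_CHAIN_DMA_BUF_SIZE_MS is 2 per the hunk above):

    pre_ms              = 2 * 5 / 2 + 1         = 6 ms   (preamble plus 1 ms margin)
    stream_start_offset = pre_ms * 48000 / 1000 = 288 frames

whereas the old code hard-coded rate / MSEC_PER_SEC = 48 frames (1 ms), which is where the negative, bogus delay values came from once host DMA latency exceeded 1 ms.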
diff --git a/queue-6.17/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch b/queue-6.17/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
new file mode 100644 (file)
index 0000000..8a11508
--- /dev/null
@@ -0,0 +1,81 @@
+From 4e65bda8273c938039403144730923e77916a3d7 Mon Sep 17 00:00:00 2001
+From: Ma Ke <make24@iscas.ac.cn>
+Date: Tue, 23 Sep 2025 14:52:12 +0800
+Subject: ASoC: wcd934x: fix error handling in wcd934x_codec_parse_data()
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+commit 4e65bda8273c938039403144730923e77916a3d7 upstream.
+
+wcd934x_codec_parse_data() contains a device reference count leak in
+of_slim_get_device() where device_find_child() increases the reference
+count of the device but this reference is not properly decreased in
+the success path. Add put_device() in wcd934x_codec_parse_data() and
+add devm_add_action_or_reset() in the probe function, which ensures
+that the reference count of the device is correctly managed.
+
+Memory leak in regmap_init_slimbus() as the allocated regmap is not
+released when the device is removed. Using devm_regmap_init_slimbus()
+instead of regmap_init_slimbus() to ensure automatic regmap cleanup on
+device removal.
+
+Calling path: of_slim_get_device() -> of_find_slim_device() ->
+device_find_child(). As comment of device_find_child() says, 'NOTE:
+you will need to drop the reference with put_device() after use.'.
+
+Found by code review.
+
+Cc: stable@vger.kernel.org
+Fixes: a61f3b4f476e ("ASoC: wcd934x: add support to wcd9340/wcd9341 codec")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250923065212.26660-1-make24@iscas.ac.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wcd934x.c |   17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -5831,6 +5831,13 @@ static const struct snd_soc_component_dr
+       .endianness = 1,
+ };
++static void wcd934x_put_device_action(void *data)
++{
++      struct device *dev = data;
++
++      put_device(dev);
++}
++
+ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
+ {
+       struct device *dev = &wcd->sdev->dev;
+@@ -5847,11 +5854,13 @@ static int wcd934x_codec_parse_data(stru
+               return dev_err_probe(dev, -EINVAL, "Unable to get SLIM Interface device\n");
+       slim_get_logical_addr(wcd->sidev);
+-      wcd->if_regmap = regmap_init_slimbus(wcd->sidev,
++      wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev,
+                                 &wcd934x_ifc_regmap_config);
+-      if (IS_ERR(wcd->if_regmap))
++      if (IS_ERR(wcd->if_regmap)) {
++              put_device(&wcd->sidev->dev);
+               return dev_err_probe(dev, PTR_ERR(wcd->if_regmap),
+                                    "Failed to allocate ifc register map\n");
++      }
+       of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
+                            &wcd->dmic_sample_rate);
+@@ -5893,6 +5902,10 @@ static int wcd934x_codec_probe(struct pl
+       if (ret)
+               return ret;
++      ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev);
++      if (ret)
++              return ret;
++
+       /* set default rate 9P6MHz */
+       regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+                          WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
diff --git a/queue-6.17/fs-udf-fix-oob-read-in-lengthallocdescs-handling.patch b/queue-6.17/fs-udf-fix-oob-read-in-lengthallocdescs-handling.patch
new file mode 100644 (file)
index 0000000..fa559e3
--- /dev/null
@@ -0,0 +1,76 @@
+From 3bd5e45c2ce30e239d596becd5db720f7eb83c99 Mon Sep 17 00:00:00 2001
+From: Larshin Sergey <Sergey.Larshin@kaspersky.com>
+Date: Mon, 22 Sep 2025 16:13:58 +0300
+Subject: fs: udf: fix OOB read in lengthAllocDescs handling
+
+From: Larshin Sergey <Sergey.Larshin@kaspersky.com>
+
+commit 3bd5e45c2ce30e239d596becd5db720f7eb83c99 upstream.
+
+When parsing Allocation Extent Descriptor, lengthAllocDescs comes from
+on-disk data and must be validated against the block size. Crafted or
+corrupted images may set lengthAllocDescs so that the total descriptor
+length (sizeof(allocExtDesc) + lengthAllocDescs) exceeds the buffer,
+leading udf_update_tag() to call crc_itu_t() on out-of-bounds memory and
+trigger a KASAN use-after-free read.
+
+BUG: KASAN: use-after-free in crc_itu_t+0x1d5/0x2b0 lib/crc-itu-t.c:60
+Read of size 1 at addr ffff888041e7d000 by task syz-executor317/5309
+
+CPU: 0 UID: 0 PID: 5309 Comm: syz-executor317 Not tainted 6.12.0-rc4-syzkaller-00261-g850925a8133c #0
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:377 [inline]
+ print_report+0x169/0x550 mm/kasan/report.c:488
+ kasan_report+0x143/0x180 mm/kasan/report.c:601
+ crc_itu_t+0x1d5/0x2b0 lib/crc-itu-t.c:60
+ udf_update_tag+0x70/0x6a0 fs/udf/misc.c:261
+ udf_write_aext+0x4d8/0x7b0 fs/udf/inode.c:2179
+ extent_trunc+0x2f7/0x4a0 fs/udf/truncate.c:46
+ udf_truncate_tail_extent+0x527/0x7e0 fs/udf/truncate.c:106
+ udf_release_file+0xc1/0x120 fs/udf/file.c:185
+ __fput+0x23f/0x880 fs/file_table.c:431
+ task_work_run+0x24f/0x310 kernel/task_work.c:239
+ exit_task_work include/linux/task_work.h:43 [inline]
+ do_exit+0xa2f/0x28e0 kernel/exit.c:939
+ do_group_exit+0x207/0x2c0 kernel/exit.c:1088
+ __do_sys_exit_group kernel/exit.c:1099 [inline]
+ __se_sys_exit_group kernel/exit.c:1097 [inline]
+ __x64_sys_exit_group+0x3f/0x40 kernel/exit.c:1097
+ x64_sys_call+0x2634/0x2640 arch/x86/include/generated/asm/syscalls_64.h:232
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ </TASK>
+
+Validate the computed total length against epos->bh->b_size.
+
+Found by Linux Verification Center (linuxtesting.org) with Syzkaller.
+
+Reported-by: syzbot+8743fca924afed42f93e@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=8743fca924afed42f93e
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Larshin Sergey <Sergey.Larshin@kaspersky.com>
+Link: https://patch.msgid.link/20250922131358.745579-1-Sergey.Larshin@kaspersky.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/udf/inode.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -2272,6 +2272,9 @@ int udf_current_aext(struct inode *inode
+               if (check_add_overflow(sizeof(struct allocExtDesc),
+                               le32_to_cpu(header->lengthAllocDescs), &alen))
+                       return -1;
++
++              if (alen > epos->bh->b_size)
++                      return -1;
+       }
+       switch (iinfo->i_alloc_type) {
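The added bounds check can be sketched as standalone C with hypothetical names (the kernel code uses check_add_overflow() as shown above): the length read from disk plus the fixed descriptor header must be computed without wrapping and must fit inside the block buffer before the descriptor is handed to the CRC routine.

    #include <stddef.h>
    #include <stdint.h>

    /* Return 1 if a descriptor of hdr_len bytes plus an on-disk lengthAllocDescs
     * value fits inside a buffer of buf_len bytes. */
    static int alloc_ext_len_valid(size_t hdr_len, uint32_t length_alloc_descs, size_t buf_len)
    {
            size_t total;

            if (__builtin_add_overflow(hdr_len, (size_t)length_alloc_descs, &total))
                    return 0;               /* addition wrapped around */
            if (total > buf_len)
                    return 0;               /* crafted length points past the buffer */
            return 1;
    }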
diff --git a/queue-6.17/hisi_acc_vfio_pci-fix-reference-leak-in-hisi_acc_vfio_debug_init.patch b/queue-6.17/hisi_acc_vfio_pci-fix-reference-leak-in-hisi_acc_vfio_debug_init.patch
new file mode 100644 (file)
index 0000000..42a02d8
--- /dev/null
@@ -0,0 +1,46 @@
+From eaba58355ecd124b4a8c91df7335970ad9fe2624 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin <linmq006@gmail.com>
+Date: Mon, 1 Sep 2025 16:18:08 +0800
+Subject: hisi_acc_vfio_pci: Fix reference leak in hisi_acc_vfio_debug_init
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+commit eaba58355ecd124b4a8c91df7335970ad9fe2624 upstream.
+
+The debugfs_lookup() function returns a dentry with an increased reference
+count that must be released by calling dput().
+
+Fixes: b398f91779b8 ("hisi_acc_vfio_pci: register debugfs for hisilicon migration driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Longfang Liu <liulongfang@huawei.com>
+Link: https://lore.kernel.org/r/20250901081809.2286649-1-linmq006@gmail.com
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
++++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+@@ -1612,8 +1612,10 @@ static void hisi_acc_vfio_debug_init(str
+       }
+       migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+-      if (!migf)
++      if (!migf) {
++              dput(vfio_dev_migration);
+               return;
++      }
+       hisi_acc_vdev->debug_migf = migf;
+       vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
+@@ -1623,6 +1625,8 @@ static void hisi_acc_vfio_debug_init(str
+                                   hisi_acc_vf_migf_read);
+       debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
+                                   hisi_acc_vf_debug_cmd);
++
++      dput(vfio_dev_migration);
+ }
+ static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
diff --git a/queue-6.17/io_uring-waitid-always-prune-wait-queue-entry-in-io_waitid_wait.patch b/queue-6.17/io_uring-waitid-always-prune-wait-queue-entry-in-io_waitid_wait.patch
new file mode 100644 (file)
index 0000000..5630fe0
--- /dev/null
@@ -0,0 +1,43 @@
+From 2f8229d53d984c6a05b71ac9e9583d4354e3b91f Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 7 Oct 2025 07:46:00 -0600
+Subject: io_uring/waitid: always prune wait queue entry in io_waitid_wait()
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 2f8229d53d984c6a05b71ac9e9583d4354e3b91f upstream.
+
+For a successful return, always remove our entry from the wait queue
+entry list. Previously this was skipped if a cancelation was in
+progress, but this can race with another invocation of the wait queue
+entry callback.
+
+Cc: stable@vger.kernel.org
+Fixes: f31ecf671ddc ("io_uring: add IORING_OP_WAITID support")
+Reported-by: syzbot+b9e83021d9c642a33d8c@syzkaller.appspotmail.com
+Tested-by: syzbot+b9e83021d9c642a33d8c@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/io-uring/68e5195e.050a0220.256323.001f.GAE@google.com/
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/waitid.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/io_uring/waitid.c
++++ b/io_uring/waitid.c
+@@ -232,13 +232,14 @@ static int io_waitid_wait(struct wait_qu
+       if (!pid_child_should_wake(wo, p))
+               return 0;
++      list_del_init(&wait->entry);
++
+       /* cancel is in progress */
+       if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
+               return 1;
+       req->io_task_work.func = io_waitid_cb;
+       io_req_task_work_add(req);
+-      list_del_init(&wait->entry);
+       return 1;
+ }
diff --git a/queue-6.17/io_uring-zcrx-fix-overshooting-recv-limit.patch b/queue-6.17/io_uring-zcrx-fix-overshooting-recv-limit.patch
new file mode 100644 (file)
index 0000000..66a85fd
--- /dev/null
@@ -0,0 +1,44 @@
+From 09cfd3c52ea76f43b3cb15e570aeddf633d65e80 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Wed, 8 Oct 2025 13:38:06 +0100
+Subject: io_uring/zcrx: fix overshooting recv limit
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 09cfd3c52ea76f43b3cb15e570aeddf633d65e80 upstream.
+
+It's reported that sometimes a zcrx request can receive more than was
+requested. It's caused by io_zcrx_recv_skb() adjusting desc->count for
+all received buffers including frag lists, but then doing recursive
+calls to process frag list skbs, which leads to desc->count double
+accounting and underflow.
+
+Reported-and-tested-by: Matthias Jasny <matthiasjasny@gmail.com>
+Fixes: 6699ec9a23f85 ("io_uring/zcrx: add a read limit to recvzc requests")
+Cc: stable@vger.kernel.org
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/zcrx.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/io_uring/zcrx.c
++++ b/io_uring/zcrx.c
+@@ -1154,12 +1154,16 @@ io_zcrx_recv_skb(read_descriptor_t *desc
+               end = start + frag_iter->len;
+               if (offset < end) {
++                      size_t count;
++
+                       copy = end - offset;
+                       if (copy > len)
+                               copy = len;
+                       off = offset - start;
++                      count = desc->count;
+                       ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
++                      desc->count = count;
+                       if (ret < 0)
+                               goto out;
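A stripped-down sketch of the accounting problem, with hypothetical types rather than the io_uring structures: the outer pass has already charged the whole skb, including its frag list, against desc->count, so the recursive call must not charge those bytes a second time; saving the counter before the call and restoring it afterwards, as the patch does, keeps the receive limit accounted exactly once.

    struct read_desc { size_t count; };          /* remaining receive budget */

    static size_t consume(struct read_desc *desc, size_t len)
    {
            size_t used = len < desc->count ? len : desc->count;

            desc->count -= used;                  /* inner call charges the budget */
            return used;
    }

    static size_t consume_frag_list(struct read_desc *desc, size_t frag_len)
    {
            size_t saved = desc->count;           /* outer caller already charged these bytes... */
            size_t ret = consume(desc, frag_len);

            desc->count = saved;                  /* ...so undo the inner charge to avoid double
                                                     accounting and an eventual underflow */
            return ret;
    }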
diff --git a/queue-6.17/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch b/queue-6.17/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
new file mode 100644 (file)
index 0000000..d00aa25
--- /dev/null
@@ -0,0 +1,39 @@
+From 64e0d839c589f4f2ecd2e3e5bdb5cee6ba6bade9 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hansg@kernel.org>
+Date: Mon, 4 Aug 2025 15:32:40 +0200
+Subject: mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag
+
+From: Hans de Goede <hansg@kernel.org>
+
+commit 64e0d839c589f4f2ecd2e3e5bdb5cee6ba6bade9 upstream.
+
+Testing has shown that reading multiple registers at once (for 10-bit
+ADC values) does not work. Set the use_single_read regmap_config flag
+to make regmap split these for us.
+
+This should fix temperature opregion accesses done by
+drivers/acpi/pmic/intel_pmic_chtdc_ti.c and is also necessary for
+the upcoming drivers for the ADC and battery MFD cells.
+
+Fixes: 6bac0606fdba ("mfd: Add support for Cherry Trail Dollar Cove TI PMIC")
+Cc: stable@vger.kernel.org
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Link: https://lore.kernel.org/r/20250804133240.312383-1-hansg@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -82,6 +82,8 @@ static const struct regmap_config chtdc_
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0xff,
++      /* The hardware does not support reading multiple registers at once */
++      .use_single_read = true,
+ };
+ static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/queue-6.17/mfd-rz-mtu3-fix-mtu5-nfcr-register-offset.patch b/queue-6.17/mfd-rz-mtu3-fix-mtu5-nfcr-register-offset.patch
new file mode 100644 (file)
index 0000000..a3293c6
--- /dev/null
@@ -0,0 +1,35 @@
+From da32b0e82c523b76265ba1ad25d7ea74f0ece402 Mon Sep 17 00:00:00 2001
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Date: Wed, 10 Sep 2025 20:59:06 +0300
+Subject: mfd: rz-mtu3: Fix MTU5 NFCR register offset
+
+From: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+
+commit da32b0e82c523b76265ba1ad25d7ea74f0ece402 upstream.
+
+The NFCR register for MTU5 is at 0x1a95 offset according to Datasheet
+Page 725, Table 16.4. The address of all registers is offset by 0x1200,
+making the proper address of MTU5 NFCR register be 0x895.
+
+Cc: stable@vger.kernel.org
+Fixes: 654c293e1687 ("mfd: Add Renesas RZ/G2L MTU3a core driver")
+Signed-off-by: Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+Reviewed-by: Biju Das <biju.das.jz@bp.renesas.com>
+Link: https://lore.kernel.org/r/20250910175914.12956-1-cosmin-gabriel.tanislav.xa@renesas.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/rz-mtu3.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/rz-mtu3.c
++++ b/drivers/mfd/rz-mtu3.c
+@@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_
+       [RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202),
+       [RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038),
+       [RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039),
+-      [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
++      [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
+       [RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838),
+       [RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839),
+       [RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403)
diff --git a/queue-6.17/mfd-vexpress-sysreg-check-the-return-value-of-devm_gpiochip_add_data.patch b/queue-6.17/mfd-vexpress-sysreg-check-the-return-value-of-devm_gpiochip_add_data.patch
new file mode 100644 (file)
index 0000000..462143d
--- /dev/null
@@ -0,0 +1,48 @@
+From 1efbee6852f1ff698a9981bd731308dd027189fb Mon Sep 17 00:00:00 2001
+From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Date: Mon, 11 Aug 2025 15:36:16 +0200
+Subject: mfd: vexpress-sysreg: Check the return value of devm_gpiochip_add_data()
+
+From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+commit 1efbee6852f1ff698a9981bd731308dd027189fb upstream.
+
+Commit 974cc7b93441 ("mfd: vexpress: Define the device as MFD cells")
+removed the return value check from the call to gpiochip_add_data() (or
+rather gpiochip_add() back then and later converted to devres) with no
+explanation. This function however can still fail, so check the return
+value and bail-out if it does.
+
+Cc: stable@vger.kernel.org
+Fixes: 974cc7b93441 ("mfd: vexpress: Define the device as MFD cells")
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20250811-gpio-mmio-mfd-conv-v1-1-68c5c958cf80@linaro.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/vexpress-sysreg.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/mfd/vexpress-sysreg.c
++++ b/drivers/mfd/vexpress-sysreg.c
+@@ -99,6 +99,7 @@ static int vexpress_sysreg_probe(struct
+       struct resource *mem;
+       void __iomem *base;
+       struct gpio_chip *mmc_gpio_chip;
++      int ret;
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem)
+@@ -119,7 +120,10 @@ static int vexpress_sysreg_probe(struct
+       bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
+                       NULL, NULL, NULL, NULL, 0);
+       mmc_gpio_chip->ngpio = 2;
+-      devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
++
++      ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
++      if (ret)
++              return ret;
+       return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+                       vexpress_sysreg_cells,
diff --git a/queue-6.17/net-nfc-nci-add-parameter-validation-for-packet-data.patch b/queue-6.17/net-nfc-nci-add-parameter-validation-for-packet-data.patch
new file mode 100644 (file)
index 0000000..9917ce1
--- /dev/null
@@ -0,0 +1,342 @@
+From 9c328f54741bd5465ca1dc717c84c04242fac2e1 Mon Sep 17 00:00:00 2001
+From: Deepak Sharma <deepak.sharma.472935@gmail.com>
+Date: Thu, 25 Sep 2025 18:58:46 +0530
+Subject: net: nfc: nci: Add parameter validation for packet data
+
+From: Deepak Sharma <deepak.sharma.472935@gmail.com>
+
+commit 9c328f54741bd5465ca1dc717c84c04242fac2e1 upstream.
+
+Syzbot reported an uninitialized value bug in nci_init_req, which was
+introduced by commit 5aca7966d2a7 ("Merge tag
+'perf-tools-fixes-for-v6.17-2025-09-16' of
+git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools").
+
+This bug arises due to very limited and poor input validation
+that was done at nic_valid_size(). This validation only
+validates the skb->len (directly reflects size provided at the
+userspace interface) with the length provided in the buffer
+itself (interpreted as NCI_HEADER). This leads to the processing
+of memory content at the address assuming the correct layout
+per what opcode requires there. This leads to the accesses to
+buffer of `skb_buff->data` which is not assigned anything yet.
+
+Following the same silent drop of packets of invalid sizes at
+`nic_valid_size()`, add validation of the data in the respective
+handlers and return error values in case of failure. Release
+the skb if error values are returned from handlers in
+`nci_nft_packet` and effectively do a silent drop
+
+Possible TODO: because we silently drop the packets, the
+call to `nci_request` will be waiting for completion of request
+and will face timeouts. These timeouts can get excessively logged
+in the dmesg. A proper handling of them may require to export
+`nci_request_cancel` (or propagate error handling from the
+nft packets handlers).
+
+Reported-by: syzbot+740e04c2a93467a0f8c8@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=740e04c2a93467a0f8c8
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Tested-by: syzbot+740e04c2a93467a0f8c8@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Deepak Sharma <deepak.sharma.472935@gmail.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250925132846.213425-1-deepak.sharma.472935@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/nci/ntf.c |  135 +++++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 99 insertions(+), 36 deletions(-)
+
+--- a/net/nfc/nci/ntf.c
++++ b/net/nfc/nci/ntf.c
+@@ -27,11 +27,16 @@
+ /* Handle NCI Notification packets */
+-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
+-                                    const struct sk_buff *skb)
++static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
++                                   const struct sk_buff *skb)
+ {
+       /* Handle NCI 2.x core reset notification */
+-      const struct nci_core_reset_ntf *ntf = (void *)skb->data;
++      const struct nci_core_reset_ntf *ntf;
++
++      if (skb->len < sizeof(struct nci_core_reset_ntf))
++              return -EINVAL;
++
++      ntf = (struct nci_core_reset_ntf *)skb->data;
+       ndev->nci_ver = ntf->nci_ver;
+       pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(st
+               __le32_to_cpu(ntf->manufact_specific_info);
+       nci_req_complete(ndev, NCI_STATUS_OK);
++
++      return 0;
+ }
+-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+-                                           struct sk_buff *skb)
++static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
++                                          struct sk_buff *skb)
+ {
+-      struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
++      struct nci_core_conn_credit_ntf *ntf;
+       struct nci_conn_info *conn_info;
+       int i;
++      if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
++              return -EINVAL;
++
++      ntf = (struct nci_core_conn_credit_ntf *)skb->data;
++
+       pr_debug("num_entries %d\n", ntf->num_entries);
+       if (ntf->num_entries > NCI_MAX_NUM_CONN)
+@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_pa
+               conn_info = nci_get_conn_info_by_conn_id(ndev,
+                                                        ntf->conn_entries[i].conn_id);
+               if (!conn_info)
+-                      return;
++                      return 0;
+               atomic_add(ntf->conn_entries[i].credits,
+                          &conn_info->credits_cnt);
+@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_pa
+       /* trigger the next tx */
+       if (!skb_queue_empty(&ndev->tx_q))
+               queue_work(ndev->tx_wq, &ndev->tx_work);
++
++      return 0;
+ }
+-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+-                                            const struct sk_buff *skb)
++static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
++                                           const struct sk_buff *skb)
+ {
+-      __u8 status = skb->data[0];
++      __u8 status;
++
++      if (skb->len < 1)
++              return -EINVAL;
++
++      status = skb->data[0];
+       pr_debug("status 0x%x\n", status);
+@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_p
+                  (the state remains the same) */
+               nci_req_complete(ndev, status);
+       }
++
++      return 0;
+ }
+-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+-                                              struct sk_buff *skb)
++static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
++                                             struct sk_buff *skb)
+ {
+-      struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
++      struct nci_core_intf_error_ntf *ntf;
++
++      if (skb->len < sizeof(struct nci_core_intf_error_ntf))
++              return -EINVAL;
++
++      ntf = (struct nci_core_intf_error_ntf *)skb->data;
+       ntf->conn_id = nci_conn_id(&ntf->conn_id);
+@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf
+       /* complete the data exchange transaction, if exists */
+       if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+               nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
++
++      return 0;
+ }
+ static const __u8 *
+@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_de
+       ndev->n_targets = 0;
+ }
+-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+-                                     const struct sk_buff *skb)
++static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
++                                    const struct sk_buff *skb)
+ {
+       struct nci_rf_discover_ntf ntf;
+-      const __u8 *data = skb->data;
++      const __u8 *data;
+       bool add_target = true;
++      if (skb->len < sizeof(struct nci_rf_discover_ntf))
++              return -EINVAL;
++
++      data = skb->data;
++
+       ntf.rf_discovery_id = *data++;
+       ntf.rf_protocol = *data++;
+       ntf.rf_tech_and_mode = *data++;
+@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(s
+               nfc_targets_found(ndev->nfc_dev, ndev->targets,
+                                 ndev->n_targets);
+       }
++
++      return 0;
+ }
+ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+@@ -553,14 +588,19 @@ static int nci_store_ats_nfc_iso_dep(str
+       return NCI_STATUS_OK;
+ }
+-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+-                                           const struct sk_buff *skb)
++static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
++                                          const struct sk_buff *skb)
+ {
+       struct nci_conn_info *conn_info;
+       struct nci_rf_intf_activated_ntf ntf;
+-      const __u8 *data = skb->data;
++      const __u8 *data;
+       int err = NCI_STATUS_OK;
++      if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
++              return -EINVAL;
++
++      data = skb->data;
++
+       ntf.rf_discovery_id = *data++;
+       ntf.rf_interface = *data++;
+       ntf.rf_protocol = *data++;
+@@ -667,7 +707,7 @@ exit:
+       if (err == NCI_STATUS_OK) {
+               conn_info = ndev->rf_conn_info;
+               if (!conn_info)
+-                      return;
++                      return 0;
+               conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
+               conn_info->initial_num_credits = ntf.initial_num_credits;
+@@ -721,19 +761,26 @@ listen:
+                               pr_err("error when signaling tm activation\n");
+               }
+       }
++
++      return 0;
+ }
+-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+-                                       const struct sk_buff *skb)
++static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
++                                      const struct sk_buff *skb)
+ {
+       const struct nci_conn_info *conn_info;
+-      const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
++      const struct nci_rf_deactivate_ntf *ntf;
++
++      if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
++              return -EINVAL;
++
++      ntf = (struct nci_rf_deactivate_ntf *)skb->data;
+       pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
+       conn_info = ndev->rf_conn_info;
+       if (!conn_info)
+-              return;
++              return 0;
+       /* drop tx data queue */
+       skb_queue_purge(&ndev->tx_q);
+@@ -765,14 +812,20 @@ static void nci_rf_deactivate_ntf_packet
+       }
+       nci_req_complete(ndev, NCI_STATUS_OK);
++
++      return 0;
+ }
+-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+-                                        const struct sk_buff *skb)
++static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
++                                       const struct sk_buff *skb)
+ {
+       u8 status = NCI_STATUS_OK;
+-      const struct nci_nfcee_discover_ntf *nfcee_ntf =
+-                              (struct nci_nfcee_discover_ntf *)skb->data;
++      const struct nci_nfcee_discover_ntf *nfcee_ntf;
++
++      if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
++              return -EINVAL;
++
++      nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
+       /* NFCForum NCI 9.2.1 HCI Network Specific Handling
+        * If the NFCC supports the HCI Network, it SHALL return one,
+@@ -783,6 +836,8 @@ static void nci_nfcee_discover_ntf_packe
+       ndev->cur_params.id = nfcee_ntf->nfcee_id;
+       nci_req_complete(ndev, status);
++
++      return 0;
+ }
+ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
+@@ -809,35 +864,43 @@ void nci_ntf_packet(struct nci_dev *ndev
+       switch (ntf_opcode) {
+       case NCI_OP_CORE_RESET_NTF:
+-              nci_core_reset_ntf_packet(ndev, skb);
++              if (nci_core_reset_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_CORE_CONN_CREDITS_NTF:
+-              nci_core_conn_credits_ntf_packet(ndev, skb);
++              if (nci_core_conn_credits_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_CORE_GENERIC_ERROR_NTF:
+-              nci_core_generic_error_ntf_packet(ndev, skb);
++              if (nci_core_generic_error_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_CORE_INTF_ERROR_NTF:
+-              nci_core_conn_intf_error_ntf_packet(ndev, skb);
++              if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_RF_DISCOVER_NTF:
+-              nci_rf_discover_ntf_packet(ndev, skb);
++              if (nci_rf_discover_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_RF_INTF_ACTIVATED_NTF:
+-              nci_rf_intf_activated_ntf_packet(ndev, skb);
++              if (nci_rf_intf_activated_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_RF_DEACTIVATE_NTF:
+-              nci_rf_deactivate_ntf_packet(ndev, skb);
++              if (nci_rf_deactivate_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_NFCEE_DISCOVER_NTF:
+-              nci_nfcee_discover_ntf_packet(ndev, skb);
++              if (nci_nfcee_discover_ntf_packet(ndev, skb))
++                      goto end;
+               break;
+       case NCI_OP_RF_NFCEE_ACTION_NTF:
diff --git a/queue-6.17/series b/queue-6.17/series
index 5c303b1a2b3de2384d0feded071feb76206260a9..30024c472df5fcec97b3c6674c9f5d7deb835c7e 100644 (file)
--- a/queue-6.17/series
+++ b/queue-6.17/series
@@ -499,3 +499,23 @@ tpm-disable-tpm2_tcg_hmac-by-default.patch
 alsa-hda-hdmi-add-pin-fix-for-hp-prodesk-model.patch
 alsa-hda-realtek-add-quirk-for-hp-spectre-14t-ea100.patch
 squashfs-fix-uninit-value-in-squashfs_get_parent.patch
+uio_hv_generic-let-userspace-take-care-of-interrupt-mask.patch
+hisi_acc_vfio_pci-fix-reference-leak-in-hisi_acc_vfio_debug_init.patch
+io_uring-waitid-always-prune-wait-queue-entry-in-io_waitid_wait.patch
+io_uring-zcrx-fix-overshooting-recv-limit.patch
+asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
+asoc-sof-ipc3-topology-fix-multi-core-and-static-pipelines-tear-down.patch
+asoc-codecs-wcd937x-set-the-comp-soundwire-port-correctly.patch
+asoc-codecs-wcd937x-make-stub-functions-inline.patch
+asoc-sof-ipc4-pcm-fix-delay-calculation-when-dsp-resamples.patch
+asoc-sof-ipc4-pcm-fix-start-offset-calculation-for-chain-dma.patch
+fs-udf-fix-oob-read-in-lengthallocdescs-handling.patch
+net-nfc-nci-add-parameter-validation-for-packet-data.patch
+mfd-rz-mtu3-fix-mtu5-nfcr-register-offset.patch
+mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
+mfd-vexpress-sysreg-check-the-return-value-of-devm_gpiochip_add_data.patch
+tracing-fix-lock-imbalance-in-s_start-memory-allocation-failure-path.patch
+tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
+tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch
+tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch
+tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch
diff --git a/queue-6.17/tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch b/queue-6.17/tracing-fix-irqoff-tracers-on-failure-of-acquiring-calltime.patch
new file mode 100644 (file)
index 0000000..b34234e
--- /dev/null
@@ -0,0 +1,85 @@
+From c834a97962c708ff5bb8582ca76b0e1225feb675 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Wed, 8 Oct 2025 11:49:43 -0400
+Subject: tracing: Fix irqoff tracers on failure of acquiring calltime
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit c834a97962c708ff5bb8582ca76b0e1225feb675 upstream.
+
+The functions irqsoff_graph_entry() and irqsoff_graph_return() both call
+func_prolog_dec() that will test if the data->disabled is already set and
+if not, increment it and return. If it was set, it returns false and the
+caller exits.
+
+The caller of this function must decrement the disable counter, but misses
+doing so if the calltime fails to be acquired.
+
+Instead of exiting out when calltime is NULL, change the logic to do the
+work if it is not NULL and still do the clean up at the end of the
+function if it is NULL.
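+
+The essential shape of the entry-side fix, condensed from the diff below
+(unrelated checks trimmed); the key point is that local_dec() now runs on
+every path:
+
+	int ret = 0;	/* stays 0 when calltime could not be reserved */
+
+	if (!func_prolog_dec(tr, &data, &flags))
+		return 0;
+
+	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+	if (calltime) {
+		*calltime = trace_clock_local();
+		trace_ctx = tracing_gen_ctx_flags(flags);
+		ret = __trace_graph_entry(tr, trace, trace_ctx);
+	}
+	local_dec(&data->disabled);	/* balances func_prolog_dec() */
+	return ret;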
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/20251008114943.6f60f30f@gandalf.local.home
+Fixes: a485ea9e3ef3 ("tracing: Fix irqsoff and wakeup latency tracers when using function graph")
+Reported-by: Sasha Levin <sashal@kernel.org>
+Closes: https://lore.kernel.org/linux-trace-kernel/20251006175848.1906912-2-sashal@kernel.org/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_irqsoff.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 5496758b6c76..4c45c49b06c8 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -184,7 +184,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
+       unsigned long flags;
+       unsigned int trace_ctx;
+       u64 *calltime;
+-      int ret;
++      int ret = 0;
+       if (ftrace_graph_ignore_func(gops, trace))
+               return 0;
+@@ -202,13 +202,11 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
+               return 0;
+       calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+-      if (!calltime)
+-              return 0;
+-
+-      *calltime = trace_clock_local();
+-
+-      trace_ctx = tracing_gen_ctx_flags(flags);
+-      ret = __trace_graph_entry(tr, trace, trace_ctx);
++      if (calltime) {
++              *calltime = trace_clock_local();
++              trace_ctx = tracing_gen_ctx_flags(flags);
++              ret = __trace_graph_entry(tr, trace, trace_ctx);
++      }
+       local_dec(&data->disabled);
+       return ret;
+@@ -233,11 +231,10 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
+       rettime = trace_clock_local();
+       calltime = fgraph_retrieve_data(gops->idx, &size);
+-      if (!calltime)
+-              return;
+-
+-      trace_ctx = tracing_gen_ctx_flags(flags);
+-      __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
++      if (calltime) {
++              trace_ctx = tracing_gen_ctx_flags(flags);
++              __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
++      }
+       local_dec(&data->disabled);
+ }
+-- 
+2.51.0
+
diff --git a/queue-6.17/tracing-fix-lock-imbalance-in-s_start-memory-allocation-failure-path.patch b/queue-6.17/tracing-fix-lock-imbalance-in-s_start-memory-allocation-failure-path.patch
new file mode 100644 (file)
index 0000000..916b3e4
--- /dev/null
@@ -0,0 +1,58 @@
+From 61e19cd2e5c5235326a13a68df1a2f8ec4eeed7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Sep 2025 07:32:38 -0400
+Subject: tracing: Fix lock imbalance in s_start() memory allocation failure path
+
+From: Sasha Levin <sashal@kernel.org>
+
+commit 61e19cd2e5c5235326a13a68df1a2f8ec4eeed7b upstream.
+
+When s_start() fails to allocate memory for set_event_iter, it returns NULL
+before acquiring event_mutex. However, the corresponding s_stop() function
+always tries to unlock the mutex, causing a lock imbalance warning:
+
+  WARNING: bad unlock balance detected!
+  6.17.0-rc7-00175-g2b2e0c04f78c #7 Not tainted
+  -------------------------------------
+  syz.0.85611/376514 is trying to release lock (event_mutex) at:
+  [<ffffffff8dafc7a4>] traverse.part.0.constprop.0+0x2c4/0x650 fs/seq_file.c:131
+  but there are no more locks to release!
+
+The issue was introduced by commit b355247df104 ("tracing: Cache ':mod:'
+events for modules not loaded yet") which added the kzalloc() allocation before
+the mutex lock, creating a path where s_start() could return without locking
+the mutex while s_stop() would still try to unlock it.
+
+Fix this by unconditionally acquiring the mutex immediately after allocation,
+regardless of whether the allocation succeeded.
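+
+A minimal sketch of the seq_file pairing this relies on (ex_start(),
+ex_stop(), ex_mutex and struct ex_iter are made-up names): ->stop() is
+always called after ->start(), even when ->start() returned NULL, so the
+lock and unlock must stay symmetric:
+
+	static void *ex_start(struct seq_file *m, loff_t *pos)
+	{
+		struct ex_iter *iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+
+		mutex_lock(&ex_mutex);	/* taken even if the allocation failed */
+		if (!iter)
+			return NULL;	/* ex_stop() will still run and unlock */
+		/* ... position the iterator ... */
+		return iter;
+	}
+
+	static void ex_stop(struct seq_file *m, void *v)
+	{
+		mutex_unlock(&ex_mutex);	/* unconditional, matches ex_start() */
+	}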
+
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/20250929113238.3722055-1-sashal@kernel.org
+Fixes: b355247df104 ("tracing: Cache ":mod:" events for modules not loaded yet")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_events.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9f3e9537417d..e00da4182deb 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1629,11 +1629,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+       loff_t l;
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
++      mutex_lock(&event_mutex);
+       if (!iter)
+               return NULL;
+-      mutex_lock(&event_mutex);
+-
+       iter->type = SET_EVENT_FILE;
+       iter->file = list_entry(&tr->events, struct trace_event_file, list);
+-- 
+2.51.0
+
diff --git a/queue-6.17/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch b/queue-6.17/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
new file mode 100644 (file)
index 0000000..01559f0
--- /dev/null
@@ -0,0 +1,272 @@
+From 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f Mon Sep 17 00:00:00 2001
+From: Yuan Chen <chenyuan@kylinos.cn>
+Date: Wed, 1 Oct 2025 03:20:25 +0100
+Subject: tracing: Fix race condition in kprobe initialization causing NULL pointer dereference
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yuan Chen <chenyuan@kylinos.cn>
+
+commit 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f upstream.
+
+There is a critical race condition in kprobe initialization that can lead to
+NULL pointer dereference and kernel crash.
+
+[1135630.084782] Unable to handle kernel paging request at virtual address 0000710a04630000
+...
+[1135630.260314] pstate: 404003c9 (nZcv DAIF +PAN -UAO)
+[1135630.269239] pc : kprobe_perf_func+0x30/0x260
+[1135630.277643] lr : kprobe_dispatcher+0x44/0x60
+[1135630.286041] sp : ffffaeff4977fa40
+[1135630.293441] x29: ffffaeff4977fa40 x28: ffffaf015340e400
+[1135630.302837] x27: 0000000000000000 x26: 0000000000000000
+[1135630.312257] x25: ffffaf029ed108a8 x24: ffffaf015340e528
+[1135630.321705] x23: ffffaeff4977fc50 x22: ffffaeff4977fc50
+[1135630.331154] x21: 0000000000000000 x20: ffffaeff4977fc50
+[1135630.340586] x19: ffffaf015340e400 x18: 0000000000000000
+[1135630.349985] x17: 0000000000000000 x16: 0000000000000000
+[1135630.359285] x15: 0000000000000000 x14: 0000000000000000
+[1135630.368445] x13: 0000000000000000 x12: 0000000000000000
+[1135630.377473] x11: 0000000000000000 x10: 0000000000000000
+[1135630.386411] x9 : 0000000000000000 x8 : 0000000000000000
+[1135630.395252] x7 : 0000000000000000 x6 : 0000000000000000
+[1135630.403963] x5 : 0000000000000000 x4 : 0000000000000000
+[1135630.412545] x3 : 0000710a04630000 x2 : 0000000000000006
+[1135630.421021] x1 : ffffaeff4977fc50 x0 : 0000710a04630000
+[1135630.429410] Call trace:
+[1135630.434828]  kprobe_perf_func+0x30/0x260
+[1135630.441661]  kprobe_dispatcher+0x44/0x60
+[1135630.448396]  aggr_pre_handler+0x70/0xc8
+[1135630.454959]  kprobe_breakpoint_handler+0x140/0x1e0
+[1135630.462435]  brk_handler+0xbc/0xd8
+[1135630.468437]  do_debug_exception+0x84/0x138
+[1135630.475074]  el1_dbg+0x18/0x8c
+[1135630.480582]  security_file_permission+0x0/0xd0
+[1135630.487426]  vfs_write+0x70/0x1c0
+[1135630.493059]  ksys_write+0x5c/0xc8
+[1135630.498638]  __arm64_sys_write+0x24/0x30
+[1135630.504821]  el0_svc_common+0x78/0x130
+[1135630.510838]  el0_svc_handler+0x38/0x78
+[1135630.516834]  el0_svc+0x8/0x1b0
+
+kernel/trace/trace_kprobe.c: 1308
+0xffff3df8995039ec <kprobe_perf_func+0x2c>:     ldr     x21, [x24,#120]
+include/linux/compiler.h: 294
+0xffff3df8995039f0 <kprobe_perf_func+0x30>:     ldr     x1, [x21,x0]
+
+kernel/trace/trace_kprobe.c
+1308: head = this_cpu_ptr(call->perf_events);
+1309: if (hlist_empty(head))
+1310:  return 0;
+
+crash> struct trace_event_call -o
+struct trace_event_call {
+  ...
+  [120] struct hlist_head *perf_events;  //(call->perf_event)
+  ...
+}
+
+crash> struct trace_event_call ffffaf015340e528
+struct trace_event_call {
+  ...
+  perf_events = 0xffff0ad5fa89f088, //this value is correct, but x21 = 0
+  ...
+}
+
+Race Condition Analysis:
+
+The race occurs between kprobe activation and perf_events initialization:
+
+  CPU0                                    CPU1
+  ====                                    ====
+  perf_kprobe_init
+    perf_trace_event_init
+      tp_event->perf_events = list;(1)
+      tp_event->class->reg (2)← KPROBE ACTIVE
+                                          Debug exception triggers
+                                          ...
+                                          kprobe_dispatcher
+                                            kprobe_perf_func (tk->tp.flags & TP_FLAG_PROFILE)
+                                              head = this_cpu_ptr(call->perf_events)(3)
+                                              (perf_events is still NULL)
+
+Problem:
+1. CPU0 executes (1) assigning tp_event->perf_events = list
+2. CPU0 executes (2) enabling kprobe functionality via class->reg()
+3. CPU1 triggers and reaches kprobe_dispatcher
+4. CPU1 checks TP_FLAG_PROFILE - condition passes (step 2 completed)
+5. CPU1 calls kprobe_perf_func() and crashes at (3) because
+   call->perf_events is still NULL
+
+CPU1 sees that kprobe functionality is enabled but does not see that
+perf_events has been assigned.
+
+Add pairing read and write memory barriers to guarantee that if CPU1
+sees that kprobe functionality is enabled, it must also see that
+perf_events has been assigned.
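+
+A condensed sketch of the ordering this establishes, written with the
+helpers added in the diff below:
+
+	/* CPU0 (enable path): set up the data, then publish the flag */
+	tp_event->perf_events = list;			/* (1) */
+	trace_probe_set_flag(&tk->tp, TP_FLAG_PROFILE);	/* (2) smp_store_release() */
+
+	/* CPU1 (probe handler): the acquire pairs with the release above */
+	flags = trace_probe_load_flag(&tk->tp);		/* smp_load_acquire() */
+	if (flags & TP_FLAG_PROFILE)
+		head = this_cpu_ptr(call->perf_events);	/* (3) assignment now visible */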
+
+Link: https://lore.kernel.org/all/20251001022025.44626-1-chenyuan_fl@163.com/
+
+Fixes: 50d780560785 ("tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_fprobe.c |   10 ++++++----
+ kernel/trace/trace_kprobe.c |   11 +++++++----
+ kernel/trace/trace_probe.h  |    9 +++++++--
+ kernel/trace/trace_uprobe.c |   12 ++++++++----
+ 4 files changed, 28 insertions(+), 14 deletions(-)
+
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -522,13 +522,14 @@ static int fentry_dispatcher(struct fpro
+                            void *entry_data)
+ {
+       struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++      unsigned int flags = trace_probe_load_flag(&tf->tp);
+       int ret = 0;
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               fentry_trace_func(tf, entry_ip, fregs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret = fentry_perf_func(tf, entry_ip, fregs);
+ #endif
+       return ret;
+@@ -540,11 +541,12 @@ static void fexit_dispatcher(struct fpro
+                            void *entry_data)
+ {
+       struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++      unsigned int flags = trace_probe_load_flag(&tf->tp);
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               fexit_trace_func(tf, entry_ip, ret_ip, fregs, entry_data);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               fexit_perf_func(tf, entry_ip, ret_ip, fregs, entry_data);
+ #endif
+ }
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1815,14 +1815,15 @@ static int kprobe_register(struct trace_
+ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
+ {
+       struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
++      unsigned int flags = trace_probe_load_flag(&tk->tp);
+       int ret = 0;
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               kprobe_trace_func(tk, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret = kprobe_perf_func(tk, regs);
+ #endif
+       return ret;
+@@ -1834,6 +1835,7 @@ kretprobe_dispatcher(struct kretprobe_in
+ {
+       struct kretprobe *rp = get_kretprobe(ri);
+       struct trace_kprobe *tk;
++      unsigned int flags;
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+@@ -1846,10 +1848,11 @@ kretprobe_dispatcher(struct kretprobe_in
+       tk = container_of(rp, struct trace_kprobe, rp);
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tk->tp);
++      if (flags & TP_FLAG_TRACE)
+               kretprobe_trace_func(tk, ri, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               kretprobe_perf_func(tk, ri, regs);
+ #endif
+       return 0;       /* We don't tweak kernel, so just return 0 */
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -271,16 +271,21 @@ struct event_file_link {
+       struct list_head                list;
+ };
++static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
++{
++      return smp_load_acquire(&tp->event->flags);
++}
++
+ static inline bool trace_probe_test_flag(struct trace_probe *tp,
+                                        unsigned int flag)
+ {
+-      return !!(tp->event->flags & flag);
++      return !!(trace_probe_load_flag(tp) & flag);
+ }
+ static inline void trace_probe_set_flag(struct trace_probe *tp,
+                                       unsigned int flag)
+ {
+-      tp->event->flags |= flag;
++      smp_store_release(&tp->event->flags, tp->event->flags | flag);
+ }
+ static inline void trace_probe_clear_flag(struct trace_probe *tp,
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1547,6 +1547,7 @@ static int uprobe_dispatcher(struct upro
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb = NULL;
++      unsigned int flags;
+       int ret = 0;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1561,11 +1562,12 @@ static int uprobe_dispatcher(struct upro
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               ret |= uprobe_trace_func(tu, regs, &ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret |= uprobe_perf_func(tu, regs, &ucb);
+ #endif
+       uprobe_buffer_put(ucb);
+@@ -1579,6 +1581,7 @@ static int uretprobe_dispatcher(struct u
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb = NULL;
++      unsigned int flags;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1590,11 +1593,12 @@ static int uretprobe_dispatcher(struct u
+       if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+               return 0;
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               uretprobe_trace_func(tu, func, regs, &ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               uretprobe_perf_func(tu, func, regs, &ucb);
+ #endif
+       uprobe_buffer_put(ucb);
diff --git a/queue-6.17/tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch b/queue-6.17/tracing-fix-wakeup-tracers-on-failure-of-acquiring-calltime.patch
new file mode 100644 (file)
index 0000000..5d56261
--- /dev/null
@@ -0,0 +1,68 @@
+From 4f7bf54b07e5acf79edd58dafede4096854776cd Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Wed, 8 Oct 2025 11:48:35 -0400
+Subject: tracing: Fix wakeup tracers on failure of acquiring calltime
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 4f7bf54b07e5acf79edd58dafede4096854776cd upstream.
+
+The functions wakeup_graph_entry() and wakeup_graph_return() both call
+func_prolog_preempt_disable() that will test if the data->disabled is
+already set and if not, increment it and disable preemption. If it was
+set, it returns false and the caller exits.
+
+The caller of this function must decrement the disable counter, but misses
+doing so if the calltime fails to be acquired.
+
+Instead of exiting out when calltime is NULL, change the logic to do the
+work if it is not NULL and still do the clean up at the end of the
+function if it is NULL.
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Link: https://lore.kernel.org/20251008114835.027b878a@gandalf.local.home
+Fixes: a485ea9e3ef3 ("tracing: Fix irqsoff and wakeup latency tracers when using function graph")
+Reported-by: Sasha Levin <sashal@kernel.org>
+Closes: https://lore.kernel.org/linux-trace-kernel/20251006175848.1906912-1-sashal@kernel.org/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_sched_wakeup.c |   16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -138,12 +138,10 @@ static int wakeup_graph_entry(struct ftr
+               return 0;
+       calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+-      if (!calltime)
+-              return 0;
+-
+-      *calltime = trace_clock_local();
+-
+-      ret = __trace_graph_entry(tr, trace, trace_ctx);
++      if (calltime) {
++              *calltime = trace_clock_local();
++              ret = __trace_graph_entry(tr, trace, trace_ctx);
++      }
+       local_dec(&data->disabled);
+       preempt_enable_notrace();
+@@ -169,12 +167,10 @@ static void wakeup_graph_return(struct f
+       rettime = trace_clock_local();
+       calltime = fgraph_retrieve_data(gops->idx, &size);
+-      if (!calltime)
+-              return;
++      if (calltime)
++              __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+-      __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+       local_dec(&data->disabled);
+-
+       preempt_enable_notrace();
+       return;
+ }
diff --git a/queue-6.17/tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch b/queue-6.17/tracing-have-trace_marker-use-per-cpu-data-to-read-user-space.patch
new file mode 100644 (file)
index 0000000..1bad723
--- /dev/null
@@ -0,0 +1,480 @@
+From 64cf7d058a005c5c31eb8a0b741f35dc12915d18 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Wed, 8 Oct 2025 12:45:10 -0400
+Subject: tracing: Have trace_marker use per-cpu data to read user space
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 64cf7d058a005c5c31eb8a0b741f35dc12915d18 upstream.
+
+It was reported that using __copy_from_user_inatomic() can actually
+schedule, which is bad when preemption is disabled. Although there is
+logic to check whether in_atomic() is set, that check is a nop when the
+kernel is configured with PREEMPT_NONE, so page faulting can still make
+the code schedule with preemption disabled.
+
+Link: https://lore.kernel.org/all/20250819105152.2766363-1-luogengkun@huaweicloud.com/
+
+The solution was to change the __copy_from_user_inatomic() to
+copy_from_user_nofault(). But then it was reported that this caused a
+regression in Android. There are several applications writing into
+trace_marker() in Android, but now instead of showing the expected data,
+it is showing:
+
+  tracing_mark_write: <faulted>
+
+After reverting the conversion to copy_from_user_nofault(), Android was
+able to get the data again.
+
+Writes to the trace_marker are a way to efficiently and quickly enter data
+into the Linux tracing buffer. It takes no locks and was designed to be as
+non-intrusive as possible. This means it cannot allocate memory, and must
+use pre-allocated data.
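+
+For reference, a minimal user-space sketch of such a write (assuming
+tracefs is mounted at /sys/kernel/tracing):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		const char msg[] = "hello from user space";
+		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+		/* each write() shows up as one tracing_mark_write: event */
+		if (write(fd, msg, strlen(msg)) < 0) {
+			close(fd);
+			return 1;
+		}
+		close(fd);
+		return 0;
+	}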
+
+A method that is actively being worked on to have faultable system call
+tracepoints read user space data is to allocate per CPU buffers, and use
+them in the callback. The method uses a technique similar to seqcount.
+That is something like this:
+
+       preempt_disable();
+       cpu = smp_processor_id();
+	buffer = per_cpu_ptr(&pre_allocated_cpu_buffers, cpu);
+       do {
+               cnt = nr_context_switches_cpu(cpu);
+               migrate_disable();
+               preempt_enable();
+               ret = copy_from_user(buffer, ptr, size);
+               preempt_disable();
+               migrate_enable();
+       } while (!ret && cnt != nr_context_switches_cpu(cpu));
+
+       if (!ret)
+               ring_buffer_write(buffer);
+       preempt_enable();
+
+It's a little more involved than that, but the above is the basic logic.
+The idea is to acquire the current CPU buffer, disable migration, and then
+enable preemption. At this moment, it can safely use copy_from_user().
+After reading the data from user space, it disables preemption again. It
+then checks to see if there was any new scheduling on this CPU. If there
+was, it must assume that the buffer was corrupted by another task. If
+there wasn't, then the buffer is still valid, as only tasks running in
+preemptible context on this CPU can write to this buffer.
+
+By using this method, where opening trace_marker allocates the per CPU
+buffers, trace_marker writes can access user space and even fault it in,
+without having to allocate memory or take any locks of their own.
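+
+The open/release handling added below follows a simple refcount pattern;
+a rough sketch with made-up names (bufs_get()/bufs_put()/alloc_bufs()/
+free_bufs() stand in for the trace_user_fault_buffer_* helpers):
+
+	static DEFINE_MUTEX(buf_mutex);
+	static struct bufs *bufs;	/* per-CPU buffers plus a refcount */
+
+	static int bufs_get(void)	/* called from open() */
+	{
+		guard(mutex)(&buf_mutex);
+		if (bufs) {
+			bufs->ref++;
+			return 0;
+		}
+		bufs = alloc_bufs();	/* one buffer per possible CPU, ref = 1 */
+		return bufs ? 0 : -ENOMEM;
+	}
+
+	static void bufs_put(void)	/* called from release() */
+	{
+		guard(mutex)(&buf_mutex);
+		if (--bufs->ref == 0) {
+			free_bufs(bufs);
+			bufs = NULL;
+		}
+	}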
+
+Cc: stable@vger.kernel.org
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Luo Gengkun <luogengkun@huaweicloud.com>
+Cc: Wattson CI <wattson-external@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lore.kernel.org/20251008124510.6dba541a@gandalf.local.home
+Fixes: 3d62ab32df065 ("tracing: Fix tracing_marker may trigger page fault during preempt_disable")
+Reported-by: Runping Lai <runpinglai@google.com>
+Tested-by: Runping Lai <runpinglai@google.com>
+Closes: https://lore.kernel.org/linux-trace-kernel/20251007003417.3470979-2-runpinglai@google.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.c |  268 +++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 220 insertions(+), 48 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4791,12 +4791,6 @@ int tracing_single_release_file_tr(struc
+       return single_release(inode, filp);
+ }
+-static int tracing_mark_open(struct inode *inode, struct file *filp)
+-{
+-      stream_open(inode, filp);
+-      return tracing_open_generic_tr(inode, filp);
+-}
+-
+ static int tracing_release(struct inode *inode, struct file *file)
+ {
+       struct trace_array *tr = inode->i_private;
+@@ -7163,7 +7157,7 @@ tracing_free_buffer_release(struct inode
+ #define TRACE_MARKER_MAX_SIZE         4096
+-static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user *ubuf,
++static ssize_t write_marker_to_buffer(struct trace_array *tr, const char *buf,
+                                     size_t cnt, unsigned long ip)
+ {
+       struct ring_buffer_event *event;
+@@ -7173,20 +7167,11 @@ static ssize_t write_marker_to_buffer(st
+       int meta_size;
+       ssize_t written;
+       size_t size;
+-      int len;
+-
+-/* Used in tracing_mark_raw_write() as well */
+-#define FAULTED_STR "<faulted>"
+-#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
+       meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
+  again:
+       size = cnt + meta_size;
+-      /* If less than "<faulted>", then make sure we can still add that */
+-      if (cnt < FAULTED_SIZE)
+-              size += FAULTED_SIZE - cnt;
+-
+       buffer = tr->array_buffer.buffer;
+       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+                                           tracing_gen_ctx());
+@@ -7196,9 +7181,6 @@ static ssize_t write_marker_to_buffer(st
+                * make it smaller and try again.
+                */
+               if (size > ring_buffer_max_event_size(buffer)) {
+-                      /* cnt < FAULTED size should never be bigger than max */
+-                      if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
+-                              return -EBADF;
+                       cnt = ring_buffer_max_event_size(buffer) - meta_size;
+                       /* The above should only happen once */
+                       if (WARN_ON_ONCE(cnt + meta_size == size))
+@@ -7212,14 +7194,8 @@ static ssize_t write_marker_to_buffer(st
+       entry = ring_buffer_event_data(event);
+       entry->ip = ip;
+-
+-      len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
+-      if (len) {
+-              memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+-              cnt = FAULTED_SIZE;
+-              written = -EFAULT;
+-      } else
+-              written = cnt;
++      memcpy(&entry->buf, buf, cnt);
++      written = cnt;
+       if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
+               /* do not add \n before testing triggers, but add \0 */
+@@ -7243,6 +7219,169 @@ static ssize_t write_marker_to_buffer(st
+       return written;
+ }
++struct trace_user_buf {
++      char            *buf;
++};
++
++struct trace_user_buf_info {
++      struct trace_user_buf __percpu  *tbuf;
++      int                             ref;
++};
++
++
++static DEFINE_MUTEX(trace_user_buffer_mutex);
++static struct trace_user_buf_info *trace_user_buffer;
++
++static void trace_user_fault_buffer_free(struct trace_user_buf_info *tinfo)
++{
++      char *buf;
++      int cpu;
++
++      for_each_possible_cpu(cpu) {
++              buf = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
++              kfree(buf);
++      }
++      free_percpu(tinfo->tbuf);
++      kfree(tinfo);
++}
++
++static int trace_user_fault_buffer_enable(void)
++{
++      struct trace_user_buf_info *tinfo;
++      char *buf;
++      int cpu;
++
++      guard(mutex)(&trace_user_buffer_mutex);
++
++      if (trace_user_buffer) {
++              trace_user_buffer->ref++;
++              return 0;
++      }
++
++      tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
++      if (!tinfo)
++              return -ENOMEM;
++
++      tinfo->tbuf = alloc_percpu(struct trace_user_buf);
++      if (!tinfo->tbuf) {
++              kfree(tinfo);
++              return -ENOMEM;
++      }
++
++      tinfo->ref = 1;
++
++      /* Clear each buffer in case of error */
++      for_each_possible_cpu(cpu) {
++              per_cpu_ptr(tinfo->tbuf, cpu)->buf = NULL;
++      }
++
++      for_each_possible_cpu(cpu) {
++              buf = kmalloc_node(TRACE_MARKER_MAX_SIZE, GFP_KERNEL,
++                                 cpu_to_node(cpu));
++              if (!buf) {
++                      trace_user_fault_buffer_free(tinfo);
++                      return -ENOMEM;
++              }
++              per_cpu_ptr(tinfo->tbuf, cpu)->buf = buf;
++      }
++
++      trace_user_buffer = tinfo;
++
++      return 0;
++}
++
++static void trace_user_fault_buffer_disable(void)
++{
++      struct trace_user_buf_info *tinfo;
++
++      guard(mutex)(&trace_user_buffer_mutex);
++
++      tinfo = trace_user_buffer;
++
++      if (WARN_ON_ONCE(!tinfo))
++              return;
++
++      if (--tinfo->ref)
++              return;
++
++      trace_user_fault_buffer_free(tinfo);
++      trace_user_buffer = NULL;
++}
++
++/* Must be called with preemption disabled */
++static char *trace_user_fault_read(struct trace_user_buf_info *tinfo,
++                                 const char __user *ptr, size_t size,
++                                 size_t *read_size)
++{
++      int cpu = smp_processor_id();
++      char *buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf;
++      unsigned int cnt;
++      int trys = 0;
++      int ret;
++
++      if (size > TRACE_MARKER_MAX_SIZE)
++              size = TRACE_MARKER_MAX_SIZE;
++      *read_size = 0;
++
++      /*
++       * This acts similar to a seqcount. The per CPU context switches are
++       * recorded, migration is disabled and preemption is enabled. The
++       * read of the user space memory is copied into the per CPU buffer.
++       * Preemption is disabled again, and if the per CPU context switches count
++       * is still the same, it means the buffer has not been corrupted.
++       * If the count is different, it is assumed the buffer is corrupted
++       * and reading must be tried again.
++       */
++
++      do {
++              /*
++               * If for some reason, copy_from_user() always causes a context
++               * switch, this would then cause an infinite loop.
++               * If this task is preempted by another user space task, it
++               * will cause this task to try again. But just in case something
++               * changes where the copying from user space causes another task
++               * to run, prevent this from going into an infinite loop.
++               * 100 tries should be plenty.
++               */
++              if (WARN_ONCE(trys++ > 100, "Error: Too many tries to read user space"))
++                      return NULL;
++
++              /* Read the current CPU context switch counter */
++              cnt = nr_context_switches_cpu(cpu);
++
++              /*
++               * Preemption is going to be enabled, but this task must
++               * remain on this CPU.
++               */
++              migrate_disable();
++
++              /*
++               * Now preemption is being enabled and another task can come in
++               * and use the same buffer and corrupt our data.
++               */
++              preempt_enable_notrace();
++
++              ret = __copy_from_user(buffer, ptr, size);
++
++              preempt_disable_notrace();
++              migrate_enable();
++
++              /* if it faulted, no need to test if the buffer was corrupted */
++              if (ret)
++                      return NULL;
++
++              /*
++               * Preemption is disabled again, now check the per CPU context
++               * switch counter. If it doesn't match, then another user space
++               * process may have schedule in and corrupted our buffer. In that
++               * process may have scheduled in and corrupted our buffer. In that
++               */
++      } while (nr_context_switches_cpu(cpu) != cnt);
++
++      *read_size = size;
++      return buffer;
++}
++
+ static ssize_t
+ tracing_mark_write(struct file *filp, const char __user *ubuf,
+                                       size_t cnt, loff_t *fpos)
+@@ -7250,6 +7389,8 @@ tracing_mark_write(struct file *filp, co
+       struct trace_array *tr = filp->private_data;
+       ssize_t written = -ENODEV;
+       unsigned long ip;
++      size_t size;
++      char *buf;
+       if (tracing_disabled)
+               return -EINVAL;
+@@ -7263,6 +7404,16 @@ tracing_mark_write(struct file *filp, co
+       if (cnt > TRACE_MARKER_MAX_SIZE)
+               cnt = TRACE_MARKER_MAX_SIZE;
++      /* Must have preemption disabled while having access to the buffer */
++      guard(preempt_notrace)();
++
++      buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, &size);
++      if (!buf)
++              return -EFAULT;
++
++      if (cnt > size)
++              cnt = size;
++
+       /* The selftests expect this function to be the IP address */
+       ip = _THIS_IP_;
+@@ -7270,32 +7421,27 @@ tracing_mark_write(struct file *filp, co
+       if (tr == &global_trace) {
+               guard(rcu)();
+               list_for_each_entry_rcu(tr, &marker_copies, marker_list) {
+-                      written = write_marker_to_buffer(tr, ubuf, cnt, ip);
++                      written = write_marker_to_buffer(tr, buf, cnt, ip);
+                       if (written < 0)
+                               break;
+               }
+       } else {
+-              written = write_marker_to_buffer(tr, ubuf, cnt, ip);
++              written = write_marker_to_buffer(tr, buf, cnt, ip);
+       }
+       return written;
+ }
+ static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
+-                                        const char __user *ubuf, size_t cnt)
++                                        const char *buf, size_t cnt)
+ {
+       struct ring_buffer_event *event;
+       struct trace_buffer *buffer;
+       struct raw_data_entry *entry;
+       ssize_t written;
+-      int size;
+-      int len;
+-
+-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
++      size_t size;
+       size = sizeof(*entry) + cnt;
+-      if (cnt < FAULT_SIZE_ID)
+-              size += FAULT_SIZE_ID - cnt;
+       buffer = tr->array_buffer.buffer;
+@@ -7309,14 +7455,8 @@ static ssize_t write_raw_marker_to_buffe
+               return -EBADF;
+       entry = ring_buffer_event_data(event);
+-
+-      len = copy_from_user_nofault(&entry->id, ubuf, cnt);
+-      if (len) {
+-              entry->id = -1;
+-              memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
+-              written = -EFAULT;
+-      } else
+-              written = cnt;
++      memcpy(&entry->id, buf, cnt);
++      written = cnt;
+       __buffer_unlock_commit(buffer, event);
+@@ -7329,8 +7469,8 @@ tracing_mark_raw_write(struct file *filp
+ {
+       struct trace_array *tr = filp->private_data;
+       ssize_t written = -ENODEV;
+-
+-#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
++      size_t size;
++      char *buf;
+       if (tracing_disabled)
+               return -EINVAL;
+@@ -7342,6 +7482,17 @@ tracing_mark_raw_write(struct file *filp
+       if (cnt < sizeof(unsigned int))
+               return -EINVAL;
++      /* Must have preemption disabled while having access to the buffer */
++      guard(preempt_notrace)();
++
++      buf = trace_user_fault_read(trace_user_buffer, ubuf, cnt, &size);
++      if (!buf)
++              return -EFAULT;
++
++      /* raw write is all or nothing */
++      if (cnt > size)
++              return -EINVAL;
++
+       /* The global trace_marker_raw can go to multiple instances */
+       if (tr == &global_trace) {
+               guard(rcu)();
+@@ -7357,6 +7508,27 @@ tracing_mark_raw_write(struct file *filp
+       return written;
+ }
++static int tracing_mark_open(struct inode *inode, struct file *filp)
++{
++      int ret;
++
++      ret = trace_user_fault_buffer_enable();
++      if (ret < 0)
++              return ret;
++
++      stream_open(inode, filp);
++      ret = tracing_open_generic_tr(inode, filp);
++      if (ret < 0)
++              trace_user_fault_buffer_disable();
++      return ret;
++}
++
++static int tracing_mark_release(struct inode *inode, struct file *file)
++{
++      trace_user_fault_buffer_disable();
++      return tracing_release_generic_tr(inode, file);
++}
++
+ static int tracing_clock_show(struct seq_file *m, void *v)
+ {
+       struct trace_array *tr = m->private;
+@@ -7764,13 +7936,13 @@ static const struct file_operations trac
+ static const struct file_operations tracing_mark_fops = {
+       .open           = tracing_mark_open,
+       .write          = tracing_mark_write,
+-      .release        = tracing_release_generic_tr,
++      .release        = tracing_mark_release,
+ };
+ static const struct file_operations tracing_mark_raw_fops = {
+       .open           = tracing_mark_open,
+       .write          = tracing_mark_raw_write,
+-      .release        = tracing_release_generic_tr,
++      .release        = tracing_mark_release,
+ };
+ static const struct file_operations trace_clock_fops = {
diff --git a/queue-6.17/uio_hv_generic-let-userspace-take-care-of-interrupt-mask.patch b/queue-6.17/uio_hv_generic-let-userspace-take-care-of-interrupt-mask.patch
new file mode 100644 (file)
index 0000000..ecd1e9a
--- /dev/null
@@ -0,0 +1,97 @@
+From b15b7d2a1b09ef5428a8db260251897405a19496 Mon Sep 17 00:00:00 2001
+From: Naman Jain <namjain@linux.microsoft.com>
+Date: Thu, 28 Aug 2025 10:12:00 +0530
+Subject: uio_hv_generic: Let userspace take care of interrupt mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Naman Jain <namjain@linux.microsoft.com>
+
+commit b15b7d2a1b09ef5428a8db260251897405a19496 upstream.
+
+Remove the logic that sets the interrupt mask by default in the
+uio_hv_generic driver, as the interrupt mask value is supposed to be
+controlled completely by user space. If the driver changes the mask bit
+concurrently with user mode operating on the ring, the mask bit may be
+set when it is supposed to be clear, and the user-mode driver will miss
+an interrupt, which will cause a hang.
+
+For example, when the driver sets the inbound ring buffer interrupt mask to 1,
+the host does not interrupt the guest on the UIO VMBus channel.
+However, setting the mask does not prevent the host from putting a
+message in the inbound ring buffer. So let’s assume that happens:
+the host puts a message into the ring buffer but does not interrupt.
+
+Subsequently, the user space code in the guest sets the inbound ring
+buffer interrupt mask to 0, saying “Hey, I’m ready for interrupts”.
+User space code then calls pread() to wait for an interrupt.
+Then one of two things happens:
+
+* The host never sends another message. So the pread() waits forever.
+* The host does send another message. But because there’s already a
+  message in the ring buffer, it doesn’t generate an interrupt.
+  This is the correct behavior, because the host should only send an
+  interrupt when the inbound ring buffer transitions from empty to
+  not-empty. Adding an additional message to a ring buffer that is not
+  empty is not supposed to generate an interrupt on the guest.
+  Since the guest is waiting in pread() and not removing messages from
+  the ring buffer, the pread() waits forever.
+
+This could be easily reproduced in hv_fcopy_uio_daemon if we delay
+setting interrupt mask to 0.
+
+Similarly, if hv_uio_channel_cb() sets the interrupt_mask to 1,
+there’s a race condition: after user space empties the inbound ring
+buffer but before it sets interrupt_mask to 0, the host could
+put another message in the ring buffer but it wouldn’t interrupt.
+Then the next pread() would hang.
+
+Fix these by removing all instances where interrupt_mask is changed,
+while keeping the one in set_event() unchanged, so that userspace
+controls the interrupt mask by writing 0/1 to /dev/uioX.
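+
+For context, a rough sketch of the user-space side this change assumes
+(standard UIO semantics: a 4-byte write controls the interrupt mask and
+read() blocks until the next interrupt; the device path and the ring
+draining are placeholders):
+
+	#include <fcntl.h>
+	#include <stdint.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		uint32_t unmask = 1, nirqs;
+		int fd = open("/dev/uio0", O_RDWR);
+
+		if (fd < 0)
+			return 1;
+		/* re-enable the channel interrupt (clears interrupt_mask) */
+		if (write(fd, &unmask, sizeof(unmask)) != sizeof(unmask))
+			return 1;
+		/* drain anything already queued: a message that arrived while
+		 * masked will not raise a new interrupt */
+		/* ... process the inbound ring buffer here ... */
+		/* block until the ring goes from empty to not-empty */
+		if (read(fd, &nirqs, sizeof(nirqs)) != sizeof(nirqs))
+			return 1;
+		/* ... process the new message(s) ... */
+		close(fd);
+		return 0;
+	}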
+
+Fixes: 95096f2fbd10 ("uio-hv-generic: new userspace i/o driver for VMBus")
+Suggested-by: John Starks <jostarks@microsoft.com>
+Signed-off-by: Naman Jain <namjain@linux.microsoft.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Michael Kelley <mhklinux@outlook.com>
+Reviewed-by: Long Li <longli@microsoft.com>
+Reviewed-by: Tianyu Lan <tiala@microsoft.com>
+Tested-by: Tianyu Lan <tiala@microsoft.com>
+Link: https://lore.kernel.org/r/20250828044200.492030-1-namjain@linux.microsoft.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/uio/uio_hv_generic.c |    7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -111,7 +111,6 @@ static void hv_uio_channel_cb(void *cont
+       struct hv_device *hv_dev;
+       struct hv_uio_private_data *pdata;
+-      chan->inbound.ring_buffer->interrupt_mask = 1;
+       virt_mb();
+       /*
+@@ -183,8 +182,6 @@ hv_uio_new_channel(struct vmbus_channel
+               return;
+       }
+-      /* Disable interrupts on sub channel */
+-      new_sc->inbound.ring_buffer->interrupt_mask = 1;
+       set_channel_read_mode(new_sc, HV_CALL_ISR);
+       ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
+       if (ret) {
+@@ -227,9 +224,7 @@ hv_uio_open(struct uio_info *info, struc
+       ret = vmbus_connect_ring(dev->channel,
+                                hv_uio_channel_cb, dev->channel);
+-      if (ret == 0)
+-              dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+-      else
++      if (ret)
+               atomic_dec(&pdata->refcnt);
+       return ret;