]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Oct 2025 07:51:06 +0000 (09:51 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Oct 2025 07:51:06 +0000 (09:51 +0200)
added patches:
acpi-property-add-code-comments-explaining-what-is-going-on.patch
acpi-property-disregard-references-in-data-only-subnode-lists.patch
acpi-property-do-not-pass-null-handles-to-acpi_attach_data.patch
asm-generic-io-add-_ret_ip_-to-mmio-trace-for-more-accurate-debug-info.patch
asm-generic-io.h-skip-trace-helpers-if-rwmmio-events-are-disabled.patch
asm-generic-io.h-suppress-endianness-warnings-for-relaxed-accessors.patch
asoc-codecs-wcd934x-simplify-with-dev_err_probe.patch
asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
ipmi-fix-handling-of-messages-with-provided-receive-message-pointer.patch
ipmi-rework-user-message-limit-handling.patch
ksmbd-add-max-ip-connections-parameter.patch
kvm-x86-don-t-re-check-l1-intercepts-when-completing-userspace-i-o.patch
media-mc-clear-minor-number-before-put-device.patch
mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
rseq-protect-event-mask-against-membarrier-ipi.patch
selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
squashfs-add-additional-inode-sanity-checking.patch
squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch

25 files changed:
queue-6.1/acpi-property-add-code-comments-explaining-what-is-going-on.patch [new file with mode: 0644]
queue-6.1/acpi-property-disregard-references-in-data-only-subnode-lists.patch [new file with mode: 0644]
queue-6.1/acpi-property-do-not-pass-null-handles-to-acpi_attach_data.patch [new file with mode: 0644]
queue-6.1/asm-generic-io-add-_ret_ip_-to-mmio-trace-for-more-accurate-debug-info.patch [new file with mode: 0644]
queue-6.1/asm-generic-io.h-skip-trace-helpers-if-rwmmio-events-are-disabled.patch [new file with mode: 0644]
queue-6.1/asm-generic-io.h-suppress-endianness-warnings-for-relaxed-accessors.patch [new file with mode: 0644]
queue-6.1/asoc-codecs-wcd934x-simplify-with-dev_err_probe.patch [new file with mode: 0644]
queue-6.1/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch [new file with mode: 0644]
queue-6.1/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch [new file with mode: 0644]
queue-6.1/ipmi-fix-handling-of-messages-with-provided-receive-message-pointer.patch [new file with mode: 0644]
queue-6.1/ipmi-rework-user-message-limit-handling.patch [new file with mode: 0644]
queue-6.1/ksmbd-add-max-ip-connections-parameter.patch [new file with mode: 0644]
queue-6.1/kvm-x86-don-t-re-check-l1-intercepts-when-completing-userspace-i-o.patch [new file with mode: 0644]
queue-6.1/media-mc-clear-minor-number-before-put-device.patch [new file with mode: 0644]
queue-6.1/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch [new file with mode: 0644]
queue-6.1/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch [new file with mode: 0644]
queue-6.1/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch [new file with mode: 0644]
queue-6.1/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch [new file with mode: 0644]
queue-6.1/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch [new file with mode: 0644]
queue-6.1/rseq-protect-event-mask-against-membarrier-ipi.patch [new file with mode: 0644]
queue-6.1/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/squashfs-add-additional-inode-sanity-checking.patch [new file with mode: 0644]
queue-6.1/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch [new file with mode: 0644]
queue-6.1/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch [new file with mode: 0644]

diff --git a/queue-6.1/acpi-property-add-code-comments-explaining-what-is-going-on.patch b/queue-6.1/acpi-property-add-code-comments-explaining-what-is-going-on.patch
new file mode 100644 (file)
index 0000000..4cf0e6d
--- /dev/null
@@ -0,0 +1,115 @@
+From stable+bounces-186215-greg=kroah.com@vger.kernel.org Fri Oct 17 01:00:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:00:06 -0400
+Subject: ACPI: property: Add code comments explaining what is going on
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Sakari Ailus <sakari.ailus@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016230007.3453571-3-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 737c3a09dcf69ba2814f3674947ccaec1861c985 ]
+
+In some places in the ACPI device properties handling code, it is
+unclear why the code is what it is.  Some assumptions are not documented
+and some pieces of code are based on knowledge that is not mentioned
+anywhere.
+
+Add code comments explaining these things.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Tested-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Stable-dep-of: baf60d5cb8bc ("ACPI: property: Do not pass NULL handles to acpi_attach_data()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/property.c |   46 ++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 44 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -96,7 +96,18 @@ static bool acpi_nondev_subnode_extract(
+       if (handle)
+               acpi_get_parent(handle, &scope);
++      /*
++       * Extract properties from the _DSD-equivalent package pointed to by
++       * desc and use scope (if not NULL) for the completion of relative
++       * pathname segments.
++       *
++       * The extracted properties will be held in the new data node dn.
++       */
+       result = acpi_extract_properties(scope, desc, &dn->data);
++      /*
++       * Look for subnodes in the _DSD-equivalent package pointed to by desc
++       * and create child nodes of dn if there are any.
++       */
+       if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
+               result = true;
+@@ -121,6 +132,12 @@ static bool acpi_nondev_subnode_ok(acpi_
+       acpi_handle handle;
+       acpi_status status;
++      /*
++       * If the scope is unknown, the _DSD-equivalent package being parsed
++       * was embedded in an outer _DSD-equivalent package as a result of
++       * direct evaluation of an object pointed to by a reference.  In that
++       * case, using a pathname as the target object pointer is invalid.
++       */
+       if (!scope)
+               return false;
+@@ -150,6 +167,10 @@ static bool acpi_add_nondev_subnodes(acp
+       bool ret = false;
+       int i;
++      /*
++       * Every element in the links package is expected to represent a link
++       * to a non-device node in a tree containing device-specific data.
++       */
+       for (i = 0; i < links->package.count; i++) {
+               union acpi_object *link, *desc;
+               bool result;
+@@ -159,17 +180,38 @@ static bool acpi_add_nondev_subnodes(acp
+               if (link->package.count != 2)
+                       continue;
+-              /* The first one must be a string. */
++              /* The first one (the key) must be a string. */
+               if (link->package.elements[0].type != ACPI_TYPE_STRING)
+                       continue;
+-              /* The second one may be a string or a package. */
++              /* The second one (the target) may be a string or a package. */
+               switch (link->package.elements[1].type) {
+               case ACPI_TYPE_STRING:
++                      /*
++                       * The string is expected to be a full pathname or a
++                       * pathname segment relative to the given scope.  That
++                       * pathname is expected to point to an object returning
++                       * a package that contains _DSD-equivalent information.
++                       */
+                       result = acpi_nondev_subnode_ok(scope, link, list,
+                                                        parent);
+                       break;
+               case ACPI_TYPE_PACKAGE:
++                      /*
++                       * This happens when a reference is used in AML to
++                       * point to the target.  Since the target is expected
++                       * to be a named object, a reference to it will cause it
++                       * to be evaluated in place and its return package will
++                       * be embedded in the links package at the location of
++                       * the reference.
++                       *
++                       * The target package is expected to contain _DSD-
++                       * equivalent information, but the scope in which it
++                       * is located in the original AML is unknown.  Thus
++                       * it cannot contain pathname segments represented as
++                       * strings because there is no way to build full
++                       * pathnames out of them.
++                       */
+                       desc = &link->package.elements[1];
+                       result = acpi_nondev_subnode_extract(desc, NULL, link,
+                                                            list, parent);
diff --git a/queue-6.1/acpi-property-disregard-references-in-data-only-subnode-lists.patch b/queue-6.1/acpi-property-disregard-references-in-data-only-subnode-lists.patch
new file mode 100644 (file)
index 0000000..6ddfa7e
--- /dev/null
@@ -0,0 +1,138 @@
+From stable+bounces-186214-greg=kroah.com@vger.kernel.org Fri Oct 17 01:00:13 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:00:05 -0400
+Subject: ACPI: property: Disregard references in data-only subnode lists
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Sakari Ailus <sakari.ailus@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016230007.3453571-2-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit d06118fe9b03426484980ed4c189a8c7b99fa631 ]
+
+Data-only subnode links following the ACPI data subnode GUID in a _DSD
+package are expected to point to named objects returning _DSD-equivalent
+packages.  If a reference to such an object is used in the target field
+of any of those links, that object will be evaluated in place (as a
+named object) and its return data will be embedded in the outer _DSD
+package.
+
+For this reason, it is not expected to see a subnode link with the
+target field containing a local reference (that would mean pointing
+to a device or another object that cannot be evaluated in place and
+therefore cannot return a _DSD-equivalent package).
+
+Accordingly, simplify the code parsing data-only subnode links to
+simply print a message when it encounters a local reference in the
+target field of one of those links.
+
+Moreover, since acpi_nondev_subnode_data_ok() would only have one
+caller after the change above, fold it into that caller.
+
+Link: https://lore.kernel.org/linux-acpi/CAJZ5v0jVeSrDO6hrZhKgRZrH=FpGD4vNUjFD8hV9WwN9TLHjzQ@mail.gmail.com/
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Tested-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Stable-dep-of: baf60d5cb8bc ("ACPI: property: Do not pass NULL handles to acpi_attach_data()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/property.c |   51 ++++++++++++++++++++----------------------------
+ 1 file changed, 22 insertions(+), 29 deletions(-)
+
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -112,32 +112,12 @@ static bool acpi_nondev_subnode_extract(
+       return false;
+ }
+-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+-                                      const union acpi_object *link,
+-                                      struct list_head *list,
+-                                      struct fwnode_handle *parent)
+-{
+-      struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+-      acpi_status status;
+-
+-      status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+-                                          ACPI_TYPE_PACKAGE);
+-      if (ACPI_FAILURE(status))
+-              return false;
+-
+-      if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+-                                      parent))
+-              return true;
+-
+-      ACPI_FREE(buf.pointer);
+-      return false;
+-}
+-
+ static bool acpi_nondev_subnode_ok(acpi_handle scope,
+                                  const union acpi_object *link,
+                                  struct list_head *list,
+                                  struct fwnode_handle *parent)
+ {
++      struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+       acpi_handle handle;
+       acpi_status status;
+@@ -149,7 +129,17 @@ static bool acpi_nondev_subnode_ok(acpi_
+       if (ACPI_FAILURE(status))
+               return false;
+-      return acpi_nondev_subnode_data_ok(handle, link, list, parent);
++      status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
++                                          ACPI_TYPE_PACKAGE);
++      if (ACPI_FAILURE(status))
++              return false;
++
++      if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
++                                      parent))
++              return true;
++
++      ACPI_FREE(buf.pointer);
++      return false;
+ }
+ static bool acpi_add_nondev_subnodes(acpi_handle scope,
+@@ -162,7 +152,6 @@ static bool acpi_add_nondev_subnodes(acp
+       for (i = 0; i < links->package.count; i++) {
+               union acpi_object *link, *desc;
+-              acpi_handle handle;
+               bool result;
+               link = &links->package.elements[i];
+@@ -174,22 +163,26 @@ static bool acpi_add_nondev_subnodes(acp
+               if (link->package.elements[0].type != ACPI_TYPE_STRING)
+                       continue;
+-              /* The second one may be a string, a reference or a package. */
++              /* The second one may be a string or a package. */
+               switch (link->package.elements[1].type) {
+               case ACPI_TYPE_STRING:
+                       result = acpi_nondev_subnode_ok(scope, link, list,
+                                                        parent);
+                       break;
+-              case ACPI_TYPE_LOCAL_REFERENCE:
+-                      handle = link->package.elements[1].reference.handle;
+-                      result = acpi_nondev_subnode_data_ok(handle, link, list,
+-                                                           parent);
+-                      break;
+               case ACPI_TYPE_PACKAGE:
+                       desc = &link->package.elements[1];
+                       result = acpi_nondev_subnode_extract(desc, NULL, link,
+                                                            list, parent);
+                       break;
++              case ACPI_TYPE_LOCAL_REFERENCE:
++                      /*
++                       * It is not expected to see any local references in
++                       * the links package because referencing a named object
++                       * should cause it to be evaluated in place.
++                       */
++                      acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
++                                       link->package.elements[0].string.pointer);
++                      fallthrough;
+               default:
+                       result = false;
+                       break;
diff --git a/queue-6.1/acpi-property-do-not-pass-null-handles-to-acpi_attach_data.patch b/queue-6.1/acpi-property-do-not-pass-null-handles-to-acpi_attach_data.patch
new file mode 100644 (file)
index 0000000..b8d3e70
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-186216-greg=kroah.com@vger.kernel.org Fri Oct 17 01:00:16 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 19:00:07 -0400
+Subject: ACPI: property: Do not pass NULL handles to acpi_attach_data()
+To: stable@vger.kernel.org
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>, Sakari Ailus <sakari.ailus@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016230007.3453571-4-sashal@kernel.org>
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+[ Upstream commit baf60d5cb8bc6b85511c5df5f0ad7620bb66d23c ]
+
+In certain circumstances, the ACPI handle of a data-only node may be
+NULL, in which case it does not make sense to attempt to attach that
+node to an ACPI namespace object, so update the code to avoid attempts
+to do so.
+
+This prevents confusing and unuseful error messages from being printed.
+
+Also document the fact that the ACPI handle of a data-only node may be
+NULL and when that happens in a code comment.  In addition, make
+acpi_add_nondev_subnodes() print a diagnostic message for each data-only
+node with an unknown ACPI namespace scope.
+
+Fixes: 1d52f10917a7 ("ACPI: property: Tie data nodes to acpi handles")
+Cc: 6.0+ <stable@vger.kernel.org> # 6.0+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Tested-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/property.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -112,6 +112,10 @@ static bool acpi_nondev_subnode_extract(
+               result = true;
+       if (result) {
++              /*
++               * This will be NULL if the desc package is embedded in an outer
++               * _DSD-equivalent package and its scope cannot be determined.
++               */
+               dn->handle = handle;
+               dn->data.pointer = desc;
+               list_add_tail(&dn->sibling, list);
+@@ -212,6 +216,8 @@ static bool acpi_add_nondev_subnodes(acp
+                        * strings because there is no way to build full
+                        * pathnames out of them.
+                        */
++                      acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
++                                        link->package.elements[0].string.pointer);
+                       desc = &link->package.elements[1];
+                       result = acpi_nondev_subnode_extract(desc, NULL, link,
+                                                            list, parent);
+@@ -384,6 +390,9 @@ static void acpi_untie_nondev_subnodes(s
+       struct acpi_data_node *dn;
+       list_for_each_entry(dn, &data->subnodes, sibling) {
++              if (!dn->handle)
++                      continue;
++
+               acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
+               acpi_untie_nondev_subnodes(&dn->data);
+@@ -398,6 +407,9 @@ static bool acpi_tie_nondev_subnodes(str
+               acpi_status status;
+               bool ret;
++              if (!dn->handle)
++                      continue;
++
+               status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
+               if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+                       acpi_handle_err(dn->handle, "Can't tag data node\n");
diff --git a/queue-6.1/asm-generic-io-add-_ret_ip_-to-mmio-trace-for-more-accurate-debug-info.patch b/queue-6.1/asm-generic-io-add-_ret_ip_-to-mmio-trace-for-more-accurate-debug-info.patch
new file mode 100644 (file)
index 0000000..a25a319
--- /dev/null
@@ -0,0 +1,460 @@
+From stable+bounces-186005-greg=kroah.com@vger.kernel.org Thu Oct 16 13:57:17 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 07:57:07 -0400
+Subject: asm-generic/io: Add _RET_IP_ to MMIO trace for more accurate debug info
+To: stable@vger.kernel.org
+Cc: Sai Prakash Ranjan <quic_saipraka@quicinc.com>, Arnd Bergmann <arnd@arndb.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016115709.3259702-1-sashal@kernel.org>
+
+From: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
+
+[ Upstream commit 5e5ff73c2e5863f93fc5fd78d178cd8f2af12464 ]
+
+Due to compiler optimizations like inlining, there are cases where
+MMIO traces using _THIS_IP_ for caller information might not be
+sufficient to provide accurate debug traces.
+
+1) With optimizations (Seen with GCC):
+
+In this case, _THIS_IP_ works fine and prints the caller information
+since it will be inlined into the caller and we get the debug traces
+on who made the MMIO access, for ex:
+
+rwmmio_read: qcom_smmu_tlb_sync+0xe0/0x1b0 width=32 addr=0xffff8000087447f4
+rwmmio_post_read: qcom_smmu_tlb_sync+0xe0/0x1b0 width=32 val=0x0 addr=0xffff8000087447f4
+
+2) Without optimizations (Seen with Clang):
+
+_THIS_IP_ will not be sufficient in this case as it will print only
+the MMIO accessors itself which is of not much use since it is not
+inlined as below for example:
+
+rwmmio_read: readl+0x4/0x80 width=32 addr=0xffff8000087447f4
+rwmmio_post_read: readl+0x48/0x80 width=32 val=0x4 addr=0xffff8000087447f4
+
+So in order to handle this second case as well irrespective of the compiler
+optimizations, add _RET_IP_ to MMIO trace to make it provide more accurate
+debug information in all these scenarios.
+
+Before:
+
+rwmmio_read: readl+0x4/0x80 width=32 addr=0xffff8000087447f4
+rwmmio_post_read: readl+0x48/0x80 width=32 val=0x4 addr=0xffff8000087447f4
+
+After:
+
+rwmmio_read: qcom_smmu_tlb_sync+0xe0/0x1b0 -> readl+0x4/0x80 width=32 addr=0xffff8000087447f4
+rwmmio_post_read: qcom_smmu_tlb_sync+0xe0/0x1b0 -> readl+0x4/0x80 width=32 val=0x0 addr=0xffff8000087447f4
+
+Fixes: 210031971cdd ("asm-generic/io: Add logging support for MMIO accessors")
+Signed-off-by: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Stable-dep-of: 8327bd4fcb6c ("asm-generic/io.h: Skip trace helpers if rwmmio events are disabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/io.h      |   80 +++++++++++++++++++++---------------------
+ include/trace/events/rwmmio.h |   43 ++++++++++++++--------
+ lib/trace_readwrite.c         |   16 ++++----
+ 3 files changed, 75 insertions(+), 64 deletions(-)
+
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -80,24 +80,24 @@ DECLARE_TRACEPOINT(rwmmio_read);
+ DECLARE_TRACEPOINT(rwmmio_post_read);
+ void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                  unsigned long caller_addr);
++                  unsigned long caller_addr, unsigned long caller_addr0);
+ void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                       unsigned long caller_addr);
++                       unsigned long caller_addr, unsigned long caller_addr0);
+ void log_read_mmio(u8 width, const volatile void __iomem *addr,
+-                 unsigned long caller_addr);
++                 unsigned long caller_addr, unsigned long caller_addr0);
+ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+-                      unsigned long caller_addr);
++                      unsigned long caller_addr, unsigned long caller_addr0);
+ #else
+ static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                                unsigned long caller_addr) {}
++                                unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                                     unsigned long caller_addr) {}
++                                     unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+-                               unsigned long caller_addr) {}
++                               unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+-                                    unsigned long caller_addr) {}
++                                    unsigned long caller_addr, unsigned long caller_addr0) {}
+ #endif /* CONFIG_TRACE_MMIO_ACCESS */
+@@ -188,11 +188,11 @@ static inline u8 readb(const volatile vo
+ {
+       u8 val;
+-      log_read_mmio(8, addr, _THIS_IP_);
++      log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __raw_readb(addr);
+       __io_ar(val);
+-      log_post_read_mmio(val, 8, addr, _THIS_IP_);
++      log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -203,11 +203,11 @@ static inline u16 readw(const volatile v
+ {
+       u16 val;
+-      log_read_mmio(16, addr, _THIS_IP_);
++      log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 16, addr, _THIS_IP_);
++      log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -218,11 +218,11 @@ static inline u32 readl(const volatile v
+ {
+       u32 val;
+-      log_read_mmio(32, addr, _THIS_IP_);
++      log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 32, addr, _THIS_IP_);
++      log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -234,11 +234,11 @@ static inline u64 readq(const volatile v
+ {
+       u64 val;
+-      log_read_mmio(64, addr, _THIS_IP_);
++      log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 64, addr, _THIS_IP_);
++      log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -248,11 +248,11 @@ static inline u64 readq(const volatile v
+ #define writeb writeb
+ static inline void writeb(u8 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 8, addr, _THIS_IP_);
++      log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writeb(value, addr);
+       __io_aw();
+-      log_post_write_mmio(value, 8, addr, _THIS_IP_);
++      log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -260,11 +260,11 @@ static inline void writeb(u8 value, vola
+ #define writew writew
+ static inline void writew(u16 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 16, addr, _THIS_IP_);
++      log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writew((u16 __force)cpu_to_le16(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 16, addr, _THIS_IP_);
++      log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -272,11 +272,11 @@ static inline void writew(u16 value, vol
+ #define writel writel
+ static inline void writel(u32 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 32, addr, _THIS_IP_);
++      log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 32, addr, _THIS_IP_);
++      log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -285,11 +285,11 @@ static inline void writel(u32 value, vol
+ #define writeq writeq
+ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 64, addr, _THIS_IP_);
++      log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 64, addr, _THIS_IP_);
++      log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ #endif /* CONFIG_64BIT */
+@@ -305,9 +305,9 @@ static inline u8 readb_relaxed(const vol
+ {
+       u8 val;
+-      log_read_mmio(8, addr, _THIS_IP_);
++      log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+       val = __raw_readb(addr);
+-      log_post_read_mmio(val, 8, addr, _THIS_IP_);
++      log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -318,9 +318,9 @@ static inline u16 readw_relaxed(const vo
+ {
+       u16 val;
+-      log_read_mmio(16, addr, _THIS_IP_);
++      log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+       val = __le16_to_cpu(__raw_readw(addr));
+-      log_post_read_mmio(val, 16, addr, _THIS_IP_);
++      log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -331,9 +331,9 @@ static inline u32 readl_relaxed(const vo
+ {
+       u32 val;
+-      log_read_mmio(32, addr, _THIS_IP_);
++      log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+       val = __le32_to_cpu(__raw_readl(addr));
+-      log_post_read_mmio(val, 32, addr, _THIS_IP_);
++      log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -344,9 +344,9 @@ static inline u64 readq_relaxed(const vo
+ {
+       u64 val;
+-      log_read_mmio(64, addr, _THIS_IP_);
++      log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+       val = __le64_to_cpu(__raw_readq(addr));
+-      log_post_read_mmio(val, 64, addr, _THIS_IP_);
++      log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -355,9 +355,9 @@ static inline u64 readq_relaxed(const vo
+ #define writeb_relaxed writeb_relaxed
+ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 8, addr, _THIS_IP_);
++      log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+       __raw_writeb(value, addr);
+-      log_post_write_mmio(value, 8, addr, _THIS_IP_);
++      log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -365,9 +365,9 @@ static inline void writeb_relaxed(u8 val
+ #define writew_relaxed writew_relaxed
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 16, addr, _THIS_IP_);
++      log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+       __raw_writew(cpu_to_le16(value), addr);
+-      log_post_write_mmio(value, 16, addr, _THIS_IP_);
++      log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -375,9 +375,9 @@ static inline void writew_relaxed(u16 va
+ #define writel_relaxed writel_relaxed
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 32, addr, _THIS_IP_);
++      log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+       __raw_writel(__cpu_to_le32(value), addr);
+-      log_post_write_mmio(value, 32, addr, _THIS_IP_);
++      log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -385,9 +385,9 @@ static inline void writel_relaxed(u32 va
+ #define writeq_relaxed writeq_relaxed
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 64, addr, _THIS_IP_);
++      log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+       __raw_writeq(__cpu_to_le64(value), addr);
+-      log_post_write_mmio(value, 64, addr, _THIS_IP_);
++      log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+--- a/include/trace/events/rwmmio.h
++++ b/include/trace/events/rwmmio.h
+@@ -12,12 +12,14 @@
+ DECLARE_EVENT_CLASS(rwmmio_rw_template,
+-      TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
++      TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
++               volatile void __iomem *addr),
+-      TP_ARGS(caller, val, width, addr),
++      TP_ARGS(caller, caller0, val, width, addr),
+       TP_STRUCT__entry(
+               __field(unsigned long, caller)
++              __field(unsigned long, caller0)
+               __field(unsigned long, addr)
+               __field(u64, val)
+               __field(u8, width)
+@@ -25,56 +27,64 @@ DECLARE_EVENT_CLASS(rwmmio_rw_template,
+       TP_fast_assign(
+               __entry->caller = caller;
++              __entry->caller0 = caller0;
+               __entry->val = val;
+               __entry->addr = (unsigned long)addr;
+               __entry->width = width;
+       ),
+-      TP_printk("%pS width=%d val=%#llx addr=%#lx",
+-              (void *)__entry->caller, __entry->width,
++      TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx",
++              (void *)__entry->caller0, (void *)__entry->caller, __entry->width,
+               __entry->val, __entry->addr)
+ );
+ DEFINE_EVENT(rwmmio_rw_template, rwmmio_write,
+-      TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+-      TP_ARGS(caller, val, width, addr)
++      TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
++               volatile void __iomem *addr),
++      TP_ARGS(caller, caller0, val, width, addr)
+ );
+ DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write,
+-      TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr),
+-      TP_ARGS(caller, val, width, addr)
++      TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
++               volatile void __iomem *addr),
++      TP_ARGS(caller, caller0, val, width, addr)
+ );
+ TRACE_EVENT(rwmmio_read,
+-      TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr),
++      TP_PROTO(unsigned long caller, unsigned long caller0, u8 width,
++               const volatile void __iomem *addr),
+-      TP_ARGS(caller, width, addr),
++      TP_ARGS(caller, caller0, width, addr),
+       TP_STRUCT__entry(
+               __field(unsigned long, caller)
++              __field(unsigned long, caller0)
+               __field(unsigned long, addr)
+               __field(u8, width)
+       ),
+       TP_fast_assign(
+               __entry->caller = caller;
++              __entry->caller0 = caller0;
+               __entry->addr = (unsigned long)addr;
+               __entry->width = width;
+       ),
+-      TP_printk("%pS width=%d addr=%#lx",
+-               (void *)__entry->caller, __entry->width, __entry->addr)
++      TP_printk("%pS -> %pS width=%d addr=%#lx",
++               (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->addr)
+ );
+ TRACE_EVENT(rwmmio_post_read,
+-      TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr),
++      TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width,
++               const volatile void __iomem *addr),
+-      TP_ARGS(caller, val, width, addr),
++      TP_ARGS(caller, caller0, val, width, addr),
+       TP_STRUCT__entry(
+               __field(unsigned long, caller)
++              __field(unsigned long, caller0)
+               __field(unsigned long, addr)
+               __field(u64, val)
+               __field(u8, width)
+@@ -82,13 +92,14 @@ TRACE_EVENT(rwmmio_post_read,
+       TP_fast_assign(
+               __entry->caller = caller;
++              __entry->caller0 = caller0;
+               __entry->val = val;
+               __entry->addr = (unsigned long)addr;
+               __entry->width = width;
+       ),
+-      TP_printk("%pS width=%d val=%#llx addr=%#lx",
+-               (void *)__entry->caller, __entry->width,
++      TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx",
++               (void *)__entry->caller0, (void *)__entry->caller, __entry->width,
+                __entry->val, __entry->addr)
+ );
+--- a/lib/trace_readwrite.c
++++ b/lib/trace_readwrite.c
+@@ -14,33 +14,33 @@
+ #ifdef CONFIG_TRACE_MMIO_ACCESS
+ void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                  unsigned long caller_addr)
++                  unsigned long caller_addr, unsigned long caller_addr0)
+ {
+-      trace_rwmmio_write(caller_addr, val, width, addr);
++      trace_rwmmio_write(caller_addr, caller_addr0, val, width, addr);
+ }
+ EXPORT_SYMBOL_GPL(log_write_mmio);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
+ void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+-                       unsigned long caller_addr)
++                       unsigned long caller_addr, unsigned long caller_addr0)
+ {
+-      trace_rwmmio_post_write(caller_addr, val, width, addr);
++      trace_rwmmio_post_write(caller_addr, caller_addr0, val, width, addr);
+ }
+ EXPORT_SYMBOL_GPL(log_post_write_mmio);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
+ void log_read_mmio(u8 width, const volatile void __iomem *addr,
+-                 unsigned long caller_addr)
++                 unsigned long caller_addr, unsigned long caller_addr0)
+ {
+-      trace_rwmmio_read(caller_addr, width, addr);
++      trace_rwmmio_read(caller_addr, caller_addr0, width, addr);
+ }
+ EXPORT_SYMBOL_GPL(log_read_mmio);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
+ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+-                      unsigned long caller_addr)
++                      unsigned long caller_addr, unsigned long caller_addr0)
+ {
+-      trace_rwmmio_post_read(caller_addr, val, width, addr);
++      trace_rwmmio_post_read(caller_addr, caller_addr0, val, width, addr);
+ }
+ EXPORT_SYMBOL_GPL(log_post_read_mmio);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read);
diff --git a/queue-6.1/asm-generic-io.h-skip-trace-helpers-if-rwmmio-events-are-disabled.patch b/queue-6.1/asm-generic-io.h-skip-trace-helpers-if-rwmmio-events-are-disabled.patch
new file mode 100644 (file)
index 0000000..5370ee4
--- /dev/null
@@ -0,0 +1,290 @@
+From stable+bounces-186007-greg=kroah.com@vger.kernel.org Thu Oct 16 13:57:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 07:57:09 -0400
+Subject: asm-generic/io.h: Skip trace helpers if rwmmio events are disabled
+To: stable@vger.kernel.org
+Cc: Varad Gautam <varadgautam@google.com>, Arnd Bergmann <arnd@arndb.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016115709.3259702-3-sashal@kernel.org>
+
+From: Varad Gautam <varadgautam@google.com>
+
+[ Upstream commit 8327bd4fcb6c1dab01ce5c6ff00b42496836dcd2 ]
+
+With `CONFIG_TRACE_MMIO_ACCESS=y`, the `{read,write}{b,w,l,q}{_relaxed}()`
+mmio accessors unconditionally call `log_{post_}{read,write}_mmio()`
+helpers, which in turn call the ftrace ops for `rwmmio` trace events
+
+This adds a performance penalty per mmio accessor call, even when
+`rwmmio` events are disabled at runtime (~80% overhead on local
+measurement).
+
+Guard these with `tracepoint_enabled()`.
+
+Signed-off-by: Varad Gautam <varadgautam@google.com>
+Fixes: 210031971cdd ("asm-generic/io: Add logging support for MMIO accessors")
+Cc: stable@vger.kernel.org
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/io.h |   98 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 66 insertions(+), 32 deletions(-)
+
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -74,6 +74,7 @@
+ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+ #include <linux/tracepoint-defs.h>
++#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
+ DECLARE_TRACEPOINT(rwmmio_write);
+ DECLARE_TRACEPOINT(rwmmio_post_write);
+ DECLARE_TRACEPOINT(rwmmio_read);
+@@ -90,6 +91,7 @@ void log_post_read_mmio(u64 val, u8 widt
+ #else
++#define rwmmio_tracepoint_enabled(tracepoint) false
+ static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+                                 unsigned long caller_addr, unsigned long caller_addr0) {}
+ static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+@@ -188,11 +190,13 @@ static inline u8 readb(const volatile vo
+ {
+       u8 val;
+-      log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __raw_readb(addr);
+       __io_ar(val);
+-      log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -203,11 +207,13 @@ static inline u16 readw(const volatile v
+ {
+       u16 val;
+-      log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -218,11 +224,13 @@ static inline u32 readl(const volatile v
+ {
+       u32 val;
+-      log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -234,11 +242,13 @@ static inline u64 readq(const volatile v
+ {
+       u64 val;
+-      log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+       __io_br();
+       val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+       __io_ar(val);
+-      log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -248,11 +258,13 @@ static inline u64 readq(const volatile v
+ #define writeb writeb
+ static inline void writeb(u8 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writeb(value, addr);
+       __io_aw();
+-      log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -260,11 +272,13 @@ static inline void writeb(u8 value, vola
+ #define writew writew
+ static inline void writew(u16 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writew((u16 __force)cpu_to_le16(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -272,11 +286,13 @@ static inline void writew(u16 value, vol
+ #define writel writel
+ static inline void writel(u32 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -285,11 +301,13 @@ static inline void writel(u32 value, vol
+ #define writeq writeq
+ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+       __io_bw();
+       __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+       __io_aw();
+-      log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+ #endif /* CONFIG_64BIT */
+@@ -305,9 +323,11 @@ static inline u8 readb_relaxed(const vol
+ {
+       u8 val;
+-      log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+       val = __raw_readb(addr);
+-      log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -318,9 +338,11 @@ static inline u16 readw_relaxed(const vo
+ {
+       u16 val;
+-      log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+       val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+-      log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -331,9 +353,11 @@ static inline u32 readl_relaxed(const vo
+ {
+       u32 val;
+-      log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+       val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+-      log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -344,9 +368,11 @@ static inline u64 readq_relaxed(const vo
+ {
+       u64 val;
+-      log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_read))
++              log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+       val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+-      log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_read))
++              log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+ #endif
+@@ -355,9 +381,11 @@ static inline u64 readq_relaxed(const vo
+ #define writeb_relaxed writeb_relaxed
+ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+       __raw_writeb(value, addr);
+-      log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -365,9 +393,11 @@ static inline void writeb_relaxed(u8 val
+ #define writew_relaxed writew_relaxed
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+       __raw_writew((u16 __force)cpu_to_le16(value), addr);
+-      log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -375,9 +405,11 @@ static inline void writew_relaxed(u16 va
+ #define writel_relaxed writel_relaxed
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+       __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+-      log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -385,9 +417,11 @@ static inline void writel_relaxed(u32 va
+ #define writeq_relaxed writeq_relaxed
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+-      log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_write))
++              log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+       __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+-      log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
++      if (rwmmio_tracepoint_enabled(rwmmio_post_write))
++              log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
diff --git a/queue-6.1/asm-generic-io.h-suppress-endianness-warnings-for-relaxed-accessors.patch b/queue-6.1/asm-generic-io.h-suppress-endianness-warnings-for-relaxed-accessors.patch
new file mode 100644 (file)
index 0000000..a19d556
--- /dev/null
@@ -0,0 +1,81 @@
+From stable+bounces-186006-greg=kroah.com@vger.kernel.org Thu Oct 16 13:57:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 07:57:08 -0400
+Subject: asm-generic/io.h: suppress endianness warnings for relaxed accessors
+To: stable@vger.kernel.org
+Cc: Vladimir Oltean <vladimir.oltean@nxp.com>, Arnd Bergmann <arnd@arndb.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016115709.3259702-2-sashal@kernel.org>
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 05d3855b4d21ef3c2df26be1cbba9d2c68915fcb ]
+
+Copy the forced type casts from the normal MMIO accessors to suppress
+the sparse warnings that point out __raw_readl() returns a native endian
+word (just like readl()).
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Stable-dep-of: 8327bd4fcb6c ("asm-generic/io.h: Skip trace helpers if rwmmio events are disabled")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/io.h |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const vo
+       u16 val;
+       log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+-      val = __le16_to_cpu(__raw_readw(addr));
++      val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+       log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const vo
+       u32 val;
+       log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+-      val = __le32_to_cpu(__raw_readl(addr));
++      val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+       log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const vo
+       u64 val;
+       log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+-      val = __le64_to_cpu(__raw_readq(addr));
++      val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+       log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+       return val;
+ }
+@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 val
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+       log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+-      __raw_writew(cpu_to_le16(value), addr);
++      __raw_writew((u16 __force)cpu_to_le16(value), addr);
+       log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 va
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+       log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+-      __raw_writel(__cpu_to_le32(value), addr);
++      __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+       log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 va
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+       log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+-      __raw_writeq(__cpu_to_le64(value), addr);
++      __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+       log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
diff --git a/queue-6.1/asoc-codecs-wcd934x-simplify-with-dev_err_probe.patch b/queue-6.1/asoc-codecs-wcd934x-simplify-with-dev_err_probe.patch
new file mode 100644 (file)
index 0000000..0435ae2
--- /dev/null
@@ -0,0 +1,66 @@
+From stable+bounces-185494-greg=kroah.com@vger.kernel.org Mon Oct 13 20:35:15 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 14:34:33 -0400
+Subject: ASoC: codecs: wcd934x: Simplify with dev_err_probe
+To: stable@vger.kernel.org
+Cc: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013183434.3507752-1-sashal@kernel.org>
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+[ Upstream commit fa92f4294283cc7d1f29151420be9e9336182518 ]
+
+Replace dev_err() in probe() path with dev_err_probe() to:
+1. Make code a bit simpler and easier to read,
+2. Do not print messages on deferred probe.
+
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Link: https://lore.kernel.org/r/20230418074630.8681-2-krzysztof.kozlowski@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Stable-dep-of: 4e65bda8273c ("ASoC: wcd934x: fix error handling in wcd934x_codec_parse_data()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wcd934x.c |   19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -5884,10 +5884,9 @@ static int wcd934x_codec_parse_data(stru
+       slim_get_logical_addr(wcd->sidev);
+       wcd->if_regmap = regmap_init_slimbus(wcd->sidev,
+                                 &wcd934x_ifc_regmap_config);
+-      if (IS_ERR(wcd->if_regmap)) {
+-              dev_err(dev, "Failed to allocate ifc register map\n");
+-              return PTR_ERR(wcd->if_regmap);
+-      }
++      if (IS_ERR(wcd->if_regmap))
++              return dev_err_probe(dev, PTR_ERR(wcd->if_regmap),
++                                   "Failed to allocate ifc register map\n");
+       of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
+                            &wcd->dmic_sample_rate);
+@@ -5939,19 +5938,15 @@ static int wcd934x_codec_probe(struct pl
+       memcpy(wcd->tx_chs, wcd934x_tx_chs, sizeof(wcd934x_tx_chs));
+       irq = regmap_irq_get_virq(data->irq_data, WCD934X_IRQ_SLIMBUS);
+-      if (irq < 0) {
+-              dev_err(wcd->dev, "Failed to get SLIM IRQ\n");
+-              return irq;
+-      }
++      if (irq < 0)
++              return dev_err_probe(wcd->dev, irq, "Failed to get SLIM IRQ\n");
+       ret = devm_request_threaded_irq(dev, irq, NULL,
+                                       wcd934x_slim_irq_handler,
+                                       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                       "slim", wcd);
+-      if (ret) {
+-              dev_err(dev, "Failed to request slimbus irq\n");
+-              return ret;
+-      }
++      if (ret)
++              return dev_err_probe(dev, ret, "Failed to request slimbus irq\n");
+       wcd934x_register_mclk_output(wcd);
+       platform_set_drvdata(pdev, wcd);
diff --git a/queue-6.1/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch b/queue-6.1/asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
new file mode 100644 (file)
index 0000000..eb213cc
--- /dev/null
@@ -0,0 +1,85 @@
+From stable+bounces-185495-greg=kroah.com@vger.kernel.org Mon Oct 13 20:34:57 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 14:34:34 -0400
+Subject: ASoC: wcd934x: fix error handling in wcd934x_codec_parse_data()
+To: stable@vger.kernel.org
+Cc: Ma Ke <make24@iscas.ac.cn>, Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>, Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013183434.3507752-2-sashal@kernel.org>
+
+From: Ma Ke <make24@iscas.ac.cn>
+
+[ Upstream commit 4e65bda8273c938039403144730923e77916a3d7 ]
+
+wcd934x_codec_parse_data() contains a device reference count leak in
+of_slim_get_device() where device_find_child() increases the reference
+count of the device but this reference is not properly decreased in
+the success path. Add put_device() in wcd934x_codec_parse_data() and
+add devm_add_action_or_reset() in the probe function, which ensures
+that the reference count of the device is correctly managed.
+
+Memory leak in regmap_init_slimbus() as the allocated regmap is not
+released when the device is removed. Using devm_regmap_init_slimbus()
+instead of regmap_init_slimbus() to ensure automatic regmap cleanup on
+device removal.
+
+Calling path: of_slim_get_device() -> of_find_slim_device() ->
+device_find_child(). As comment of device_find_child() says, 'NOTE:
+you will need to drop the reference with put_device() after use.'.
+
+Found by code review.
+
+Cc: stable@vger.kernel.org
+Fixes: a61f3b4f476e ("ASoC: wcd934x: add support to wcd9340/wcd9341 codec")
+Signed-off-by: Ma Ke <make24@iscas.ac.cn>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250923065212.26660-1-make24@iscas.ac.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/wcd934x.c |   17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -5862,6 +5862,13 @@ static const struct snd_soc_component_dr
+       .endianness = 1,
+ };
++static void wcd934x_put_device_action(void *data)
++{
++      struct device *dev = data;
++
++      put_device(dev);
++}
++
+ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd)
+ {
+       struct device *dev = &wcd->sdev->dev;
+@@ -5882,11 +5889,13 @@ static int wcd934x_codec_parse_data(stru
+       }
+       slim_get_logical_addr(wcd->sidev);
+-      wcd->if_regmap = regmap_init_slimbus(wcd->sidev,
++      wcd->if_regmap = devm_regmap_init_slimbus(wcd->sidev,
+                                 &wcd934x_ifc_regmap_config);
+-      if (IS_ERR(wcd->if_regmap))
++      if (IS_ERR(wcd->if_regmap)) {
++              put_device(&wcd->sidev->dev);
+               return dev_err_probe(dev, PTR_ERR(wcd->if_regmap),
+                                    "Failed to allocate ifc register map\n");
++      }
+       of_property_read_u32(dev->parent->of_node, "qcom,dmic-sample-rate",
+                            &wcd->dmic_sample_rate);
+@@ -5930,6 +5939,10 @@ static int wcd934x_codec_probe(struct pl
+               return ret;
+       }
++      ret = devm_add_action_or_reset(dev, wcd934x_put_device_action, &wcd->sidev->dev);
++      if (ret)
++              return ret;
++
+       /* set default rate 9P6MHz */
+       regmap_update_bits(wcd->regmap, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+                          WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK,
diff --git a/queue-6.1/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch b/queue-6.1/btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
new file mode 100644 (file)
index 0000000..b19f809
--- /dev/null
@@ -0,0 +1,147 @@
+From stable+bounces-185833-greg=kroah.com@vger.kernel.org Wed Oct 15 17:24:07 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Oct 2025 11:23:57 -0400
+Subject: btrfs: fix the incorrect max_bytes value for find_lock_delalloc_range()
+To: stable@vger.kernel.org
+Cc: Qu Wenruo <wqu@suse.com>, David Sterba <dsterba@suse.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251015152357.1457706-1-sashal@kernel.org>
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit 7b26da407420e5054e3f06c5d13271697add9423 ]
+
+[BUG]
+With my local branch to enable bs > ps support for btrfs, sometimes I
+hit the following ASSERT() inside submit_one_sector():
+
+       ASSERT(block_start != EXTENT_MAP_HOLE);
+
+Please note that it's not yet possible to hit this ASSERT() in the wild
+yet, as it requires btrfs bs > ps support, which is not even in the
+development branch.
+
+But on the other hand, there is also a very low chance to hit above
+ASSERT() with bs < ps cases, so this is an existing bug affect not only
+the incoming bs > ps support but also the existing bs < ps support.
+
+[CAUSE]
+Firstly that ASSERT() means we're trying to submit a dirty block but
+without a real extent map nor ordered extent map backing it.
+
+Furthermore with extra debugging, the folio triggering such ASSERT() is
+always larger than the fs block size in my bs > ps case.
+(8K block size, 4K page size)
+
+After some more debugging, the ASSERT() is trigger by the following
+sequence:
+
+ extent_writepage()
+ |  We got a 32K folio (4 fs blocks) at file offset 0, and the fs block
+ |  size is 8K, page size is 4K.
+ |  And there is another 8K folio at file offset 32K, which is also
+ |  dirty.
+ |  So the filemap layout looks like the following:
+ |
+ |  "||" is the filio boundary in the filemap.
+ |  "//| is the dirty range.
+ |
+ |  0        8K       16K        24K         32K       40K
+ |  |////////|        |//////////////////////||////////|
+ |
+ |- writepage_delalloc()
+ |  |- find_lock_delalloc_range() for [0, 8K)
+ |  |  Now range [0, 8K) is properly locked.
+ |  |
+ |  |- find_lock_delalloc_range() for [16K, 40K)
+ |  |  |- btrfs_find_delalloc_range() returned range [16K, 40K)
+ |  |  |- lock_delalloc_folios() locked folio 0 successfully
+ |  |  |
+ |  |  |  The filemap range [32K, 40K) got dropped from filemap.
+ |  |  |
+ |  |  |- lock_delalloc_folios() failed with -EAGAIN on folio 32K
+ |  |  |  As the folio at 32K is dropped.
+ |  |  |
+ |  |  |- loops = 1;
+ |  |  |- max_bytes = PAGE_SIZE;
+ |  |  |- goto again;
+ |  |  |  This will re-do the lookup for dirty delalloc ranges.
+ |  |  |
+ |  |  |- btrfs_find_delalloc_range() called with @max_bytes == 4K
+ |  |  |  This is smaller than block size, so
+ |  |  |  btrfs_find_delalloc_range() is unable to return any range.
+ |  |  \- return false;
+ |  |
+ |  \- Now only range [0, 8K) has an OE for it, but for dirty range
+ |     [16K, 32K) it's dirty without an OE.
+ |     This breaks the assumption that writepage_delalloc() will find
+ |     and lock all dirty ranges inside the folio.
+ |
+ |- extent_writepage_io()
+    |- submit_one_sector() for [0, 8K)
+    |  Succeeded
+    |
+    |- submit_one_sector() for [16K, 24K)
+       Triggering the ASSERT(), as there is no OE, and the original
+       extent map is a hole.
+
+Please note that, this also exposed the same problem for bs < ps
+support. E.g. with 64K page size and 4K block size.
+
+If we failed to lock a folio, and falls back into the "loops = 1;"
+branch, we will re-do the search using 64K as max_bytes.
+Which may fail again to lock the next folio, and exit early without
+handling all dirty blocks inside the folio.
+
+[FIX]
+Instead of using the fixed size PAGE_SIZE as @max_bytes, use
+@sectorsize, so that we are ensured to find and lock any remaining
+blocks inside the folio.
+
+And since we're here, add an extra ASSERT() to
+before calling btrfs_find_delalloc_range() to make sure the @max_bytes is
+at least no smaller than a block to avoid false negative.
+
+Cc: stable@vger.kernel.org # 5.15+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+[ adapted folio terminology and API calls to page-based equivalents ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -415,6 +415,13 @@ again:
+       /* step one, find a bunch of delalloc bytes starting at start */
+       delalloc_start = *start;
+       delalloc_end = 0;
++
++      /*
++       * If @max_bytes is smaller than a block, btrfs_find_delalloc_range() can
++       * return early without handling any dirty ranges.
++       */
++      ASSERT(max_bytes >= fs_info->sectorsize);
++
+       found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+                                         max_bytes, &cached_state);
+       if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
+@@ -445,13 +452,14 @@ again:
+                                 delalloc_start, delalloc_end);
+       ASSERT(!ret || ret == -EAGAIN);
+       if (ret == -EAGAIN) {
+-              /* some of the pages are gone, lets avoid looping by
+-               * shortening the size of the delalloc range we're searching
++              /*
++               * Some of the pages are gone, lets avoid looping by
++               * shortening the size of the delalloc range we're searching.
+                */
+               free_extent_state(cached_state);
+               cached_state = NULL;
+               if (!loops) {
+-                      max_bytes = PAGE_SIZE;
++                      max_bytes = fs_info->sectorsize;
+                       loops = 1;
+                       goto again;
+               } else {
diff --git a/queue-6.1/ipmi-fix-handling-of-messages-with-provided-receive-message-pointer.patch b/queue-6.1/ipmi-fix-handling-of-messages-with-provided-receive-message-pointer.patch
new file mode 100644 (file)
index 0000000..479710a
--- /dev/null
@@ -0,0 +1,50 @@
+From stable+bounces-186189-greg=kroah.com@vger.kernel.org Thu Oct 16 20:51:30 2025
+From: Corey Minyard <corey@minyard.net>
+Date: Thu, 16 Oct 2025 13:50:58 -0500
+Subject: ipmi: Fix handling of messages with provided receive message pointer
+To: stable@vger.kernel.org
+Cc: Guenter Roeck <linux@roeck-us.net>, Eric Dumazet <edumazet@google.com>, Greg Thelen <gthelen@google.com>, Corey Minyard <corey@minyard.net>
+Message-ID: <20251016185058.1876213-2-corey@minyard.net>
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+commit e2c69490dda5d4c9f1bfbb2898989c8f3530e354 upstream
+
+Prior to commit b52da4054ee0 ("ipmi: Rework user message limit handling"),
+i_ipmi_request() used to increase the user reference counter if the receive
+message is provided by the caller of IPMI API functions. This is no longer
+the case. However, ipmi_free_recv_msg() is still called and decreases the
+reference counter. This results in the reference counter reaching zero,
+the user data pointer is released, and all kinds of interesting crashes are
+seen.
+
+Fix the problem by increasing user reference counter if the receive message
+has been provided by the caller.
+
+Fixes: b52da4054ee0 ("ipmi: Rework user message limit handling")
+Reported-by: Eric Dumazet <edumazet@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Greg Thelen <gthelen@google.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Message-ID: <20251006201857.3433837-1-linux@roeck-us.net>
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -2311,8 +2311,11 @@ static int i_ipmi_request(struct ipmi_us
+       if (supplied_recv) {
+               recv_msg = supplied_recv;
+               recv_msg->user = user;
+-              if (user)
++              if (user) {
+                       atomic_inc(&user->nr_msgs);
++                      /* The put happens when the message is freed. */
++                      kref_get(&user->refcount);
++              }
+       } else {
+               recv_msg = ipmi_alloc_recv_msg(user);
+               if (IS_ERR(recv_msg))
diff --git a/queue-6.1/ipmi-rework-user-message-limit-handling.patch b/queue-6.1/ipmi-rework-user-message-limit-handling.patch
new file mode 100644 (file)
index 0000000..c91381e
--- /dev/null
@@ -0,0 +1,646 @@
+From stable+bounces-186188-greg=kroah.com@vger.kernel.org Thu Oct 16 20:51:14 2025
+From: Corey Minyard <corey@minyard.net>
+Date: Thu, 16 Oct 2025 13:50:57 -0500
+Subject: ipmi: Rework user message limit handling
+To: stable@vger.kernel.org
+Cc: Corey Minyard <corey@minyard.net>, Gilles BULOZ <gilles.buloz@kontron.com>
+Message-ID: <20251016185058.1876213-1-corey@minyard.net>
+
+From: Corey Minyard <corey@minyard.net>
+
+commit b52da4054ee0bf9ecb44996f2c83236ff50b3812 upstream
+
+This patch required quite a bit of work to backport due to a number
+of unrelated changes that do not make sense to backport.  This has
+been run against my test suite and passes all tests.
+
+The limit on the number of user messages had a number of issues,
+improper counting in some cases and a use after free.
+
+Restructure how this is all done to handle more in the receive message
+allocation routine, so all refcouting and user message limit counts
+are done in that routine.  It's a lot cleaner and safer.
+
+Reported-by: Gilles BULOZ <gilles.buloz@kontron.com>
+Closes: https://lore.kernel.org/lkml/aLsw6G0GyqfpKs2S@mail.minyard.net/
+Fixes: 8e76741c3d8b ("ipmi: Add a limit on the number of users that may use IPMI")
+Cc: <stable@vger.kernel.org> # 4.19
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Tested-by: Gilles BULOZ <gilles.buloz@kontron.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_msghandler.c |  415 +++++++++++++++++-------------------
+ 1 file changed, 198 insertions(+), 217 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -39,7 +39,9 @@
+ #define IPMI_DRIVER_VERSION "39.2"
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++                                 struct ipmi_user *user);
+ static int ipmi_init_msghandler(void);
+ static void smi_recv_tasklet(struct tasklet_struct *t);
+ static void handle_new_recv_msgs(struct ipmi_smi *intf);
+@@ -939,13 +941,11 @@ static int deliver_response(struct ipmi_
+                * risk.  At this moment, simply skip it in that case.
+                */
+               ipmi_free_recv_msg(msg);
+-              atomic_dec(&msg->user->nr_msgs);
+       } else {
+               int index;
+               struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
+               if (user) {
+-                      atomic_dec(&user->nr_msgs);
+                       user->handler->ipmi_recv_hndl(msg, user->handler_data);
+                       release_ipmi_user(user, index);
+               } else {
+@@ -1634,8 +1634,7 @@ int ipmi_set_gets_events(struct ipmi_use
+               spin_unlock_irqrestore(&intf->events_lock, flags);
+               list_for_each_entry_safe(msg, msg2, &msgs, link) {
+-                      msg->user = user;
+-                      kref_get(&user->refcount);
++                      ipmi_set_recv_msg_user(msg, user);
+                       deliver_local_response(intf, msg);
+               }
+@@ -2309,22 +2308,15 @@ static int i_ipmi_request(struct ipmi_us
+       struct ipmi_recv_msg *recv_msg;
+       int rv = 0;
+-      if (user) {
+-              if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+-                      /* Decrement will happen at the end of the routine. */
+-                      rv = -EBUSY;
+-                      goto out;
+-              }
+-      }
+-
+-      if (supplied_recv)
++      if (supplied_recv) {
+               recv_msg = supplied_recv;
+-      else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (recv_msg == NULL) {
+-                      rv = -ENOMEM;
+-                      goto out;
+-              }
++              recv_msg->user = user;
++              if (user)
++                      atomic_inc(&user->nr_msgs);
++      } else {
++              recv_msg = ipmi_alloc_recv_msg(user);
++              if (IS_ERR(recv_msg))
++                      return PTR_ERR(recv_msg);
+       }
+       recv_msg->user_msg_data = user_msg_data;
+@@ -2335,8 +2327,7 @@ static int i_ipmi_request(struct ipmi_us
+               if (smi_msg == NULL) {
+                       if (!supplied_recv)
+                               ipmi_free_recv_msg(recv_msg);
+-                      rv = -ENOMEM;
+-                      goto out;
++                      return -ENOMEM;
+               }
+       }
+@@ -2346,10 +2337,6 @@ static int i_ipmi_request(struct ipmi_us
+               goto out_err;
+       }
+-      recv_msg->user = user;
+-      if (user)
+-              /* The put happens when the message is freed. */
+-              kref_get(&user->refcount);
+       recv_msg->msgid = msgid;
+       /*
+        * Store the message to send in the receive message so timeout
+@@ -2378,8 +2365,10 @@ static int i_ipmi_request(struct ipmi_us
+       if (rv) {
+ out_err:
+-              ipmi_free_smi_msg(smi_msg);
+-              ipmi_free_recv_msg(recv_msg);
++              if (!supplied_smi)
++                      ipmi_free_smi_msg(smi_msg);
++              if (!supplied_recv)
++                      ipmi_free_recv_msg(recv_msg);
+       } else {
+               dev_dbg(intf->si_dev, "Send: %*ph\n",
+                       smi_msg->data_size, smi_msg->data);
+@@ -2388,9 +2377,6 @@ out_err:
+       }
+       rcu_read_unlock();
+-out:
+-      if (rv && user)
+-              atomic_dec(&user->nr_msgs);
+       return rv;
+ }
+@@ -3883,7 +3869,7 @@ static int handle_ipmb_get_msg_cmd(struc
+       unsigned char            chan;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_ipmb_addr    *ipmb_addr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       if (msg->rsp_size < 10) {
+               /* Message not big enough, just ignore it. */
+@@ -3904,9 +3890,8 @@ static int handle_ipmb_get_msg_cmd(struc
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -3941,47 +3926,41 @@ static int handle_ipmb_get_msg_cmd(struc
+                       rv = -1;
+               }
+               rcu_read_unlock();
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+-                      ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+-                      ipmb_addr->slave_addr = msg->rsp[6];
+-                      ipmb_addr->lun = msg->rsp[7] & 3;
+-                      ipmb_addr->channel = msg->rsp[3] & 0xf;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
++              ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
++              ipmb_addr->slave_addr = msg->rsp[6];
++              ipmb_addr->lun = msg->rsp[7] & 3;
++              ipmb_addr->channel = msg->rsp[3] & 0xf;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = msg->rsp[7] >> 2;
+-                      recv_msg->msg.netfn = msg->rsp[4] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[8];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = msg->rsp[7] >> 2;
++              recv_msg->msg.netfn = msg->rsp[4] >> 2;
++              recv_msg->msg.cmd = msg->rsp[8];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * We chop off 10, not 9 bytes because the checksum
+-                       * at the end also needs to be removed.
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 10;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[9],
+-                             msg->rsp_size - 10);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * We chop off 10, not 9 bytes because the checksum
++               * at the end also needs to be removed.
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 10;
++              memcpy(recv_msg->msg_data, &msg->rsp[9],
++                     msg->rsp_size - 10);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -3994,7 +3973,7 @@ static int handle_ipmb_direct_rcv_cmd(st
+       int                      rv = 0;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_ipmb_direct_addr *daddr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       unsigned char netfn = msg->rsp[0] >> 2;
+       unsigned char cmd = msg->rsp[3];
+@@ -4003,9 +3982,8 @@ static int handle_ipmb_direct_rcv_cmd(st
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4032,44 +4010,38 @@ static int handle_ipmb_direct_rcv_cmd(st
+                       rv = -1;
+               }
+               rcu_read_unlock();
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+-                      daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+-                      daddr->channel = 0;
+-                      daddr->slave_addr = msg->rsp[1];
+-                      daddr->rs_lun = msg->rsp[0] & 3;
+-                      daddr->rq_lun = msg->rsp[2] & 3;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
++              daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
++              daddr->channel = 0;
++              daddr->slave_addr = msg->rsp[1];
++              daddr->rs_lun = msg->rsp[0] & 3;
++              daddr->rq_lun = msg->rsp[2] & 3;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = (msg->rsp[2] >> 2);
+-                      recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[3];
+-                      recv_msg->msg.data = recv_msg->msg_data;
+-
+-                      recv_msg->msg.data_len = msg->rsp_size - 4;
+-                      memcpy(recv_msg->msg_data, msg->rsp + 4,
+-                             msg->rsp_size - 4);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = (msg->rsp[2] >> 2);
++              recv_msg->msg.netfn = msg->rsp[0] >> 2;
++              recv_msg->msg.cmd = msg->rsp[3];
++              recv_msg->msg.data = recv_msg->msg_data;
++
++              recv_msg->msg.data_len = msg->rsp_size - 4;
++              memcpy(recv_msg->msg_data, msg->rsp + 4,
++                     msg->rsp_size - 4);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4183,7 +4155,7 @@ static int handle_lan_get_msg_cmd(struct
+       unsigned char            chan;
+       struct ipmi_user         *user = NULL;
+       struct ipmi_lan_addr     *lan_addr;
+-      struct ipmi_recv_msg     *recv_msg;
++      struct ipmi_recv_msg     *recv_msg = NULL;
+       if (msg->rsp_size < 12) {
+               /* Message not big enough, just ignore it. */
+@@ -4204,9 +4176,8 @@ static int handle_lan_get_msg_cmd(struct
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4218,49 +4189,44 @@ static int handle_lan_get_msg_cmd(struct
+                * them to be freed.
+                */
+               rv = 0;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_user);
+-              } else {
+-                      /* Extract the source address from the data. */
+-                      lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+-                      lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+-                      lan_addr->session_handle = msg->rsp[4];
+-                      lan_addr->remote_SWID = msg->rsp[8];
+-                      lan_addr->local_SWID = msg->rsp[5];
+-                      lan_addr->lun = msg->rsp[9] & 3;
+-                      lan_addr->channel = msg->rsp[3] & 0xf;
+-                      lan_addr->privilege = msg->rsp[3] >> 4;
++      } else if (!IS_ERR(recv_msg)) {
++              /* Extract the source address from the data. */
++              lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
++              lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
++              lan_addr->session_handle = msg->rsp[4];
++              lan_addr->remote_SWID = msg->rsp[8];
++              lan_addr->local_SWID = msg->rsp[5];
++              lan_addr->lun = msg->rsp[9] & 3;
++              lan_addr->channel = msg->rsp[3] & 0xf;
++              lan_addr->privilege = msg->rsp[3] >> 4;
+-                      /*
+-                       * Extract the rest of the message information
+-                       * from the IPMB header.
+-                       */
+-                      recv_msg->user = user;
+-                      recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+-                      recv_msg->msgid = msg->rsp[9] >> 2;
+-                      recv_msg->msg.netfn = msg->rsp[6] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[10];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++              /*
++               * Extract the rest of the message information
++               * from the IPMB header.
++               */
++              recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
++              recv_msg->msgid = msg->rsp[9] >> 2;
++              recv_msg->msg.netfn = msg->rsp[6] >> 2;
++              recv_msg->msg.cmd = msg->rsp[10];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * We chop off 12, not 11 bytes because the checksum
+-                       * at the end also needs to be removed.
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 12;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[11],
+-                             msg->rsp_size - 12);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * We chop off 12, not 11 bytes because the checksum
++               * at the end also needs to be removed.
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 12;
++              memcpy(recv_msg->msg_data, &msg->rsp[11],
++                     msg->rsp_size - 12);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4282,7 +4248,7 @@ static int handle_oem_get_msg_cmd(struct
+       unsigned char         chan;
+       struct ipmi_user *user = NULL;
+       struct ipmi_system_interface_addr *smi_addr;
+-      struct ipmi_recv_msg  *recv_msg;
++      struct ipmi_recv_msg  *recv_msg = NULL;
+       /*
+        * We expect the OEM SW to perform error checking
+@@ -4311,9 +4277,8 @@ static int handle_oem_get_msg_cmd(struct
+       rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+       if (rcvr) {
+               user = rcvr->user;
+-              kref_get(&user->refcount);
+-      } else
+-              user = NULL;
++              recv_msg = ipmi_alloc_recv_msg(user);
++      }
+       rcu_read_unlock();
+       if (user == NULL) {
+@@ -4326,48 +4291,42 @@ static int handle_oem_get_msg_cmd(struct
+                */
+               rv = 0;
+-      } else {
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
+-                      /*
+-                       * We couldn't allocate memory for the
+-                       * message, so requeue it for handling
+-                       * later.
+-                       */
+-                      rv = 1;
+-                      kref_put(&user->refcount, free_user);
+-              } else {
+-                      /*
+-                       * OEM Messages are expected to be delivered via
+-                       * the system interface to SMS software.  We might
+-                       * need to visit this again depending on OEM
+-                       * requirements
+-                       */
+-                      smi_addr = ((struct ipmi_system_interface_addr *)
+-                                  &recv_msg->addr);
+-                      smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+-                      smi_addr->channel = IPMI_BMC_CHANNEL;
+-                      smi_addr->lun = msg->rsp[0] & 3;
+-
+-                      recv_msg->user = user;
+-                      recv_msg->user_msg_data = NULL;
+-                      recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+-                      recv_msg->msg.netfn = msg->rsp[0] >> 2;
+-                      recv_msg->msg.cmd = msg->rsp[1];
+-                      recv_msg->msg.data = recv_msg->msg_data;
++      } else if (!IS_ERR(recv_msg)) {
++              /*
++               * OEM Messages are expected to be delivered via
++               * the system interface to SMS software.  We might
++               * need to visit this again depending on OEM
++               * requirements
++               */
++              smi_addr = ((struct ipmi_system_interface_addr *)
++                          &recv_msg->addr);
++              smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
++              smi_addr->channel = IPMI_BMC_CHANNEL;
++              smi_addr->lun = msg->rsp[0] & 3;
++
++              recv_msg->user_msg_data = NULL;
++              recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
++              recv_msg->msg.netfn = msg->rsp[0] >> 2;
++              recv_msg->msg.cmd = msg->rsp[1];
++              recv_msg->msg.data = recv_msg->msg_data;
+-                      /*
+-                       * The message starts at byte 4 which follows the
+-                       * Channel Byte in the "GET MESSAGE" command
+-                       */
+-                      recv_msg->msg.data_len = msg->rsp_size - 4;
+-                      memcpy(recv_msg->msg_data, &msg->rsp[4],
+-                             msg->rsp_size - 4);
+-                      if (deliver_response(intf, recv_msg))
+-                              ipmi_inc_stat(intf, unhandled_commands);
+-                      else
+-                              ipmi_inc_stat(intf, handled_commands);
+-              }
++              /*
++               * The message starts at byte 4 which follows the
++               * Channel Byte in the "GET MESSAGE" command
++               */
++              recv_msg->msg.data_len = msg->rsp_size - 4;
++              memcpy(recv_msg->msg_data, &msg->rsp[4],
++                     msg->rsp_size - 4);
++              if (deliver_response(intf, recv_msg))
++                      ipmi_inc_stat(intf, unhandled_commands);
++              else
++                      ipmi_inc_stat(intf, handled_commands);
++      } else {
++              /*
++               * We couldn't allocate memory for the message, so
++               * requeue it for handling later.
++               */
++              rv = 1;
+       }
+       return rv;
+@@ -4426,8 +4385,8 @@ static int handle_read_event_rsp(struct
+               if (!user->gets_events)
+                       continue;
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
++              recv_msg = ipmi_alloc_recv_msg(user);
++              if (IS_ERR(recv_msg)) {
+                       rcu_read_unlock();
+                       list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
+                                                link) {
+@@ -4446,8 +4405,6 @@ static int handle_read_event_rsp(struct
+               deliver_count++;
+               copy_event_into_recv_msg(recv_msg, msg);
+-              recv_msg->user = user;
+-              kref_get(&user->refcount);
+               list_add_tail(&recv_msg->link, &msgs);
+       }
+       srcu_read_unlock(&intf->users_srcu, index);
+@@ -4463,8 +4420,8 @@ static int handle_read_event_rsp(struct
+                * No one to receive the message, put it in queue if there's
+                * not already too many things in the queue.
+                */
+-              recv_msg = ipmi_alloc_recv_msg();
+-              if (!recv_msg) {
++              recv_msg = ipmi_alloc_recv_msg(NULL);
++              if (IS_ERR(recv_msg)) {
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling
+@@ -5156,27 +5113,51 @@ static void free_recv_msg(struct ipmi_re
+               kfree(msg);
+ }
+-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
++static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
+ {
+       struct ipmi_recv_msg *rv;
++      if (user) {
++              if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
++                      atomic_dec(&user->nr_msgs);
++                      return ERR_PTR(-EBUSY);
++              }
++      }
++
+       rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
+-      if (rv) {
+-              rv->user = NULL;
+-              rv->done = free_recv_msg;
+-              atomic_inc(&recv_msg_inuse_count);
++      if (!rv) {
++              if (user)
++                      atomic_dec(&user->nr_msgs);
++              return ERR_PTR(-ENOMEM);
+       }
++
++      rv->user = user;
++      rv->done = free_recv_msg;
++      if (user)
++              kref_get(&user->refcount);
++      atomic_inc(&recv_msg_inuse_count);
+       return rv;
+ }
+ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
+ {
+-      if (msg->user && !oops_in_progress)
++      if (msg->user && !oops_in_progress) {
++              atomic_dec(&msg->user->nr_msgs);
+               kref_put(&msg->user->refcount, free_user);
++      }
+       msg->done(msg);
+ }
+ EXPORT_SYMBOL(ipmi_free_recv_msg);
++static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
++                                 struct ipmi_user *user)
++{
++      WARN_ON_ONCE(msg->user); /* User should not be set. */
++      msg->user = user;
++      atomic_inc(&user->nr_msgs);
++      kref_get(&user->refcount);
++}
++
+ static atomic_t panic_done_count = ATOMIC_INIT(0);
+ static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
diff --git a/queue-6.1/ksmbd-add-max-ip-connections-parameter.patch b/queue-6.1/ksmbd-add-max-ip-connections-parameter.patch
new file mode 100644 (file)
index 0000000..da73c22
--- /dev/null
@@ -0,0 +1,127 @@
+From stable+bounces-185692-greg=kroah.com@vger.kernel.org Tue Oct 14 17:29:22 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 11:25:39 -0400
+Subject: ksmbd: add max ip connections parameter
+To: stable@vger.kernel.org
+Cc: Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251014152539.122137-1-sashal@kernel.org>
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit d8b6dc9256762293048bf122fc11c4e612d0ef5d ]
+
+This parameter sets the maximum number of connections per ip address.
+The default is 8.
+
+Cc: stable@vger.kernel.org
+Fixes: c0d41112f1a5 ("ksmbd: extend the connection limiting mechanism to support IPv6")
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ adjust reserved room ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/ksmbd_netlink.h |    5 +++--
+ fs/smb/server/server.h        |    1 +
+ fs/smb/server/transport_ipc.c |    3 +++
+ fs/smb/server/transport_tcp.c |   27 ++++++++++++++++-----------
+ 4 files changed, 23 insertions(+), 13 deletions(-)
+
+--- a/fs/smb/server/ksmbd_netlink.h
++++ b/fs/smb/server/ksmbd_netlink.h
+@@ -107,10 +107,11 @@ struct ksmbd_startup_request {
+       __u32   smb2_max_credits;       /* MAX credits */
+       __u32   smbd_max_io_size;       /* smbd read write size */
+       __u32   max_connections;        /* Number of maximum simultaneous connections */
+-      __u32   reserved[126];          /* Reserved room */
++      __u32   max_ip_connections;     /* Number of maximum connection per ip address */
++      __u32   reserved[125];          /* Reserved room */
+       __u32   ifc_list_sz;            /* interfaces list size */
+       __s8    ____payload[];
+-};
++} __packed;
+ #define KSMBD_STARTUP_CONFIG_INTERFACES(s)    ((s)->____payload)
+--- a/fs/smb/server/server.h
++++ b/fs/smb/server/server.h
+@@ -42,6 +42,7 @@ struct ksmbd_server_config {
+       struct smb_sid          domain_sid;
+       unsigned int            auth_mechs;
+       unsigned int            max_connections;
++      unsigned int            max_ip_connections;
+       char                    *conf[SERVER_CONF_WORK_GROUP + 1];
+ };
+--- a/fs/smb/server/transport_ipc.c
++++ b/fs/smb/server/transport_ipc.c
+@@ -318,6 +318,9 @@ static int ipc_server_config_on_startup(
+       if (req->max_connections)
+               server_conf.max_connections = req->max_connections;
++      if (req->max_ip_connections)
++              server_conf.max_ip_connections = req->max_ip_connections;
++
+       ret = ksmbd_set_netbios_name(req->netbios_name);
+       ret |= ksmbd_set_server_string(req->server_string);
+       ret |= ksmbd_set_work_group(req->work_group);
+--- a/fs/smb/server/transport_tcp.c
++++ b/fs/smb/server/transport_tcp.c
+@@ -236,6 +236,7 @@ static int ksmbd_kthread_fn(void *p)
+       struct interface *iface = (struct interface *)p;
+       struct ksmbd_conn *conn;
+       int ret;
++      unsigned int max_ip_conns;
+       while (!kthread_should_stop()) {
+               mutex_lock(&iface->sock_release_lock);
+@@ -253,34 +254,38 @@ static int ksmbd_kthread_fn(void *p)
+                       continue;
+               }
++              if (!server_conf.max_ip_connections)
++                      goto skip_max_ip_conns_limit;
++
+               /*
+                * Limits repeated connections from clients with the same IP.
+                */
++              max_ip_conns = 0;
+               down_read(&conn_list_lock);
+-              list_for_each_entry(conn, &conn_list, conns_list)
++              list_for_each_entry(conn, &conn_list, conns_list) {
+ #if IS_ENABLED(CONFIG_IPV6)
+                       if (client_sk->sk->sk_family == AF_INET6) {
+                               if (memcmp(&client_sk->sk->sk_v6_daddr,
+-                                         &conn->inet6_addr, 16) == 0) {
+-                                      ret = -EAGAIN;
+-                                      break;
+-                              }
++                                         &conn->inet6_addr, 16) == 0)
++                                      max_ip_conns++;
+                       } else if (inet_sk(client_sk->sk)->inet_daddr ==
+-                               conn->inet_addr) {
+-                              ret = -EAGAIN;
+-                              break;
+-                      }
++                               conn->inet_addr)
++                              max_ip_conns++;
+ #else
+                       if (inet_sk(client_sk->sk)->inet_daddr ==
+-                          conn->inet_addr) {
++                          conn->inet_addr)
++                              max_ip_conns++;
++#endif
++                      if (server_conf.max_ip_connections <= max_ip_conns) {
+                               ret = -EAGAIN;
+                               break;
+                       }
+-#endif
++              }
+               up_read(&conn_list_lock);
+               if (ret == -EAGAIN)
+                       continue;
++skip_max_ip_conns_limit:
+               if (server_conf.max_connections &&
+                   atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
+                       pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
diff --git a/queue-6.1/kvm-x86-don-t-re-check-l1-intercepts-when-completing-userspace-i-o.patch b/queue-6.1/kvm-x86-don-t-re-check-l1-intercepts-when-completing-userspace-i-o.patch
new file mode 100644 (file)
index 0000000..4d19bc2
--- /dev/null
@@ -0,0 +1,145 @@
+From stable+bounces-184741-greg=kroah.com@vger.kernel.org Mon Oct 13 17:48:35 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 11:11:40 -0400
+Subject: KVM: x86: Don't (re)check L1 intercepts when completing userspace I/O
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <seanjc@google.com>, syzbot+cc2032ba16cc2018ca25@syzkaller.appspotmail.com, Jim Mattson <jmattson@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013151140.3383954-1-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit e750f85391286a4c8100275516973324b621a269 ]
+
+When completing emulation of instruction that generated a userspace exit
+for I/O, don't recheck L1 intercepts as KVM has already finished that
+phase of instruction execution, i.e. has already committed to allowing L2
+to perform I/O.  If L1 (or host userspace) modifies the I/O permission
+bitmaps during the exit to userspace,  KVM will treat the access as being
+intercepted despite already having emulated the I/O access.
+
+Pivot on EMULTYPE_NO_DECODE to detect that KVM is completing emulation.
+Of the three users of EMULTYPE_NO_DECODE, only complete_emulated_io() (the
+intended "recipient") can reach the code in question.  gp_interception()'s
+use is mutually exclusive with is_guest_mode(), and
+complete_emulated_insn_gp() unconditionally pairs EMULTYPE_NO_DECODE with
+EMULTYPE_SKIP.
+
+The bad behavior was detected by a syzkaller program that toggles port I/O
+interception during the userspace I/O exit, ultimately resulting in a WARN
+on vcpu->arch.pio.count being non-zero due to KVM not completing emulation
+of the I/O instruction.
+
+  WARNING: CPU: 23 PID: 1083 at arch/x86/kvm/x86.c:8039 emulator_pio_in_out+0x154/0x170 [kvm]
+  Modules linked in: kvm_intel kvm irqbypass
+  CPU: 23 UID: 1000 PID: 1083 Comm: repro Not tainted 6.16.0-rc5-c1610d2d66b1-next-vm #74 NONE
+  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
+  RIP: 0010:emulator_pio_in_out+0x154/0x170 [kvm]
+  PKRU: 55555554
+  Call Trace:
+   <TASK>
+   kvm_fast_pio+0xd6/0x1d0 [kvm]
+   vmx_handle_exit+0x149/0x610 [kvm_intel]
+   kvm_arch_vcpu_ioctl_run+0xda8/0x1ac0 [kvm]
+   kvm_vcpu_ioctl+0x244/0x8c0 [kvm]
+   __x64_sys_ioctl+0x8a/0xd0
+   do_syscall_64+0x5d/0xc60
+   entry_SYSCALL_64_after_hwframe+0x4b/0x53
+   </TASK>
+
+Reported-by: syzbot+cc2032ba16cc2018ca25@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68790db4.a00a0220.3af5df.0020.GAE@google.com
+Fixes: 8a76d7f25f8f ("KVM: x86: Add x86 callback for intercept check")
+Cc: stable@vger.kernel.org
+Cc: Jim Mattson <jmattson@google.com>
+Link: https://lore.kernel.org/r/20250715190638.1899116-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+[ is_guest_mode() was open coded ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c     |   11 ++++-------
+ arch/x86/kvm/kvm_emulate.h |    2 +-
+ arch/x86/kvm/x86.c         |    9 ++++++++-
+ 3 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5478,12 +5478,11 @@ void init_decode_cache(struct x86_emulat
+       ctxt->mem_read.end = 0;
+ }
+-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
++int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts)
+ {
+       const struct x86_emulate_ops *ops = ctxt->ops;
+       int rc = X86EMUL_CONTINUE;
+       int saved_dst_type = ctxt->dst.type;
+-      unsigned emul_flags;
+       ctxt->mem_read.pos = 0;
+@@ -5497,8 +5496,6 @@ int x86_emulate_insn(struct x86_emulate_
+               rc = emulate_ud(ctxt);
+               goto done;
+       }
+-
+-      emul_flags = ctxt->ops->get_hflags(ctxt);
+       if (unlikely(ctxt->d &
+                    (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
+               if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
+@@ -5532,7 +5529,7 @@ int x86_emulate_insn(struct x86_emulate_
+                               fetch_possible_mmx_operand(&ctxt->dst);
+               }
+-              if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
++              if (unlikely(check_intercepts) && ctxt->intercept) {
+                       rc = emulator_check_intercept(ctxt, ctxt->intercept,
+                                                     X86_ICPT_PRE_EXCEPT);
+                       if (rc != X86EMUL_CONTINUE)
+@@ -5561,7 +5558,7 @@ int x86_emulate_insn(struct x86_emulate_
+                               goto done;
+               }
+-              if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
++              if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
+                       rc = emulator_check_intercept(ctxt, ctxt->intercept,
+                                                     X86_ICPT_POST_EXCEPT);
+                       if (rc != X86EMUL_CONTINUE)
+@@ -5615,7 +5612,7 @@ int x86_emulate_insn(struct x86_emulate_
+ special_insn:
+-      if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
++      if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
+               rc = emulator_check_intercept(ctxt, ctxt->intercept,
+                                             X86_ICPT_POST_MEMACCESS);
+               if (rc != X86EMUL_CONTINUE)
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -517,7 +517,7 @@ bool x86_page_table_writing_insn(struct
+ #define EMULATION_RESTART 1
+ #define EMULATION_INTERCEPTED 2
+ void init_decode_cache(struct x86_emulate_ctxt *ctxt);
+-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
++int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts);
+ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+                        u16 tss_selector, int idt_index, int reason,
+                        bool has_error_code, u32 error_code);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8976,7 +8976,14 @@ restart:
+               ctxt->exception.address = 0;
+       }
+-      r = x86_emulate_insn(ctxt);
++      /*
++       * Check L1's instruction intercepts when emulating instructions for
++       * L2, unless KVM is re-emulating a previously decoded instruction,
++       * e.g. to complete userspace I/O, in which case KVM has already
++       * checked the intercepts.
++       */
++      r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) &&
++                                 !(emulation_type & EMULTYPE_NO_DECODE));
+       if (r == EMULATION_INTERCEPTED)
+               return 1;
diff --git a/queue-6.1/media-mc-clear-minor-number-before-put-device.patch b/queue-6.1/media-mc-clear-minor-number-before-put-device.patch
new file mode 100644 (file)
index 0000000..18e84a3
--- /dev/null
@@ -0,0 +1,51 @@
+From stable+bounces-185524-greg=kroah.com@vger.kernel.org Tue Oct 14 00:12:19 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 18:11:45 -0400
+Subject: media: mc: Clear minor number before put device
+To: stable@vger.kernel.org
+Cc: Edward Adam Davis <eadavis@qq.com>, syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com, Sakari Ailus <sakari.ailus@linux.intel.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013221145.3655607-1-sashal@kernel.org>
+
+From: Edward Adam Davis <eadavis@qq.com>
+
+[ Upstream commit 8cfc8cec1b4da88a47c243a11f384baefd092a50 ]
+
+The device minor should not be cleared after the device is released.
+
+Fixes: 9e14868dc952 ("media: mc: Clear minor number reservation at unregistration time")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=031d0cfd7c362817963f
+Tested-by: syzbot+031d0cfd7c362817963f@syzkaller.appspotmail.com
+Signed-off-by: Edward Adam Davis <eadavis@qq.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+[ moved clear_bit from media_devnode_release callback to media_devnode_unregister before put_device ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/mc/mc-devnode.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/media/mc/mc-devnode.c
++++ b/drivers/media/mc/mc-devnode.c
+@@ -50,11 +50,6 @@ static void media_devnode_release(struct
+ {
+       struct media_devnode *devnode = to_media_devnode(cd);
+-      mutex_lock(&media_devnode_lock);
+-      /* Mark device node number as free */
+-      clear_bit(devnode->minor, media_devnode_nums);
+-      mutex_unlock(&media_devnode_lock);
+-
+       /* Release media_devnode and perform other cleanups as needed. */
+       if (devnode->release)
+               devnode->release(devnode);
+@@ -283,6 +278,7 @@ void media_devnode_unregister(struct med
+       /* Delete the cdev on this minor as well */
+       cdev_device_del(&devnode->cdev, &devnode->dev);
+       devnode->media_dev = NULL;
++      clear_bit(devnode->minor, media_devnode_nums);
+       mutex_unlock(&media_devnode_lock);
+       put_device(&devnode->dev);
diff --git a/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch b/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
new file mode 100644 (file)
index 0000000..580e5c3
--- /dev/null
@@ -0,0 +1,39 @@
+From stable+bounces-185543-greg=kroah.com@vger.kernel.org Tue Oct 14 01:07:54 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 19:07:43 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Drop unneeded assignment for cache_type
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013230744.3697280-2-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 9eb99c08508714906db078b5efbe075329a3fb06 ]
+
+REGCACHE_NONE is the default type of the cache when not provided.
+Drop unneeded explicit assignment to it.
+
+Note, it's defined to 0, and if ever be redefined, it will break
+literally a lot of the drivers, so it very unlikely to happen.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20250129152823.1802273-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 64e0d839c589 ("mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -82,7 +82,6 @@ static const struct regmap_config chtdc_
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0xff,
+-      .cache_type = REGCACHE_NONE,
+ };
+ static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch b/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
new file mode 100644 (file)
index 0000000..ab2648e
--- /dev/null
@@ -0,0 +1,40 @@
+From stable+bounces-185542-greg=kroah.com@vger.kernel.org Tue Oct 14 01:07:52 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 19:07:42 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Fix invalid regmap-config max_register value
+To: stable@vger.kernel.org
+Cc: Hans de Goede <hdegoede@redhat.com>, Andy Shevchenko <andy@kernel.org>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013230744.3697280-1-sashal@kernel.org>
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 70e997e0107e5ed85c1a3ef2adfccbe351c29d71 ]
+
+The max_register = 128 setting in the regmap config is not valid.
+
+The Intel Dollar Cove TI PMIC has an eeprom unlock register at address 0x88
+and a number of EEPROM registers at 0xF?. Increase max_register to 0xff so
+that these registers can be accessed.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Link: https://lore.kernel.org/r/20241208150028.325349-1-hdegoede@redhat.com
+Signed-off-by: Lee Jones <lee@kernel.org>
+Stable-dep-of: 64e0d839c589 ("mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -81,7 +81,7 @@ static struct mfd_cell chtdc_ti_dev[] =
+ static const struct regmap_config chtdc_ti_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+-      .max_register = 128,
++      .max_register = 0xff,
+       .cache_type = REGCACHE_NONE,
+ };
diff --git a/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch b/queue-6.1/mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
new file mode 100644 (file)
index 0000000..4482db6
--- /dev/null
@@ -0,0 +1,43 @@
+From stable+bounces-185544-greg=kroah.com@vger.kernel.org Tue Oct 14 01:07:55 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 19:07:44 -0400
+Subject: mfd: intel_soc_pmic_chtdc_ti: Set use_single_read regmap_config flag
+To: stable@vger.kernel.org
+Cc: Hans de Goede <hansg@kernel.org>, Andy Shevchenko <andy@kernel.org>, Lee Jones <lee@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013230744.3697280-3-sashal@kernel.org>
+
+From: Hans de Goede <hansg@kernel.org>
+
+[ Upstream commit 64e0d839c589f4f2ecd2e3e5bdb5cee6ba6bade9 ]
+
+Testing has shown that reading multiple registers at once (for 10-bit
+ADC values) does not work. Set the use_single_read regmap_config flag
+to make regmap split these for us.
+
+This should fix temperature opregion accesses done by
+drivers/acpi/pmic/intel_pmic_chtdc_ti.c and is also necessary for
+the upcoming drivers for the ADC and battery MFD cells.
+
+Fixes: 6bac0606fdba ("mfd: Add support for Cherry Trail Dollar Cove TI PMIC")
+Cc: stable@vger.kernel.org
+Reviewed-by: Andy Shevchenko <andy@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Link: https://lore.kernel.org/r/20250804133240.312383-1-hansg@kernel.org
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mfd/intel_soc_pmic_chtdc_ti.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
++++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+@@ -82,6 +82,8 @@ static const struct regmap_config chtdc_
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0xff,
++      /* The hardware does not support reading multiple registers at once */
++      .use_single_read = true,
+ };
+ static const struct regmap_irq chtdc_ti_irqs[] = {
diff --git a/queue-6.1/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch b/queue-6.1/pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
new file mode 100644 (file)
index 0000000..5599539
--- /dev/null
@@ -0,0 +1,75 @@
+From stable+bounces-185701-greg=kroah.com@vger.kernel.org Tue Oct 14 18:14:16 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 12:14:06 -0400
+Subject: PCI: endpoint: pci-epf-test: Add NULL check for DMA channels before release
+To: stable@vger.kernel.org
+Cc: "Shin'ichiro Kawasaki" <shinichiro.kawasaki@wdc.com>, "Manivannan Sadhasivam" <mani@kernel.org>, "Damien Le Moal" <dlemoal@kernel.org>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251014161406.164458-2-sashal@kernel.org>
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+[ Upstream commit 85afa9ea122dd9d4a2ead104a951d318975dcd25 ]
+
+The fields dma_chan_tx and dma_chan_rx of the struct pci_epf_test can be
+NULL even after EPF initialization. Then it is prudent to check that
+they have non-NULL values before releasing the channels. Add the checks
+in pci_epf_test_clean_dma_chan().
+
+Without the checks, NULL pointer dereferences happen and they can lead
+to a kernel panic in some cases:
+
+  Unable to handle kernel NULL pointer dereference at virtual address 0000000000000050
+  Call trace:
+   dma_release_channel+0x2c/0x120 (P)
+   pci_epf_test_epc_deinit+0x94/0xc0 [pci_epf_test]
+   pci_epc_deinit_notify+0x74/0xc0
+   tegra_pcie_ep_pex_rst_irq+0x250/0x5d8
+   irq_thread_fn+0x34/0xb8
+   irq_thread+0x18c/0x2e8
+   kthread+0x14c/0x210
+   ret_from_fork+0x10/0x20
+
+Fixes: 8353813c88ef ("PCI: endpoint: Enable DMA tests for endpoints with DMA capabilities")
+Fixes: 5ebf3fc59bd2 ("PCI: endpoint: functions/pci-epf-test: Add DMA support to transfer data")
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+[mani: trimmed the stack trace]
+Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20250916025756.34807-1-shinichiro.kawasaki@wdc.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/endpoint/functions/pci-epf-test.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -282,15 +282,20 @@ static void pci_epf_test_clean_dma_chan(
+       if (!epf_test->dma_supported)
+               return;
+-      dma_release_channel(epf_test->dma_chan_tx);
+-      if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++      if (epf_test->dma_chan_tx) {
++              dma_release_channel(epf_test->dma_chan_tx);
++              if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
++                      epf_test->dma_chan_tx = NULL;
++                      epf_test->dma_chan_rx = NULL;
++                      return;
++              }
+               epf_test->dma_chan_tx = NULL;
+-              epf_test->dma_chan_rx = NULL;
+-              return;
+       }
+-      dma_release_channel(epf_test->dma_chan_rx);
+-      epf_test->dma_chan_rx = NULL;
++      if (epf_test->dma_chan_rx) {
++              dma_release_channel(epf_test->dma_chan_rx);
++              epf_test->dma_chan_rx = NULL;
++      }
+ }
+ static void pci_epf_test_print_rate(const char *ops, u64 size,
diff --git a/queue-6.1/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch b/queue-6.1/pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
new file mode 100644 (file)
index 0000000..1764142
--- /dev/null
@@ -0,0 +1,48 @@
+From stable+bounces-185700-greg=kroah.com@vger.kernel.org Tue Oct 14 18:14:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Oct 2025 12:14:05 -0400
+Subject: PCI: endpoint: Remove surplus return statement from pci_epf_test_clean_dma_chan()
+To: stable@vger.kernel.org
+Cc: "Wang Jiang" <jiangwang@kylinos.cn>, "Krzysztof Wilczyński" <kwilczynski@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251014161406.164458-1-sashal@kernel.org>
+
+From: Wang Jiang <jiangwang@kylinos.cn>
+
+[ Upstream commit 9b80bdb10aee04ce7289896e6bdad13e33972636 ]
+
+Remove a surplus return statement from the void function that has been
+added in the commit 8353813c88ef ("PCI: endpoint: Enable DMA
+tests for endpoints with DMA capabilities").
+
+Especially, as empty return statements at the end of void functions
+serve little purpose.
+
+This fixes the following checkpatch.pl script warning:
+
+  WARNING: void function return statements are not generally useful
+  #296: FILE: drivers/pci/endpoint/functions/pci-epf-test.c:296:
+  +     return;
+  +}
+
+Link: https://lore.kernel.org/r/tencent_F250BEE2A65745A524E2EFE70CF615CA8F06@qq.com
+Signed-off-by: Wang Jiang <jiangwang@kylinos.cn>
+[kwilczynski: commit log]
+Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
+Stable-dep-of: 85afa9ea122d ("PCI: endpoint: pci-epf-test: Add NULL check for DMA channels before release")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/endpoint/functions/pci-epf-test.c |    2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -291,8 +291,6 @@ static void pci_epf_test_clean_dma_chan(
+       dma_release_channel(epf_test->dma_chan_rx);
+       epf_test->dma_chan_rx = NULL;
+-
+-      return;
+ }
+ static void pci_epf_test_print_rate(const char *ops, u64 size,
diff --git a/queue-6.1/rseq-protect-event-mask-against-membarrier-ipi.patch b/queue-6.1/rseq-protect-event-mask-against-membarrier-ipi.patch
new file mode 100644 (file)
index 0000000..5c84d9f
--- /dev/null
@@ -0,0 +1,80 @@
+From stable+bounces-186009-greg=kroah.com@vger.kernel.org Thu Oct 16 13:59:28 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Oct 2025 07:59:18 -0400
+Subject: rseq: Protect event mask against membarrier IPI
+To: stable@vger.kernel.org
+Cc: Thomas Gleixner <tglx@linutronix.de>, Boqun Feng <boqun.feng@gmail.com>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251016115918.3270535-1-sashal@kernel.org>
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 6eb350a2233100a283f882c023e5ad426d0ed63b ]
+
+rseq_need_restart() reads and clears task::rseq_event_mask with preemption
+disabled to guard against the scheduler.
+
+But membarrier() uses an IPI and sets the PREEMPT bit in the event mask
+from the IPI, which leaves that RMW operation unprotected.
+
+Use guard(irq) if CONFIG_MEMBARRIER is enabled to fix that.
+
+Fixes: 2a36ab717e8f ("rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: stable@vger.kernel.org
+[ Applied changes to include/linux/sched.h instead of include/linux/rseq.h ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/sched.h |   11 ++++++++---
+ kernel/rseq.c         |   10 +++++-----
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2313,6 +2313,12 @@ enum rseq_event_mask {
+       RSEQ_EVENT_MIGRATE      = (1U << RSEQ_EVENT_MIGRATE_BIT),
+ };
++#ifdef CONFIG_MEMBARRIER
++# define RSEQ_EVENT_GUARD     irq
++#else
++# define RSEQ_EVENT_GUARD     preempt
++#endif
++
+ static inline void rseq_set_notify_resume(struct task_struct *t)
+ {
+       if (t->rseq)
+@@ -2331,9 +2337,8 @@ static inline void rseq_handle_notify_re
+ static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
+ {
+-      preempt_disable();
+-      __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+-      preempt_enable();
++      scoped_guard(RSEQ_EVENT_GUARD)
++              __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+       rseq_handle_notify_resume(ksig, regs);
+ }
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -226,12 +226,12 @@ static int rseq_need_restart(struct task
+       /*
+        * Load and clear event mask atomically with respect to
+-       * scheduler preemption.
++       * scheduler preemption and membarrier IPIs.
+        */
+-      preempt_disable();
+-      event_mask = t->rseq_event_mask;
+-      t->rseq_event_mask = 0;
+-      preempt_enable();
++      scoped_guard(RSEQ_EVENT_GUARD) {
++              event_mask = t->rseq_event_mask;
++              t->rseq_event_mask = 0;
++      }
+       return !!event_mask;
+ }
diff --git a/queue-6.1/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch b/queue-6.1/selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
new file mode 100644 (file)
index 0000000..50de6b5
--- /dev/null
@@ -0,0 +1,183 @@
+From 0389c305ef56cbadca4cbef44affc0ec3213ed30 Mon Sep 17 00:00:00 2001
+From: Lance Yang <lance.yang@linux.dev>
+Date: Wed, 17 Sep 2025 21:31:37 +0800
+Subject: selftests/mm: skip soft-dirty tests when CONFIG_MEM_SOFT_DIRTY is disabled
+
+From: Lance Yang <lance.yang@linux.dev>
+
+commit 0389c305ef56cbadca4cbef44affc0ec3213ed30 upstream.
+
+The madv_populate and soft-dirty kselftests currently fail on systems
+where CONFIG_MEM_SOFT_DIRTY is disabled.
+
+Introduce a new helper softdirty_supported() into vm_util.c/h to ensure
+tests are properly skipped when the feature is not enabled.
+
+Link: https://lkml.kernel.org/r/20250917133137.62802-1-lance.yang@linux.dev
+Fixes: 9f3265db6ae8 ("selftests: vm: add test for Soft-Dirty PTE bit")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Acked-by: David Hildenbrand <david@redhat.com>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Gabriel Krisman Bertazi <krisman@collabora.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/vm/madv_populate.c |    9 ++-
+ tools/testing/selftests/vm/soft-dirty.c    |    5 +
+ tools/testing/selftests/vm/vm_util.c       |   77 +++++++++++++++++++++++++++++
+ tools/testing/selftests/vm/vm_util.h       |    2 
+ 4 files changed, 90 insertions(+), 3 deletions(-)
+
+--- a/tools/testing/selftests/vm/madv_populate.c
++++ b/tools/testing/selftests/vm/madv_populate.c
+@@ -274,12 +274,16 @@ static void test_softdirty(void)
+ int main(int argc, char **argv)
+ {
++      int nr_tests = 16;
+       int err;
+       pagesize = getpagesize();
++      if (softdirty_supported())
++              nr_tests += 5;
++
+       ksft_print_header();
+-      ksft_set_plan(21);
++      ksft_set_plan(nr_tests);
+       sense_support();
+       test_prot_read();
+@@ -287,7 +291,8 @@ int main(int argc, char **argv)
+       test_holes();
+       test_populate_read();
+       test_populate_write();
+-      test_softdirty();
++      if (softdirty_supported())
++              test_softdirty();
+       err = ksft_get_fail_cnt();
+       if (err)
+--- a/tools/testing/selftests/vm/soft-dirty.c
++++ b/tools/testing/selftests/vm/soft-dirty.c
+@@ -190,8 +190,11 @@ int main(int argc, char **argv)
+       int pagesize;
+       ksft_print_header();
+-      ksft_set_plan(15);
++      if (!softdirty_supported())
++              ksft_exit_skip("soft-dirty is not support\n");
++
++      ksft_set_plan(15);
+       pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
+       if (pagemap_fd < 0)
+               ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
+--- a/tools/testing/selftests/vm/vm_util.c
++++ b/tools/testing/selftests/vm/vm_util.c
+@@ -72,6 +72,42 @@ uint64_t read_pmd_pagesize(void)
+       return strtoul(buf, NULL, 10);
+ }
++char *__get_smap_entry(void *addr, const char *pattern, char *buf, size_t len)
++{
++      int ret;
++      FILE *fp;
++      char *entry = NULL;
++      char addr_pattern[MAX_LINE_LENGTH];
++
++      ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
++                     (unsigned long)addr);
++      if (ret >= MAX_LINE_LENGTH)
++              ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);
++
++      fp = fopen(SMAP_FILE_PATH, "r");
++      if (!fp)
++              ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__,
++                                 SMAP_FILE_PATH);
++
++      if (!check_for_pattern(fp, addr_pattern, buf, len))
++              goto err_out;
++
++      /* Fetch the pattern in the same block */
++      if (!check_for_pattern(fp, pattern, buf, len))
++              goto err_out;
++
++      /* Trim trailing newline */
++      entry = strchr(buf, '\n');
++      if (entry)
++              *entry = '\0';
++
++      entry = buf + strlen(pattern);
++
++err_out:
++      fclose(fp);
++      return entry;
++}
++
+ bool __check_huge(void *addr, char *pattern, int nr_hpages,
+                 uint64_t hpage_size)
+ {
+@@ -124,3 +160,44 @@ bool check_huge_shmem(void *addr, int nr
+ {
+       return __check_huge(addr, "ShmemPmdMapped:", nr_hpages, hpage_size);
+ }
++
++static bool check_vmflag(void *addr, const char *flag)
++{
++      char buffer[MAX_LINE_LENGTH];
++      const char *flags;
++      size_t flaglen;
++
++      flags = __get_smap_entry(addr, "VmFlags:", buffer, sizeof(buffer));
++      if (!flags)
++              ksft_exit_fail_msg("%s: No VmFlags for %p\n", __func__, addr);
++
++      while (true) {
++              flags += strspn(flags, " ");
++
++              flaglen = strcspn(flags, " ");
++              if (!flaglen)
++                      return false;
++
++              if (flaglen == strlen(flag) && !memcmp(flags, flag, flaglen))
++                      return true;
++
++              flags += flaglen;
++      }
++}
++
++bool softdirty_supported(void)
++{
++      char *addr;
++      bool supported = false;
++      const size_t pagesize = getpagesize();
++
++      /* New mappings are expected to be marked with VM_SOFTDIRTY (sd). */
++      addr = mmap(0, pagesize, PROT_READ | PROT_WRITE,
++                  MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
++      if (!addr)
++              ksft_exit_fail_msg("mmap failed\n");
++
++      supported = check_vmflag(addr, "sd");
++      munmap(addr, pagesize);
++      return supported;
++}
+--- a/tools/testing/selftests/vm/vm_util.h
++++ b/tools/testing/selftests/vm/vm_util.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #include <stdint.h>
+ #include <stdbool.h>
++#include <sys/mman.h>
+ uint64_t pagemap_get_entry(int fd, char *start);
+ bool pagemap_is_softdirty(int fd, char *start);
+@@ -10,3 +11,4 @@ uint64_t read_pmd_pagesize(void);
+ bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size);
+ bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size);
+ bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
++bool softdirty_supported(void);
index f9367734a49a783963a3ca1285a68d96d7b9a114..9b6ebc08ee93f50a2c09fd8f39b1356f4a7cdf91 100644 (file)
@@ -133,3 +133,27 @@ ext4-guard-against-ea-inode-refcount-underflow-in-xattr-update.patch
 acpica-allow-to-skip-global-lock-initialization.patch
 ext4-free-orphan-info-with-kvfree.patch
 lib-crypto-curve25519-hacl64-disable-kasan-with-clang-17-and-older.patch
+asoc-codecs-wcd934x-simplify-with-dev_err_probe.patch
+asoc-wcd934x-fix-error-handling-in-wcd934x_codec_parse_data.patch
+kvm-x86-don-t-re-check-l1-intercepts-when-completing-userspace-i-o.patch
+media-mc-clear-minor-number-before-put-device.patch
+squashfs-add-additional-inode-sanity-checking.patch
+squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
+tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
+ksmbd-add-max-ip-connections-parameter.patch
+pci-endpoint-remove-surplus-return-statement-from-pci_epf_test_clean_dma_chan.patch
+pci-endpoint-pci-epf-test-add-null-check-for-dma-channels-before-release.patch
+mfd-intel_soc_pmic_chtdc_ti-fix-invalid-regmap-config-max_register-value.patch
+mfd-intel_soc_pmic_chtdc_ti-drop-unneeded-assignment-for-cache_type.patch
+mfd-intel_soc_pmic_chtdc_ti-set-use_single_read-regmap_config-flag.patch
+btrfs-fix-the-incorrect-max_bytes-value-for-find_lock_delalloc_range.patch
+rseq-protect-event-mask-against-membarrier-ipi.patch
+ipmi-rework-user-message-limit-handling.patch
+ipmi-fix-handling-of-messages-with-provided-receive-message-pointer.patch
+acpi-property-disregard-references-in-data-only-subnode-lists.patch
+acpi-property-add-code-comments-explaining-what-is-going-on.patch
+acpi-property-do-not-pass-null-handles-to-acpi_attach_data.patch
+selftests-mm-skip-soft-dirty-tests-when-config_mem_soft_dirty-is-disabled.patch
+asm-generic-io-add-_ret_ip_-to-mmio-trace-for-more-accurate-debug-info.patch
+asm-generic-io.h-suppress-endianness-warnings-for-relaxed-accessors.patch
+asm-generic-io.h-skip-trace-helpers-if-rwmmio-events-are-disabled.patch
diff --git a/queue-6.1/squashfs-add-additional-inode-sanity-checking.patch b/queue-6.1/squashfs-add-additional-inode-sanity-checking.patch
new file mode 100644 (file)
index 0000000..bf1556b
--- /dev/null
@@ -0,0 +1,90 @@
+From stable+bounces-185515-greg=kroah.com@vger.kernel.org Mon Oct 13 22:47:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 16:41:26 -0400
+Subject: Squashfs: add additional inode sanity checking
+To: stable@vger.kernel.org
+Cc: Phillip Lougher <phillip@squashfs.org.uk>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013204127.3599792-1-sashal@kernel.org>
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+[ Upstream commit 9ee94bfbe930a1b39df53fa2d7b31141b780eb5a ]
+
+Patch series "Squashfs: performance improvement and a sanity check".
+
+This patchset adds an additional sanity check when reading regular file
+inodes, and adds support for SEEK_DATA/SEEK_HOLE lseek() whence values.
+
+This patch (of 2):
+
+Add an additional sanity check when reading regular file inodes.
+
+A regular file if the file size is an exact multiple of the filesystem
+block size cannot have a fragment.  This is because by definition a
+fragment block stores tailends which are not a whole block in size.
+
+Link: https://lkml.kernel.org/r/20250923220652.568416-1-phillip@squashfs.org.uk
+Link: https://lkml.kernel.org/r/20250923220652.568416-2-phillip@squashfs.org.uk
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 9f1c14c1de1b ("Squashfs: reject negative file sizes in squashfs_read_inode()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/inode.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -140,8 +140,17 @@ int squashfs_read_inode(struct inode *in
+               if (err < 0)
+                       goto failed_read;
++              inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
++                      /*
++                       * the file cannot have a fragment (tailend) and have a
++                       * file size a multiple of the block size
++                       */
++                      if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++                              err = -EINVAL;
++                              goto failed_read;
++                      }
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+@@ -155,7 +164,6 @@ int squashfs_read_inode(struct inode *in
+               }
+               set_nlink(inode, 1);
+-              inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
+               inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+@@ -184,8 +192,17 @@ int squashfs_read_inode(struct inode *in
+               if (err < 0)
+                       goto failed_read;
++              inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
++                      /*
++                       * the file cannot have a fragment (tailend) and have a
++                       * file size a multiple of the block size
++                       */
++                      if ((inode->i_size & (msblk->block_size - 1)) == 0) {
++                              err = -EINVAL;
++                              goto failed_read;
++                      }
+                       frag_offset = le32_to_cpu(sqsh_ino->offset);
+                       frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+                       if (frag_size < 0) {
+@@ -200,7 +217,6 @@ int squashfs_read_inode(struct inode *in
+               xattr_id = le32_to_cpu(sqsh_ino->xattr);
+               set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+-              inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+               inode->i_op = &squashfs_inode_ops;
+               inode->i_fop = &generic_ro_fops;
+               inode->i_mode |= S_IFREG;
diff --git a/queue-6.1/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch b/queue-6.1/squashfs-reject-negative-file-sizes-in-squashfs_read_inode.patch
new file mode 100644 (file)
index 0000000..31e2480
--- /dev/null
@@ -0,0 +1,48 @@
+From stable+bounces-185516-greg=kroah.com@vger.kernel.org Mon Oct 13 22:47:45 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 16:41:27 -0400
+Subject: Squashfs: reject negative file sizes in squashfs_read_inode()
+To: stable@vger.kernel.org
+Cc: Phillip Lougher <phillip@squashfs.org.uk>, syzbot+f754e01116421e9754b9@syzkaller.appspotmail.com, Amir Goldstein <amir73il@gmail.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251013204127.3599792-2-sashal@kernel.org>
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+[ Upstream commit 9f1c14c1de1bdde395f6cc893efa4f80a2ae3b2b ]
+
+Syskaller reports a "WARNING in ovl_copy_up_file" in overlayfs.
+
+This warning is ultimately caused because the underlying Squashfs file
+system returns a file with a negative file size.
+
+This commit checks for a negative file size and returns EINVAL.
+
+[phillip@squashfs.org.uk: only need to check 64 bit quantity]
+  Link: https://lkml.kernel.org/r/20250926222305.110103-1-phillip@squashfs.org.uk
+Link: https://lkml.kernel.org/r/20250926215935.107233-1-phillip@squashfs.org.uk
+Fixes: 6545b246a2c8 ("Squashfs: inode operations")
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Reported-by: syzbot+f754e01116421e9754b9@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68d580e5.a00a0220.303701.0019.GAE@google.com/
+Cc: Amir Goldstein <amir73il@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/inode.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -193,6 +193,10 @@ int squashfs_read_inode(struct inode *in
+                       goto failed_read;
+               inode->i_size = le64_to_cpu(sqsh_ino->file_size);
++              if (inode->i_size < 0) {
++                      err = -EINVAL;
++                      goto failed_read;
++              }
+               frag = le32_to_cpu(sqsh_ino->fragment);
+               if (frag != SQUASHFS_INVALID_FRAG) {
+                       /*
diff --git a/queue-6.1/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch b/queue-6.1/tracing-fix-race-condition-in-kprobe-initialization-causing-null-pointer-dereference.patch
new file mode 100644 (file)
index 0000000..ef7ea6f
--- /dev/null
@@ -0,0 +1,240 @@
+From stable+bounces-185569-greg=kroah.com@vger.kernel.org Tue Oct 14 05:05:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Oct 2025 23:05:28 -0400
+Subject: tracing: Fix race condition in kprobe initialization causing NULL pointer dereference
+To: stable@vger.kernel.org
+Cc: Yuan Chen <chenyuan@kylinos.cn>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251014030528.3869596-1-sashal@kernel.org>
+
+From: Yuan Chen <chenyuan@kylinos.cn>
+
+[ Upstream commit 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f ]
+
+There is a critical race condition in kprobe initialization that can lead to
+NULL pointer dereference and kernel crash.
+
+[1135630.084782] Unable to handle kernel paging request at virtual address 0000710a04630000
+...
+[1135630.260314] pstate: 404003c9 (nZcv DAIF +PAN -UAO)
+[1135630.269239] pc : kprobe_perf_func+0x30/0x260
+[1135630.277643] lr : kprobe_dispatcher+0x44/0x60
+[1135630.286041] sp : ffffaeff4977fa40
+[1135630.293441] x29: ffffaeff4977fa40 x28: ffffaf015340e400
+[1135630.302837] x27: 0000000000000000 x26: 0000000000000000
+[1135630.312257] x25: ffffaf029ed108a8 x24: ffffaf015340e528
+[1135630.321705] x23: ffffaeff4977fc50 x22: ffffaeff4977fc50
+[1135630.331154] x21: 0000000000000000 x20: ffffaeff4977fc50
+[1135630.340586] x19: ffffaf015340e400 x18: 0000000000000000
+[1135630.349985] x17: 0000000000000000 x16: 0000000000000000
+[1135630.359285] x15: 0000000000000000 x14: 0000000000000000
+[1135630.368445] x13: 0000000000000000 x12: 0000000000000000
+[1135630.377473] x11: 0000000000000000 x10: 0000000000000000
+[1135630.386411] x9 : 0000000000000000 x8 : 0000000000000000
+[1135630.395252] x7 : 0000000000000000 x6 : 0000000000000000
+[1135630.403963] x5 : 0000000000000000 x4 : 0000000000000000
+[1135630.412545] x3 : 0000710a04630000 x2 : 0000000000000006
+[1135630.421021] x1 : ffffaeff4977fc50 x0 : 0000710a04630000
+[1135630.429410] Call trace:
+[1135630.434828]  kprobe_perf_func+0x30/0x260
+[1135630.441661]  kprobe_dispatcher+0x44/0x60
+[1135630.448396]  aggr_pre_handler+0x70/0xc8
+[1135630.454959]  kprobe_breakpoint_handler+0x140/0x1e0
+[1135630.462435]  brk_handler+0xbc/0xd8
+[1135630.468437]  do_debug_exception+0x84/0x138
+[1135630.475074]  el1_dbg+0x18/0x8c
+[1135630.480582]  security_file_permission+0x0/0xd0
+[1135630.487426]  vfs_write+0x70/0x1c0
+[1135630.493059]  ksys_write+0x5c/0xc8
+[1135630.498638]  __arm64_sys_write+0x24/0x30
+[1135630.504821]  el0_svc_common+0x78/0x130
+[1135630.510838]  el0_svc_handler+0x38/0x78
+[1135630.516834]  el0_svc+0x8/0x1b0
+
+kernel/trace/trace_kprobe.c: 1308
+0xffff3df8995039ec <kprobe_perf_func+0x2c>:     ldr     x21, [x24,#120]
+include/linux/compiler.h: 294
+0xffff3df8995039f0 <kprobe_perf_func+0x30>:     ldr     x1, [x21,x0]
+
+kernel/trace/trace_kprobe.c
+1308: head = this_cpu_ptr(call->perf_events);
+1309: if (hlist_empty(head))
+1310:  return 0;
+
+crash> struct trace_event_call -o
+struct trace_event_call {
+  ...
+  [120] struct hlist_head *perf_events;  //(call->perf_event)
+  ...
+}
+
+crash> struct trace_event_call ffffaf015340e528
+struct trace_event_call {
+  ...
+  perf_events = 0xffff0ad5fa89f088, //this value is correct, but x21 = 0
+  ...
+}
+
+Race Condition Analysis:
+
+The race occurs between kprobe activation and perf_events initialization:
+
+  CPU0                                    CPU1
+  ====                                    ====
+  perf_kprobe_init
+    perf_trace_event_init
+      tp_event->perf_events = list;(1)
+      tp_event->class->reg (2)← KPROBE ACTIVE
+                                          Debug exception triggers
+                                          ...
+                                          kprobe_dispatcher
+                                            kprobe_perf_func (tk->tp.flags & TP_FLAG_PROFILE)
+                                              head = this_cpu_ptr(call->perf_events)(3)
+                                              (perf_events is still NULL)
+
+Problem:
+1. CPU0 executes (1) assigning tp_event->perf_events = list
+2. CPU0 executes (2) enabling kprobe functionality via class->reg()
+3. CPU1 triggers and reaches kprobe_dispatcher
+4. CPU1 checks TP_FLAG_PROFILE - condition passes (step 2 completed)
+5. CPU1 calls kprobe_perf_func() and crashes at (3) because
+   call->perf_events is still NULL
+
+CPU1 sees that kprobe functionality is enabled but does not see that
+perf_events has been assigned.
+
+Add pairing read and write memory barriers to guarantee that if CPU1
+sees that kprobe functionality is enabled, it must also see that
+perf_events has been assigned.
+
+Link: https://lore.kernel.org/all/20251001022025.44626-1-chenyuan_fl@163.com/
+
+Fixes: 50d780560785 ("tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+[ Drop fprobe changes + context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_kprobe.c |   11 +++++++----
+ kernel/trace/trace_probe.h  |    9 +++++++--
+ kernel/trace/trace_uprobe.c |   13 +++++++++----
+ 3 files changed, 23 insertions(+), 10 deletions(-)
+
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1715,14 +1715,15 @@ static int kprobe_register(struct trace_
+ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
+ {
+       struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
++      unsigned int flags = trace_probe_load_flag(&tk->tp);
+       int ret = 0;
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      if (flags & TP_FLAG_TRACE)
+               kprobe_trace_func(tk, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret = kprobe_perf_func(tk, regs);
+ #endif
+       return ret;
+@@ -1734,6 +1735,7 @@ kretprobe_dispatcher(struct kretprobe_in
+ {
+       struct kretprobe *rp = get_kretprobe(ri);
+       struct trace_kprobe *tk;
++      unsigned int flags;
+       /*
+        * There is a small chance that get_kretprobe(ri) returns NULL when
+@@ -1746,10 +1748,11 @@ kretprobe_dispatcher(struct kretprobe_in
+       tk = container_of(rp, struct trace_kprobe, rp);
+       raw_cpu_inc(*tk->nhit);
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tk->tp);
++      if (flags & TP_FLAG_TRACE)
+               kretprobe_trace_func(tk, ri, regs);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               kretprobe_perf_func(tk, ri, regs);
+ #endif
+       return 0;       /* We don't tweak kernel, so just return 0 */
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -257,16 +257,21 @@ struct event_file_link {
+       struct list_head                list;
+ };
++static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
++{
++      return smp_load_acquire(&tp->event->flags);
++}
++
+ static inline bool trace_probe_test_flag(struct trace_probe *tp,
+                                        unsigned int flag)
+ {
+-      return !!(tp->event->flags & flag);
++      return !!(trace_probe_load_flag(tp) & flag);
+ }
+ static inline void trace_probe_set_flag(struct trace_probe *tp,
+                                       unsigned int flag)
+ {
+-      tp->event->flags |= flag;
++      smp_store_release(&tp->event->flags, tp->event->flags | flag);
+ }
+ static inline void trace_probe_clear_flag(struct trace_probe *tp,
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -1497,6 +1497,7 @@ static int uprobe_dispatcher(struct upro
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb;
++      unsigned int flags;
+       int ret = 0;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1512,11 +1513,12 @@ static int uprobe_dispatcher(struct upro
+       ucb = prepare_uprobe_buffer(tu, regs);
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               ret |= uprobe_trace_func(tu, regs, ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               ret |= uprobe_perf_func(tu, regs, ucb);
+ #endif
+       uprobe_buffer_put(ucb);
+@@ -1529,6 +1531,7 @@ static int uretprobe_dispatcher(struct u
+       struct trace_uprobe *tu;
+       struct uprobe_dispatch_data udd;
+       struct uprobe_cpu_buffer *ucb;
++      unsigned int flags;
+       tu = container_of(con, struct trace_uprobe, consumer);
+@@ -1541,11 +1544,13 @@ static int uretprobe_dispatcher(struct u
+               return 0;
+       ucb = prepare_uprobe_buffer(tu, regs);
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
++
++      flags = trace_probe_load_flag(&tu->tp);
++      if (flags & TP_FLAG_TRACE)
+               uretprobe_trace_func(tu, func, regs, ucb);
+ #ifdef CONFIG_PERF_EVENTS
+-      if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
++      if (flags & TP_FLAG_PROFILE)
+               uretprobe_perf_func(tu, func, regs, ucb);
+ #endif
+       uprobe_buffer_put(ucb);