git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Feb 2023 11:51:25 +0000 (12:51 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Feb 2023 11:51:25 +0000 (12:51 +0100)
added patches:
arm64-dts-meson-axg-make-mmc-host-controller-interrupts-level-sensitive.patch
arm64-dts-meson-g12-common-make-mmc-host-controller-interrupts-level-sensitive.patch
arm64-dts-meson-gx-make-mmc-host-controller-interrupts-level-sensitive.patch
btrfs-free-device-in-btrfs_close_devices-for-a-single-device-filesystem.patch
btrfs-simplify-update-of-last_dir_index_offset-when-logging-a-directory.patch
ceph-flush-cap-releases-when-the-session-is-flushed.patch
clk-ingenic-jz4760-update-m-n-od-calculation-algorithm.patch
cxl-region-fix-null-pointer-dereference-for-resetting-decoder.patch
cxl-region-fix-passthrough-decoder-detection.patch
drm-amd-pm-add-smu-13.0.7-missing-getpptlimit-message-mapping.patch
drm-amdgpu-use-the-tgid-for-trace_amdgpu_vm_update_ptes.patch
fix-page-corruption-caused-by-racy-check-in-__free_pages.patch
mptcp-be-careful-on-subflow-status-propagation-on-errors.patch
mptcp-do-not-wait-for-bare-sockets-timeout.patch
nvdimm-support-sizeof-struct-page-max_struct_page_size.patch
pinctrl-qcom-sm8450-lpass-lpi-correct-swr_rx_data-group.patch
powerpc-64s-interrupt-fix-interrupt-exit-race-with-security-mitigation-switch.patch
riscv-fixup-race-condition-on-pg_dcache_clean-in-flush_icache_pte.patch
riscv-kprobe-fixup-misaligned-load-text.patch
rtmutex-ensure-that-the-top-waiter-is-always-woken-up.patch
selftests-mptcp-allow-more-slack-for-slow-test-case.patch
selftests-mptcp-stop-tests-earlier.patch
tracing-fix-task_comm_len-in-trace-event-format-file.patch
usb-core-add-quirk-for-alcor-link-ak9563-smartcard-reader.patch
usb-typec-altmodes-displayport-fix-probe-pin-assign-check.patch

26 files changed:
queue-6.1/arm64-dts-meson-axg-make-mmc-host-controller-interrupts-level-sensitive.patch [new file with mode: 0644]
queue-6.1/arm64-dts-meson-g12-common-make-mmc-host-controller-interrupts-level-sensitive.patch [new file with mode: 0644]
queue-6.1/arm64-dts-meson-gx-make-mmc-host-controller-interrupts-level-sensitive.patch [new file with mode: 0644]
queue-6.1/btrfs-free-device-in-btrfs_close_devices-for-a-single-device-filesystem.patch [new file with mode: 0644]
queue-6.1/btrfs-simplify-update-of-last_dir_index_offset-when-logging-a-directory.patch [new file with mode: 0644]
queue-6.1/ceph-flush-cap-releases-when-the-session-is-flushed.patch [new file with mode: 0644]
queue-6.1/clk-ingenic-jz4760-update-m-n-od-calculation-algorithm.patch [new file with mode: 0644]
queue-6.1/cxl-region-fix-null-pointer-dereference-for-resetting-decoder.patch [new file with mode: 0644]
queue-6.1/cxl-region-fix-passthrough-decoder-detection.patch [new file with mode: 0644]
queue-6.1/drm-amd-pm-add-smu-13.0.7-missing-getpptlimit-message-mapping.patch [new file with mode: 0644]
queue-6.1/drm-amdgpu-use-the-tgid-for-trace_amdgpu_vm_update_ptes.patch [new file with mode: 0644]
queue-6.1/fix-page-corruption-caused-by-racy-check-in-__free_pages.patch [new file with mode: 0644]
queue-6.1/mptcp-be-careful-on-subflow-status-propagation-on-errors.patch [new file with mode: 0644]
queue-6.1/mptcp-do-not-wait-for-bare-sockets-timeout.patch [new file with mode: 0644]
queue-6.1/nvdimm-support-sizeof-struct-page-max_struct_page_size.patch [new file with mode: 0644]
queue-6.1/pinctrl-qcom-sm8450-lpass-lpi-correct-swr_rx_data-group.patch [new file with mode: 0644]
queue-6.1/powerpc-64s-interrupt-fix-interrupt-exit-race-with-security-mitigation-switch.patch [new file with mode: 0644]
queue-6.1/riscv-fixup-race-condition-on-pg_dcache_clean-in-flush_icache_pte.patch [new file with mode: 0644]
queue-6.1/riscv-kprobe-fixup-misaligned-load-text.patch [new file with mode: 0644]
queue-6.1/rtmutex-ensure-that-the-top-waiter-is-always-woken-up.patch [new file with mode: 0644]
queue-6.1/selftests-mptcp-allow-more-slack-for-slow-test-case.patch [new file with mode: 0644]
queue-6.1/selftests-mptcp-stop-tests-earlier.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/tracing-fix-task_comm_len-in-trace-event-format-file.patch [new file with mode: 0644]
queue-6.1/usb-core-add-quirk-for-alcor-link-ak9563-smartcard-reader.patch [new file with mode: 0644]
queue-6.1/usb-typec-altmodes-displayport-fix-probe-pin-assign-check.patch [new file with mode: 0644]

diff --git a/queue-6.1/arm64-dts-meson-axg-make-mmc-host-controller-interrupts-level-sensitive.patch b/queue-6.1/arm64-dts-meson-axg-make-mmc-host-controller-interrupts-level-sensitive.patch
new file mode 100644 (file)
index 0000000..addff4c
--- /dev/null
@@ -0,0 +1,51 @@
+From d182bcf300772d8b2e5f43e47fa0ebda2b767cc4 Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 9 Feb 2023 21:10:31 +0100
+Subject: arm64: dts: meson-axg: Make mmc host controller interrupts level-sensitive
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit d182bcf300772d8b2e5f43e47fa0ebda2b767cc4 upstream.
+
+The usage of edge-triggered interrupts led to lost interrupts under load,
+see [0]. This was confirmed to be fixed by using level-triggered
+interrupts.
+The report was about SDIO. However, as the host controller is the same
+for SD and MMC, apply the change to all mmc controller instances.
+
+[0] https://www.spinics.net/lists/linux-mmc/msg73991.html
+
+Fixes: 221cf34bac54 ("ARM64: dts: meson-axg: enable the eMMC controller")
+Reported-by: Peter Suti <peter.suti@streamunlimited.com>
+Tested-by: Vyacheslav Bocharov <adeep@lexina.in>
+Tested-by: Peter Suti <peter.suti@streamunlimited.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Acked-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/c00655d3-02f8-6f5f-4239-ca2412420cad@gmail.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/amlogic/meson-axg.dtsi |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+@@ -1885,7 +1885,7 @@
+                       sd_emmc_b: sd@5000 {
+                               compatible = "amlogic,meson-axg-mmc";
+                               reg = <0x0 0x5000 0x0 0x800>;
+-                              interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
++                              interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                               clocks = <&clkc CLKID_SD_EMMC_B>,
+                                       <&clkc CLKID_SD_EMMC_B_CLK0>,
+@@ -1897,7 +1897,7 @@
+                       sd_emmc_c: mmc@7000 {
+                               compatible = "amlogic,meson-axg-mmc";
+                               reg = <0x0 0x7000 0x0 0x800>;
+-                              interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
++                              interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                               clocks = <&clkc CLKID_SD_EMMC_C>,
+                                       <&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/queue-6.1/arm64-dts-meson-g12-common-make-mmc-host-controller-interrupts-level-sensitive.patch b/queue-6.1/arm64-dts-meson-g12-common-make-mmc-host-controller-interrupts-level-sensitive.patch
new file mode 100644 (file)
index 0000000..d20253a
--- /dev/null
@@ -0,0 +1,60 @@
+From ac8db4cceed218cca21c84f9d75ce88182d8b04f Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 9 Feb 2023 21:11:10 +0100
+Subject: arm64: dts: meson-g12-common: Make mmc host controller interrupts level-sensitive
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit ac8db4cceed218cca21c84f9d75ce88182d8b04f upstream.
+
+The usage of edge-triggered interrupts led to lost interrupts under load,
+see [0]. This was confirmed to be fixed by using level-triggered
+interrupts.
+The report was about SDIO. However, as the host controller is the same
+for SD and MMC, apply the change to all mmc controller instances.
+
+[0] https://www.spinics.net/lists/linux-mmc/msg73991.html
+
+Fixes: 4759fd87b928 ("arm64: dts: meson: g12a: add mmc nodes")
+Tested-by: FUKAUMI Naoki <naoki@radxa.com>
+Tested-by: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+Tested-by: Jerome Brunet <jbrunet@baylibre.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Acked-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/27d89baa-b8fa-baca-541b-ef17a97cde3c@gmail.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -2318,7 +2318,7 @@
+               sd_emmc_a: sd@ffe03000 {
+                       compatible = "amlogic,meson-axg-mmc";
+                       reg = <0x0 0xffe03000 0x0 0x800>;
+-                      interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
++                      interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+                       clocks = <&clkc CLKID_SD_EMMC_A>,
+                                <&clkc CLKID_SD_EMMC_A_CLK0>,
+@@ -2330,7 +2330,7 @@
+               sd_emmc_b: sd@ffe05000 {
+                       compatible = "amlogic,meson-axg-mmc";
+                       reg = <0x0 0xffe05000 0x0 0x800>;
+-                      interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
++                      interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+                       clocks = <&clkc CLKID_SD_EMMC_B>,
+                                <&clkc CLKID_SD_EMMC_B_CLK0>,
+@@ -2342,7 +2342,7 @@
+               sd_emmc_c: mmc@ffe07000 {
+                       compatible = "amlogic,meson-axg-mmc";
+                       reg = <0x0 0xffe07000 0x0 0x800>;
+-                      interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
++                      interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+                       clocks = <&clkc CLKID_SD_EMMC_C>,
+                                <&clkc CLKID_SD_EMMC_C_CLK0>,
diff --git a/queue-6.1/arm64-dts-meson-gx-make-mmc-host-controller-interrupts-level-sensitive.patch b/queue-6.1/arm64-dts-meson-gx-make-mmc-host-controller-interrupts-level-sensitive.patch
new file mode 100644 (file)
index 0000000..5cdd49d
--- /dev/null
@@ -0,0 +1,55 @@
+From 66e45351f7d6798751f98001d1fcd572024d87f0 Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Thu, 9 Feb 2023 21:11:47 +0100
+Subject: arm64: dts: meson-gx: Make mmc host controller interrupts level-sensitive
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit 66e45351f7d6798751f98001d1fcd572024d87f0 upstream.
+
+The usage of edge-triggered interrupts led to lost interrupts under load,
+see [0]. This was confirmed to be fixed by using level-triggered
+interrupts.
+The report was about SDIO. However, as the host controller is the same
+for SD and MMC, apply the change to all mmc controller instances.
+
+[0] https://www.spinics.net/lists/linux-mmc/msg73991.html
+
+Fixes: ef8d2ffedf18 ("ARM64: dts: meson-gxbb: add MMC support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Acked-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/76e042e0-a610-5ed5-209f-c4d7f879df44@gmail.com
+Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/amlogic/meson-gx.dtsi |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -602,21 +602,21 @@
+                       sd_emmc_a: mmc@70000 {
+                               compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+                               reg = <0x0 0x70000 0x0 0x800>;
+-                              interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
++                              interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+                       sd_emmc_b: mmc@72000 {
+                               compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+                               reg = <0x0 0x72000 0x0 0x800>;
+-                              interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
++                              interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+                       sd_emmc_c: mmc@74000 {
+                               compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+                               reg = <0x0 0x74000 0x0 0x800>;
+-                              interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
++                              interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+               };
diff --git a/queue-6.1/btrfs-free-device-in-btrfs_close_devices-for-a-single-device-filesystem.patch b/queue-6.1/btrfs-free-device-in-btrfs_close_devices-for-a-single-device-filesystem.patch
new file mode 100644 (file)
index 0000000..5689c9b
--- /dev/null
@@ -0,0 +1,70 @@
+From 5f58d783fd7823b2c2d5954d1126e702f94bfc4c Mon Sep 17 00:00:00 2001
+From: Anand Jain <anand.jain@oracle.com>
+Date: Fri, 20 Jan 2023 21:47:16 +0800
+Subject: btrfs: free device in btrfs_close_devices for a single device filesystem
+
+From: Anand Jain <anand.jain@oracle.com>
+
+commit 5f58d783fd7823b2c2d5954d1126e702f94bfc4c upstream.
+
+We have this check to make sure we don't accidentally add older devices
+that may have disappeared and re-appeared with an older generation from
+being added to an fs_devices (such as a replace source device). This
+makes sense, we don't want stale disks in our file system. However for
+single disks this doesn't really make sense.
+
+I've seen this in testing, but I was provided a reproducer from a
+project that builds btrfs images on loopback devices. The loopback
+device gets cached with the new generation, and then if it is re-used to
+generate a new file system we'll fail to mount it because the new fs is
+"older" than what we have in cache.
+
+Fix this by freeing the cache when closing the device for a single device
+filesystem. This will ensure that the device path passed to the mount command is
+scanned successfully during the next mount.
+
+CC: stable@vger.kernel.org # 5.10+
+Reported-by: Daan De Meyer <daandemeyer@fb.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Anand Jain <anand.jain@oracle.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c |   16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -408,6 +408,7 @@ void btrfs_free_device(struct btrfs_devi
+ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
+ {
+       struct btrfs_device *device;
++
+       WARN_ON(fs_devices->opened);
+       while (!list_empty(&fs_devices->devices)) {
+               device = list_entry(fs_devices->devices.next,
+@@ -1194,9 +1195,22 @@ void btrfs_close_devices(struct btrfs_fs
+       mutex_lock(&uuid_mutex);
+       close_fs_devices(fs_devices);
+-      if (!fs_devices->opened)
++      if (!fs_devices->opened) {
+               list_splice_init(&fs_devices->seed_list, &list);
++              /*
++               * If the struct btrfs_fs_devices is not assembled with any
++               * other device, it can be re-initialized during the next mount
++               * without the needing device-scan step. Therefore, it can be
++               * fully freed.
++               */
++              if (fs_devices->num_devices == 1) {
++                      list_del(&fs_devices->fs_list);
++                      free_fs_devices(fs_devices);
++              }
++      }
++
++
+       list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
+               close_fs_devices(fs_devices);
+               list_del(&fs_devices->seed_list);
diff --git a/queue-6.1/btrfs-simplify-update-of-last_dir_index_offset-when-logging-a-directory.patch b/queue-6.1/btrfs-simplify-update-of-last_dir_index_offset-when-logging-a-directory.patch
new file mode 100644 (file)
index 0000000..237b5ba
--- /dev/null
@@ -0,0 +1,124 @@
+From 6afaed53cc9adde69d8a76ff5b4d740d5efbc54c Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 10 Jan 2023 14:56:39 +0000
+Subject: btrfs: simplify update of last_dir_index_offset when logging a directory
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 6afaed53cc9adde69d8a76ff5b4d740d5efbc54c upstream.
+
+When logging a directory, we always set the inode's last_dir_index_offset
+to the offset of the last dir index item we found. This is using an extra
+field in the log context structure, and it makes more sense to update it
+only after we insert dir index items, and we could directly update the
+inode's last_dir_index_offset field instead.
+
+So make this simpler by updating the inode's last_dir_index_offset only
+when we actually insert dir index keys in the log tree, and getting rid
+of the last_dir_item_offset field in the log context structure.
+
+Reported-by: David Arendt <admin@prnet.org>
+Link: https://lore.kernel.org/linux-btrfs/ae169fc6-f504-28f0-a098-6fa6a4dfb612@leemhuis.info/
+Reported-by: Maxim Mikityanskiy <maxtram95@gmail.com>
+Link: https://lore.kernel.org/linux-btrfs/Y8voyTXdnPDz8xwY@mail.gmail.com/
+Reported-by: Hunter Wardlaw <wardlawhunter@gmail.com>
+Link: https://bugzilla.suse.com/show_bug.cgi?id=1207231
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=216851
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/tree-log.c |   23 +++++++++++++++++------
+ fs/btrfs/tree-log.h |    2 --
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3607,17 +3607,19 @@ static noinline int insert_dir_log_key(s
+ }
+ static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
+-                               struct btrfs_root *log,
++                               struct btrfs_inode *inode,
+                                struct extent_buffer *src,
+                                struct btrfs_path *dst_path,
+                                int start_slot,
+                                int count)
+ {
++      struct btrfs_root *log = inode->root->log_root;
+       char *ins_data = NULL;
+       struct btrfs_item_batch batch;
+       struct extent_buffer *dst;
+       unsigned long src_offset;
+       unsigned long dst_offset;
++      u64 last_index;
+       struct btrfs_key key;
+       u32 item_size;
+       int ret;
+@@ -3675,6 +3677,19 @@ static int flush_dir_items_batch(struct
+       src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
+       copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
+       btrfs_release_path(dst_path);
++
++      last_index = batch.keys[count - 1].offset;
++      ASSERT(last_index > inode->last_dir_index_offset);
++
++      /*
++       * If for some unexpected reason the last item's index is not greater
++       * than the last index we logged, warn and return an error to fallback
++       * to a transaction commit.
++       */
++      if (WARN_ON(last_index <= inode->last_dir_index_offset))
++              ret = -EUCLEAN;
++      else
++              inode->last_dir_index_offset = last_index;
+ out:
+       kfree(ins_data);
+@@ -3724,7 +3739,6 @@ static int process_dir_items_leaf(struct
+               }
+               di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
+-              ctx->last_dir_item_offset = key.offset;
+               /*
+                * Skip ranges of items that consist only of dir item keys created
+@@ -3787,7 +3801,7 @@ static int process_dir_items_leaf(struct
+       if (batch_size > 0) {
+               int ret;
+-              ret = flush_dir_items_batch(trans, log, src, dst_path,
++              ret = flush_dir_items_batch(trans, inode, src, dst_path,
+                                           batch_start, batch_size);
+               if (ret < 0)
+                       return ret;
+@@ -4075,7 +4089,6 @@ static noinline int log_directory_change
+       min_key = BTRFS_DIR_START_INDEX;
+       max_key = 0;
+-      ctx->last_dir_item_offset = inode->last_dir_index_offset;
+       while (1) {
+               ret = log_dir_items(trans, inode, path, dst_path,
+@@ -4087,8 +4100,6 @@ static noinline int log_directory_change
+               min_key = max_key + 1;
+       }
+-      inode->last_dir_index_offset = ctx->last_dir_item_offset;
+-
+       return 0;
+ }
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -23,8 +23,6 @@ struct btrfs_log_ctx {
+       bool logging_new_delayed_dentries;
+       /* Indicate if the inode being logged was logged before. */
+       bool logged_before;
+-      /* Tracks the last logged dir item/index key offset. */
+-      u64 last_dir_item_offset;
+       struct inode *inode;
+       struct list_head list;
+       /* Only used for fast fsyncs. */
diff --git a/queue-6.1/ceph-flush-cap-releases-when-the-session-is-flushed.patch b/queue-6.1/ceph-flush-cap-releases-when-the-session-is-flushed.patch
new file mode 100644 (file)
index 0000000..3ba5f77
--- /dev/null
@@ -0,0 +1,38 @@
+From e7d84c6a1296d059389f7342d9b4b7defb518d3a Mon Sep 17 00:00:00 2001
+From: Xiubo Li <xiubli@redhat.com>
+Date: Tue, 7 Feb 2023 13:04:52 +0800
+Subject: ceph: flush cap releases when the session is flushed
+
+From: Xiubo Li <xiubli@redhat.com>
+
+commit e7d84c6a1296d059389f7342d9b4b7defb518d3a upstream.
+
+MDS expects the completed cap release prior to responding to the
+session flush for cache drop.
+
+Cc: stable@vger.kernel.org
+Link: http://tracker.ceph.com/issues/38009
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/mds_client.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3662,6 +3662,12 @@ static void handle_session(struct ceph_m
+               break;
+       case CEPH_SESSION_FLUSHMSG:
++              /* flush cap releases */
++              spin_lock(&session->s_cap_lock);
++              if (session->s_num_cap_releases)
++                      ceph_flush_cap_releases(mdsc, session);
++              spin_unlock(&session->s_cap_lock);
++
+               send_flushmsg_ack(mdsc, session, seq);
+               break;
diff --git a/queue-6.1/clk-ingenic-jz4760-update-m-n-od-calculation-algorithm.patch b/queue-6.1/clk-ingenic-jz4760-update-m-n-od-calculation-algorithm.patch
new file mode 100644 (file)
index 0000000..281356b
--- /dev/null
@@ -0,0 +1,77 @@
+From ecfb9f404771dde909ce7743df954370933c3be2 Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Wed, 14 Dec 2022 13:37:04 +0100
+Subject: clk: ingenic: jz4760: Update M/N/OD calculation algorithm
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit ecfb9f404771dde909ce7743df954370933c3be2 upstream.
+
+The previous algorithm was pretty broken.
+
+- The inner loop had a '(m > m_max)' condition, and the value of 'm'
+  would increase in each iteration;
+
+- Each iteration would actually multiply 'm' by two, so it is not needed
+  to re-compute the whole equation at each iteration;
+
+- It would loop until (m & 1) == 0, which means it would loop at most
+  once.
+
+- The outer loop would divide the 'n' value by two at the end of each
+  iteration. This meant that for a 12 MHz parent clock and a 1.2 GHz
+  requested clock, it would first try n=12, then n=6, then n=3, then
+  n=1, none of which would work; the only valid value is n=2 in this
+  case.
+
+Simplify this algorithm with a single for loop, which decrements 'n'
+after each iteration, addressing all of the above problems.
+
+Fixes: bdbfc029374f ("clk: ingenic: Add support for the JZ4760")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Link: https://lore.kernel.org/r/20221214123704.7305-1-paul@crapouillou.net
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/ingenic/jz4760-cgu.c |   18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/drivers/clk/ingenic/jz4760-cgu.c
++++ b/drivers/clk/ingenic/jz4760-cgu.c
+@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct inge
+                      unsigned long rate, unsigned long parent_rate,
+                      unsigned int *pm, unsigned int *pn, unsigned int *pod)
+ {
+-      unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
++      unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
+       /* The frequency after the N divider must be between 1 and 50 MHz. */
+       n = parent_rate / (1 * MHZ);
+@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct inge
+       /* The N divider must be >= 2. */
+       n = clamp_val(n, 2, 1 << pll_info->n_bits);
+-      for (;; n >>= 1) {
+-              od = (unsigned int)-1;
++      rate /= MHZ;
++      parent_rate /= MHZ;
+-              do {
+-                      m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
+-              } while ((m > m_max || m & 1) && (od < 4));
+-
+-              if (od < 4 && m >= 4 && m <= m_max)
+-                      break;
++      for (m = m_max; m >= m_max && n >= 2; n--) {
++              m = rate * n / parent_rate;
++              od = m & 1;
++              m <<= od;
+       }
+       *pm = m;
+-      *pn = n;
++      *pn = n + 1;
+       *pod = 1 << od;
+ }
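
The corrected search is compact enough to step through in user space. The sketch below mirrors the patched loop for the 12 MHz parent / 1.2 GHz example mentioned in the commit message; the m_max and n_max limits and the fout = fin * M / (N * OD) output formula are illustrative assumptions, not values lifted from the JZ4760 driver.

	#include <stdio.h>

	#define MHZ 1000000UL

	int main(void)
	{
		unsigned long rate = 1200 * MHZ;      /* requested rate: 1.2 GHz */
		unsigned long parent_rate = 12 * MHZ; /* parent clock: 12 MHz */
		unsigned int m, n, od = 0;
		unsigned int m_max = 511;             /* placeholder register limit */
		unsigned int n_max = 16;              /* placeholder register limit */

		/* The frequency after the N divider must be between 1 and 50 MHz. */
		n = parent_rate / (1 * MHZ);
		/* The N divider must be >= 2. */
		if (n < 2)
			n = 2;
		if (n > n_max)
			n = n_max;

		rate /= MHZ;
		parent_rate /= MHZ;

		/* Single loop: decrement n until the computed m fits below m_max. */
		for (m = m_max; m >= m_max && n >= 2; n--) {
			m = rate * n / parent_rate;
			od = m & 1;
			m <<= od;
		}

		/* The patched helper returns n + 1 and 1 << od as the dividers. */
		printf("M=%u N=%u OD=%u -> %lu MHz (assuming fout = fin * M / (N * OD))\n",
		       m, n + 1, 1u << od, parent_rate * m / ((n + 1) * (1u << od)));
		return 0;
	}

With these placeholder limits the loop settles on M=500, N=5, OD=1, i.e. 1200 MHz, walking n downward exactly once per iteration instead of halving it.
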
diff --git a/queue-6.1/cxl-region-fix-null-pointer-dereference-for-resetting-decoder.patch b/queue-6.1/cxl-region-fix-null-pointer-dereference-for-resetting-decoder.patch
new file mode 100644 (file)
index 0000000..aa70afb
--- /dev/null
@@ -0,0 +1,81 @@
+From 4fa4302d6dc7de7e8e74dc7405611a2efb4bf54b Mon Sep 17 00:00:00 2001
+From: Fan Ni <fan.ni@samsung.com>
+Date: Thu, 15 Dec 2022 17:09:14 +0000
+Subject: cxl/region: Fix null pointer dereference for resetting decoder
+
+From: Fan Ni <fan.ni@samsung.com>
+
+commit 4fa4302d6dc7de7e8e74dc7405611a2efb4bf54b upstream.
+
+Not all decoders have a reset callback.
+
+The CXL specification allows a host bridge with a single root port to
+have no explicit HDM decoders. Currently the region driver assumes there
+are none.  As such the CXL core creates a special pass through decoder
+instance without a commit/reset callback.
+
+Prior to this patch, the ->reset() callback was called unconditionally when
+calling cxl_region_decode_reset. Thus a configuration with 1 Host Bridge,
+1 Root Port, and one directly attached CXL type 3 device or multiple CXL
+type 3 devices attached to downstream ports of a switch can cause a null
+pointer dereference.
+
+Before the fix, a kernel crash was observed when we destroy the region, and
+a pass through decoder is reset.
+
+The issue can be reproduced as below,
+    1) create a region with a CXL setup which includes a HB with a
+    single root port under which a memdev is attached directly.
+    2) destroy the region with cxl destroy-region regionX -f.
+
+Fixes: 176baefb2eb5 ("cxl/hdm: Commit decoder state to hardware")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Fan Ni <fan.ni@samsung.com>
+Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Tested-by: Gregory Price <gregory.price@memverge.com>
+Reviewed-by: Gregory Price <gregory.price@memverge.com>
+Link: https://lore.kernel.org/r/20221215170909.2650271-1-fan.ni@samsung.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cxl/core/region.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 02f28da519e3..02275e6b621b 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -131,7 +131,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+               struct cxl_port *iter = cxled_to_port(cxled);
+               struct cxl_ep *ep;
+-              int rc;
++              int rc = 0;
+               while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+                       iter = to_cxl_port(iter->dev.parent);
+@@ -143,7 +143,8 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+                       cxl_rr = cxl_rr_load(iter, cxlr);
+                       cxld = cxl_rr->decoder;
+-                      rc = cxld->reset(cxld);
++                      if (cxld->reset)
++                              rc = cxld->reset(cxld);
+                       if (rc)
+                               return rc;
+               }
+@@ -186,7 +187,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
+                            iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+                               cxl_rr = cxl_rr_load(iter, cxlr);
+                               cxld = cxl_rr->decoder;
+-                              cxld->reset(cxld);
++                              if (cxld->reset)
++                                      cxld->reset(cxld);
+                       }
+                       cxled->cxld.reset(&cxled->cxld);
+-- 
+2.39.1
+
diff --git a/queue-6.1/cxl-region-fix-passthrough-decoder-detection.patch b/queue-6.1/cxl-region-fix-passthrough-decoder-detection.patch
new file mode 100644 (file)
index 0000000..1b3d658
--- /dev/null
@@ -0,0 +1,47 @@
+From 711442e29f16f0d39dd0e2460c9baacfccb9d5a7 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Tue, 7 Feb 2023 11:04:30 -0800
+Subject: cxl/region: Fix passthrough-decoder detection
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 711442e29f16f0d39dd0e2460c9baacfccb9d5a7 upstream.
+
+A passthrough decoder is a decoder that maps only 1 target. It is a
+special case because it does not impose any constraints on the
+interleave-math as compared to a decoder with multiple targets. Extend
+the passthrough case to multi-target-capable decoders that only have one
+target selected. I.e. the current code was only considering passthrough
+*ports* which are only a subset of the potential passthrough decoder
+scenarios.
+
+Fixes: e4f6dfa9ef75 ("cxl/region: Fix 'distance' calculation with passthrough ports")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://lore.kernel.org/r/167564540422.847146.13816934143225777888.stgit@dwillia2-xfh.jf.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cxl/core/region.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
+index 02275e6b621b..940f805b1534 100644
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -993,10 +993,10 @@ static int cxl_port_setup_targets(struct cxl_port *port,
+               int i, distance;
+               /*
+-               * Passthrough ports impose no distance requirements between
++               * Passthrough decoders impose no distance requirements between
+                * peers
+                */
+-              if (port->nr_dports == 1)
++              if (cxl_rr->nr_targets == 1)
+                       distance = 0;
+               else
+                       distance = p->nr_targets / cxl_rr->nr_targets;
+-- 
+2.39.1
+
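
The distance rule being changed is easy to see in isolation. In the hedged sketch below the struct and field names merely mirror those in the hunk (they are stand-ins, not the real CXL core types): a decoder mapping a single target is treated as a passthrough and imposes no distance, while a multi-target decoder divides the region's target count by its own.

	#include <stdio.h>

	/* Stand-ins that only mirror the field names used in the hunk. */
	struct region_params { int nr_targets; };   /* targets in the region */
	struct region_ref    { int nr_targets; };   /* targets this decoder maps */

	static int interleave_distance(const struct region_params *p,
				       const struct region_ref *cxl_rr)
	{
		/*
		 * A decoder mapping only one target is a passthrough decoder
		 * and imposes no distance requirements between peers.
		 */
		if (cxl_rr->nr_targets == 1)
			return 0;
		return p->nr_targets / cxl_rr->nr_targets;
	}

	int main(void)
	{
		struct region_params p = { .nr_targets = 4 };
		struct region_ref passthrough = { .nr_targets = 1 };
		struct region_ref two_way = { .nr_targets = 2 };

		printf("passthrough decoder: distance %d\n",
		       interleave_distance(&p, &passthrough));   /* 0 */
		printf("two-target decoder:  distance %d\n",
		       interleave_distance(&p, &two_way));        /* 4 / 2 = 2 */
		return 0;
	}
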
diff --git a/queue-6.1/drm-amd-pm-add-smu-13.0.7-missing-getpptlimit-message-mapping.patch b/queue-6.1/drm-amd-pm-add-smu-13.0.7-missing-getpptlimit-message-mapping.patch
new file mode 100644 (file)
index 0000000..672754f
--- /dev/null
@@ -0,0 +1,35 @@
+From 0e763afcb50814e256ecb780fcc0f3bade2e1a0c Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Fri, 3 Feb 2023 15:33:59 +0800
+Subject: drm/amd/pm: add SMU 13.0.7 missing GetPptLimit message mapping
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit 0e763afcb50814e256ecb780fcc0f3bade2e1a0c upstream.
+
+Add missing GetPptLimit message mapping.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 6.1.x
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index e87db7e02e8a..9e1967d8049e 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
+       MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
+       MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
++      MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 0),
+ };
+ static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
+-- 
+2.39.1
+
diff --git a/queue-6.1/drm-amdgpu-use-the-tgid-for-trace_amdgpu_vm_update_ptes.patch b/queue-6.1/drm-amdgpu-use-the-tgid-for-trace_amdgpu_vm_update_ptes.patch
new file mode 100644 (file)
index 0000000..8894661
--- /dev/null
@@ -0,0 +1,41 @@
+From e53448e0a1efa5133c7db78f1df1f4caf177676b Mon Sep 17 00:00:00 2001
+From: Friedrich Vock <friedrich.vock@gmx.de>
+Date: Thu, 2 Feb 2023 17:21:03 +0100
+Subject: drm/amdgpu: Use the TGID for trace_amdgpu_vm_update_ptes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Friedrich Vock <friedrich.vock@gmx.de>
+
+commit e53448e0a1efa5133c7db78f1df1f4caf177676b upstream.
+
+The pid field corresponds to the result of gettid() in userspace.
+However, userspace cannot reliably attribute PTE events to processes
+with just the thread id. This patch allows userspace to easily
+attribute PTE update events to specific processes by comparing this
+field with the result of getpid().
+
+For attributing events to specific threads, the thread id is also
+contained in the common fields of each trace event.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Friedrich Vock <friedrich.vock@gmx.de>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_
+                       trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+                                                   min(nptes, 32u), dst, incr,
+                                                   upd_flags,
+-                                                  vm->task_info.pid,
++                                                  vm->task_info.tgid,
+                                                   vm->immediate.fence_context);
+                       amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
+                                                  cursor.level, pe_start, dst,
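
The getpid()/gettid() distinction the message relies on can be demonstrated with a small user-space program (unrelated to amdgpu; assumes glibc 2.30+ for gettid() and linking with -lpthread). Every thread of a process reports the same getpid(), the TGID now recorded by the tracepoint, while gettid() differs per thread.

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		(void)arg;
		/* Same process id (TGID), different thread id. */
		printf("worker: getpid()=%d gettid()=%d\n", getpid(), gettid());
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		printf("main:   getpid()=%d gettid()=%d\n", getpid(), gettid());
		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
		return 0;
	}
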
diff --git a/queue-6.1/fix-page-corruption-caused-by-racy-check-in-__free_pages.patch b/queue-6.1/fix-page-corruption-caused-by-racy-check-in-__free_pages.patch
new file mode 100644 (file)
index 0000000..e0b059c
--- /dev/null
@@ -0,0 +1,78 @@
+From 462a8e08e0e6287e5ce13187257edbf24213ed03 Mon Sep 17 00:00:00 2001
+From: David Chen <david.chen@nutanix.com>
+Date: Thu, 9 Feb 2023 17:48:28 +0000
+Subject: Fix page corruption caused by racy check in __free_pages
+
+From: David Chen <david.chen@nutanix.com>
+
+commit 462a8e08e0e6287e5ce13187257edbf24213ed03 upstream.
+
+When we upgraded our kernel, we started seeing some page corruption like
+the following consistently:
+
+  BUG: Bad page state in process ganesha.nfsd  pfn:1304ca
+  page:0000000022261c55 refcount:0 mapcount:-128 mapping:0000000000000000 index:0x0 pfn:0x1304ca
+  flags: 0x17ffffc0000000()
+  raw: 0017ffffc0000000 ffff8a513ffd4c98 ffffeee24b35ec08 0000000000000000
+  raw: 0000000000000000 0000000000000001 00000000ffffff7f 0000000000000000
+  page dumped because: nonzero mapcount
+  CPU: 0 PID: 15567 Comm: ganesha.nfsd Kdump: loaded Tainted: P    B      O      5.10.158-1.nutanix.20221209.el7.x86_64 #1
+  Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 04/05/2016
+  Call Trace:
+   dump_stack+0x74/0x96
+   bad_page.cold+0x63/0x94
+   check_new_page_bad+0x6d/0x80
+   rmqueue+0x46e/0x970
+   get_page_from_freelist+0xcb/0x3f0
+   ? _cond_resched+0x19/0x40
+   __alloc_pages_nodemask+0x164/0x300
+   alloc_pages_current+0x87/0xf0
+   skb_page_frag_refill+0x84/0x110
+   ...
+
+Sometimes, it would also show up as corruption in the free list pointer
+and cause crashes.
+
+After bisecting the issue, we found the issue started from commit
+e320d3012d25 ("mm/page_alloc.c: fix freeing non-compound pages"):
+
+       if (put_page_testzero(page))
+               free_the_page(page, order);
+       else if (!PageHead(page))
+               while (order-- > 0)
+                       free_the_page(page + (1 << order), order);
+
+So the problem is the check PageHead is racy because at this point we
+already dropped our reference to the page.  So even if we came in with
+compound page, the page can already be freed and PageHead can return
+false and we will end up freeing all the tail pages causing double free.
+
+Fixes: e320d3012d25 ("mm/page_alloc.c: fix freeing non-compound pages")
+Link: https://lore.kernel.org/lkml/BYAPR02MB448855960A9656EEA81141FC94D99@BYAPR02MB4488.namprd02.prod.outlook.com/
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Chunwei Chen <david.chen@nutanix.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5640,9 +5640,12 @@ EXPORT_SYMBOL(get_zeroed_page);
+  */
+ void __free_pages(struct page *page, unsigned int order)
+ {
++      /* get PageHead before we drop reference */
++      int head = PageHead(page);
++
+       if (put_page_testzero(page))
+               free_the_page(page, order);
+-      else if (!PageHead(page))
++      else if (!head)
+               while (order-- > 0)
+                       free_the_page(page + (1 << order), order);
+ }
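
The underlying rule, sample any flag you will still need after dropping your reference while the reference is still held, can be sketched outside the kernel. This is an illustrative user-space analogue, not mm/ code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refcount;
		bool is_compound;        /* stands in for the PageHead() flag */
	};

	static void release(struct obj *o)
	{
		/* Sample the flag while our reference still keeps the object alive. */
		bool compound = o->is_compound;

		if (atomic_fetch_sub(&o->refcount, 1) == 1) {
			/* We dropped the last reference: free the whole object. */
			free(o);
		} else if (!compound) {
			/*
			 * Safe: 'compound' was read before the drop. Reading
			 * o->is_compound here instead would be racy, because
			 * another owner may free the object the instant our
			 * reference is gone.
			 */
			printf("extra cleanup for a non-compound object\n");
		}
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		atomic_init(&o->refcount, 2);   /* two owners */
		o->is_compound = false;
		release(o);                     /* first owner drops */
		release(o);                     /* last owner drops and frees */
		return 0;
	}
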
diff --git a/queue-6.1/mptcp-be-careful-on-subflow-status-propagation-on-errors.patch b/queue-6.1/mptcp-be-careful-on-subflow-status-propagation-on-errors.patch
new file mode 100644 (file)
index 0000000..ca41c5a
--- /dev/null
@@ -0,0 +1,63 @@
+From 1249db44a102d9d3541ed7798d4b01ffdcf03524 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 7 Feb 2023 14:04:16 +0100
+Subject: mptcp: be careful on subflow status propagation on errors
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit 1249db44a102d9d3541ed7798d4b01ffdcf03524 upstream.
+
+Currently the subflow error report callback unconditionally
+propagates the fallback subflow status to the owning msk.
+
+If the msk is already orphaned, the above prevents the code
+from correctly tracking the msk moving to the TCP_CLOSE state
+and doing the appropriate cleanup.
+
+All the above causes increasing memory usage over time and
+sporadic self-tests failures.
+
+There is a great deal of infrastructure trying to propagate
+correctly the fallback subflow status to the owning mptcp socket,
+e.g. via mptcp_subflow_eof() and subflow_sched_work_if_closed():
+in the error propagation path we need only to cope with unorphaned
+sockets.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/339
+Fixes: 15cc10453398 ("mptcp: deliver ssk errors to msk")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/subflow.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1344,6 +1344,7 @@ void __mptcp_error_report(struct sock *s
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               int err = sock_error(ssk);
++              int ssk_state;
+               if (!err)
+                       continue;
+@@ -1354,7 +1355,14 @@ void __mptcp_error_report(struct sock *s
+               if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+                       continue;
+-              inet_sk_state_store(sk, inet_sk_state_load(ssk));
++              /* We need to propagate only transition to CLOSE state.
++               * Orphaned socket will see such state change via
++               * subflow_sched_work_if_closed() and that path will properly
++               * destroy the msk as needed.
++               */
++              ssk_state = inet_sk_state_load(ssk);
++              if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
++                      inet_sk_state_store(sk, ssk_state);
+               sk->sk_err = -err;
+               /* This barrier is coupled with smp_rmb() in mptcp_poll() */
diff --git a/queue-6.1/mptcp-do-not-wait-for-bare-sockets-timeout.patch b/queue-6.1/mptcp-do-not-wait-for-bare-sockets-timeout.patch
new file mode 100644 (file)
index 0000000..1afcd01
--- /dev/null
@@ -0,0 +1,64 @@
+From d4e85922e3e7ef2071f91f65e61629b60f3a9cf4 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 7 Feb 2023 14:04:13 +0100
+Subject: mptcp: do not wait for bare sockets' timeout
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit d4e85922e3e7ef2071f91f65e61629b60f3a9cf4 upstream.
+
+If the peer closes all the existing subflows for a given
+mptcp socket and later the application closes it, the current
+implementation let it survive until the timewait timeout expires.
+
+While the above is allowed by the protocol specification it
+consumes resources for almost no reason and additionally
+causes sporadic self-tests failures.
+
+Let's move the mptcp socket to the TCP_CLOSE state when there are
+no alive subflows at close time, so that the allocated resources
+will be freed immediately.
+
+Fixes: e16163b6e2b7 ("mptcp: refactor shutdown and close")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2908,6 +2908,7 @@ bool __mptcp_close(struct sock *sk, long
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       bool do_cancel_work = false;
++      int subflows_alive = 0;
+       sk->sk_shutdown = SHUTDOWN_MASK;
+@@ -2933,6 +2934,8 @@ cleanup:
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow = lock_sock_fast_nested(ssk);
++              subflows_alive += ssk->sk_state != TCP_CLOSE;
++
+               /* since the close timeout takes precedence on the fail one,
+                * cancel the latter
+                */
+@@ -2948,6 +2951,12 @@ cleanup:
+       }
+       sock_orphan(sk);
++      /* all the subflows are closed, only timeout can change the msk
++       * state, let's not keep resources busy for no reasons
++       */
++      if (subflows_alive == 0)
++              inet_sk_state_store(sk, TCP_CLOSE);
++
+       sock_hold(sk);
+       pr_debug("msk=%p state=%d", sk, sk->sk_state);
+       if (mptcp_sk(sk)->token)
diff --git a/queue-6.1/nvdimm-support-sizeof-struct-page-max_struct_page_size.patch b/queue-6.1/nvdimm-support-sizeof-struct-page-max_struct_page_size.patch
new file mode 100644 (file)
index 0000000..1ebd9f1
--- /dev/null
@@ -0,0 +1,168 @@
+From c91d713630848460de8669e6570307b7e559863b Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Wed, 25 Jan 2023 12:23:46 -0800
+Subject: nvdimm: Support sizeof(struct page) > MAX_STRUCT_PAGE_SIZE
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit c91d713630848460de8669e6570307b7e559863b upstream.
+
+Commit 6e9f05dc66f9 ("libnvdimm/pfn_dev: increase MAX_STRUCT_PAGE_SIZE")
+
+...updated MAX_STRUCT_PAGE_SIZE to account for sizeof(struct page)
+potentially doubling in the case of CONFIG_KMSAN=y. Unfortunately this
+doubles the amount of capacity stolen from user addressable capacity for
+everyone, regardless of whether they are using the debug option. Revert
+that change, mandate that MAX_STRUCT_PAGE_SIZE never exceed 64, but
+allow for debug scenarios to proceed with creating debug sized page maps
+with a compile option to support debug scenarios.
+
+Note that this only applies to cases where the page map is permanent,
+i.e. stored in a reservation of the pmem itself ("--map=dev" in "ndctl
+create-namespace" terms). For the "--map=mem" case, since the allocation
+is ephemeral for the lifespan of the namespace, there are no explicit
+restrictions. However, the implicit restriction, of having enough
+available "System RAM" to store the page map for the typically large
+pmem, still applies.
+
+Fixes: 6e9f05dc66f9 ("libnvdimm/pfn_dev: increase MAX_STRUCT_PAGE_SIZE")
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Marco Elver <elver@google.com>
+Reported-by: Jeff Moyer <jmoyer@redhat.com>
+Acked-by: Yu Zhao <yuzhao@google.com>
+Link: https://lore.kernel.org/r/167467815773.463042.7022545814443036382.stgit@dwillia2-xfh.jf.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/Kconfig    | 19 ++++++++++++++++++
+ drivers/nvdimm/nd.h       |  2 +-
+ drivers/nvdimm/pfn_devs.c | 42 +++++++++++++++++++++++++--------------
+ 3 files changed, 47 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
+index 79d93126453d..77b06d54cc62 100644
+--- a/drivers/nvdimm/Kconfig
++++ b/drivers/nvdimm/Kconfig
+@@ -102,6 +102,25 @@ config NVDIMM_KEYS
+       depends on ENCRYPTED_KEYS
+       depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
++config NVDIMM_KMSAN
++      bool
++      depends on KMSAN
++      help
++        KMSAN, and other memory debug facilities, increase the size of
++        'struct page' to contain extra metadata. This collides with
++        the NVDIMM capability to store a potentially
++        larger-than-"System RAM" size 'struct page' array in a
++        reservation of persistent memory rather than limited /
++        precious DRAM. However, that reservation needs to persist for
++        the life of the given NVDIMM namespace. If you are using KMSAN
++        to debug an issue unrelated to NVDIMMs or DAX then say N to this
++        option. Otherwise, say Y but understand that any namespaces
++        (with the page array stored pmem) created with this build of
++        the kernel will permanently reserve and strand excess
++        capacity compared to the CONFIG_KMSAN=n case.
++
++        Select N if unsure.
++
+ config NVDIMM_TEST_BUILD
+       tristate "Build the unit test core"
+       depends on m
+diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
+index 85ca5b4da3cf..ec5219680092 100644
+--- a/drivers/nvdimm/nd.h
++++ b/drivers/nvdimm/nd.h
+@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
+               struct nd_namespace_common *ndns);
+ #if IS_ENABLED(CONFIG_ND_CLAIM)
+ /* max struct page size independent of kernel config */
+-#define MAX_STRUCT_PAGE_SIZE 128
++#define MAX_STRUCT_PAGE_SIZE 64
+ int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
+ #else
+ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index 61af072ac98f..af7d9301520c 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -13,6 +13,8 @@
+ #include "pfn.h"
+ #include "nd.h"
++static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
++
+ static void nd_pfn_release(struct device *dev)
+ {
+       struct nd_region *nd_region = to_nd_region(dev->parent);
+@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+               return -ENXIO;
+       }
+-      /*
+-       * Note, we use 64 here for the standard size of struct page,
+-       * debugging options may cause it to be larger in which case the
+-       * implementation will limit the pfns advertised through
+-       * ->direct_access() to those that are included in the memmap.
+-       */
+       start = nsio->res.start;
+       size = resource_size(&nsio->res);
+       npfns = PHYS_PFN(size - SZ_8K);
+@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+       }
+       end_trunc = start + size - ALIGN_DOWN(start + size, align);
+       if (nd_pfn->mode == PFN_MODE_PMEM) {
++              unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
++
+               /*
+                * The altmap should be padded out to the block size used
+                * when populating the vmemmap. This *should* be equal to
+                * PMD_SIZE for most architectures.
+                *
+-               * Also make sure size of struct page is less than 128. We
+-               * want to make sure we use large enough size here so that
+-               * we don't have a dynamic reserve space depending on
+-               * struct page size. But we also want to make sure we notice
+-               * when we end up adding new elements to struct page.
++               * Also make sure size of struct page is less than
++               * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
++               * face of production kernel configurations that reduce the
++               * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
++               * kernel configurations that increase the 'struct page' size
++               * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
++               * for continuing with the capacity that will be wasted when
++               * reverting to a production kernel configuration. Otherwise,
++               * those configurations are blocked by default.
+                */
+-              BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
+-              offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
+-                      - start;
++              if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
++                      if (page_struct_override)
++                              page_map_size = sizeof(struct page) * npfns;
++                      else {
++                              dev_err(&nd_pfn->dev,
++                                      "Memory debug options prevent using pmem for the page map\n");
++                              return -EINVAL;
++                      }
++              }
++              offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
+       } else if (nd_pfn->mode == PFN_MODE_RAM)
+               offset = ALIGN(start + SZ_8K, align) - start;
+       else
+@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+       pfn_sb->version_minor = cpu_to_le16(4);
+       pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+       pfn_sb->align = cpu_to_le32(nd_pfn->align);
+-      pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
++      if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
++              pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
++      else
++              pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+       pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
+       checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+       pfn_sb->checksum = cpu_to_le64(checksum);
+-- 
+2.39.1
+
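
A quick back-of-the-envelope calculation shows why the 128-byte worst case was expensive for the permanent ("--map=dev") page map. The 1 TiB capacity below is an assumed example, not a figure from the patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long capacity = 1ULL << 40;   /* assumed example: 1 TiB pmem */
		unsigned long long page_size = 4096;        /* 4 KiB pages */
		unsigned long long npfns = capacity / page_size;

		/* Size of the permanent, pmem-resident page map. */
		printf("npfns                  = %llu\n", npfns);
		printf("page map @  64 B/entry = %llu GiB\n", (npfns * 64) >> 30);
		printf("page map @ 128 B/entry = %llu GiB\n", (npfns * 128) >> 30);
		return 0;
	}

Per terabyte of persistent memory this is the difference between a 16 GiB and a 32 GiB permanent reservation, which is the stranded capacity the revert avoids for CONFIG_KMSAN=n builds.
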
diff --git a/queue-6.1/pinctrl-qcom-sm8450-lpass-lpi-correct-swr_rx_data-group.patch b/queue-6.1/pinctrl-qcom-sm8450-lpass-lpi-correct-swr_rx_data-group.patch
new file mode 100644 (file)
index 0000000..4a534d8
--- /dev/null
@@ -0,0 +1,41 @@
+From 5921b250f43870e7d8044ca14e402292ceb3e3a8 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Date: Fri, 3 Feb 2023 17:50:54 +0100
+Subject: pinctrl: qcom: sm8450-lpass-lpi: correct swr_rx_data group
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+commit 5921b250f43870e7d8044ca14e402292ceb3e3a8 upstream.
+
+According to hardware programming guide, the swr_rx_data pin group has
+only two pins (GPIO5 and GPIO6).  This is also visible in "struct
+sm8450_groups" in the driver - GPIO15 does not have swr_rx_data
+function.
+
+Fixes: ec1652fc4d56 ("pinctrl: qcom: Add sm8450 lpass lpi pinctrl driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@linaro.org>
+Link: https://lore.kernel.org/r/20230203165054.390762-1-krzysztof.kozlowski@linaro.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+index c3c8c34148f1..e22d03ce292e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
+ static const char * const swr_tx_clk_groups[] = { "gpio0" };
+ static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+ static const char * const swr_rx_clk_groups[] = { "gpio3" };
+-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
++static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
+ static const char * const dmic1_clk_groups[] = { "gpio6" };
+ static const char * const dmic1_data_groups[] = { "gpio7" };
+ static const char * const dmic2_clk_groups[] = { "gpio8" };
+-- 
+2.39.1
+
diff --git a/queue-6.1/powerpc-64s-interrupt-fix-interrupt-exit-race-with-security-mitigation-switch.patch b/queue-6.1/powerpc-64s-interrupt-fix-interrupt-exit-race-with-security-mitigation-switch.patch
new file mode 100644 (file)
index 0000000..5c8fd8b
--- /dev/null
@@ -0,0 +1,57 @@
+From 2ea31e2e62bbc4d11c411eeb36f1b02841dbcab1 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Mon, 6 Feb 2023 14:22:40 +1000
+Subject: powerpc/64s/interrupt: Fix interrupt exit race with security mitigation switch
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 2ea31e2e62bbc4d11c411eeb36f1b02841dbcab1 upstream.
+
+The RFI and STF security mitigation options can flip the
+interrupt_exit_not_reentrant static branch condition concurrently with
+the interrupt exit code which tests that branch.
+
+Interrupt exit tests this condition to set MSR[EE|RI] for exit, then
+again in the case a soft-masked interrupt is found pending, to recover
+the MSR so the interrupt can be replayed before attempting to exit
+again. If the condition changes between these two tests, the MSR and irq
+soft-mask state will become corrupted, leading to warnings and possible
+crashes. For example, if the branch is initially true and then flips to
+false, MSR[EE] will be 0 but PACA_IRQ_HARD_DIS will be clear, so EE may
+not get enabled, leading to warnings in irq_64.c.
+
+Fixes: 13799748b957 ("powerpc/64: use interrupt restart table to speed up return from interrupt")
+Cc: stable@vger.kernel.org # v5.14+
+Reported-by: Sachin Sant <sachinp@linux.ibm.com>
+Tested-by: Sachin Sant <sachinp@linux.ibm.com>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20230206042240.92103-1-npiggin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/interrupt.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disabl
+  */
+ static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+ {
++      bool must_hard_disable = (exit_must_hard_disable() || !restartable);
++
+       /* This must be done with RI=1 because tracing may touch vmaps */
+       trace_hardirqs_on();
+-      if (exit_must_hard_disable() || !restartable)
++      if (must_hard_disable)
+               __hard_EE_RI_disable();
+ #ifdef CONFIG_PPC64
+       /* This pattern matches prep_irq_for_idle */
+       if (unlikely(lazy_irq_pending_nocheck())) {
+-              if (exit_must_hard_disable() || !restartable) {
++              if (must_hard_disable) {
+                       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+                       __hard_RI_enable();
+               }
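
The race described above comes from evaluating a condition that can flip
concurrently (the static branch plus the restartable flag) at two different
points in the exit path. A minimal userspace sketch of the read-once fix,
with hypothetical names standing in for the kernel helpers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the static branch that can be flipped at runtime. */
    static atomic_bool mitigation_enabled;

    static void interrupt_exit(bool restartable)
    {
        /*
         * Sample the condition exactly once and reuse the snapshot, so both
         * tests in the exit path agree even if the flag flips in between.
         */
        bool must_hard_disable = atomic_load(&mitigation_enabled) || !restartable;

        if (must_hard_disable)
            puts("hard-disable EE/RI for exit");

        /* ... later, if a soft-masked interrupt turns out to be pending ... */
        if (must_hard_disable)
            puts("recover MSR state before replaying the interrupt");
    }

    int main(void)
    {
        atomic_store(&mitigation_enabled, true);
        interrupt_exit(true);
        return 0;
    }
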
diff --git a/queue-6.1/riscv-fixup-race-condition-on-pg_dcache_clean-in-flush_icache_pte.patch b/queue-6.1/riscv-fixup-race-condition-on-pg_dcache_clean-in-flush_icache_pte.patch
new file mode 100644 (file)
index 0000000..c07b3fd
--- /dev/null
@@ -0,0 +1,43 @@
+From 950b879b7f0251317d26bae0687e72592d607532 Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Thu, 26 Jan 2023 22:53:06 -0500
+Subject: riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit 950b879b7f0251317d26bae0687e72592d607532 upstream.
+
+As in commit 588a513d3425 ("arm64: Fix race condition on PG_dcache_clean
+in __sync_icache_dcache()"), RISC-V has the same issue that arm64 had.
+The previous implementation didn't guarantee the correct order of
+operations, which means flush_icache_all() may not have been called yet
+when PG_dcache_clean was already set. That risks skipping an icache
+flush and leaving the page improperly synchronized.
+
+Fixes: 08f051eda33b ("RISC-V: Flush I$ when making a dirty page executable")
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Guo Ren <guoren@kernel.org>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
+Link: https://lore.kernel.org/r/20230127035306.1819561-1-guoren@kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/mm/cacheflush.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -83,8 +83,10 @@ void flush_icache_pte(pte_t pte)
+ {
+       struct page *page = pte_page(pte);
+-      if (!test_and_set_bit(PG_dcache_clean, &page->flags))
++      if (!test_bit(PG_dcache_clean, &page->flags)) {
+               flush_icache_all();
++              set_bit(PG_dcache_clean, &page->flags);
++      }
+ }
+ #endif /* CONFIG_MMU */
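
The fix above is purely about ordering: publish the "clean" flag only after
the icache flush has actually happened, instead of setting it first via
test_and_set_bit(). A userspace sketch of the old versus new ordering, using
a plain atomic flag in place of the page bit:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool dcache_clean;    /* stand-in for the PG_dcache_clean page flag */

    static void flush_icache_all(void)
    {
        puts("icache flushed");
    }

    /* Old order: the flag is published before the flush has run, so another
     * CPU can observe "clean" and skip its own flush too early. */
    static void flush_icache_pte_old(void)
    {
        if (!atomic_exchange(&dcache_clean, 1))
            flush_icache_all();
    }

    /* New order: flush first, only then publish the "clean" state. */
    static void flush_icache_pte_new(void)
    {
        if (!atomic_load(&dcache_clean)) {
            flush_icache_all();
            atomic_store(&dcache_clean, 1);
        }
    }

    int main(void)
    {
        flush_icache_pte_new();   /* flushes, then marks clean */
        flush_icache_pte_old();   /* already clean, nothing to do */
        return 0;
    }
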
diff --git a/queue-6.1/riscv-kprobe-fixup-misaligned-load-text.patch b/queue-6.1/riscv-kprobe-fixup-misaligned-load-text.patch
new file mode 100644 (file)
index 0000000..c8f6fed
--- /dev/null
@@ -0,0 +1,53 @@
+From eb7423273cc9922ee2d05bf660c034d7d515bb91 Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Sat, 4 Feb 2023 01:35:31 -0500
+Subject: riscv: kprobe: Fixup misaligned load text
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit eb7423273cc9922ee2d05bf660c034d7d515bb91 upstream.
+
+The current kprobe code can cause a misaligned load at the probe point.
+Fix it up by using two half-word loads instead.
+
+Fixes: c22b0bcb1dd0 ("riscv: Add kprobes supported")
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Guo Ren <guoren@kernel.org>
+Link: https://lore.kernel.org/linux-riscv/878rhig9zj.fsf@all.your.base.are.belong.to.us/
+Reported-by: Bjorn Topel <bjorn.topel@gmail.com>
+Reviewed-by: Björn Töpel <bjorn@kernel.org>
+Link: https://lore.kernel.org/r/20230204063531.740220-1-guoren@kernel.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/probes/kprobes.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/riscv/kernel/probes/kprobes.c
++++ b/arch/riscv/kernel/probes/kprobes.c
+@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(
+ int __kprobes arch_prepare_kprobe(struct kprobe *p)
+ {
+-      unsigned long probe_addr = (unsigned long)p->addr;
++      u16 *insn = (u16 *)p->addr;
+-      if (probe_addr & 0x1)
++      if ((unsigned long)insn & 0x1)
+               return -EILSEQ;
+       if (!arch_check_kprobe(p))
+               return -EILSEQ;
+       /* copy instruction */
+-      p->opcode = *p->addr;
++      p->opcode = (kprobe_opcode_t)(*insn++);
++      if (GET_INSN_LENGTH(p->opcode) == 4)
++              p->opcode |= (kprobe_opcode_t)(*insn) << 16;
+       /* decode instruction */
+       switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
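
With compressed instructions enabled, a 32-bit opcode may sit on a 2-byte
boundary, which is why the fix above copies it as two half-word loads. A
standalone sketch of that access pattern; the length check mirrors the
RISC-V rule that 32-bit opcodes have both low bits set (a simplification of
GET_INSN_LENGTH):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit RISC-V opcodes have both low bits set; otherwise the
     * instruction is a 16-bit compressed one. */
    static unsigned int insn_length(uint16_t first_half)
    {
        return (first_half & 0x3) == 0x3 ? 4 : 2;
    }

    /* Copy an instruction from @addr, which may be only 2-byte aligned,
     * without ever issuing a (possibly misaligned) 4-byte load. */
    static uint32_t copy_insn(const uint16_t *addr)
    {
        uint32_t opcode = addr[0];

        if (insn_length(addr[0]) == 4)
            opcode |= (uint32_t)addr[1] << 16;
        return opcode;
    }

    int main(void)
    {
        /* c.nop followed by "jalr x0, 0(x1)" (0x00008067) stored as two
         * half-words, so the 32-bit opcode starts on a 2-byte boundary. */
        _Alignas(uint32_t) uint16_t text[3] = { 0x0001, 0x8067, 0x0000 };

        printf("0x%08" PRIx32 "\n", copy_insn(&text[1]));
        return 0;
    }
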
diff --git a/queue-6.1/rtmutex-ensure-that-the-top-waiter-is-always-woken-up.patch b/queue-6.1/rtmutex-ensure-that-the-top-waiter-is-always-woken-up.patch
new file mode 100644 (file)
index 0000000..a965e99
--- /dev/null
@@ -0,0 +1,120 @@
+From db370a8b9f67ae5f17e3d5482493294467784504 Mon Sep 17 00:00:00 2001
+From: Wander Lairson Costa <wander@redhat.com>
+Date: Thu, 2 Feb 2023 09:30:20 -0300
+Subject: rtmutex: Ensure that the top waiter is always woken up
+
+From: Wander Lairson Costa <wander@redhat.com>
+
+commit db370a8b9f67ae5f17e3d5482493294467784504 upstream.
+
+Let L1 and L2 be two spinlocks.
+
+Let T1 be a task holding L1 and blocked on L2. T1, currently, is the top
+waiter of L2.
+
+Let T2 be the task holding L2.
+
+Let T3 be a task trying to acquire L1.
+
+The following events will lead to a state in which the wait queue of L2
+isn't empty, but no task actually holds the lock.
+
+T1                T2                                  T3
+==                ==                                  ==
+
+                                                      spin_lock(L1)
+                                                      | raw_spin_lock(L1->wait_lock)
+                                                      | rtlock_slowlock_locked(L1)
+                                                      | | task_blocks_on_rt_mutex(L1, T3)
+                                                      | | | orig_waiter->lock = L1
+                                                      | | | orig_waiter->task = T3
+                                                      | | | raw_spin_unlock(L1->wait_lock)
+                                                      | | | rt_mutex_adjust_prio_chain(T1, L1, L2, orig_waiter, T3)
+                  spin_unlock(L2)                     | | | |
+                  | rt_mutex_slowunlock(L2)           | | | |
+                  | | raw_spin_lock(L2->wait_lock)    | | | |
+                  | | wakeup(T1)                      | | | |
+                  | | raw_spin_unlock(L2->wait_lock)  | | | |
+                                                      | | | | waiter = T1->pi_blocked_on
+                                                      | | | | waiter == rt_mutex_top_waiter(L2)
+                                                      | | | | waiter->task == T1
+                                                      | | | | raw_spin_lock(L2->wait_lock)
+                                                      | | | | dequeue(L2, waiter)
+                                                      | | | | update_prio(waiter, T1)
+                                                      | | | | enqueue(L2, waiter)
+                                                      | | | | waiter != rt_mutex_top_waiter(L2)
+                                                      | | | | L2->owner == NULL
+                                                      | | | | wakeup(T1)
+                                                      | | | | raw_spin_unlock(L2->wait_lock)
+T1 wakes up
+T1 != top_waiter(L2)
+schedule_rtlock()
+
+If the deadline of T1 is updated before the call to update_prio(), and the
+new deadline is greater than the deadline of the second top waiter, then
+after the requeue, T1 is no longer the top waiter, and the wrong task is
+woken up which will then go back to sleep because it is not the top waiter.
+
+This can be reproduced in PREEMPT_RT with stress-ng:
+
+while true; do
+    stress-ng --sched deadline --sched-period 1000000000 \
+           --sched-runtime 800000000 --sched-deadline \
+           1000000000 --mmapfork 23 -t 20
+done
+
+A similar issue was pointed out by Thomas versus the cases where the top
+waiter drops out early due to a signal or timeout, which is a general issue
+for all regular rtmutex use cases, e.g. futex.
+
+The problematic code is in rt_mutex_adjust_prio_chain():
+
+       // Save the top waiter before dequeue/enqueue
+       prerequeue_top_waiter = rt_mutex_top_waiter(lock);
+
+       rt_mutex_dequeue(lock, waiter);
+       waiter_update_prio(waiter, task);
+       rt_mutex_enqueue(lock, waiter);
+
+       // Lock has no owner?
+       if (!rt_mutex_owner(lock)) {
+               // Top waiter changed
+  ---->                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+  ---->                        wake_up_state(waiter->task, waiter->wake_state);
+
+This only takes into account the case where @waiter becomes the new top
+waiter due to the requeue operation.
+
+But it fails to handle the case where @waiter is no longer the top
+waiter due to the requeue operation.
+
+Ensure that the new top waiter is woken up in all cases so it can take
+over the ownerless lock.
+
+[ tglx: Amend changelog, add Fixes tag ]
+
+Fixes: c014ef69b3ac ("locking/rtmutex: Add wake_state to rt_mutex_waiter")
+Signed-off-by: Wander Lairson Costa <wander@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230117172649.52465-1-wander@redhat.com
+Link: https://lore.kernel.org/r/20230202123020.14844-1-wander@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/rtmutex.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -901,8 +901,9 @@ static int __sched rt_mutex_adjust_prio_
+                * then we need to wake the new top waiter up to try
+                * to get the lock.
+                */
+-              if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+-                      wake_up_state(waiter->task, waiter->wake_state);
++              top_waiter = rt_mutex_top_waiter(lock);
++              if (prerequeue_top_waiter != top_waiter)
++                      wake_up_state(top_waiter->task, top_waiter->wake_state);
+               raw_spin_unlock_irq(&lock->wait_lock);
+               return 0;
+       }
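
The essence of the fix above: after the dequeue/requeue, re-read the top
waiter of the ownerless lock and wake that task, instead of assuming the
requeued @waiter is still on top. A minimal sketch of the corrected wakeup
decision (hypothetical types; not the rtmutex code):

    #include <stdio.h>

    struct waiter {
        const char *task;
    };

    /* Stand-in for rt_mutex_top_waiter(): head of the sorted wait list. */
    static struct waiter *top_waiter_of(struct waiter **queue)
    {
        return queue[0];
    }

    static void wake(struct waiter *w)
    {
        printf("wake %s\n", w->task);
    }

    static void requeue_and_maybe_wake(struct waiter **queue,
                                       struct waiter *prerequeue_top,
                                       int lock_has_owner)
    {
        /* ...dequeue/update-prio/enqueue of the current waiter happened here... */

        if (!lock_has_owner) {
            /* Fixed: wake whoever is on top now, not the requeued waiter. */
            struct waiter *top = top_waiter_of(queue);

            if (prerequeue_top != top)
                wake(top);
        }
    }

    int main(void)
    {
        struct waiter t1 = { "T1" }, t9 = { "T9" };
        struct waiter *queue[] = { &t9, &t1 };   /* T9 ended up on top after requeue */

        requeue_and_maybe_wake(queue, &t1, 0);   /* wakes T9 */
        return 0;
    }
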
diff --git a/queue-6.1/selftests-mptcp-allow-more-slack-for-slow-test-case.patch b/queue-6.1/selftests-mptcp-allow-more-slack-for-slow-test-case.patch
new file mode 100644 (file)
index 0000000..62ea554
--- /dev/null
@@ -0,0 +1,64 @@
+From a635a8c3df66ab68dc088c08a4e9e955e22c0e64 Mon Sep 17 00:00:00 2001
+From: Paolo Abeni <pabeni@redhat.com>
+Date: Tue, 7 Feb 2023 14:04:17 +0100
+Subject: selftests: mptcp: allow more slack for slow test-case
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+commit a635a8c3df66ab68dc088c08a4e9e955e22c0e64 upstream.
+
+A test case frequently fails on some extremely slow VMs: the MPTCP
+transfer completes before the script is able to do all the required
+PM manipulation.
+
+Address the issue in the simplest possible way, by making the
+transfer even slower.
+
+Additionally, dump more info in case of failure to help debug similar
+problems in the future, and initialize the dump_stats variable.
+
+Fixes: e274f7154008 ("selftests: mptcp: add subflow limits test-cases")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/323
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1688,6 +1688,7 @@ chk_subflow_nr()
+       local subflow_nr=$3
+       local cnt1
+       local cnt2
++      local dump_stats
+       if [ -n "${need_title}" ]; then
+               printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
+@@ -1705,7 +1706,12 @@ chk_subflow_nr()
+               echo "[ ok ]"
+       fi
+-      [ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
++      if [ "${dump_stats}" = 1 ]; then
++              ss -N $ns1 -tOni
++              ss -N $ns1 -tOni | grep token
++              ip -n $ns1 mptcp endpoint
++              dump_stats
++      fi
+ }
+ chk_link_usage()
+@@ -3005,7 +3011,7 @@ endpoint_tests()
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-              run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
++              run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 &
+               wait_mpj $ns2
+               pm_nl_del_endpoint $ns2 2 10.0.2.2
diff --git a/queue-6.1/selftests-mptcp-stop-tests-earlier.patch b/queue-6.1/selftests-mptcp-stop-tests-earlier.patch
new file mode 100644 (file)
index 0000000..567e71d
--- /dev/null
@@ -0,0 +1,80 @@
+From 070d6dafacbaa9d1f2e4e3edc263853d194af15e Mon Sep 17 00:00:00 2001
+From: Matthieu Baerts <matthieu.baerts@tessares.net>
+Date: Tue, 7 Feb 2023 14:04:18 +0100
+Subject: selftests: mptcp: stop tests earlier
+
+From: Matthieu Baerts <matthieu.baerts@tessares.net>
+
+commit 070d6dafacbaa9d1f2e4e3edc263853d194af15e upstream.
+
+These 'endpoint' tests from the 'mptcp_join.sh' selftest start a transfer
+in the background and check the status during this transfer.
+
+Once the expected events have been recorded, there is no reason to wait
+for the data transfer to finish. It can be stopped earlier to reduce the
+execution time by more than half.
+
+For these tests, the exchanged data were not verified. Errors, if any,
+were ignored, but that's fine: plenty of other tests are looking at that.
+It is then OK to mute stderr now that we are sure errors will be printed
+(and still ignored), because the transfer is stopped before the end.
+
+Fixes: e274f7154008 ("selftests: mptcp: add subflow limits test-cases")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -472,6 +472,12 @@ kill_wait()
+       wait $1 2>/dev/null
+ }
++kill_tests_wait()
++{
++      kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
++      wait
++}
++
+ pm_nl_set_limits()
+ {
+       local ns=$1
+@@ -2991,7 +2997,7 @@ endpoint_tests()
+               pm_nl_set_limits $ns1 2 2
+               pm_nl_set_limits $ns2 2 2
+               pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+-              run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
++              run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
+               wait_mpj $ns1
+               pm_nl_check_endpoint 1 "creation" \
+@@ -3004,14 +3010,14 @@ endpoint_tests()
+               pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+               pm_nl_check_endpoint 0 "modif is allowed" \
+                       $ns2 10.0.2.2 id 1 flags signal
+-              wait
++              kill_tests_wait
+       fi
+       if reset "delete and re-add"; then
+               pm_nl_set_limits $ns1 1 1
+               pm_nl_set_limits $ns2 1 1
+               pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+-              run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 &
++              run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
+               wait_mpj $ns2
+               pm_nl_del_endpoint $ns2 2 10.0.2.2
+@@ -3021,7 +3027,7 @@ endpoint_tests()
+               pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+               wait_mpj $ns2
+               chk_subflow_nr "" "after re-add" 2
+-              wait
++              kill_tests_wait
+       fi
+ }
index 5112412f281243205b0a796eee4bda6adedc385a..175bf2a46c5a3e2f983f9c7456ceda560b6ea900 100644 (file)
@@ -76,3 +76,28 @@ pinctrl-aspeed-revert-force-to-disable-the-function-.patch
 pinctrl-intel-restore-the-pins-that-used-to-be-in-di.patch
 cifs-fix-use-after-free-in-rdata-read_into_pages.patch
 net-usb-fix-wrong-direction-warning-in-plusb.c.patch
+mptcp-do-not-wait-for-bare-sockets-timeout.patch
+mptcp-be-careful-on-subflow-status-propagation-on-errors.patch
+selftests-mptcp-allow-more-slack-for-slow-test-case.patch
+selftests-mptcp-stop-tests-earlier.patch
+btrfs-simplify-update-of-last_dir_index_offset-when-logging-a-directory.patch
+btrfs-free-device-in-btrfs_close_devices-for-a-single-device-filesystem.patch
+usb-core-add-quirk-for-alcor-link-ak9563-smartcard-reader.patch
+usb-typec-altmodes-displayport-fix-probe-pin-assign-check.patch
+cxl-region-fix-null-pointer-dereference-for-resetting-decoder.patch
+cxl-region-fix-passthrough-decoder-detection.patch
+clk-ingenic-jz4760-update-m-n-od-calculation-algorithm.patch
+pinctrl-qcom-sm8450-lpass-lpi-correct-swr_rx_data-group.patch
+drm-amd-pm-add-smu-13.0.7-missing-getpptlimit-message-mapping.patch
+ceph-flush-cap-releases-when-the-session-is-flushed.patch
+nvdimm-support-sizeof-struct-page-max_struct_page_size.patch
+riscv-fixup-race-condition-on-pg_dcache_clean-in-flush_icache_pte.patch
+riscv-kprobe-fixup-misaligned-load-text.patch
+powerpc-64s-interrupt-fix-interrupt-exit-race-with-security-mitigation-switch.patch
+drm-amdgpu-use-the-tgid-for-trace_amdgpu_vm_update_ptes.patch
+tracing-fix-task_comm_len-in-trace-event-format-file.patch
+rtmutex-ensure-that-the-top-waiter-is-always-woken-up.patch
+arm64-dts-meson-gx-make-mmc-host-controller-interrupts-level-sensitive.patch
+arm64-dts-meson-g12-common-make-mmc-host-controller-interrupts-level-sensitive.patch
+arm64-dts-meson-axg-make-mmc-host-controller-interrupts-level-sensitive.patch
+fix-page-corruption-caused-by-racy-check-in-__free_pages.patch
diff --git a/queue-6.1/tracing-fix-task_comm_len-in-trace-event-format-file.patch b/queue-6.1/tracing-fix-task_comm_len-in-trace-event-format-file.patch
new file mode 100644 (file)
index 0000000..ea5900e
--- /dev/null
@@ -0,0 +1,203 @@
+From b6c7abd1c28a63ad633433d037ee15a1bc3023ba Mon Sep 17 00:00:00 2001
+From: Yafang Shao <laoar.shao@gmail.com>
+Date: Sun, 12 Feb 2023 15:13:03 +0000
+Subject: tracing: Fix TASK_COMM_LEN in trace event format file
+
+From: Yafang Shao <laoar.shao@gmail.com>
+
+commit b6c7abd1c28a63ad633433d037ee15a1bc3023ba upstream.
+
+After commit 3087c61ed2c4 ("tools/testing/selftests/bpf: replace open-coded 16 with TASK_COMM_LEN"),
+the content of the format file under
+/sys/kernel/tracing/events/task/task_newtask was changed from
+  field:char comm[16];    offset:12;    size:16;    signed:0;
+to
+  field:char comm[TASK_COMM_LEN];    offset:12;    size:16;    signed:0;
+
+John reported that this change breaks older versions of perfetto.
+Then Mathieu pointed out that this behavioral change was caused by the
+use of __stringify(_len), which happens to work on macros, but not on enum
+labels. And he also gave the suggestion on how to fix it:
+  :One possible solution to make this more robust would be to extend
+  :struct trace_event_fields with one more field that indicates the length
+  :of an array as an actual integer, without storing it in its stringified
+  :form in the type, and do the formatting in f_show where it belongs.
+
+The result is as follows after this change:
+$ cat /sys/kernel/tracing/events/task/task_newtask/format
+        field:char comm[16];    offset:12;      size:16;        signed:0;
+
+Link: https://lore.kernel.org/lkml/Y+QaZtz55LIirsUO@google.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20230210155921.4610-1-laoar.shao@gmail.com/
+Link: https://lore.kernel.org/linux-trace-kernel/20230212151303.12353-1-laoar.shao@gmail.com
+
+Cc: stable@vger.kernel.org
+Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Cc: Kajetan Puchalski <kajetan.puchalski@arm.com>
+CC: Qais Yousef <qyousef@layalina.io>
+Fixes: 3087c61ed2c4 ("tools/testing/selftests/bpf: replace open-coded 16 with TASK_COMM_LEN")
+Reported-by: John Stultz <jstultz@google.com>
+Debugged-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Suggested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/trace_events.h               |  1 +
+ include/trace/stages/stage4_event_fields.h |  3 +-
+ kernel/trace/trace.h                       |  1 +
+ kernel/trace/trace_events.c                | 39 +++++++++++++++++-----
+ kernel/trace/trace_export.c                |  3 +-
+ 5 files changed, 36 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 4342e996bcdb..0e373222a6df 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -270,6 +270,7 @@ struct trace_event_fields {
+                       const int  align;
+                       const int  is_signed;
+                       const int  filter_type;
++                      const int  len;
+               };
+               int (*define_fields)(struct trace_event_call *);
+       };
+diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h
+index affd541fd25e..b6f679ae21aa 100644
+--- a/include/trace/stages/stage4_event_fields.h
++++ b/include/trace/stages/stage4_event_fields.h
+@@ -26,7 +26,8 @@
+ #define __array(_type, _item, _len) {                                 \
+       .type = #_type"["__stringify(_len)"]", .name = #_item,          \
+       .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \
+-      .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
++      .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER,\
++      .len = _len },
+ #undef __dynamic_array
+ #define __dynamic_array(_type, _item, _len) {                         \
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 4eb6d6b97a9f..085a31b978a5 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1282,6 +1282,7 @@ struct ftrace_event_field {
+       int                     offset;
+       int                     size;
+       int                     is_signed;
++      int                     len;
+ };
+ struct prog_entry;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 33e0b4f8ebe6..6a4696719297 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -114,7 +114,7 @@ trace_find_event_field(struct trace_event_call *call, char *name)
+ static int __trace_define_field(struct list_head *head, const char *type,
+                               const char *name, int offset, int size,
+-                              int is_signed, int filter_type)
++                              int is_signed, int filter_type, int len)
+ {
+       struct ftrace_event_field *field;
+@@ -133,6 +133,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
+       field->offset = offset;
+       field->size = size;
+       field->is_signed = is_signed;
++      field->len = len;
+       list_add(&field->link, head);
+@@ -150,14 +151,28 @@ int trace_define_field(struct trace_event_call *call, const char *type,
+       head = trace_get_fields(call);
+       return __trace_define_field(head, type, name, offset, size,
+-                                  is_signed, filter_type);
++                                  is_signed, filter_type, 0);
+ }
+ EXPORT_SYMBOL_GPL(trace_define_field);
++int trace_define_field_ext(struct trace_event_call *call, const char *type,
++                     const char *name, int offset, int size, int is_signed,
++                     int filter_type, int len)
++{
++      struct list_head *head;
++
++      if (WARN_ON(!call->class))
++              return 0;
++
++      head = trace_get_fields(call);
++      return __trace_define_field(head, type, name, offset, size,
++                                  is_signed, filter_type, len);
++}
++
+ #define __generic_field(type, item, filter_type)                      \
+       ret = __trace_define_field(&ftrace_generic_fields, #type,       \
+                                  #item, 0, 0, is_signed_type(type),   \
+-                                 filter_type);                        \
++                                 filter_type, 0);                     \
+       if (ret)                                                        \
+               return ret;
+@@ -166,7 +181,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
+                                  "common_" #item,                     \
+                                  offsetof(typeof(ent), item),         \
+                                  sizeof(ent.item),                    \
+-                                 is_signed_type(type), FILTER_OTHER); \
++                                 is_signed_type(type), FILTER_OTHER, 0);      \
+       if (ret)                                                        \
+               return ret;
+@@ -1588,12 +1603,17 @@ static int f_show(struct seq_file *m, void *v)
+               seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          field->type, field->name, field->offset,
+                          field->size, !!field->is_signed);
+-      else
+-              seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
++      else if (field->len)
++              seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          (int)(array_descriptor - field->type),
+                          field->type, field->name,
+-                         array_descriptor, field->offset,
++                         field->len, field->offset,
+                          field->size, !!field->is_signed);
++      else
++              seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
++                              (int)(array_descriptor - field->type),
++                              field->type, field->name,
++                              field->offset, field->size, !!field->is_signed);
+       return 0;
+ }
+@@ -2379,9 +2399,10 @@ event_define_fields(struct trace_event_call *call)
+                       }
+                       offset = ALIGN(offset, field->align);
+-                      ret = trace_define_field(call, field->type, field->name,
++                      ret = trace_define_field_ext(call, field->type, field->name,
+                                                offset, field->size,
+-                                               field->is_signed, field->filter_type);
++                                               field->is_signed, field->filter_type,
++                                               field->len);
+                       if (WARN_ON_ONCE(ret)) {
+                               pr_err("error code is %d\n", ret);
+                               break;
+diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
+index d960f6b11b5e..58f3946081e2 100644
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -111,7 +111,8 @@ static void __always_unused ____ftrace_check_##name(void)          \
+ #define __array(_type, _item, _len) {                                 \
+       .type = #_type"["__stringify(_len)"]", .name = #_item,          \
+       .size = sizeof(_type[_len]), .align = __alignof__(_type),       \
+-      is_signed_type(_type), .filter_type = FILTER_OTHER },
++      is_signed_type(_type), .filter_type = FILTER_OTHER,                     \
++      .len = _len },
+ #undef __array_desc
+ #define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
+-- 
+2.39.1
+
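
The underlying gotcha is easy to reproduce in userspace: stringification
happens after macro expansion, so a macro argument becomes its value while
an enum label stays as literal text. A small demo using the same two-level
stringify idiom the kernel uses:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    #define MACRO_LEN 16
    enum { ENUM_LEN = 16 };

    int main(void)
    {
        /* Macros are expanded before stringification... */
        printf("macro: comm[%s]\n", __stringify(MACRO_LEN));   /* comm[16] */
        /* ...but enum labels are just identifiers to the preprocessor. */
        printf("enum:  comm[%s]\n", __stringify(ENUM_LEN));    /* comm[ENUM_LEN] */
        return 0;
    }
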
diff --git a/queue-6.1/usb-core-add-quirk-for-alcor-link-ak9563-smartcard-reader.patch b/queue-6.1/usb-core-add-quirk-for-alcor-link-ak9563-smartcard-reader.patch
new file mode 100644 (file)
index 0000000..edf6027
--- /dev/null
@@ -0,0 +1,38 @@
+From 303e724d7b1e1a0a93daf0b1ab5f7c4f53543b34 Mon Sep 17 00:00:00 2001
+From: Mark Pearson <mpearson-lenovo@squebb.ca>
+Date: Wed, 8 Feb 2023 13:12:23 -0500
+Subject: usb: core: add quirk for Alcor Link AK9563 smartcard reader
+
+From: Mark Pearson <mpearson-lenovo@squebb.ca>
+
+commit 303e724d7b1e1a0a93daf0b1ab5f7c4f53543b34 upstream.
+
+The Alcor Link AK9563 smartcard reader used on some Lenovo platforms
+doesn't work: if LPM is enabled, the reader provides an invalid USB
+config descriptor. Add a quirk to disable LPM for this device.
+
+The fix was verified on the Lenovo P16 G1 and T14 G3.
+
+Tested-by: Miroslav Zatko <mzatko@mirexoft.com>
+Tested-by: Dennis Wassenberg <dennis.wassenberg@secunet.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Dennis Wassenberg <dennis.wassenberg@secunet.com>
+Signed-off-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://lore.kernel.org/r/20230208181223.1092654-1-mpearson-lenovo@squebb.ca
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/core/quirks.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -526,6 +526,9 @@ static const struct usb_device_id usb_qu
+       /* DJI CineSSD */
+       { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
++      /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
++      { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
++
+       /* DELL USB GEN2 */
+       { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
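
For context, the quirk list is simply a VID:PID match that attaches a flags
word such as "no LPM" to the device. A stripped-down sketch of that lookup
pattern (illustrative flag value and structure, not the USB core's types):

    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK_NO_LPM (1u << 0)   /* illustrative flag, not the kernel value */

    struct quirk {
        uint16_t vid, pid;
        uint32_t flags;
    };

    static const struct quirk quirks[] = {
        { 0x2ce3, 0x9563, QUIRK_NO_LPM },   /* Alcor Link AK9563 */
    };

    static uint32_t quirk_flags(uint16_t vid, uint16_t pid)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (quirks[i].vid == vid && quirks[i].pid == pid)
                return quirks[i].flags;
        return 0;
    }

    int main(void)
    {
        printf("flags=%#x\n", (unsigned)quirk_flags(0x2ce3, 0x9563));
        return 0;
    }
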
diff --git a/queue-6.1/usb-typec-altmodes-displayport-fix-probe-pin-assign-check.patch b/queue-6.1/usb-typec-altmodes-displayport-fix-probe-pin-assign-check.patch
new file mode 100644 (file)
index 0000000..bac7b2e
--- /dev/null
@@ -0,0 +1,53 @@
+From 54e5c00a4eb0a4c663445b245f641bbfab142430 Mon Sep 17 00:00:00 2001
+From: Prashant Malani <pmalani@chromium.org>
+Date: Wed, 8 Feb 2023 20:53:19 +0000
+Subject: usb: typec: altmodes/displayport: Fix probe pin assign check
+
+From: Prashant Malani <pmalani@chromium.org>
+
+commit 54e5c00a4eb0a4c663445b245f641bbfab142430 upstream.
+
+While checking the Pin Assignments of the port and partner during probe,
+we don't take into account whether the peripheral is a plug or a receptacle.
+
+This manifests itself in a mode entry failure on certain docks and
+dongles with captive cables. For instance, the Startech.com Type-C to DP
+dongle (Model #CDP2DP) advertises its DP VDO as 0x405. This would fail
+the Pin Assignment compatibility check, even though the dongle supports
+Pin Assignment C as a UFP.
+
+Update the check to use the correct DP Pin Assign macros that
+take the peripheral's receptacle bit into account.
+
+Fixes: c1e5c2f0cb8a ("usb: typec: altmodes/displayport: correct pin assignment for UFP receptacles")
+Cc: stable@vger.kernel.org
+Reported-by: Diana Zigterman <dzigterman@chromium.org>
+Signed-off-by: Prashant Malani <pmalani@chromium.org>
+Link: https://lore.kernel.org/r/20230208205318.131385-1-pmalani@chromium.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/altmodes/displayport.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 9a6860285fbe..50b24096eb7f 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -535,10 +535,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
+       /* FIXME: Port can only be DFP_U. */
+       /* Make sure we have compatiple pin configurations */
+-      if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
+-            DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
+-          !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
+-            DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
++      if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
++            DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
++          !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
++            DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
+               return -ENODEV;
+       ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
+-- 
+2.39.1
+
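
A rough illustration of why the receptacle bit matters for the check above:
the same capability fields describe different roles depending on whether the
partner exposes a plug or a receptacle, so the lookup has to be swapped
accordingly. This sketch uses a made-up field layout purely to show the
shape of the check, not the real DP VDO encoding:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Made-up, simplified capability word: NOT the real DP alt mode VDO. */
    struct dp_caps {
        uint8_t dfp_d_pins;   /* pin assignments advertised for the DFP_D role */
        uint8_t ufp_d_pins;   /* pin assignments advertised for the UFP_D role */
        bool receptacle;      /* plug vs receptacle changes how the fields apply */
    };

    /* Pin assignments usable as UFP_D, with the plug/receptacle distinction
     * folded in (mirrors the idea behind the fixed macros). */
    static uint8_t ufp_d_pin_assign(const struct dp_caps *c)
    {
        return c->receptacle ? c->ufp_d_pins : c->dfp_d_pins;
    }

    static uint8_t dfp_d_pin_assign(const struct dp_caps *c)
    {
        return c->receptacle ? c->dfp_d_pins : c->ufp_d_pins;
    }

    static bool pin_assignments_compatible(const struct dp_caps *port,
                                           const struct dp_caps *partner)
    {
        return (dfp_d_pin_assign(port) & ufp_d_pin_assign(partner)) ||
               (ufp_d_pin_assign(port) & dfp_d_pin_assign(partner));
    }

    int main(void)
    {
        struct dp_caps port    = { .dfp_d_pins = 0x04, .receptacle = true };
        struct dp_caps captive = { .dfp_d_pins = 0x04, .receptacle = false };

        puts(pin_assignments_compatible(&port, &captive) ? "enter mode" : "reject");
        return 0;
    }
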