git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff

Fixes for 6.1

author     Sasha Levin <sashal@kernel.org>
           Wed, 15 Feb 2023 16:33:43 +0000 (11:33 -0500)
committer  Sasha Levin <sashal@kernel.org>
           Wed, 15 Feb 2023 16:33:43 +0000 (11:33 -0500)

Signed-off-by: Sasha Levin <sashal@kernel.org>
48 files changed:
queue-6.1/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch [new file with mode: 0644]
queue-6.1/alsa-usb-audio-add-fixed_rate-quirk-for-jbl-quantum6.patch [new file with mode: 0644]
queue-6.1/asoc-amd-yc-add-dmi-support-for-new-acer-emdoor-plat.patch [new file with mode: 0644]
queue-6.1/asoc-amd-yc-add-xiaomi-redmi-book-pro-15-2022-into-d.patch [new file with mode: 0644]
queue-6.1/asoc-cs42l56-fix-dt-probe.patch [new file with mode: 0644]
queue-6.1/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch [new file with mode: 0644]
queue-6.1/asoc-intel-sof_nau8825-always-set-dpcm_capture-for-a.patch [new file with mode: 0644]
queue-6.1/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch [new file with mode: 0644]
queue-6.1/asoc-intel-sof_ssp_amp-always-set-dpcm_capture-for-a.patch [new file with mode: 0644]
queue-6.1/asoc-sof-sof-audio-start-with-the-right-widget-type.patch [new file with mode: 0644]
queue-6.1/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch [new file with mode: 0644]
queue-6.1/btrfs-lock-the-inode-in-shared-mode-before-starting-.patch [new file with mode: 0644]
queue-6.1/btrfs-move-the-auto-defrag-code-to-defrag.c.patch [new file with mode: 0644]
queue-6.1/ceph-blocklist-the-kclient-when-receiving-corrupted-.patch [new file with mode: 0644]
queue-6.1/ceph-move-mount-state-enum-to-super.h.patch [new file with mode: 0644]
queue-6.1/dma-buf-add-unlocked-variant-of-vmapping-functions.patch [new file with mode: 0644]
queue-6.1/drm-amd-display-add-missing-brackets-in-calculation.patch [new file with mode: 0644]
queue-6.1/drm-amd-display-adjust-downscaling-limits-for-dcn314.patch [new file with mode: 0644]
queue-6.1/drm-amd-display-properly-handle-additional-cases-whe.patch [new file with mode: 0644]
queue-6.1/drm-amd-display-reset-dmub-mailbox-sw-state-after-hw.patch [new file with mode: 0644]
queue-6.1/drm-amd-display-unassign-does_plane_fit_in_mall-func.patch [new file with mode: 0644]
queue-6.1/drm-amdgpu-enable-hdp-sd-for-gfx-11.0.3.patch [new file with mode: 0644]
queue-6.1/drm-amdgpu-enable-vclk-dclk-node-for-gc11.0.3.patch [new file with mode: 0644]
queue-6.1/drm-client-fix-circular-reference-counting-issue.patch [new file with mode: 0644]
queue-6.1/drm-client-prevent-null-dereference-in-drm_client_bu.patch [new file with mode: 0644]
queue-6.1/drm-client-switch-drm_client_buffer_delete-to-unlock.patch [new file with mode: 0644]
queue-6.1/drm-gem-take-reservation-lock-for-vmap-vunmap-operat.patch [new file with mode: 0644]
queue-6.1/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch [new file with mode: 0644]
queue-6.1/fscache-use-clear_and_wake_up_bit-in-fscache_create_.patch [new file with mode: 0644]
queue-6.1/mptcp-deduplicate-error-paths-on-endpoint-creation.patch [new file with mode: 0644]
queue-6.1/mptcp-fix-locking-for-in-kernel-listener-creation.patch [new file with mode: 0644]
queue-6.1/mptcp-fix-locking-for-setsockopt-corner-case.patch [new file with mode: 0644]
queue-6.1/mptcp-sockopt-make-tcp_fastopen_connect-generic.patch [new file with mode: 0644]
queue-6.1/net-ethernet-mtk_eth_soc-avoid-truncating-allocation.patch [new file with mode: 0644]
queue-6.1/net-rose-fix-to-not-accept-on-connected-socket.patch [new file with mode: 0644]
queue-6.1/net-sched-sch-bounds-check-priority.patch [new file with mode: 0644]
queue-6.1/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch [new file with mode: 0644]
queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch [new file with mode: 0644]
queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch-7477 [new file with mode: 0644]
queue-6.1/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch [new file with mode: 0644]
queue-6.1/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch [new file with mode: 0644]
queue-6.1/powerpc-64-fix-perf-profiling-asynchronous-interrupt.patch [new file with mode: 0644]
queue-6.1/s390-decompressor-specify-__decompress-buf-len-to-av.patch [new file with mode: 0644]
queue-6.1/selftest-net-improve-ipv6_tclass-ipv6_hoplimit-tests.patch [new file with mode: 0644]
queue-6.1/selftests-bpf-verify-copy_register_state-preserves-p.patch [new file with mode: 0644]
queue-6.1/series [new file with mode: 0644]
queue-6.1/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch [new file with mode: 0644]
queue-6.1/vdpa-ifcvf-do-proper-cleanup-if-ifcvf-init-fails.patch [new file with mode: 0644]

diff --git a/queue-6.1/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch b/queue-6.1/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch
new file mode 100644 (file)
index 0000000..67d78fa
--- /dev/null
@@ -0,0 +1,71 @@
+From cd6c9f969c741ca52de7cb7dbad87bb995626bf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 15:32:35 +0100
+Subject: ALSA: hda: Do not unset preset when cleaning up codec
+
+From: Cezary Rojewski <cezary.rojewski@intel.com>
+
+[ Upstream commit 87978e6ad45a16835cc58234451111091be3c59a ]
+
+Several functions that take part in codec's initialization and removal
+are re-used by ASoC codec drivers implementations. Drivers mimic the
+behavior of hda_codec_driver_probe/remove() found in
+sound/pci/hda/hda_bind.c with their component->probe/remove() instead.
+
+One of the reasons for that is the expectation of
+snd_hda_codec_device_new() to receive a valid pointer to an instance of
+struct snd_card. This expectation can be met only once sound card
+components probing commences.
+
+As an ASoC sound card may be unbound without the codec device actually
+being removed from the system, unsetting ->preset in
+snd_hda_codec_cleanup_for_unbind() interferes with the module unload ->
+load scenario and causes a null-ptr-deref. The preset is assigned only
+once, during device/driver matching, whereas the ASoC codec driver's
+module may be reloaded several times throughout the lifetime of an
+audio stack.
+
+Suggested-by: Takashi Iwai <tiwai@suse.com>
+Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
+Link: https://lore.kernel.org/r/20230119143235.1159814-1-cezary.rojewski@intel.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_bind.c  | 2 ++
+ sound/pci/hda/hda_codec.c | 1 -
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 1a868dd9dc4b6..890c2f7c33fc2 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -144,6 +144,7 @@ static int hda_codec_driver_probe(struct device *dev)
+  error:
+       snd_hda_codec_cleanup_for_unbind(codec);
++      codec->preset = NULL;
+       return err;
+ }
+@@ -166,6 +167,7 @@ static int hda_codec_driver_remove(struct device *dev)
+       if (codec->patch_ops.free)
+               codec->patch_ops.free(codec);
+       snd_hda_codec_cleanup_for_unbind(codec);
++      codec->preset = NULL;
+       module_put(dev->driver->owner);
+       return 0;
+ }
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index edd653ece70d7..ac1cc7c5290e3 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -795,7 +795,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
+       snd_array_free(&codec->cvt_setups);
+       snd_array_free(&codec->spdif_out);
+       snd_array_free(&codec->verbs);
+-      codec->preset = NULL;
+       codec->follower_dig_outs = NULL;
+       codec->spdif_status_reset = 0;
+       snd_array_free(&codec->mixers);
+-- 
+2.39.0
+
diff --git a/queue-6.1/alsa-usb-audio-add-fixed_rate-quirk-for-jbl-quantum6.patch b/queue-6.1/alsa-usb-audio-add-fixed_rate-quirk-for-jbl-quantum6.patch
new file mode 100644 (file)
index 0000000..732e17f
--- /dev/null
@@ -0,0 +1,36 @@
+From a8c899920e0aaa23872b46fac2ca5f5976afce41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jan 2023 17:59:47 +0100
+Subject: ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit dfd5fe19db7dc7006642f8109ee8965e5d031897 ]
+
+JBL Quantum610 Wireless (0ecb:205c) requires the same workaround that
+was used for JBL Quantum810 for limiting the sample rate.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216798
+Link: https://lore.kernel.org/r/20230118165947.22317-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/quirks.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 3d13fdf7590cd..3ecd1ba7fd4b1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2152,6 +2152,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+                  QUIRK_FLAG_IFACE_SKIP_CLOSE),
++      DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
++                 QUIRK_FLAG_FIXED_RATE),
+       DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+                  QUIRK_FLAG_FIXED_RATE),
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-amd-yc-add-dmi-support-for-new-acer-emdoor-plat.patch b/queue-6.1/asoc-amd-yc-add-dmi-support-for-new-acer-emdoor-plat.patch
new file mode 100644 (file)
index 0000000..782194e
--- /dev/null
@@ -0,0 +1,48 @@
+From 6b0eb93fc1650498ee237a7e712e75b4dd189423 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Jan 2023 15:51:23 +0530
+Subject: ASoC: amd: yc: Add DMI support for new acer/emdoor platforms
+
+From: Syed Saba Kareem <Syed.SabaKareem@amd.com>
+
+[ Upstream commit 7fd26a27680aa9032920f798a5a8b38a2c61075f ]
+
+Adding DMI entries to support new acer/emdoor platforms.
+
+Suggested-by: shanshengwang <shansheng.wang@amd.com>
+Signed-off-by: Syed Saba Kareem <Syed.SabaKareem@amd.com>
+Link: https://lore.kernel.org/r/20230111102130.2276391-1-Syed.SabaKareem@amd.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 0d283e41f66dc..00fb976e0b81e 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -234,6 +234,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
+               }
+       },
++      {
++              .driver_data = &acp6x_card,
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "RB"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Swift SFA16-41"),
++              }
++      },
++      {
++              .driver_data = &acp6x_card,
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "IRBIS"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"),
++              }
++      },
+       {}
+ };
+-- 
+2.39.0
+
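For context, a table like yc_acp_quirk_table above is consulted once at machine-driver probe time through the generic DMI helper. The sketch below shows that lookup in minimal form; dmi_first_match() and struct dmi_system_id are the real kernel APIs, while the surrounding function name and table argument are illustrative stand-ins, not the actual acp6x-mach code.

#include <linux/dmi.h>
#include <sound/soc.h>

/* Minimal sketch: resolve the sound card to register from a DMI quirk
 * table such as the one extended above. Returns NULL when the running
 * platform matches no entry, i.e. no quirk applies.
 */
static struct snd_soc_card *acp6x_lookup_card(const struct dmi_system_id *table)
{
	const struct dmi_system_id *dmi_id;

	dmi_id = dmi_first_match(table);
	if (!dmi_id)
		return NULL;

	return dmi_id->driver_data;	/* e.g. &acp6x_card */
}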
diff --git a/queue-6.1/asoc-amd-yc-add-xiaomi-redmi-book-pro-15-2022-into-d.patch b/queue-6.1/asoc-amd-yc-add-xiaomi-redmi-book-pro-15-2022-into-d.patch
new file mode 100644 (file)
index 0000000..23adba3
--- /dev/null
@@ -0,0 +1,42 @@
+From d037810060a36b5217afe9be50ccf6f9bc79e6d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Jan 2023 01:51:06 +0800
+Subject: ASoC: amd: yc: Add Xiaomi Redmi Book Pro 15 2022 into DMI table
+
+From: fengwk <fengwk94@gmail.com>
+
+[ Upstream commit dcff8b7ca92d724bdaf474a3fa37a7748377813a ]
+
+This model requires an additional detection quirk to enable the
+internal microphone - BIOS doesn't seem to support AcpDmicConnected
+(nothing in acpidump output).
+
+Signed-off-by: fengwk <fengwk94@gmail.com>
+Link: https://lore.kernel.org/r/Y8wmCutc74j/tyHP@arch
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/amd/yc/acp6x-mach.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 00fb976e0b81e..36314753923b8 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -227,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
+               }
+       },
++      {
++              .driver_data = &acp6x_card,
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"),
++              }
++      },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-cs42l56-fix-dt-probe.patch b/queue-6.1/asoc-cs42l56-fix-dt-probe.patch
new file mode 100644 (file)
index 0000000..9b0b812
--- /dev/null
@@ -0,0 +1,56 @@
+From 20e88054b5dab495960c367cbe730b7a3e2de1ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 17:21:24 +0100
+Subject: ASoC: cs42l56: fix DT probe
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit e18c6da62edc780e4f4f3c9ce07bdacd69505182 ]
+
+While looking through legacy platform data users, I noticed that
+the DT probing never uses data from the DT properties, as the
+platform_data structure gets overwritten directly after it
+is initialized.
+
+There have never been any boards defining the platform_data in
+the mainline kernel either, so this driver so far only worked
+with patched kernels or with the default values.
+
+For the benefit of possible downstream users, fix the DT probe
+by no longer overwriting the data.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20230126162203.2986339-1-arnd@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs42l56.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
+index 26066682c983e..3b0e715549c9c 100644
+--- a/sound/soc/codecs/cs42l56.c
++++ b/sound/soc/codecs/cs42l56.c
+@@ -1191,18 +1191,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client)
+       if (pdata) {
+               cs42l56->pdata = *pdata;
+       } else {
+-              pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+-                                   GFP_KERNEL);
+-              if (!pdata)
+-                      return -ENOMEM;
+-
+               if (i2c_client->dev.of_node) {
+                       ret = cs42l56_handle_of_data(i2c_client,
+                                                    &cs42l56->pdata);
+                       if (ret != 0)
+                               return ret;
+               }
+-              cs42l56->pdata = *pdata;
+       }
+       if (cs42l56->pdata.gpio_nreset) {
+-- 
+2.39.0
+
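The overwrite being removed follows a common probe anti-pattern; the sketch below condenses the pre-patch flow into one readable function. The example_* names are illustrative stand-ins rather than the actual cs42l56 symbols; devm_kzalloc() and the i2c_client/of_node usage are the real kernel APIs.

/* Sketch of the buggy flow fixed above: DT properties are parsed
 * straight into priv->pdata, then clobbered by copying the freshly
 * zero-allocated pdata over it, so the DT values never take effect.
 */
static int example_i2c_probe(struct i2c_client *client,
			     struct example_private *priv,
			     struct example_platform_data *pdata)
{
	int ret;

	if (pdata) {
		priv->pdata = *pdata;			/* legacy board-file data */
	} else {
		pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata)
			return -ENOMEM;

		if (client->dev.of_node) {
			ret = example_handle_of_data(client, &priv->pdata);
			if (ret)
				return ret;		/* priv->pdata now holds DT values */
		}
		priv->pdata = *pdata;			/* BUG: overwrites them with zeroes */
	}
	return 0;
}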
diff --git a/queue-6.1/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch b/queue-6.1/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch
new file mode 100644 (file)
index 0000000..c2bcd31
--- /dev/null
@@ -0,0 +1,49 @@
+From 55a553186c127efcd858977dce85360e7cf6a203 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 18:34:57 +0200
+Subject: ASoC: Intel: sof_cs42l42: always set dpcm_capture for amplifiers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit e0a52220344ab7defe25b9cdd58fe1dc1122e67c ]
+
+The amplifier may provide hardware support for I/V feedback, or
+alternatively the firmware may generate an echo reference attached to
+the SSP and dailink used for the amplifier.
+
+To avoid any issues with invalid/NULL substreams in the latter case,
+always unconditionally set dpcm_capture.
+
+Link: https://github.com/thesofproject/linux/issues/4083
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230119163459.2235843-3-kai.vehmanen@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/sof_cs42l42.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c
+index e38bd2831e6ac..e9d190cb13b0a 100644
+--- a/sound/soc/intel/boards/sof_cs42l42.c
++++ b/sound/soc/intel/boards/sof_cs42l42.c
+@@ -336,6 +336,9 @@ static int create_spk_amp_dai_links(struct device *dev,
+       links[*id].platforms = platform_component;
+       links[*id].num_platforms = ARRAY_SIZE(platform_component);
+       links[*id].dpcm_playback = 1;
++      /* firmware-generated echo reference */
++      links[*id].dpcm_capture = 1;
++
+       links[*id].no_pcm = 1;
+       links[*id].cpus = &cpus[*id];
+       links[*id].num_cpus = 1;
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-intel-sof_nau8825-always-set-dpcm_capture-for-a.patch b/queue-6.1/asoc-intel-sof_nau8825-always-set-dpcm_capture-for-a.patch
new file mode 100644 (file)
index 0000000..c18e5c0
--- /dev/null
@@ -0,0 +1,58 @@
+From 8a531cca8a9e899be8b488702679040233fa5752 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 18:34:58 +0200
+Subject: ASoC: Intel: sof_nau8825: always set dpcm_capture for amplifiers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit 36a71a0eb7cdb5ccf4b0214dbd41ab00dff18c7f ]
+
+The amplifier may provide hardware support for I/V feedback, or
+alternatively the firmware may generate an echo reference attached to
+the SSP and dailink used for the amplifier.
+
+To avoid any issues with invalid/NULL substreams in the latter case,
+always unconditionally set dpcm_capture.
+
+Link: https://github.com/thesofproject/linux/issues/4083
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230119163459.2235843-4-kai.vehmanen@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/sof_nau8825.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/intel/boards/sof_nau8825.c b/sound/soc/intel/boards/sof_nau8825.c
+index 009a41fbefa10..0c723d4d2d63b 100644
+--- a/sound/soc/intel/boards/sof_nau8825.c
++++ b/sound/soc/intel/boards/sof_nau8825.c
+@@ -479,8 +479,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+                       links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+                       links[id].init = max_98373_spk_codec_init;
+                       links[id].ops = &max_98373_ops;
+-                      /* feedback stream */
+-                      links[id].dpcm_capture = 1;
+               } else if (sof_nau8825_quirk &
+                               SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+                       max_98360a_dai_link(&links[id]);
+@@ -493,6 +491,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+               links[id].platforms = platform_component;
+               links[id].num_platforms = ARRAY_SIZE(platform_component);
+               links[id].dpcm_playback = 1;
++              /* feedback stream or firmware-generated echo reference */
++              links[id].dpcm_capture = 1;
++
+               links[id].no_pcm = 1;
+               links[id].cpus = &cpus[id];
+               links[id].num_cpus = 1;
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch b/queue-6.1/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch
new file mode 100644 (file)
index 0000000..9a19160
--- /dev/null
@@ -0,0 +1,58 @@
+From 58d77f1eda3dae82d06c6e43ffb871e9a8d3be5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 18:34:56 +0200
+Subject: ASoC: Intel: sof_rt5682: always set dpcm_capture for amplifiers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit 324f065cdbaba1b879a63bf07e61ca156b789537 ]
+
+The amplifier may provide hardware support for I/V feedback, or
+alternatively the firmware may generate an echo reference attached to
+the SSP and dailink used for the amplifier.
+
+To avoid any issues with invalid/NULL substreams in the latter case,
+always unconditionally set dpcm_capture.
+
+Link: https://github.com/thesofproject/linux/issues/4083
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230119163459.2235843-2-kai.vehmanen@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/sof_rt5682.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 2358be208c1fd..59c58ef932e4d 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -761,8 +761,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+                       links[id].num_codecs = ARRAY_SIZE(max_98373_components);
+                       links[id].init = max_98373_spk_codec_init;
+                       links[id].ops = &max_98373_ops;
+-                      /* feedback stream */
+-                      links[id].dpcm_capture = 1;
+               } else if (sof_rt5682_quirk &
+                               SOF_MAX98360A_SPEAKER_AMP_PRESENT) {
+                       max_98360a_dai_link(&links[id]);
+@@ -789,6 +787,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+               links[id].platforms = platform_component;
+               links[id].num_platforms = ARRAY_SIZE(platform_component);
+               links[id].dpcm_playback = 1;
++              /* feedback stream or firmware-generated echo reference */
++              links[id].dpcm_capture = 1;
++
+               links[id].no_pcm = 1;
+               links[id].cpus = &cpus[id];
+               links[id].num_cpus = 1;
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-intel-sof_ssp_amp-always-set-dpcm_capture-for-a.patch b/queue-6.1/asoc-intel-sof_ssp_amp-always-set-dpcm_capture-for-a.patch
new file mode 100644 (file)
index 0000000..e4e89dd
--- /dev/null
@@ -0,0 +1,55 @@
+From dca1fbf486e9ab0df58d3646d84b9008db7d0c7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 18:34:59 +0200
+Subject: ASoC: Intel: sof_ssp_amp: always set dpcm_capture for amplifiers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+
+[ Upstream commit b3c00316a2f847791bae395ea6dd91aa7a221471 ]
+
+The amplifier may provide hardware support for I/V feedback, or
+alternatively the firmware may generate an echo reference attached to
+the SSP and dailink used for the amplifier.
+
+To avoid any issues with invalid/NULL substreams in the latter case,
+always unconditionally set dpcm_capture.
+
+Link: https://github.com/thesofproject/linux/issues/4083
+Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Link: https://lore.kernel.org/r/20230119163459.2235843-5-kai.vehmanen@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/sof_ssp_amp.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/sound/soc/intel/boards/sof_ssp_amp.c b/sound/soc/intel/boards/sof_ssp_amp.c
+index 94d25aeb6e7ce..7b74f122e3400 100644
+--- a/sound/soc/intel/boards/sof_ssp_amp.c
++++ b/sound/soc/intel/boards/sof_ssp_amp.c
+@@ -258,13 +258,12 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
+               sof_rt1308_dai_link(&links[id]);
+       } else if (sof_ssp_amp_quirk & SOF_CS35L41_SPEAKER_AMP_PRESENT) {
+               cs35l41_set_dai_link(&links[id]);
+-
+-              /* feedback from amplifier */
+-              links[id].dpcm_capture = 1;
+       }
+       links[id].platforms = platform_component;
+       links[id].num_platforms = ARRAY_SIZE(platform_component);
+       links[id].dpcm_playback = 1;
++      /* feedback from amplifier or firmware-generated echo reference */
++      links[id].dpcm_capture = 1;
+       links[id].no_pcm = 1;
+       links[id].cpus = &cpus[id];
+       links[id].num_cpus = 1;
+-- 
+2.39.0
+
diff --git a/queue-6.1/asoc-sof-sof-audio-start-with-the-right-widget-type.patch b/queue-6.1/asoc-sof-sof-audio-start-with-the-right-widget-type.patch
new file mode 100644 (file)
index 0000000..eb55768
--- /dev/null
@@ -0,0 +1,55 @@
+From 4e233e8a20d75b53377444802663e6f1270352b0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jan 2023 14:35:34 +0200
+Subject: ASoC: SOF: sof-audio: start with the right widget type
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bard Liao <yung-chuan.liao@linux.intel.com>
+
+[ Upstream commit fcc4348adafe53928fda46d104c1798e5a4de4ff ]
+
+If there is a connection between a playback stream and a capture stream,
+all widgets connected to either stream end up in the list, so we have to
+start with exactly the right widget type.
+snd_soc_dapm_aif_out belongs to a capture stream; a playback stream must
+start with a snd_soc_dapm_aif_in widget.
+Conversely, snd_soc_dapm_dai_in belongs to a playback stream; a capture
+stream must start with a snd_soc_dapm_dai_out widget.
+
+Signed-off-by: Bard Liao <yung-chuan.liao@linux.intel.com>
+Reviewed-by: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Reviewed-by: Péter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@linux.intel.com>
+Link: https://lore.kernel.org/r/20230117123534.2075-1-peter.ujfalusi@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sof/sof-audio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
+index 2df433c6ef55f..cf2c0db57d899 100644
+--- a/sound/soc/sof/sof-audio.c
++++ b/sound/soc/sof/sof-audio.c
+@@ -431,11 +431,11 @@ sof_walk_widgets_in_order(struct snd_sof_dev *sdev, struct snd_soc_dapm_widget_l
+       for_each_dapm_widgets(list, i, widget) {
+               /* starting widget for playback is AIF type */
+-              if (dir == SNDRV_PCM_STREAM_PLAYBACK && !WIDGET_IS_AIF(widget->id))
++              if (dir == SNDRV_PCM_STREAM_PLAYBACK && widget->id != snd_soc_dapm_aif_in)
+                       continue;
+               /* starting widget for capture is DAI type */
+-              if (dir == SNDRV_PCM_STREAM_CAPTURE && !WIDGET_IS_DAI(widget->id))
++              if (dir == SNDRV_PCM_STREAM_CAPTURE && widget->id != snd_soc_dapm_dai_out)
+                       continue;
+               switch (op) {
+-- 
+2.39.0
+
diff --git a/queue-6.1/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch b/queue-6.1/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch
new file mode 100644 (file)
index 0000000..38158ea
--- /dev/null
@@ -0,0 +1,117 @@
+From 31307ce4cbe16ab5587f9afcedd998b1cf8059e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Jan 2023 13:41:43 +0100
+Subject: bpf, sockmap: Don't let sock_map_{close,destroy,unhash} call itself
+
+From: Jakub Sitnicki <jakub@cloudflare.com>
+
+[ Upstream commit 5b4a79ba65a1ab479903fff2e604865d229b70a9 ]
+
+sock_map proto callbacks should never call themselves by design. Protect
+against bugs like [1] and break out of the recursive loop to avoid a stack
+overflow in favor of a resource leak.
+
+[1] https://lore.kernel.org/all/00000000000073b14905ef2e7401@google.com/
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/r/20230113-sockmap-fix-v2-1-1e0ee7ac2f90@cloudflare.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/sock_map.c | 61 +++++++++++++++++++++++++--------------------
+ 1 file changed, 34 insertions(+), 27 deletions(-)
+
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 22fa2c5bc6ec9..a68a7290a3b2b 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -1569,15 +1569,16 @@ void sock_map_unhash(struct sock *sk)
+       psock = sk_psock(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+-              if (sk->sk_prot->unhash)
+-                      sk->sk_prot->unhash(sk);
+-              return;
++              saved_unhash = READ_ONCE(sk->sk_prot)->unhash;
++      } else {
++              saved_unhash = psock->saved_unhash;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
+       }
+-
+-      saved_unhash = psock->saved_unhash;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      saved_unhash(sk);
++      if (WARN_ON_ONCE(saved_unhash == sock_map_unhash))
++              return;
++      if (saved_unhash)
++              saved_unhash(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_unhash);
+@@ -1590,17 +1591,18 @@ void sock_map_destroy(struct sock *sk)
+       psock = sk_psock_get(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+-              if (sk->sk_prot->destroy)
+-                      sk->sk_prot->destroy(sk);
+-              return;
++              saved_destroy = READ_ONCE(sk->sk_prot)->destroy;
++      } else {
++              saved_destroy = psock->saved_destroy;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
++              sk_psock_stop(psock);
++              sk_psock_put(sk, psock);
+       }
+-
+-      saved_destroy = psock->saved_destroy;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      sk_psock_stop(psock);
+-      sk_psock_put(sk, psock);
+-      saved_destroy(sk);
++      if (WARN_ON_ONCE(saved_destroy == sock_map_destroy))
++              return;
++      if (saved_destroy)
++              saved_destroy(sk);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_destroy);
+@@ -1615,16 +1617,21 @@ void sock_map_close(struct sock *sk, long timeout)
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               release_sock(sk);
+-              return sk->sk_prot->close(sk, timeout);
++              saved_close = READ_ONCE(sk->sk_prot)->close;
++      } else {
++              saved_close = psock->saved_close;
++              sock_map_remove_links(sk, psock);
++              rcu_read_unlock();
++              sk_psock_stop(psock);
++              release_sock(sk);
++              cancel_work_sync(&psock->work);
++              sk_psock_put(sk, psock);
+       }
+-
+-      saved_close = psock->saved_close;
+-      sock_map_remove_links(sk, psock);
+-      rcu_read_unlock();
+-      sk_psock_stop(psock);
+-      release_sock(sk);
+-      cancel_work_sync(&psock->work);
+-      sk_psock_put(sk, psock);
++      /* Make sure we do not recurse. This is a bug.
++       * Leak the socket instead of crashing on a stack overflow.
++       */
++      if (WARN_ON_ONCE(saved_close == sock_map_close))
++              return;
+       saved_close(sk, timeout);
+ }
+ EXPORT_SYMBOL_GPL(sock_map_close);
+-- 
+2.39.0
+
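The same shape is repeated in all three callbacks; reduced to its essentials, the guard works as in the sketch below. This is a condensed illustration of sock_map_close() after the patch, with the psock teardown elided, not the exact kernel source.

void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		saved_close = READ_ONCE(sk->sk_prot)->close;
	} else {
		saved_close = psock->saved_close;
		/* ... unlink the psock, drop locks, stop and put the psock ... */
	}

	/* Resolve the delegate first, then refuse to call back into
	 * ourselves: leaking the socket is preferable to a stack overflow.
	 */
	if (WARN_ON_ONCE(saved_close == sock_map_close))
		return;
	saved_close(sk, timeout);
}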
diff --git a/queue-6.1/btrfs-lock-the-inode-in-shared-mode-before-starting-.patch b/queue-6.1/btrfs-lock-the-inode-in-shared-mode-before-starting-.patch
new file mode 100644 (file)
index 0000000..e652b11
--- /dev/null
@@ -0,0 +1,178 @@
+From e794f0f956a05efe9ac9de1ee7108b1c20057102 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jan 2023 16:54:46 +0000
+Subject: btrfs: lock the inode in shared mode before starting fiemap
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 519b7e13b5ae8dd38da1e52275705343be6bb508 ]
+
+Currently fiemap does not take the inode's lock (VFS lock), it only locks
+a file range in the inode's io tree. This however can lead to a deadlock
+if we have a concurrent fsync on the file and fiemap code triggers a fault
+when accessing the user space buffer with fiemap_fill_next_extent(). The
+deadlock happens on the inode's i_mmap_lock semaphore, which is taken both
+by fsync and btrfs_page_mkwrite(). This deadlock was recently reported by
+syzbot and triggers a trace like the following:
+
+   task:syz-executor361 state:D stack:20264 pid:5668  ppid:5119   flags:0x00004004
+   Call Trace:
+    <TASK>
+    context_switch kernel/sched/core.c:5293 [inline]
+    __schedule+0x995/0xe20 kernel/sched/core.c:6606
+    schedule+0xcb/0x190 kernel/sched/core.c:6682
+    wait_on_state fs/btrfs/extent-io-tree.c:707 [inline]
+    wait_extent_bit+0x577/0x6f0 fs/btrfs/extent-io-tree.c:751
+    lock_extent+0x1c2/0x280 fs/btrfs/extent-io-tree.c:1742
+    find_lock_delalloc_range+0x4e6/0x9c0 fs/btrfs/extent_io.c:488
+    writepage_delalloc+0x1ef/0x540 fs/btrfs/extent_io.c:1863
+    __extent_writepage+0x736/0x14e0 fs/btrfs/extent_io.c:2174
+    extent_write_cache_pages+0x983/0x1220 fs/btrfs/extent_io.c:3091
+    extent_writepages+0x219/0x540 fs/btrfs/extent_io.c:3211
+    do_writepages+0x3c3/0x680 mm/page-writeback.c:2581
+    filemap_fdatawrite_wbc+0x11e/0x170 mm/filemap.c:388
+    __filemap_fdatawrite_range mm/filemap.c:421 [inline]
+    filemap_fdatawrite_range+0x175/0x200 mm/filemap.c:439
+    btrfs_fdatawrite_range fs/btrfs/file.c:3850 [inline]
+    start_ordered_ops fs/btrfs/file.c:1737 [inline]
+    btrfs_sync_file+0x4ff/0x1190 fs/btrfs/file.c:1839
+    generic_write_sync include/linux/fs.h:2885 [inline]
+    btrfs_do_write_iter+0xcd3/0x1280 fs/btrfs/file.c:1684
+    call_write_iter include/linux/fs.h:2189 [inline]
+    new_sync_write fs/read_write.c:491 [inline]
+    vfs_write+0x7dc/0xc50 fs/read_write.c:584
+    ksys_write+0x177/0x2a0 fs/read_write.c:637
+    do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+    do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
+    entry_SYSCALL_64_after_hwframe+0x63/0xcd
+   RIP: 0033:0x7f7d4054e9b9
+   RSP: 002b:00007f7d404fa2f8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+   RAX: ffffffffffffffda RBX: 00007f7d405d87a0 RCX: 00007f7d4054e9b9
+   RDX: 0000000000000090 RSI: 0000000020000000 RDI: 0000000000000006
+   RBP: 00007f7d405a51d0 R08: 0000000000000000 R09: 0000000000000000
+   R10: 0000000000000000 R11: 0000000000000246 R12: 61635f65646f6e69
+   R13: 65646f7475616f6e R14: 7261637369646f6e R15: 00007f7d405d87a8
+    </TASK>
+   INFO: task syz-executor361:5697 blocked for more than 145 seconds.
+         Not tainted 6.2.0-rc3-syzkaller-00376-g7c6984405241 #0
+   "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+   task:syz-executor361 state:D stack:21216 pid:5697  ppid:5119   flags:0x00004004
+   Call Trace:
+    <TASK>
+    context_switch kernel/sched/core.c:5293 [inline]
+    __schedule+0x995/0xe20 kernel/sched/core.c:6606
+    schedule+0xcb/0x190 kernel/sched/core.c:6682
+    rwsem_down_read_slowpath+0x5f9/0x930 kernel/locking/rwsem.c:1095
+    __down_read_common+0x54/0x2a0 kernel/locking/rwsem.c:1260
+    btrfs_page_mkwrite+0x417/0xc80 fs/btrfs/inode.c:8526
+    do_page_mkwrite+0x19e/0x5e0 mm/memory.c:2947
+    wp_page_shared+0x15e/0x380 mm/memory.c:3295
+    handle_pte_fault mm/memory.c:4949 [inline]
+    __handle_mm_fault mm/memory.c:5073 [inline]
+    handle_mm_fault+0x1b79/0x26b0 mm/memory.c:5219
+    do_user_addr_fault+0x69b/0xcb0 arch/x86/mm/fault.c:1428
+    handle_page_fault arch/x86/mm/fault.c:1519 [inline]
+    exc_page_fault+0x7a/0x110 arch/x86/mm/fault.c:1575
+    asm_exc_page_fault+0x22/0x30 arch/x86/include/asm/idtentry.h:570
+   RIP: 0010:copy_user_short_string+0xd/0x40 arch/x86/lib/copy_user_64.S:233
+   Code: 74 0a 89 (...)
+   RSP: 0018:ffffc9000570f330 EFLAGS: 00050202
+   RAX: ffffffff843e6601 RBX: 00007fffffffefc8 RCX: 0000000000000007
+   RDX: 0000000000000000 RSI: ffffc9000570f3e0 RDI: 0000000020000120
+   RBP: ffffc9000570f490 R08: 0000000000000000 R09: fffff52000ae1e83
+   R10: fffff52000ae1e83 R11: 1ffff92000ae1e7c R12: 0000000000000038
+   R13: ffffc9000570f3e0 R14: 0000000020000120 R15: ffffc9000570f3e0
+    copy_user_generic arch/x86/include/asm/uaccess_64.h:37 [inline]
+    raw_copy_to_user arch/x86/include/asm/uaccess_64.h:58 [inline]
+    _copy_to_user+0xe9/0x130 lib/usercopy.c:34
+    copy_to_user include/linux/uaccess.h:169 [inline]
+    fiemap_fill_next_extent+0x22e/0x410 fs/ioctl.c:144
+    emit_fiemap_extent+0x22d/0x3c0 fs/btrfs/extent_io.c:3458
+    fiemap_process_hole+0xa00/0xad0 fs/btrfs/extent_io.c:3716
+    extent_fiemap+0xe27/0x2100 fs/btrfs/extent_io.c:3922
+    btrfs_fiemap+0x172/0x1e0 fs/btrfs/inode.c:8209
+    ioctl_fiemap fs/ioctl.c:219 [inline]
+    do_vfs_ioctl+0x185b/0x2980 fs/ioctl.c:810
+    __do_sys_ioctl fs/ioctl.c:868 [inline]
+    __se_sys_ioctl+0x83/0x170 fs/ioctl.c:856
+    do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+    do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
+    entry_SYSCALL_64_after_hwframe+0x63/0xcd
+   RIP: 0033:0x7f7d4054e9b9
+   RSP: 002b:00007f7d390d92f8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+   RAX: ffffffffffffffda RBX: 00007f7d405d87b0 RCX: 00007f7d4054e9b9
+   RDX: 0000000020000100 RSI: 00000000c020660b RDI: 0000000000000005
+   RBP: 00007f7d405a51d0 R08: 00007f7d390d9700 R09: 0000000000000000
+   R10: 00007f7d390d9700 R11: 0000000000000246 R12: 61635f65646f6e69
+   R13: 65646f7475616f6e R14: 7261637369646f6e R15: 00007f7d405d87b8
+    </TASK>
+
+What happens is the following:
+
+1) Task A is doing an fsync, enters btrfs_sync_file() and flushes delalloc
+   before locking the inode and the i_mmap_lock semaphore, that is, before
+   calling btrfs_inode_lock();
+
+2) After task A flushes delalloc and before it calls btrfs_inode_lock(),
+   another task dirties a page;
+
+3) Task B starts a fiemap without FIEMAP_FLAG_SYNC, so the page dirtied
+   at step 2 remains dirty and unflushed. Then when it enters
+   extent_fiemap() and it locks a file range that includes the range of
+   the page dirtied in step 2;
+
+4) Task A calls btrfs_inode_lock() and locks the inode (VFS lock) and the
+   inode's i_mmap_lock semaphore in write mode. Then it tries to flush
+   delalloc by calling start_ordered_ops(), which will block, at
+   find_lock_delalloc_range(), when trying to lock the range of the page
+   dirtied at step 2, since this range was locked by the fiemap task (at
+   step 3);
+
+5) Task B generates a page fault when accessing the user space fiemap
+   buffer with a call to fiemap_fill_next_extent().
+
+   The fault handler needs to call btrfs_page_mkwrite() for some other
+   page of our inode, and there we deadlock when trying to lock the
+   inode's i_mmap_lock semaphore in read mode, since the fsync task locked
+   it in write mode (step 4) and the fsync task can not progress because
+   it's waiting to lock a file range that is currently locked by us (the
+   fiemap task, step 3).
+
+Fix this by taking the inode's lock (VFS lock) in shared mode when
+entering fiemap. This effectively serializes fiemap with fsync (except the
+most expensive part of fsync, the log sync), preventing this deadlock.
+
+Reported-by: syzbot+cc35f55c41e34c30dcb5@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/linux-btrfs/00000000000032dc7305f2a66f46@google.com/
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index acb3c5c3b0251..58785dc7080ad 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3938,6 +3938,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+       lockend = round_up(start + len, root->fs_info->sectorsize);
+       prev_extent_end = lockstart;
++      btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+       lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+       ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+@@ -4129,6 +4130,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ out_unlock:
+       unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
++      btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+ out:
+       kfree(backref_cache);
+       btrfs_free_path(path);
+-- 
+2.39.0
+
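Condensed, the fix imposes the lock order below in extent_fiemap(), so fiemap is serialized with fsync before it ever holds the extent range lock across a user-space page fault. This is a sketch of the resulting flow, not the full function; error handling and the extent walk are omitted.

/* Sketch: lock order in extent_fiemap() after the fix. */
btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);	/* VFS lock, shared */
lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

/* ... walk extents; fiemap_fill_next_extent() may fault in the user
 * buffer and take the i_mmap_lock via btrfs_page_mkwrite() ...
 */

unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);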
diff --git a/queue-6.1/btrfs-move-the-auto-defrag-code-to-defrag.c.patch b/queue-6.1/btrfs-move-the-auto-defrag-code-to-defrag.c.patch
new file mode 100644 (file)
index 0000000..227afaf
--- /dev/null
@@ -0,0 +1,736 @@
+From 7b00e9ec454f5340ea5316ef3405af0be2d9e3f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Oct 2022 15:08:23 -0400
+Subject: btrfs: move the auto defrag code to defrag.c
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 6e3df18ba7e8e68015dd66bcab326a4b7aaed085 ]
+
+This currently exists in file.c, move it to the more natural location in
+defrag.c.
+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+[ reformat comments ]
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 519b7e13b5ae ("btrfs: lock the inode in shared mode before starting fiemap")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/file.c        | 340 -----------------------------------------
+ fs/btrfs/tree-defrag.c | 337 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 337 insertions(+), 340 deletions(-)
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 23056d9914d84..1bda59c683602 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -31,329 +31,6 @@
+ #include "reflink.h"
+ #include "subpage.h"
+-static struct kmem_cache *btrfs_inode_defrag_cachep;
+-/*
+- * when auto defrag is enabled we
+- * queue up these defrag structs to remember which
+- * inodes need defragging passes
+- */
+-struct inode_defrag {
+-      struct rb_node rb_node;
+-      /* objectid */
+-      u64 ino;
+-      /*
+-       * transid where the defrag was added, we search for
+-       * extents newer than this
+-       */
+-      u64 transid;
+-
+-      /* root objectid */
+-      u64 root;
+-
+-      /*
+-       * The extent size threshold for autodefrag.
+-       *
+-       * This value is different for compressed/non-compressed extents,
+-       * thus needs to be passed from higher layer.
+-       * (aka, inode_should_defrag())
+-       */
+-      u32 extent_thresh;
+-};
+-
+-static int __compare_inode_defrag(struct inode_defrag *defrag1,
+-                                struct inode_defrag *defrag2)
+-{
+-      if (defrag1->root > defrag2->root)
+-              return 1;
+-      else if (defrag1->root < defrag2->root)
+-              return -1;
+-      else if (defrag1->ino > defrag2->ino)
+-              return 1;
+-      else if (defrag1->ino < defrag2->ino)
+-              return -1;
+-      else
+-              return 0;
+-}
+-
+-/* pop a record for an inode into the defrag tree.  The lock
+- * must be held already
+- *
+- * If you're inserting a record for an older transid than an
+- * existing record, the transid already in the tree is lowered
+- *
+- * If an existing record is found the defrag item you
+- * pass in is freed
+- */
+-static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
+-                                  struct inode_defrag *defrag)
+-{
+-      struct btrfs_fs_info *fs_info = inode->root->fs_info;
+-      struct inode_defrag *entry;
+-      struct rb_node **p;
+-      struct rb_node *parent = NULL;
+-      int ret;
+-
+-      p = &fs_info->defrag_inodes.rb_node;
+-      while (*p) {
+-              parent = *p;
+-              entry = rb_entry(parent, struct inode_defrag, rb_node);
+-
+-              ret = __compare_inode_defrag(defrag, entry);
+-              if (ret < 0)
+-                      p = &parent->rb_left;
+-              else if (ret > 0)
+-                      p = &parent->rb_right;
+-              else {
+-                      /* if we're reinserting an entry for
+-                       * an old defrag run, make sure to
+-                       * lower the transid of our existing record
+-                       */
+-                      if (defrag->transid < entry->transid)
+-                              entry->transid = defrag->transid;
+-                      entry->extent_thresh = min(defrag->extent_thresh,
+-                                                 entry->extent_thresh);
+-                      return -EEXIST;
+-              }
+-      }
+-      set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
+-      rb_link_node(&defrag->rb_node, parent, p);
+-      rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
+-      return 0;
+-}
+-
+-static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
+-{
+-      if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
+-              return 0;
+-
+-      if (btrfs_fs_closing(fs_info))
+-              return 0;
+-
+-      return 1;
+-}
+-
+-/*
+- * insert a defrag record for this inode if auto defrag is
+- * enabled
+- */
+-int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+-                         struct btrfs_inode *inode, u32 extent_thresh)
+-{
+-      struct btrfs_root *root = inode->root;
+-      struct btrfs_fs_info *fs_info = root->fs_info;
+-      struct inode_defrag *defrag;
+-      u64 transid;
+-      int ret;
+-
+-      if (!__need_auto_defrag(fs_info))
+-              return 0;
+-
+-      if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
+-              return 0;
+-
+-      if (trans)
+-              transid = trans->transid;
+-      else
+-              transid = inode->root->last_trans;
+-
+-      defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
+-      if (!defrag)
+-              return -ENOMEM;
+-
+-      defrag->ino = btrfs_ino(inode);
+-      defrag->transid = transid;
+-      defrag->root = root->root_key.objectid;
+-      defrag->extent_thresh = extent_thresh;
+-
+-      spin_lock(&fs_info->defrag_inodes_lock);
+-      if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
+-              /*
+-               * If we set IN_DEFRAG flag and evict the inode from memory,
+-               * and then re-read this inode, this new inode doesn't have
+-               * IN_DEFRAG flag. At the case, we may find the existed defrag.
+-               */
+-              ret = __btrfs_add_inode_defrag(inode, defrag);
+-              if (ret)
+-                      kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-      } else {
+-              kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-      }
+-      spin_unlock(&fs_info->defrag_inodes_lock);
+-      return 0;
+-}
+-
+-/*
+- * pick the defragable inode that we want, if it doesn't exist, we will get
+- * the next one.
+- */
+-static struct inode_defrag *
+-btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
+-{
+-      struct inode_defrag *entry = NULL;
+-      struct inode_defrag tmp;
+-      struct rb_node *p;
+-      struct rb_node *parent = NULL;
+-      int ret;
+-
+-      tmp.ino = ino;
+-      tmp.root = root;
+-
+-      spin_lock(&fs_info->defrag_inodes_lock);
+-      p = fs_info->defrag_inodes.rb_node;
+-      while (p) {
+-              parent = p;
+-              entry = rb_entry(parent, struct inode_defrag, rb_node);
+-
+-              ret = __compare_inode_defrag(&tmp, entry);
+-              if (ret < 0)
+-                      p = parent->rb_left;
+-              else if (ret > 0)
+-                      p = parent->rb_right;
+-              else
+-                      goto out;
+-      }
+-
+-      if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
+-              parent = rb_next(parent);
+-              if (parent)
+-                      entry = rb_entry(parent, struct inode_defrag, rb_node);
+-              else
+-                      entry = NULL;
+-      }
+-out:
+-      if (entry)
+-              rb_erase(parent, &fs_info->defrag_inodes);
+-      spin_unlock(&fs_info->defrag_inodes_lock);
+-      return entry;
+-}
+-
+-void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
+-{
+-      struct inode_defrag *defrag;
+-      struct rb_node *node;
+-
+-      spin_lock(&fs_info->defrag_inodes_lock);
+-      node = rb_first(&fs_info->defrag_inodes);
+-      while (node) {
+-              rb_erase(node, &fs_info->defrag_inodes);
+-              defrag = rb_entry(node, struct inode_defrag, rb_node);
+-              kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-
+-              cond_resched_lock(&fs_info->defrag_inodes_lock);
+-
+-              node = rb_first(&fs_info->defrag_inodes);
+-      }
+-      spin_unlock(&fs_info->defrag_inodes_lock);
+-}
+-
+-#define BTRFS_DEFRAG_BATCH    1024
+-
+-static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
+-                                  struct inode_defrag *defrag)
+-{
+-      struct btrfs_root *inode_root;
+-      struct inode *inode;
+-      struct btrfs_ioctl_defrag_range_args range;
+-      int ret = 0;
+-      u64 cur = 0;
+-
+-again:
+-      if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+-              goto cleanup;
+-      if (!__need_auto_defrag(fs_info))
+-              goto cleanup;
+-
+-      /* get the inode */
+-      inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
+-      if (IS_ERR(inode_root)) {
+-              ret = PTR_ERR(inode_root);
+-              goto cleanup;
+-      }
+-
+-      inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
+-      btrfs_put_root(inode_root);
+-      if (IS_ERR(inode)) {
+-              ret = PTR_ERR(inode);
+-              goto cleanup;
+-      }
+-
+-      if (cur >= i_size_read(inode)) {
+-              iput(inode);
+-              goto cleanup;
+-      }
+-
+-      /* do a chunk of defrag */
+-      clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+-      memset(&range, 0, sizeof(range));
+-      range.len = (u64)-1;
+-      range.start = cur;
+-      range.extent_thresh = defrag->extent_thresh;
+-
+-      sb_start_write(fs_info->sb);
+-      ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+-                                     BTRFS_DEFRAG_BATCH);
+-      sb_end_write(fs_info->sb);
+-      iput(inode);
+-
+-      if (ret < 0)
+-              goto cleanup;
+-
+-      cur = max(cur + fs_info->sectorsize, range.start);
+-      goto again;
+-
+-cleanup:
+-      kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+-      return ret;
+-}
+-
+-/*
+- * run through the list of inodes in the FS that need
+- * defragging
+- */
+-int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+-{
+-      struct inode_defrag *defrag;
+-      u64 first_ino = 0;
+-      u64 root_objectid = 0;
+-
+-      atomic_inc(&fs_info->defrag_running);
+-      while (1) {
+-              /* Pause the auto defragger. */
+-              if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+-                           &fs_info->fs_state))
+-                      break;
+-
+-              if (!__need_auto_defrag(fs_info))
+-                      break;
+-
+-              /* find an inode to defrag */
+-              defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
+-                                               first_ino);
+-              if (!defrag) {
+-                      if (root_objectid || first_ino) {
+-                              root_objectid = 0;
+-                              first_ino = 0;
+-                              continue;
+-                      } else {
+-                              break;
+-                      }
+-              }
+-
+-              first_ino = defrag->ino + 1;
+-              root_objectid = defrag->root;
+-
+-              __btrfs_run_defrag_inode(fs_info, defrag);
+-      }
+-      atomic_dec(&fs_info->defrag_running);
+-
+-      /*
+-       * during unmount, we use the transaction_wait queue to
+-       * wait for the defragger to stop
+-       */
+-      wake_up(&fs_info->transaction_wait);
+-      return 0;
+-}
+-
+ /* simple helper to fault in pages and copy.  This should go away
+  * and be replaced with calls into generic code.
+  */
+@@ -4130,23 +3807,6 @@ const struct file_operations btrfs_file_operations = {
+       .remap_file_range = btrfs_remap_file_range,
+ };
+-void __cold btrfs_auto_defrag_exit(void)
+-{
+-      kmem_cache_destroy(btrfs_inode_defrag_cachep);
+-}
+-
+-int __init btrfs_auto_defrag_init(void)
+-{
+-      btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
+-                                      sizeof(struct inode_defrag), 0,
+-                                      SLAB_MEM_SPREAD,
+-                                      NULL);
+-      if (!btrfs_inode_defrag_cachep)
+-              return -ENOMEM;
+-
+-      return 0;
+-}
+-
+ int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+ {
+       int ret;
+diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
+index 072ab9a1374b5..0520d6d32a2db 100644
+--- a/fs/btrfs/tree-defrag.c
++++ b/fs/btrfs/tree-defrag.c
+@@ -10,6 +10,326 @@
+ #include "transaction.h"
+ #include "locking.h"
++static struct kmem_cache *btrfs_inode_defrag_cachep;
++
++/*
++ * When auto defrag is enabled we queue up these defrag structs to remember
++ * which inodes need defragging passes.
++ */
++struct inode_defrag {
++      struct rb_node rb_node;
++      /* Inode number */
++      u64 ino;
++      /*
++       * Transid where the defrag was added, we search for extents newer than
++       * this.
++       */
++      u64 transid;
++
++      /* Root objectid */
++      u64 root;
++
++      /*
++       * The extent size threshold for autodefrag.
++       *
++       * This value is different for compressed/non-compressed extents, thus
++       * needs to be passed from higher layer.
++       * (aka, inode_should_defrag())
++       */
++      u32 extent_thresh;
++};
++
++static int __compare_inode_defrag(struct inode_defrag *defrag1,
++                                struct inode_defrag *defrag2)
++{
++      if (defrag1->root > defrag2->root)
++              return 1;
++      else if (defrag1->root < defrag2->root)
++              return -1;
++      else if (defrag1->ino > defrag2->ino)
++              return 1;
++      else if (defrag1->ino < defrag2->ino)
++              return -1;
++      else
++              return 0;
++}
++
++/*
++ * Pop a record for an inode into the defrag tree.  The lock must be held
++ * already.
++ *
++ * If you're inserting a record for an older transid than an existing record,
++ * the transid already in the tree is lowered.
++ *
++ * If an existing record is found the defrag item you pass in is freed.
++ */
++static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
++                                  struct inode_defrag *defrag)
++{
++      struct btrfs_fs_info *fs_info = inode->root->fs_info;
++      struct inode_defrag *entry;
++      struct rb_node **p;
++      struct rb_node *parent = NULL;
++      int ret;
++
++      p = &fs_info->defrag_inodes.rb_node;
++      while (*p) {
++              parent = *p;
++              entry = rb_entry(parent, struct inode_defrag, rb_node);
++
++              ret = __compare_inode_defrag(defrag, entry);
++              if (ret < 0)
++                      p = &parent->rb_left;
++              else if (ret > 0)
++                      p = &parent->rb_right;
++              else {
++                      /*
++                       * If we're reinserting an entry for an old defrag run,
++                       * make sure to lower the transid of our existing
++                       * record.
++                       */
++                      if (defrag->transid < entry->transid)
++                              entry->transid = defrag->transid;
++                      entry->extent_thresh = min(defrag->extent_thresh,
++                                                 entry->extent_thresh);
++                      return -EEXIST;
++              }
++      }
++      set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
++      rb_link_node(&defrag->rb_node, parent, p);
++      rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
++      return 0;
++}
++
++static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
++{
++      if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
++              return 0;
++
++      if (btrfs_fs_closing(fs_info))
++              return 0;
++
++      return 1;
++}
++
++/*
++ * Insert a defrag record for this inode if auto defrag is enabled.
++ */
++int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
++                         struct btrfs_inode *inode, u32 extent_thresh)
++{
++      struct btrfs_root *root = inode->root;
++      struct btrfs_fs_info *fs_info = root->fs_info;
++      struct inode_defrag *defrag;
++      u64 transid;
++      int ret;
++
++      if (!__need_auto_defrag(fs_info))
++              return 0;
++
++      if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
++              return 0;
++
++      if (trans)
++              transid = trans->transid;
++      else
++              transid = inode->root->last_trans;
++
++      defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
++      if (!defrag)
++              return -ENOMEM;
++
++      defrag->ino = btrfs_ino(inode);
++      defrag->transid = transid;
++      defrag->root = root->root_key.objectid;
++      defrag->extent_thresh = extent_thresh;
++
++      spin_lock(&fs_info->defrag_inodes_lock);
++      if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
++              /*
++               * If we set the IN_DEFRAG flag, evict the inode from memory
++               * and then re-read it, the new in-memory inode won't have the
++               * IN_DEFRAG flag set, so we may still find an existing record.
++               */
++              ret = __btrfs_add_inode_defrag(inode, defrag);
++              if (ret)
++                      kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++      } else {
++              kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++      }
++      spin_unlock(&fs_info->defrag_inodes_lock);
++      return 0;
++}
++
++/*
++ * Pick the defrag record for the requested inode; if it doesn't exist,
++ * return the next one in (root, ino) order.
++ */
++static struct inode_defrag *btrfs_pick_defrag_inode(
++                      struct btrfs_fs_info *fs_info, u64 root, u64 ino)
++{
++      struct inode_defrag *entry = NULL;
++      struct inode_defrag tmp;
++      struct rb_node *p;
++      struct rb_node *parent = NULL;
++      int ret;
++
++      tmp.ino = ino;
++      tmp.root = root;
++
++      spin_lock(&fs_info->defrag_inodes_lock);
++      p = fs_info->defrag_inodes.rb_node;
++      while (p) {
++              parent = p;
++              entry = rb_entry(parent, struct inode_defrag, rb_node);
++
++              ret = __compare_inode_defrag(&tmp, entry);
++              if (ret < 0)
++                      p = parent->rb_left;
++              else if (ret > 0)
++                      p = parent->rb_right;
++              else
++                      goto out;
++      }
++
++      if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
++              parent = rb_next(parent);
++              if (parent)
++                      entry = rb_entry(parent, struct inode_defrag, rb_node);
++              else
++                      entry = NULL;
++      }
++out:
++      if (entry)
++              rb_erase(parent, &fs_info->defrag_inodes);
++      spin_unlock(&fs_info->defrag_inodes_lock);
++      return entry;
++}
++
++void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
++{
++      struct inode_defrag *defrag;
++      struct rb_node *node;
++
++      spin_lock(&fs_info->defrag_inodes_lock);
++      node = rb_first(&fs_info->defrag_inodes);
++      while (node) {
++              rb_erase(node, &fs_info->defrag_inodes);
++              defrag = rb_entry(node, struct inode_defrag, rb_node);
++              kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++
++              cond_resched_lock(&fs_info->defrag_inodes_lock);
++
++              node = rb_first(&fs_info->defrag_inodes);
++      }
++      spin_unlock(&fs_info->defrag_inodes_lock);
++}
++
++#define BTRFS_DEFRAG_BATCH    1024
++
++static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
++                                  struct inode_defrag *defrag)
++{
++      struct btrfs_root *inode_root;
++      struct inode *inode;
++      struct btrfs_ioctl_defrag_range_args range;
++      int ret = 0;
++      u64 cur = 0;
++
++again:
++      if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
++              goto cleanup;
++      if (!__need_auto_defrag(fs_info))
++              goto cleanup;
++
++      /* Get the inode */
++      inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
++      if (IS_ERR(inode_root)) {
++              ret = PTR_ERR(inode_root);
++              goto cleanup;
++      }
++
++      inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
++      btrfs_put_root(inode_root);
++      if (IS_ERR(inode)) {
++              ret = PTR_ERR(inode);
++              goto cleanup;
++      }
++
++      if (cur >= i_size_read(inode)) {
++              iput(inode);
++              goto cleanup;
++      }
++
++      /* Do a chunk of defrag */
++      clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
++      memset(&range, 0, sizeof(range));
++      range.len = (u64)-1;
++      range.start = cur;
++      range.extent_thresh = defrag->extent_thresh;
++
++      sb_start_write(fs_info->sb);
++      ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
++                                     BTRFS_DEFRAG_BATCH);
++      sb_end_write(fs_info->sb);
++      iput(inode);
++
++      if (ret < 0)
++              goto cleanup;
++
++      cur = max(cur + fs_info->sectorsize, range.start);
++      goto again;
++
++cleanup:
++      kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
++      return ret;
++}
++
++/*
++ * Run through the list of inodes in the FS that need defragging.
++ */
++int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
++{
++      struct inode_defrag *defrag;
++      u64 first_ino = 0;
++      u64 root_objectid = 0;
++
++      atomic_inc(&fs_info->defrag_running);
++      while (1) {
++              /* Pause the auto defragger. */
++              if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
++                      break;
++
++              if (!__need_auto_defrag(fs_info))
++                      break;
++
++              /* find an inode to defrag */
++              defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
++              if (!defrag) {
++                      if (root_objectid || first_ino) {
++                              root_objectid = 0;
++                              first_ino = 0;
++                              continue;
++                      } else {
++                              break;
++                      }
++              }
++
++              first_ino = defrag->ino + 1;
++              root_objectid = defrag->root;
++
++              __btrfs_run_defrag_inode(fs_info, defrag);
++      }
++      atomic_dec(&fs_info->defrag_running);
++
++      /*
++       * During unmount, we use the transaction_wait queue to wait for the
++       * defragger to stop.
++       */
++      wake_up(&fs_info->transaction_wait);
++      return 0;
++}
++
+ /*
+  * Defrag all the leaves in a given btree.
+  * Read all the leaves and try to get key order to
+@@ -132,3 +452,20 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+       return ret;
+ }
++
++void __cold btrfs_auto_defrag_exit(void)
++{
++      kmem_cache_destroy(btrfs_inode_defrag_cachep);
++}
++
++int __init btrfs_auto_defrag_init(void)
++{
++      btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
++                                      sizeof(struct inode_defrag), 0,
++                                      SLAB_MEM_SPREAD,
++                                      NULL);
++      if (!btrfs_inode_defrag_cachep)
++              return -ENOMEM;
++
++      return 0;
++}
+-- 
+2.39.0
+
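The relocated autodefrag code above keys its red-black tree on the pair (root objectid, inode number), comparing root first and inode second; that same ordering is what lets btrfs_run_defrag_inodes() resume from (root_objectid, first_ino) and have btrfs_pick_defrag_inode() hand back the next record. A rough userspace illustration of that two-level ordering only — invented names, qsort instead of the kernel rb-tree, not the btrfs implementation:

/* Minimal sketch of the (root, ino) ordering used by the autodefrag
 * tree; container and names are illustrative, not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct defrag_key {
	uint64_t root;	/* root objectid, compared first */
	uint64_t ino;	/* inode number, compared second */
};

static int compare_defrag_key(const void *a, const void *b)
{
	const struct defrag_key *k1 = a, *k2 = b;

	if (k1->root != k2->root)
		return k1->root > k2->root ? 1 : -1;
	if (k1->ino != k2->ino)
		return k1->ino > k2->ino ? 1 : -1;
	return 0;	/* same (root, ino): an existing record */
}

int main(void)
{
	struct defrag_key keys[] = {
		{ 5, 261 }, { 5, 258 }, { 7, 100 }, { 5, 300 },
	};
	size_t n = sizeof(keys) / sizeof(keys[0]);

	qsort(keys, n, sizeof(keys[0]), compare_defrag_key);

	for (size_t i = 0; i < n; i++)
		printf("root=%llu ino=%llu\n",
		       (unsigned long long)keys[i].root,
		       (unsigned long long)keys[i].ino);
	return 0;
}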
diff --git a/queue-6.1/ceph-blocklist-the-kclient-when-receiving-corrupted-.patch b/queue-6.1/ceph-blocklist-the-kclient-when-receiving-corrupted-.patch
new file mode 100644 (file)
index 0000000..8f7aa1d
--- /dev/null
@@ -0,0 +1,337 @@
+From f6bcfaf97e6e9d877399f8ff33150badd468f19e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Feb 2023 09:36:45 +0800
+Subject: ceph: blocklist the kclient when receiving corrupted snap trace
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit a68e564adcaa69b0930809fb64d9d5f7d9c32ba9 ]
+
+When we receive a corrupted snap trace we don't know what exactly has
+happened on the MDS side, and we shouldn't continue issuing I/O and
+metadata requests to the MDS, which may corrupt data or return
+incorrect contents.
+
+This patch blocks all further I/O and MDS requests immediately and
+then evicts the kclient itself.
+
+The reason we still evict the kclient right after blocking all further
+I/O is that it lets the MDS revoke the caps faster.
+
+Link: https://tracker.ceph.com/issues/57686
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/addr.c       | 17 +++++++++++++++--
+ fs/ceph/caps.c       | 16 +++++++++++++---
+ fs/ceph/file.c       |  3 +++
+ fs/ceph/mds_client.c | 30 +++++++++++++++++++++++++++---
+ fs/ceph/snap.c       | 36 ++++++++++++++++++++++++++++++++++--
+ fs/ceph/super.h      |  1 +
+ 6 files changed, 93 insertions(+), 10 deletions(-)
+
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 61f47debec5ac..478c03bfba663 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -305,7 +305,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+       struct inode *inode = rreq->inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+-      struct ceph_osd_request *req;
++      struct ceph_osd_request *req = NULL;
+       struct ceph_vino vino = ceph_vino(inode);
+       struct iov_iter iter;
+       struct page **pages;
+@@ -313,6 +313,11 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
+       int err = 0;
+       u64 len = subreq->len;
++      if (ceph_inode_is_shutdown(inode)) {
++              err = -EIO;
++              goto out;
++      }
++
+       if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
+               return;
+@@ -563,6 +568,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+       dout("writepage %p idx %lu\n", page, page->index);
++      if (ceph_inode_is_shutdown(inode))
++              return -EIO;
++
+       /* verify this is a writeable snap context */
+       snapc = page_snap_context(page);
+       if (!snapc) {
+@@ -1643,7 +1651,7 @@ int ceph_uninline_data(struct file *file)
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_osd_request *req = NULL;
+-      struct ceph_cap_flush *prealloc_cf;
++      struct ceph_cap_flush *prealloc_cf = NULL;
+       struct folio *folio = NULL;
+       u64 inline_version = CEPH_INLINE_NONE;
+       struct page *pages[1];
+@@ -1657,6 +1665,11 @@ int ceph_uninline_data(struct file *file)
+       dout("uninline_data %p %llx.%llx inline_version %llu\n",
+            inode, ceph_vinop(inode), inline_version);
++      if (ceph_inode_is_shutdown(inode)) {
++              err = -EIO;
++              goto out;
++      }
++
+       if (inline_version == CEPH_INLINE_NONE)
+               return 0;
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index cd69bf267d1b1..795fd6d84bde0 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4081,6 +4081,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+       void *p, *end;
+       struct cap_extra_info extra_info = {};
+       bool queue_trunc;
++      bool close_sessions = false;
+       dout("handle_caps from mds%d\n", session->s_mds);
+@@ -4218,9 +4219,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+               realm = NULL;
+               if (snaptrace_len) {
+                       down_write(&mdsc->snap_rwsem);
+-                      ceph_update_snap_trace(mdsc, snaptrace,
+-                                             snaptrace + snaptrace_len,
+-                                             false, &realm);
++                      if (ceph_update_snap_trace(mdsc, snaptrace,
++                                                 snaptrace + snaptrace_len,
++                                                 false, &realm)) {
++                              up_write(&mdsc->snap_rwsem);
++                              close_sessions = true;
++                              goto done;
++                      }
+                       downgrade_write(&mdsc->snap_rwsem);
+               } else {
+                       down_read(&mdsc->snap_rwsem);
+@@ -4280,6 +4285,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+       iput(inode);
+ out:
+       ceph_put_string(extra_info.pool_ns);
++
++      /* Defer closing the sessions after s_mutex lock being released */
++      if (close_sessions)
++              ceph_mdsc_close_sessions(mdsc);
++
+       return;
+ flush_cap_releases:
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 6f9580defb2b3..5895797f3104a 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2004,6 +2004,9 @@ static int ceph_zero_partial_object(struct inode *inode,
+       loff_t zero = 0;
+       int op;
++      if (ceph_inode_is_shutdown(inode))
++              return -EIO;
++
+       if (!length) {
+               op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
+               length = &zero;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 756560df3bdbd..27a245d959c0a 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -806,6 +806,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
+ {
+       struct ceph_mds_session *s;
++      if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
++              return ERR_PTR(-EIO);
++
+       if (mds >= mdsc->mdsmap->possible_max_rank)
+               return ERR_PTR(-EINVAL);
+@@ -1478,6 +1481,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
+       int mstate;
+       int mds = session->s_mds;
++      if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
++              return -EIO;
++
+       /* wait for mds to go active? */
+       mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
+       dout("open_session to mds%d (%s)\n", mds,
+@@ -2860,6 +2866,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
+               return;
+       }
++      if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
++              dout("do_request metadata corrupted\n");
++              err = -EIO;
++              goto finish;
++      }
+       if (req->r_timeout &&
+           time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+               dout("do_request timed out\n");
+@@ -3245,6 +3256,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+       u64 tid;
+       int err, result;
+       int mds = session->s_mds;
++      bool close_sessions = false;
+       if (msg->front.iov_len < sizeof(*head)) {
+               pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+@@ -3351,10 +3363,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+       realm = NULL;
+       if (rinfo->snapblob_len) {
+               down_write(&mdsc->snap_rwsem);
+-              ceph_update_snap_trace(mdsc, rinfo->snapblob,
++              err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
+                               rinfo->snapblob + rinfo->snapblob_len,
+                               le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+                               &realm);
++              if (err) {
++                      up_write(&mdsc->snap_rwsem);
++                      close_sessions = true;
++                      if (err == -EIO)
++                              ceph_msg_dump(msg);
++                      goto out_err;
++              }
+               downgrade_write(&mdsc->snap_rwsem);
+       } else {
+               down_read(&mdsc->snap_rwsem);
+@@ -3412,6 +3431,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+                                    req->r_end_latency, err);
+ out:
+       ceph_mdsc_put_request(req);
++
++      /* Defer closing the sessions after s_mutex lock being released */
++      if (close_sessions)
++              ceph_mdsc_close_sessions(mdsc);
+       return;
+ }
+@@ -5017,7 +5040,7 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
+ }
+ /*
+- * called after sb is ro.
++ * called after sb is ro or when metadata corrupted.
+  */
+ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
+ {
+@@ -5307,7 +5330,8 @@ static void mds_peer_reset(struct ceph_connection *con)
+       struct ceph_mds_client *mdsc = s->s_mdsc;
+       pr_warn("mds%d closed our session\n", s->s_mds);
+-      send_mds_reconnect(mdsc, s);
++      if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
++              send_mds_reconnect(mdsc, s);
+ }
+ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index e4151852184e0..87007203f130e 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/ceph/ceph_debug.h>
++#include <linux/fs.h>
+ #include <linux/sort.h>
+ #include <linux/slab.h>
+ #include <linux/iversion.h>
+@@ -766,8 +767,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+       struct ceph_snap_realm *realm;
+       struct ceph_snap_realm *first_realm = NULL;
+       struct ceph_snap_realm *realm_to_rebuild = NULL;
++      struct ceph_client *client = mdsc->fsc->client;
+       int rebuild_snapcs;
+       int err = -ENOMEM;
++      int ret;
+       LIST_HEAD(dirty_realms);
+       lockdep_assert_held_write(&mdsc->snap_rwsem);
+@@ -884,6 +887,27 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+       if (first_realm)
+               ceph_put_snap_realm(mdsc, first_realm);
+       pr_err("%s error %d\n", __func__, err);
++
++      /*
++       * When receiving a corrupted snap trace we don't know what
++       * exactly has happened on the MDS side, and we shouldn't keep
++       * writing to the OSDs, which may corrupt the snapshot contents.
++       *
++       * Just try to blocklist this kclient; it must then be remounted
++       * to continue, once the corrupted metadata has been fixed on
++       * the MDS side.
++       */
++      WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
++      ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
++      if (ret)
++              pr_err("%s failed to blocklist %s: %d\n", __func__,
++                     ceph_pr_addr(&client->msgr.inst.addr), ret);
++
++      WARN(1, "%s: %s%sdo remount to continue%s",
++           __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
++           ret ? "" : " was blocklisted, ",
++           err == -EIO ? " after corrupted snaptrace is fixed" : "");
++
+       return err;
+ }
+@@ -984,6 +1008,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+       __le64 *split_inos = NULL, *split_realms = NULL;
+       int i;
+       int locked_rwsem = 0;
++      bool close_sessions = false;
+       /* decode */
+       if (msg->front.iov_len < sizeof(*h))
+@@ -1092,8 +1117,12 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+        * update using the provided snap trace. if we are deleting a
+        * snap, we can avoid queueing cap_snaps.
+        */
+-      ceph_update_snap_trace(mdsc, p, e,
+-                             op == CEPH_SNAP_OP_DESTROY, NULL);
++      if (ceph_update_snap_trace(mdsc, p, e,
++                                 op == CEPH_SNAP_OP_DESTROY,
++                                 NULL)) {
++              close_sessions = true;
++              goto bad;
++      }
+       if (op == CEPH_SNAP_OP_SPLIT)
+               /* we took a reference when we created the realm, above */
+@@ -1112,6 +1141,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ out:
+       if (locked_rwsem)
+               up_write(&mdsc->snap_rwsem);
++
++      if (close_sessions)
++              ceph_mdsc_close_sessions(mdsc);
+       return;
+ }
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 735279b2ceb55..3599fefa91f99 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -108,6 +108,7 @@ enum {
+       CEPH_MOUNT_UNMOUNTED,
+       CEPH_MOUNT_SHUTDOWN,
+       CEPH_MOUNT_RECOVER,
++      CEPH_MOUNT_FENCE_IO,
+ };
+ #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
+-- 
+2.39.0
+
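The ceph fix above repeats one pattern in several places: while s_mutex or snap_rwsem is held it only records that the sessions need closing (close_sessions = true) and calls ceph_mdsc_close_sessions() after the locks are dropped. A minimal userspace sketch of that defer-until-unlocked idea, using a pthread mutex and invented names rather than the ceph structures:

/* Record the decision under the lock, do the heavy work after unlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Heavy teardown that must not run with state_lock held. */
static void close_all_sessions(void)
{
	printf("closing sessions\n");
}

static void handle_update(bool corrupted)
{
	bool close_sessions = false;

	pthread_mutex_lock(&state_lock);
	if (corrupted)
		close_sessions = true;	/* only remember it here */
	pthread_mutex_unlock(&state_lock);

	/* Deferred: runs only after the lock has been released. */
	if (close_sessions)
		close_all_sessions();
}

int main(void)
{
	handle_update(false);	/* nothing to do */
	handle_update(true);	/* triggers the deferred teardown */
	return 0;
}

Build with cc -pthread; the point is only the ordering of the flag, the unlock, and the call.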
diff --git a/queue-6.1/ceph-move-mount-state-enum-to-super.h.patch b/queue-6.1/ceph-move-mount-state-enum-to-super.h.patch
new file mode 100644 (file)
index 0000000..9dd65fc
--- /dev/null
@@ -0,0 +1,66 @@
+From fe5e2abe272a6b383498fcd0dc9fbdee8af55b24 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Feb 2023 09:36:44 +0800
+Subject: ceph: move mount state enum to super.h
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit b38b17b6a01ca4e738af097a1529910646ef4270 ]
+
+This enum is only used by the ceph filesystem code in fs/ceph, so
+move it to where it belongs.
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/super.h              | 10 ++++++++++
+ include/linux/ceph/libceph.h | 10 ----------
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index ae4126f634101..735279b2ceb55 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -100,6 +100,16 @@ struct ceph_mount_options {
+       char *mon_addr;
+ };
++/* mount state */
++enum {
++      CEPH_MOUNT_MOUNTING,
++      CEPH_MOUNT_MOUNTED,
++      CEPH_MOUNT_UNMOUNTING,
++      CEPH_MOUNT_UNMOUNTED,
++      CEPH_MOUNT_SHUTDOWN,
++      CEPH_MOUNT_RECOVER,
++};
++
+ #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
+ struct ceph_fs_client {
+diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
+index 00af2c98da75a..4497d0a6772cd 100644
+--- a/include/linux/ceph/libceph.h
++++ b/include/linux/ceph/libceph.h
+@@ -99,16 +99,6 @@ struct ceph_options {
+ #define CEPH_AUTH_NAME_DEFAULT   "guest"
+-/* mount state */
+-enum {
+-      CEPH_MOUNT_MOUNTING,
+-      CEPH_MOUNT_MOUNTED,
+-      CEPH_MOUNT_UNMOUNTING,
+-      CEPH_MOUNT_UNMOUNTED,
+-      CEPH_MOUNT_SHUTDOWN,
+-      CEPH_MOUNT_RECOVER,
+-};
+-
+ static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
+ {
+       return timeout ?: MAX_SCHEDULE_TIMEOUT;
+-- 
+2.39.0
+
diff --git a/queue-6.1/dma-buf-add-unlocked-variant-of-vmapping-functions.patch b/queue-6.1/dma-buf-add-unlocked-variant-of-vmapping-functions.patch
new file mode 100644 (file)
index 0000000..c126985
--- /dev/null
@@ -0,0 +1,101 @@
+From a5b01b5f0c0e769850cb21d1a94f53e63a837b22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 20:22:09 +0300
+Subject: dma-buf: Add unlocked variant of vmapping functions
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit 56e5abba8c3ec5c6098007693f9cefafaa2aa010 ]
+
+Add unlocked variant of dma_buf_vmap/vunmap() that will be utilized
+by drivers that don't take the reservation lock explicitly.
+
+Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-2-dmitry.osipenko@collabora.com
+Stable-dep-of: 85e26dd5100a ("drm/client: fix circular reference counting issue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma-buf/dma-buf.c | 43 +++++++++++++++++++++++++++++++++++++++
+ include/linux/dma-buf.h   |  2 ++
+ 2 files changed, 45 insertions(+)
+
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index eb6b59363c4f5..7f8d45ed6b843 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -1430,6 +1430,33 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ }
+ EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
++/**
++ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
++ * address space. Same restrictions as for vmap and friends apply.
++ * @dmabuf:   [in]    buffer to vmap
++ * @map:      [out]   returns the vmap pointer
++ *
++ * Unlocked version of dma_buf_vmap()
++ *
++ * Returns 0 on success, or a negative errno code otherwise.
++ */
++int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
++{
++      int ret;
++
++      iosys_map_clear(map);
++
++      if (WARN_ON(!dmabuf))
++              return -EINVAL;
++
++      dma_resv_lock(dmabuf->resv, NULL);
++      ret = dma_buf_vmap(dmabuf, map);
++      dma_resv_unlock(dmabuf->resv);
++
++      return ret;
++}
++EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
++
+ /**
+  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+  * @dmabuf:   [in]    buffer to vunmap
+@@ -1454,6 +1481,22 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ }
+ EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
++/**
++ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
++ * @dmabuf:   [in]    buffer to vunmap
++ * @map:      [in]    vmap pointer to vunmap
++ */
++void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
++{
++      if (WARN_ON(!dmabuf))
++              return;
++
++      dma_resv_lock(dmabuf->resv, NULL);
++      dma_buf_vunmap(dmabuf, map);
++      dma_resv_unlock(dmabuf->resv);
++}
++EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
++
+ #ifdef CONFIG_DEBUG_FS
+ static int dma_buf_debug_show(struct seq_file *s, void *unused)
+ {
+diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
+index 71731796c8c3a..8daa054dd7fed 100644
+--- a/include/linux/dma-buf.h
++++ b/include/linux/dma-buf.h
+@@ -632,4 +632,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+                unsigned long);
+ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
++int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
++void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+ #endif /* __DMA_BUF_H__ */
+-- 
+2.39.0
+
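The new helpers are thin wrappers that take and drop the reservation lock around the existing locked dma_buf_vmap()/dma_buf_vunmap(). A userspace sketch of that wrapper shape, with a pthread mutex standing in for dma_resv and invented names (not the dma-buf API itself):

/* Locked core function plus an _unlocked convenience wrapper. */
#include <pthread.h>
#include <stdio.h>

struct buffer {
	pthread_mutex_t resv;	/* stand-in for the reservation lock */
	void *vaddr;
};

/* Caller must already hold buf->resv (mirrors the locked variant). */
static int buffer_vmap(struct buffer *buf, void **map)
{
	*map = buf->vaddr;
	return 0;
}

/* Wrapper for callers that do not hold the lock themselves. */
static int buffer_vmap_unlocked(struct buffer *buf, void **map)
{
	int ret;

	pthread_mutex_lock(&buf->resv);
	ret = buffer_vmap(buf, map);
	pthread_mutex_unlock(&buf->resv);
	return ret;
}

int main(void)
{
	static char backing[16];
	struct buffer buf = { .vaddr = backing };
	void *map = NULL;

	pthread_mutex_init(&buf.resv, NULL);
	if (buffer_vmap_unlocked(&buf, &map) == 0)
		printf("mapped at %p\n", map);
	pthread_mutex_destroy(&buf.resv);
	return 0;
}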
diff --git a/queue-6.1/drm-amd-display-add-missing-brackets-in-calculation.patch b/queue-6.1/drm-amd-display-add-missing-brackets-in-calculation.patch
new file mode 100644 (file)
index 0000000..71bb9e0
--- /dev/null
@@ -0,0 +1,41 @@
+From e843c1097294518913d97f7af07706d53463fdb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jan 2023 15:34:35 -0500
+Subject: drm/amd/display: Add missing brackets in calculation
+
+From: Daniel Miess <Daniel.Miess@amd.com>
+
+[ Upstream commit ea062fd28f922cb118bfb33229f405b81aff7781 ]
+
+[Why]
+Brackets missing in the calculation for MIN_DST_Y_NEXT_START
+
+[How]
+Add missing brackets for this calculation
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Daniel Miess <Daniel.Miess@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c    | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+index 0d12fd079cd61..3afd3c80e6da8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+@@ -3184,7 +3184,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+               } else {
+                       v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
+               }
+-              v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
++              v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
+               if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
+                               <= (isInterlaceTiming ?
+                                               dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
+-- 
+2.39.0
+
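The fix only adds parentheses, but C's left-to-right associativity means a / b / c is (a / b) / c, so without the brackets the expression divides by PixelClock instead of dividing by the ratio HTotal / PixelClock. A small stand-alone demonstration with made-up numbers (not values from the DML code):

/* Show how the extra brackets change the result. */
#include <stdio.h>

int main(void)
{
	double TSetup = 2.0, HTotal = 2200.0, PixelClock = 594.0;

	/* Original: ((4 * TSetup) / HTotal) / PixelClock */
	double wrong = 4.0 * TSetup / (double)HTotal / PixelClock;

	/* Fixed: (4 * TSetup) / (HTotal / PixelClock) */
	double right = 4.0 * TSetup / ((double)HTotal / PixelClock);

	printf("without brackets: %f\n", wrong);
	printf("with brackets:    %f\n", right);
	return 0;
}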
diff --git a/queue-6.1/drm-amd-display-adjust-downscaling-limits-for-dcn314.patch b/queue-6.1/drm-amd-display-adjust-downscaling-limits-for-dcn314.patch
new file mode 100644 (file)
index 0000000..8a84657
--- /dev/null
@@ -0,0 +1,54 @@
+From e4bcbed9ffddd5dd5a8675f99a7d6580494f9645 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jan 2023 15:45:30 -0500
+Subject: drm/amd/display: Adjust downscaling limits for dcn314
+
+From: Daniel Miess <Daniel.Miess@amd.com>
+
+[ Upstream commit dd2db2dc4bd298f33dea50c80c3c11bee4e3b0a4 ]
+
+[Why]
+Lower max_downscale_ratio and ARGB888 downscale factor
+to prevent cases where underflow may occur on dcn314
+
+[How]
+Set max_downscale_ratio to 400 and ARGB downscale factor
+to 250 for dcn314
+
+Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Daniel Miess <Daniel.Miess@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 9066c511a0529..c80c8c8f51e97 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -871,8 +871,9 @@ static const struct dc_plane_cap plane_cap = {
+       },
+       // 6:1 downscaling ratio: 1000/6 = 166.666
++      // 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
+       .max_downscale_factor = {
+-                      .argb8888 = 167,
++                      .argb8888 = 250,
+                       .nv12 = 167,
+                       .fp16 = 167
+       },
+@@ -1755,7 +1756,7 @@ static bool dcn314_resource_construct(
+       pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+       pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+       pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+-      dc->caps.max_downscale_ratio = 600;
++      dc->caps.max_downscale_ratio = 400;
+       dc->caps.i2c_speed_in_khz = 100;
+       dc->caps.i2c_speed_in_khz_hdcp = 100;
+       dc->caps.max_cursor_size = 256;
+-- 
+2.39.0
+
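The plane caps touched above express the downscale limit in thousandths of the source size, so a maximum N:1 ratio becomes roughly 1000/N — 167 for 6:1 and 250 for 4:1, matching the in-patch comments. A small sketch of that arithmetic; the round-up convention is an assumption made only to reproduce the 167 value, not taken from the driver:

/* Convert an N:1 maximum downscale ratio to the fixed-point cap. */
#include <stdio.h>

static unsigned int max_downscale_factor(unsigned int ratio)
{
	/* Round up so the cap never permits more than ratio:1. */
	return (1000 + ratio - 1) / ratio;
}

int main(void)
{
	printf("6:1 -> %u\n", max_downscale_factor(6));	/* 167 */
	printf("4:1 -> %u\n", max_downscale_factor(4));	/* 250 */
	return 0;
}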
diff --git a/queue-6.1/drm-amd-display-properly-handle-additional-cases-whe.patch b/queue-6.1/drm-amd-display-properly-handle-additional-cases-whe.patch
new file mode 100644 (file)
index 0000000..749a0bb
--- /dev/null
@@ -0,0 +1,51 @@
+From 28cfca1f62e5b32d72394f71cfc66f432a760f0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 14:35:16 -0500
+Subject: drm/amd/display: Properly handle additional cases where DCN is not
+ supported
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 6fc547a5a2ef5ce05b16924106663ab92f8f87a7 ]
+
+There could be boards with DCN listed in IP discovery, but no
+display hardware actually wired up.  In this case the vbios
+display table will not be populated.  Detect this case and skip
+loading DM when it occurs.
+
+v2: Mark DCN as harvested as well so other display checks
+elsewhere in the driver are handled properly.
+
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 988b1c947aefc..2d63248d09bbb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4526,6 +4526,17 @@ DEVICE_ATTR_WO(s3_debug);
+ static int dm_early_init(void *handle)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++      struct amdgpu_mode_info *mode_info = &adev->mode_info;
++      struct atom_context *ctx = mode_info->atom_context;
++      int index = GetIndexIntoMasterTable(DATA, Object_Header);
++      u16 data_offset;
++
++      /* if there is no object header, skip DM */
++      if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
++              adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
++              dev_info(adev->dev, "No object header, skipping DM\n");
++              return -ENOENT;
++      }
+       switch (adev->asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC_SI)
+-- 
+2.39.0
+
diff --git a/queue-6.1/drm-amd-display-reset-dmub-mailbox-sw-state-after-hw.patch b/queue-6.1/drm-amd-display-reset-dmub-mailbox-sw-state-after-hw.patch
new file mode 100644 (file)
index 0000000..7a2d9b1
--- /dev/null
@@ -0,0 +1,61 @@
+From 1b44d9fd71360e62a035fac7877c7a2471a119e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jan 2023 11:14:30 -0500
+Subject: drm/amd/display: Reset DMUB mailbox SW state after HW reset
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit 154711aa5759ef9b45903124fa813c4c29ee681c ]
+
+[Why]
+Otherwise we can be out of sync with what's in the hardware, leading
+to us rerunning every command that's presently in the ringbuffer.
+
+[How]
+Reset software state for the mailboxes in hw_reset callback.
+This is already done as part of the mailbox init in hw_init, but we
+do need to remember to reset the last cached wptr value as well here.
+
+Reviewed-by: Hansen Dsouza <hansen.dsouza@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+index 4a122925c3ae9..92c18bfb98b3b 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
+       if (dmub->hw_funcs.reset)
+               dmub->hw_funcs.reset(dmub);
++      /* reset the cache of the last wptr as well now that hw is reset */
++      dmub->inbox1_last_wptr = 0;
++
+       cw0.offset.quad_part = inst_fb->gpu_addr;
+       cw0.region.base = DMUB_CW0_BASE;
+       cw0.region.top = cw0.region.base + inst_fb->size - 1;
+@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
+       if (dmub->hw_funcs.reset)
+               dmub->hw_funcs.reset(dmub);
++      /* mailboxes have been reset in hw, so reset the sw state as well */
++      dmub->inbox1_last_wptr = 0;
++      dmub->inbox1_rb.wrpt = 0;
++      dmub->inbox1_rb.rptr = 0;
++      dmub->outbox0_rb.wrpt = 0;
++      dmub->outbox0_rb.rptr = 0;
++      dmub->outbox1_rb.wrpt = 0;
++      dmub->outbox1_rb.rptr = 0;
++
+       dmub->hw_init = false;
+       return DMUB_STATUS_OK;
+-- 
+2.39.0
+
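The change mirrors a hardware-side reset into the software bookkeeping: the cached last write pointer and the ring-buffer read/write offsets all return to zero so stale entries are not replayed. A minimal userspace sketch of that idea with an illustrative struct, not the real DMUB types:

/* Reset software ring-buffer state so it matches freshly reset hardware. */
#include <stdint.h>
#include <stdio.h>

struct ring_state {
	uint32_t wrpt;		/* software write pointer */
	uint32_t rptr;		/* software read pointer */
	uint32_t last_wptr;	/* last write pointer handed to hardware */
};

static void ring_hw_reset(struct ring_state *rb)
{
	/* ...hardware-side reset would happen here... */

	/* Mirror the reset in software so both sides agree again. */
	rb->wrpt = 0;
	rb->rptr = 0;
	rb->last_wptr = 0;
}

int main(void)
{
	struct ring_state inbox = { .wrpt = 128, .rptr = 64, .last_wptr = 128 };

	ring_hw_reset(&inbox);
	printf("wrpt=%u rptr=%u last_wptr=%u\n",
	       inbox.wrpt, inbox.rptr, inbox.last_wptr);
	return 0;
}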
diff --git a/queue-6.1/drm-amd-display-unassign-does_plane_fit_in_mall-func.patch b/queue-6.1/drm-amd-display-unassign-does_plane_fit_in_mall-func.patch
new file mode 100644 (file)
index 0000000..9290c4c
--- /dev/null
@@ -0,0 +1,42 @@
+From 17e301f67192e8d3c4109e58f3e87ad9a3b584b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jan 2023 17:09:54 -0500
+Subject: drm/amd/display: Unassign does_plane_fit_in_mall function from dcn3.2
+
+From: George Shen <george.shen@amd.com>
+
+[ Upstream commit 275d8a1db261a1272a818d40ebc61b3b865b60e5 ]
+
+[Why]
+The hwss function does_plane_fit_in_mall is not applicable to dcn3.2 asics.
+Using it with dcn3.2 can result in undefined behaviour.
+
+[How]
+Assign the function pointer to NULL.
+
+Reviewed-by: Alvin Lee <Alvin.Lee2@amd.com>
+Acked-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: George Shen <george.shen@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+index 45a949ba6f3f3..7b7f0e6b2a2ff 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+@@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+       .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .calc_vupdate_position = dcn10_calc_vupdate_position,
+       .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
+-      .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
++      .does_plane_fit_in_mall = NULL,
+       .set_backlight_level = dcn21_set_backlight_level,
+       .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+       .hardware_release = dcn30_hardware_release,
+-- 
+2.39.0
+
diff --git a/queue-6.1/drm-amdgpu-enable-hdp-sd-for-gfx-11.0.3.patch b/queue-6.1/drm-amdgpu-enable-hdp-sd-for-gfx-11.0.3.patch
new file mode 100644 (file)
index 0000000..449d807
--- /dev/null
@@ -0,0 +1,36 @@
+From ee209252708317bc77f62e42d3b301510746f222 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Jan 2023 14:24:34 +0800
+Subject: drm/amdgpu: enable HDP SD for gfx 11.0.3
+
+From: Evan Quan <evan.quan@amd.com>
+
+[ Upstream commit bb25849c0fa550b26cecc9c476c519a927c66898 ]
+
+Enable HDP clock gating control for gfx 11.0.3.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/soc21.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 9bc9852b9cda9..230e15fed755c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -643,7 +643,8 @@ static int soc21_common_early_init(void *handle)
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_REPEATER_FGCG |
+-                      AMD_CG_SUPPORT_GFX_MGCG;
++                      AMD_CG_SUPPORT_GFX_MGCG |
++                      AMD_CG_SUPPORT_HDP_SD;
+               adev->pg_flags = AMD_PG_SUPPORT_VCN |
+                       AMD_PG_SUPPORT_VCN_DPG |
+                       AMD_PG_SUPPORT_JPEG;
+-- 
+2.39.0
+
diff --git a/queue-6.1/drm-amdgpu-enable-vclk-dclk-node-for-gc11.0.3.patch b/queue-6.1/drm-amdgpu-enable-vclk-dclk-node-for-gc11.0.3.patch
new file mode 100644 (file)
index 0000000..cdee210
--- /dev/null
@@ -0,0 +1,45 @@
+From 0063eccdab655887df0b2998c8131f1a6c221923 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 16:02:47 +0800
+Subject: drm/amdgpu: Enable vclk dclk node for gc11.0.3
+
+From: Yiqing Yao <yiqing.yao@amd.com>
+
+[ Upstream commit ac7170082c0e140663f0853d3de733a5341ce7b0 ]
+
+These sysfs nodes have been tested and are supported, so enable them.
+
+Signed-off-by: Yiqing Yao <yiqing.yao@amd.com>
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/pm/amdgpu_pm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 41635694e5216..2f3e239e623dc 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -2009,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
+                     gc_ver == IP_VERSION(10, 3, 0) ||
+                     gc_ver == IP_VERSION(10, 1, 2) ||
+                     gc_ver == IP_VERSION(11, 0, 0) ||
+-                    gc_ver == IP_VERSION(11, 0, 2)))
++                    gc_ver == IP_VERSION(11, 0, 2) ||
++                    gc_ver == IP_VERSION(11, 0, 3)))
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+               if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+                     gc_ver == IP_VERSION(10, 3, 0) ||
+                     gc_ver == IP_VERSION(10, 1, 2) ||
+                     gc_ver == IP_VERSION(11, 0, 0) ||
+-                    gc_ver == IP_VERSION(11, 0, 2)))
++                    gc_ver == IP_VERSION(11, 0, 2) ||
++                    gc_ver == IP_VERSION(11, 0, 3)))
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
+               if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+-- 
+2.39.0
+
diff --git a/queue-6.1/drm-client-fix-circular-reference-counting-issue.patch b/queue-6.1/drm-client-fix-circular-reference-counting-issue.patch
new file mode 100644 (file)
index 0000000..a58e09a
--- /dev/null
@@ -0,0 +1,151 @@
+From db92a4cf57796e08928a386287b7112ba810a7ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 10:24:26 +0100
+Subject: drm/client: fix circular reference counting issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+[ Upstream commit 85e26dd5100a182bf8448050427539c0a66ab793 ]
+
+We reference dump buffers both by their handle and by their GEM
+object. The problem is that when anybody iterates over the DRM
+framebuffers and exports the underlying GEM objects through DMA-buf,
+we run into a circular reference count situation.
+
+The result is that the fbdev handling holds the GEM handle, preventing
+the DMA-buf in the GEM object from being released. This DMA-buf in
+turn holds a reference to the driver module, whose unload is what
+would release the fbdev.
+
+Break that loop by releasing the handle as soon as the DRM
+framebuffer object is created. The DRM framebuffer and the DRM client
+buffer structure still hold a reference to the underlying GEM object
+preventing its destruction.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Fixes: c76f0f7cb546 ("drm: Begin an API for in-kernel clients")
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Tested-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230126102814.8722-1-christian.koenig@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client.c | 33 ++++++++++++++++++++-------------
+ include/drm/drm_client.h     |  5 -----
+ 2 files changed, 20 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index fd67efe37c636..056ab9d5f313b 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -233,21 +233,17 @@ void drm_client_dev_restore(struct drm_device *dev)
+ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+ {
+-      struct drm_device *dev = buffer->client->dev;
+-
+       if (buffer->gem) {
+               drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+               drm_gem_object_put(buffer->gem);
+       }
+-      if (buffer->handle)
+-              drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+-
+       kfree(buffer);
+ }
+ static struct drm_client_buffer *
+-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
++drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
++                       u32 format, u32 *handle)
+ {
+       const struct drm_format_info *info = drm_format_info(format);
+       struct drm_mode_create_dumb dumb_args = { };
+@@ -269,16 +265,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
+       if (ret)
+               goto err_delete;
+-      buffer->handle = dumb_args.handle;
+-      buffer->pitch = dumb_args.pitch;
+-
+       obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+       if (!obj)  {
+               ret = -ENOENT;
+               goto err_delete;
+       }
++      buffer->pitch = dumb_args.pitch;
+       buffer->gem = obj;
++      *handle = dumb_args.handle;
+       return buffer;
+@@ -365,7 +360,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
+ }
+ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+-                                 u32 width, u32 height, u32 format)
++                                 u32 width, u32 height, u32 format,
++                                 u32 handle)
+ {
+       struct drm_client_dev *client = buffer->client;
+       struct drm_mode_fb_cmd fb_req = { };
+@@ -377,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+       fb_req.depth = info->depth;
+       fb_req.width = width;
+       fb_req.height = height;
+-      fb_req.handle = buffer->handle;
++      fb_req.handle = handle;
+       fb_req.pitch = buffer->pitch;
+       ret = drm_mode_addfb(client->dev, &fb_req, client->file);
+@@ -414,13 +410,24 @@ struct drm_client_buffer *
+ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+ {
+       struct drm_client_buffer *buffer;
++      u32 handle;
+       int ret;
+-      buffer = drm_client_buffer_create(client, width, height, format);
++      buffer = drm_client_buffer_create(client, width, height, format,
++                                        &handle);
+       if (IS_ERR(buffer))
+               return buffer;
+-      ret = drm_client_buffer_addfb(buffer, width, height, format);
++      ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
++
++      /*
++       * The handle is only needed for creating the framebuffer, destroy it
++       * again to solve a circular dependency should anybody export the GEM
++       * object as DMA-buf. The framebuffer and our buffer structure are still
++       * holding references to the GEM object to prevent its destruction.
++       */
++      drm_mode_destroy_dumb(client->dev, handle, client->file);
++
+       if (ret) {
+               drm_client_buffer_delete(buffer);
+               return ERR_PTR(ret);
+diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
+index 4fc8018edddad..1220d185c776b 100644
+--- a/include/drm/drm_client.h
++++ b/include/drm/drm_client.h
+@@ -126,11 +126,6 @@ struct drm_client_buffer {
+        */
+       struct drm_client_dev *client;
+-      /**
+-       * @handle: Buffer handle
+-       */
+-      u32 handle;
+-
+       /**
+        * @pitch: Buffer pitch
+        */
+-- 
+2.39.0
+
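The core of the fix is keeping the GEM handle only as long as it is needed to create the framebuffer and dropping it immediately afterwards, while the long-lived framebuffer/client-buffer references keep the object alive. A toy refcount sketch of that short-lived setup reference — plain counters with invented names, not the DRM reference model:

/* Take a setup-only reference, drop it as soon as setup is done. */
#include <stdio.h>

struct gem_object {
	int refcount;
};

static void obj_get(struct gem_object *obj) { obj->refcount++; }

static void obj_put(struct gem_object *obj)
{
	if (--obj->refcount == 0)
		printf("object freed\n");
}

int main(void)
{
	struct gem_object obj = { .refcount = 1 };	/* creation ref */

	obj_get(&obj);	/* "handle" ref, needed only to create the fb */
	obj_get(&obj);	/* fb/client-buffer ref, long lived */

	obj_put(&obj);	/* drop the handle right after fb creation */

	obj_put(&obj);	/* fb/client-buffer teardown */
	obj_put(&obj);	/* creation ref dropped: prints "object freed" */
	return 0;
}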
diff --git a/queue-6.1/drm-client-prevent-null-dereference-in-drm_client_bu.patch b/queue-6.1/drm-client-prevent-null-dereference-in-drm_client_bu.patch
new file mode 100644 (file)
index 0000000..5ddfd54
--- /dev/null
@@ -0,0 +1,52 @@
+From 90648d86814df13b130b722c653bc4913b8b4d7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 30 Oct 2022 18:44:12 +0300
+Subject: drm/client: Prevent NULL dereference in drm_client_buffer_delete()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit 444bbba708e804c13ad757068d1cb31ed6460754 ]
+
+drm_gem_vunmap() will crash with a NULL dereference if the passed
+object pointer is NULL. This wasn't a problem before locking support
+was added to drm_gem_vunmap() because the mapping argument was always
+NULL together with the object. Make drm_client_buffer_delete() check
+whether the GEM object is NULL before trying to unmap it, which is
+the case on framebuffer creation error.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/dri-devel/Y1kFEGxT8MVlf32V@kili/
+Fixes: 79e2cf2e7a19 ("drm/gem: Take reservation lock for vmap/vunmap operations")
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221030154412.8320-3-dmitry.osipenko@collabora.com
+Stable-dep-of: 85e26dd5100a ("drm/client: fix circular reference counting issue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index 38e1be991caa5..fd67efe37c636 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -235,10 +235,10 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+ {
+       struct drm_device *dev = buffer->client->dev;
+-      drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+-
+-      if (buffer->gem)
++      if (buffer->gem) {
++              drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+               drm_gem_object_put(buffer->gem);
++      }
+       if (buffer->handle)
+               drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+-- 
+2.39.0
+
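The guard simply skips the vunmap/put pair when the GEM pointer was never assigned, which is exactly the state a client buffer is left in when framebuffer creation fails before the object lookup. A tiny stand-alone illustration of that NULL guard, with illustrative types rather than the DRM structures:

/* Only tear down what was actually set up. */
#include <stddef.h>
#include <stdio.h>

struct buffer {
	void *gem;	/* NULL when creation failed early */
	void *map;
};

static void buffer_delete(struct buffer *buf)
{
	if (buf->gem) {			/* guard prevents a NULL dereference */
		printf("vunmap + put %p\n", buf->gem);
		buf->gem = NULL;
	}
	/* nothing to do for a buffer that never got its object */
}

int main(void)
{
	struct buffer ok = { .gem = &ok }, failed = { .gem = NULL };

	buffer_delete(&ok);
	buffer_delete(&failed);
	return 0;
}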
diff --git a/queue-6.1/drm-client-switch-drm_client_buffer_delete-to-unlock.patch b/queue-6.1/drm-client-switch-drm_client_buffer_delete-to-unlock.patch
new file mode 100644 (file)
index 0000000..aaeca22
--- /dev/null
@@ -0,0 +1,47 @@
+From b58f7e943a7894851a260d56ec72acb51374517a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 00:33:35 +0300
+Subject: drm/client: Switch drm_client_buffer_delete() to unlocked
+ drm_gem_vunmap
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit 27b2ae654370e1a8e446b0e48c4e406abed12ca1 ]
+
+drm_client_buffer_delete() was accidentally not switched to unlocked GEM
+vunmapping when the rest of the drm_client code transitioned to the
+unlocked variants of the vmapping functions. Make drm_client_buffer_delete()
+use the unlocked variant. This fixes a lockdep warning splat about a
+missing reservation lock when a framebuffer is released.
+
+Reported-by: kernel test robot <yujie.liu@intel.com>
+Link: https://lore.kernel.org/dri-devel/890f70db-68b0-8456-ca3c-c5496ef90517@collabora.com/T/
+Fixes: 79e2cf2e7a19 ("drm/gem: Take reservation lock for vmap/vunmap operations")
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221020213335.309092-1-dmitry.osipenko@collabora.com
+Stable-dep-of: 85e26dd5100a ("drm/client: fix circular reference counting issue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index fbcb1e995384a..38e1be991caa5 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -235,7 +235,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+ {
+       struct drm_device *dev = buffer->client->dev;
+-      drm_gem_vunmap(buffer->gem, &buffer->map);
++      drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+       if (buffer->gem)
+               drm_gem_object_put(buffer->gem);
+-- 
+2.39.0
+
diff --git a/queue-6.1/drm-gem-take-reservation-lock-for-vmap-vunmap-operat.patch b/queue-6.1/drm-gem-take-reservation-lock-for-vmap-vunmap-operat.patch
new file mode 100644 (file)
index 0000000..4700eda
--- /dev/null
@@ -0,0 +1,363 @@
+From 5aba85285dfd0791457a02ef31250dc78b6c521e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 20:22:11 +0300
+Subject: drm/gem: Take reservation lock for vmap/vunmap operations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+[ Upstream commit 79e2cf2e7a193473dfb0da3b9b869682b43dc60f ]
+
+The new common dma-buf locking convention will require buffer importers
+to hold the reservation lock around mapping operations. Make the DRM GEM
+core take the lock around the vmapping operations and update DRM drivers
+to use the locked functions where the DRM core now holds the lock.
+This patch prepares the DRM core and drivers for the common dynamic
+dma-buf locking convention.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-4-dmitry.osipenko@collabora.com
+Stable-dep-of: 85e26dd5100a ("drm/client: fix circular reference counting issue")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_client.c                 |  4 ++--
+ drivers/gpu/drm/drm_gem.c                    | 24 ++++++++++++++++++++
+ drivers/gpu/drm/drm_gem_dma_helper.c         |  6 ++---
+ drivers/gpu/drm/drm_gem_framebuffer_helper.c |  6 ++---
+ drivers/gpu/drm/drm_gem_ttm_helper.c         |  9 +-------
+ drivers/gpu/drm/lima/lima_sched.c            |  4 ++--
+ drivers/gpu/drm/panfrost/panfrost_dump.c     |  4 ++--
+ drivers/gpu/drm/panfrost/panfrost_perfcnt.c  |  6 ++---
+ drivers/gpu/drm/qxl/qxl_object.c             | 17 +++++++-------
+ drivers/gpu/drm/qxl/qxl_prime.c              |  4 ++--
+ include/drm/drm_gem.h                        |  3 +++
+ 11 files changed, 54 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index 2b230b4d69423..fbcb1e995384a 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -323,7 +323,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+        * fd_install step out of the driver backend hooks, to make that
+        * final step optional for internal users.
+        */
+-      ret = drm_gem_vmap(buffer->gem, map);
++      ret = drm_gem_vmap_unlocked(buffer->gem, map);
+       if (ret)
+               return ret;
+@@ -345,7 +345,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+ {
+       struct iosys_map *map = &buffer->map;
+-      drm_gem_vunmap(buffer->gem, map);
++      drm_gem_vunmap_unlocked(buffer->gem, map);
+ }
+ EXPORT_SYMBOL(drm_client_buffer_vunmap);
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 8b68a3c1e6ab6..b8db675e7fb5e 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1158,6 +1158,8 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+ {
+       int ret;
++      dma_resv_assert_held(obj->resv);
++
+       if (!obj->funcs->vmap)
+               return -EOPNOTSUPP;
+@@ -1173,6 +1175,8 @@ EXPORT_SYMBOL(drm_gem_vmap);
+ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+ {
++      dma_resv_assert_held(obj->resv);
++
+       if (iosys_map_is_null(map))
+               return;
+@@ -1184,6 +1188,26 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+ }
+ EXPORT_SYMBOL(drm_gem_vunmap);
++int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
++{
++      int ret;
++
++      dma_resv_lock(obj->resv, NULL);
++      ret = drm_gem_vmap(obj, map);
++      dma_resv_unlock(obj->resv);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_gem_vmap_unlocked);
++
++void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
++{
++      dma_resv_lock(obj->resv, NULL);
++      drm_gem_vunmap(obj, map);
++      dma_resv_unlock(obj->resv);
++}
++EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
++
+ /**
+  * drm_gem_lock_reservations - Sets up the ww context and acquires
+  * the lock on an array of GEM objects.
+diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
+index f6901ff97bbb5..1e658c4483668 100644
+--- a/drivers/gpu/drm/drm_gem_dma_helper.c
++++ b/drivers/gpu/drm/drm_gem_dma_helper.c
+@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
+       if (gem_obj->import_attach) {
+               if (dma_obj->vaddr)
+-                      dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
++                      dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
+               drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
+       } else if (dma_obj->vaddr) {
+               if (dma_obj->map_noncoherent)
+@@ -581,7 +581,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
+       struct iosys_map map;
+       int ret;
+-      ret = dma_buf_vmap(attach->dmabuf, &map);
++      ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
+       if (ret) {
+               DRM_ERROR("Failed to vmap PRIME buffer\n");
+               return ERR_PTR(ret);
+@@ -589,7 +589,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
+       obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
+       if (IS_ERR(obj)) {
+-              dma_buf_vunmap(attach->dmabuf, &map);
++              dma_buf_vunmap_unlocked(attach->dmabuf, &map);
+               return obj;
+       }
+diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+index 880a4975507fc..e35e224e6303a 100644
+--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
++++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+@@ -354,7 +354,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+                       ret = -EINVAL;
+                       goto err_drm_gem_vunmap;
+               }
+-              ret = drm_gem_vmap(obj, &map[i]);
++              ret = drm_gem_vmap_unlocked(obj, &map[i]);
+               if (ret)
+                       goto err_drm_gem_vunmap;
+       }
+@@ -376,7 +376,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+               obj = drm_gem_fb_get_obj(fb, i);
+               if (!obj)
+                       continue;
+-              drm_gem_vunmap(obj, &map[i]);
++              drm_gem_vunmap_unlocked(obj, &map[i]);
+       }
+       return ret;
+ }
+@@ -403,7 +403,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
+                       continue;
+               if (iosys_map_is_null(&map[i]))
+                       continue;
+-              drm_gem_vunmap(obj, &map[i]);
++              drm_gem_vunmap_unlocked(obj, &map[i]);
+       }
+ }
+ EXPORT_SYMBOL(drm_gem_fb_vunmap);
+diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
+index e5fc875990c4f..d5962a34c01d5 100644
+--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
++++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
+@@ -64,13 +64,8 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+                    struct iosys_map *map)
+ {
+       struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+-      int ret;
+-
+-      dma_resv_lock(gem->resv, NULL);
+-      ret = ttm_bo_vmap(bo, map);
+-      dma_resv_unlock(gem->resv);
+-      return ret;
++      return ttm_bo_vmap(bo, map);
+ }
+ EXPORT_SYMBOL(drm_gem_ttm_vmap);
+@@ -87,9 +82,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+ {
+       struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+-      dma_resv_lock(gem->resv, NULL);
+       ttm_bo_vunmap(bo, map);
+-      dma_resv_unlock(gem->resv);
+ }
+ EXPORT_SYMBOL(drm_gem_ttm_vunmap);
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index e82931712d8a2..ff003403fbbc7 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+               } else {
+                       buffer_chunk->size = lima_bo_size(bo);
+-                      ret = drm_gem_shmem_vmap(&bo->base, &map);
++                      ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+                       if (ret) {
+                               kvfree(et);
+                               goto out;
+@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+                       memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
+-                      drm_gem_shmem_vunmap(&bo->base, &map);
++                      drm_gem_vunmap_unlocked(&bo->base.base, &map);
+               }
+               buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
+index 6bd0634e2d580..e7942ac449c68 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
++++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
+@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
+                       goto dump_header;
+               }
+-              ret = drm_gem_shmem_vmap(&bo->base, &map);
++              ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+               if (ret) {
+                       dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
+                       iter.hdr->bomap.valid = 0;
+@@ -236,7 +236,7 @@ void panfrost_core_dump(struct panfrost_job *job)
+               vaddr = map.vaddr;
+               memcpy(iter.data, vaddr, bo->base.base.size);
+-              drm_gem_shmem_vunmap(&bo->base, &map);
++              drm_gem_vunmap_unlocked(&bo->base.base, &map);
+               iter.hdr->bomap.valid = 1;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+index bc0df93f7f215..ba9b6e2b26363 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+               goto err_close_bo;
+       }
+-      ret = drm_gem_shmem_vmap(bo, &map);
++      ret = drm_gem_vmap_unlocked(&bo->base, &map);
+       if (ret)
+               goto err_put_mapping;
+       perfcnt->buf = map.vaddr;
+@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+       return 0;
+ err_vunmap:
+-      drm_gem_shmem_vunmap(bo, &map);
++      drm_gem_vunmap_unlocked(&bo->base, &map);
+ err_put_mapping:
+       panfrost_gem_mapping_put(perfcnt->mapping);
+ err_close_bo:
+@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
+                 GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
+       perfcnt->user = NULL;
+-      drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map);
++      drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+       perfcnt->buf = NULL;
+       panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+       panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
+index 695d9308d1f08..06a58dad5f5cf 100644
+--- a/drivers/gpu/drm/qxl/qxl_object.c
++++ b/drivers/gpu/drm/qxl/qxl_object.c
+@@ -168,9 +168,16 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
+               bo->map_count++;
+               goto out;
+       }
+-      r = ttm_bo_vmap(&bo->tbo, &bo->map);
++
++      r = __qxl_bo_pin(bo);
+       if (r)
+               return r;
++
++      r = ttm_bo_vmap(&bo->tbo, &bo->map);
++      if (r) {
++              __qxl_bo_unpin(bo);
++              return r;
++      }
+       bo->map_count = 1;
+       /* TODO: Remove kptr in favor of map everywhere. */
+@@ -192,12 +199,6 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
+       if (r)
+               return r;
+-      r = __qxl_bo_pin(bo);
+-      if (r) {
+-              qxl_bo_unreserve(bo);
+-              return r;
+-      }
+-
+       r = qxl_bo_vmap_locked(bo, map);
+       qxl_bo_unreserve(bo);
+       return r;
+@@ -247,6 +248,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
+               return;
+       bo->kptr = NULL;
+       ttm_bo_vunmap(&bo->tbo, &bo->map);
++      __qxl_bo_unpin(bo);
+ }
+ int qxl_bo_vunmap(struct qxl_bo *bo)
+@@ -258,7 +260,6 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
+               return r;
+       qxl_bo_vunmap_locked(bo);
+-      __qxl_bo_unpin(bo);
+       qxl_bo_unreserve(bo);
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
+index 142d01415acb3..9169c26357d36 100644
+--- a/drivers/gpu/drm/qxl/qxl_prime.c
++++ b/drivers/gpu/drm/qxl/qxl_prime.c
+@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+       int ret;
+-      ret = qxl_bo_vmap(bo, map);
++      ret = qxl_bo_vmap_locked(bo, map);
+       if (ret < 0)
+               return ret;
+@@ -71,5 +71,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
+ {
+       struct qxl_bo *bo = gem_to_qxl_bo(obj);
+-      qxl_bo_vunmap(bo);
++      qxl_bo_vunmap_locked(bo);
+ }
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index bd42f25e449c2..a17c2f903f81e 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -457,6 +457,9 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
+ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed);
++int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
++void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
++
+ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+                          int count, struct drm_gem_object ***objs_out);
+ struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
+-- 
+2.39.0
+
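The split introduced by this patch is the usual locked/unlocked API pairing: the core drm_gem_vmap()/drm_gem_vunmap() entry points now assert that the reservation lock is held, while thin _unlocked wrappers take the lock for callers outside any wider dma_resv critical section. A hedged sketch of the two caller styles follows; the helper names map_for_cpu_access() and map_while_locked() are illustrative, the drm_gem_* and dma_resv_* calls are the ones used by the patch.

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>

/* Caller that is not inside a reservation-locked section: use the wrapper,
 * which locks obj->resv around the core call.
 */
static int map_for_cpu_access(struct drm_gem_object *obj, struct iosys_map *map)
{
        return drm_gem_vmap_unlocked(obj, map);
}

/* Caller that already holds the reservation lock (e.g. deep inside a
 * dma-buf operation): call the core function directly; the lockdep
 * assertion documents and checks the precondition.
 */
static int map_while_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_assert_held(obj->resv);
        return drm_gem_vmap(obj, map);
}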
diff --git a/queue-6.1/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch b/queue-6.1/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch
new file mode 100644 (file)
index 0000000..1c6cb61
--- /dev/null
@@ -0,0 +1,69 @@
+From 67c46ea25374f0b2a0366fec602b3a5431afecc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 08:37:13 +1000
+Subject: drm/nouveau/devinit/tu102-: wait for GFW_BOOT_PROGRESS == COMPLETED
+
+From: Ben Skeggs <bskeggs@redhat.com>
+
+[ Upstream commit d22915d22ded21fd5b24b60d174775789f173997 ]
+
+Starting from Turing, the driver is no longer responsible for initiating
+DEVINIT when required, as the GPU now loads a FW image from ROM and
+executes DEVINIT itself after power-on.
+
+However, we apparently still need to wait for it to complete.
+
+This should correct some issues with runpm on some systems, where we get
+control of the HW before it's been fully reinitialised after resume from
+suspend.
+
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230130223715.1831509-1-bskeggs@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/nouveau/nvkm/subdev/devinit/tu102.c   | 23 +++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+index 634f64f88fc8b..81a1ad2c88a7e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+       return ret;
+ }
++static int
++tu102_devinit_wait(struct nvkm_device *device)
++{
++      unsigned timeout = 50 + 2000;
++
++      do {
++              if (nvkm_rd32(device, 0x118128) & 0x00000001) {
++                      if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
++                              return 0;
++              }
++
++              usleep_range(1000, 2000);
++      } while (timeout--);
++
++      return -ETIMEDOUT;
++}
++
+ int
+ tu102_devinit_post(struct nvkm_devinit *base, bool post)
+ {
+       struct nv50_devinit *init = nv50_devinit(base);
++      int ret;
++
++      ret = tu102_devinit_wait(init->base.subdev.device);
++      if (ret)
++              return ret;
++
+       gm200_devinit_preos(init, post);
+       return 0;
+ }
+-- 
+2.39.0
+
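The added tu102_devinit_wait() is a plain poll-until-complete loop: read a status indication, sleep briefly, give up after a bounded number of iterations. A generic sketch of that shape is below; read_status_complete() is a hypothetical helper standing in for the nvkm_rd32() reads of the real registers.

#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical status read; the real patch checks two registers via nvkm_rd32(). */
extern bool read_status_complete(void *hw);

static int wait_for_fw_boot(void *hw)
{
        unsigned int timeout = 2050;    /* iterations of roughly 1-2 ms each */

        do {
                if (read_status_complete(hw))
                        return 0;
                usleep_range(1000, 2000);       /* sleep instead of busy-spinning */
        } while (timeout--);

        return -ETIMEDOUT;
}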
diff --git a/queue-6.1/fscache-use-clear_and_wake_up_bit-in-fscache_create_.patch b/queue-6.1/fscache-use-clear_and_wake_up_bit-in-fscache_create_.patch
new file mode 100644 (file)
index 0000000..526a74e
--- /dev/null
@@ -0,0 +1,45 @@
+From 69e4027ed53bb7dec77677c3e3fe2586dbb6aca9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Jan 2023 19:52:11 +0800
+Subject: fscache: Use clear_and_wake_up_bit() in fscache_create_volume_work()
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit 3288666c72568fe1cc7f5c5ae33dfd3ab18004c8 ]
+
+fscache_create_volume_work() uses wake_up_bit() to wake up the processes
+which are waiting for the completion of volume creation. According to
+comments in wake_up_bit() and waitqueue_active(), an extra smp_mb() is
+needed to guarantee the memory order between FSCACHE_VOLUME_CREATING
+flag and waitqueue_active() before invoking wake_up_bit().
+
+Fix it by using clear_and_wake_up_bit() to add the missing memory
+barrier.
+
+Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Link: https://lore.kernel.org/r/20230113115211.2895845-3-houtao@huaweicloud.com/ # v3
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fscache/volume.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
+index 903af9d85f8b9..cdf991bdd9def 100644
+--- a/fs/fscache/volume.c
++++ b/fs/fscache/volume.c
+@@ -280,8 +280,7 @@ static void fscache_create_volume_work(struct work_struct *work)
+       fscache_end_cache_access(volume->cache,
+                                fscache_access_acquire_volume_end);
+-      clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+-      wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
++      clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
+       fscache_put_volume(volume, fscache_volume_put_create_work);
+ }
+-- 
+2.39.0
+
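The point of the fix is the waker/waiter pairing: a bare clear_bit_unlock() plus wake_up_bit() does not order the flag clear against the waitqueue check, so a waiter can miss the wake-up. A minimal sketch of the pairing, assuming a hypothetical FLAG_BUSY bit in a caller-owned flags word:

#include <linux/sched.h>
#include <linux/wait_bit.h>

#define FLAG_BUSY       0       /* illustrative bit number */

/* Waiter: sleeps until FLAG_BUSY is cleared by the waker. */
static void wait_until_idle(unsigned long *flags)
{
        wait_on_bit(flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

/* Waker: clear_and_wake_up_bit() supplies the memory barrier between
 * clearing the bit and checking for waiters that the open-coded pair lacks.
 */
static void mark_idle(unsigned long *flags)
{
        clear_and_wake_up_bit(FLAG_BUSY, flags);
}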
diff --git a/queue-6.1/mptcp-deduplicate-error-paths-on-endpoint-creation.patch b/queue-6.1/mptcp-deduplicate-error-paths-on-endpoint-creation.patch
new file mode 100644 (file)
index 0000000..c1cb721
--- /dev/null
@@ -0,0 +1,114 @@
+From 6f43b15f59b848769b112e2058f7d0c647135afe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Nov 2022 10:46:07 -0800
+Subject: mptcp: deduplicate error paths on endpoint creation
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 976d302fb6165ad620778d7ba834cde6e3fe9f9f ]
+
+When endpoint creation fails, we need to free the newly allocated
+entry and, if present, destroy the paired mptcp listener socket.
+
+Consolidate such actions in a single point and let all the error
+paths reach it.
+
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: ad2171009d96 ("mptcp: fix locking for in-kernel listener creation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm_netlink.c | 35 +++++++++++++----------------------
+ 1 file changed, 13 insertions(+), 22 deletions(-)
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 9813ed0fde9bd..fdf2ee29f7623 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1003,16 +1003,12 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+               return err;
+       msk = mptcp_sk(entry->lsk->sk);
+-      if (!msk) {
+-              err = -EINVAL;
+-              goto out;
+-      }
++      if (!msk)
++              return -EINVAL;
+       ssock = __mptcp_nmpc_socket(msk);
+-      if (!ssock) {
+-              err = -EINVAL;
+-              goto out;
+-      }
++      if (!ssock)
++              return -EINVAL;
+       mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+@@ -1022,20 +1018,16 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+       err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
+       if (err) {
+               pr_warn("kernel_bind error, err=%d", err);
+-              goto out;
++              return err;
+       }
+       err = kernel_listen(ssock, backlog);
+       if (err) {
+               pr_warn("kernel_listen error, err=%d", err);
+-              goto out;
++              return err;
+       }
+       return 0;
+-
+-out:
+-      sock_release(entry->lsk);
+-      return err;
+ }
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+@@ -1327,7 +1319,7 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+               return -EINVAL;
+       }
+-      entry = kmalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
++      entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT);
+       if (!entry) {
+               GENL_SET_ERR_MSG(info, "can't allocate addr");
+               return -ENOMEM;
+@@ -1338,22 +1330,21 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+               ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
+               if (ret) {
+                       GENL_SET_ERR_MSG(info, "create listen socket error");
+-                      kfree(entry);
+-                      return ret;
++                      goto out_free;
+               }
+       }
+       ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+       if (ret < 0) {
+               GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
+-              if (entry->lsk)
+-                      sock_release(entry->lsk);
+-              kfree(entry);
+-              return ret;
++              goto out_free;
+       }
+       mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
+-
+       return 0;
++
++out_free:
++      __mptcp_pm_release_addr_entry(entry);
++      return ret;
+ }
+ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
+-- 
+2.39.0
+
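The refactor is the classic single-exit error-handling idiom: every failing branch jumps to one label that performs the whole cleanup, so later patches only have to keep that one place correct. A generic sketch of the idiom, with example_setup()/example_register() as hypothetical stand-ins for the listen-socket creation and address registration steps:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_entry {
        int id;
};

extern int example_setup(struct example_entry *entry);         /* hypothetical */
extern int example_register(struct example_entry *entry);      /* hypothetical */

static int example_create(void)
{
        struct example_entry *entry;
        int ret;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        ret = example_setup(entry);
        if (ret)
                goto out_free;          /* single cleanup point */

        ret = example_register(entry);
        if (ret)
                goto out_free;

        return 0;

out_free:
        kfree(entry);
        return ret;
}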
diff --git a/queue-6.1/mptcp-fix-locking-for-in-kernel-listener-creation.patch b/queue-6.1/mptcp-fix-locking-for-in-kernel-listener-creation.patch
new file mode 100644 (file)
index 0000000..1c8d434
--- /dev/null
@@ -0,0 +1,79 @@
+From 446187deb58937f8bda106c7987900a70dbceae1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Feb 2023 14:04:15 +0100
+Subject: mptcp: fix locking for in-kernel listener creation
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit ad2171009d968104ccda9dc517f5a3ba891515db ]
+
+For consistency, in mptcp_pm_nl_create_listen_socket(), we need to
+call __mptcp_nmpc_socket() under the msk socket lock.
+
+Note that as a side effect, mptcp_subflow_create_socket() needs a
+'nested' lockdep annotation, as it will acquire the subflow (kernel)
+socket lock under the in-kernel listener msk socket lock.
+
+The current lack of locking is almost harmless, because the relevant
+socket is not exposed to user space, but in the future we will add
+more complexity to the mentioned helper, so let's play it safe.
+
+Fixes: 1729cf186d8a ("mptcp: create the listening socket for new port")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm_netlink.c | 10 ++++++----
+ net/mptcp/subflow.c    |  2 +-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index fdf2ee29f7623..5e38a0abbabae 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -992,8 +992,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ {
+       int addrlen = sizeof(struct sockaddr_in);
+       struct sockaddr_storage addr;
+-      struct mptcp_sock *msk;
+       struct socket *ssock;
++      struct sock *newsk;
+       int backlog = 1024;
+       int err;
+@@ -1002,11 +1002,13 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+       if (err)
+               return err;
+-      msk = mptcp_sk(entry->lsk->sk);
+-      if (!msk)
++      newsk = entry->lsk->sk;
++      if (!newsk)
+               return -EINVAL;
+-      ssock = __mptcp_nmpc_socket(msk);
++      lock_sock(newsk);
++      ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
++      release_sock(newsk);
+       if (!ssock)
+               return -EINVAL;
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 929b0ee8b3d5f..c4971bc42f60f 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1631,7 +1631,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+       if (err)
+               return err;
+-      lock_sock(sf->sk);
++      lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
+       /* the newly created socket has to be in the same cgroup as its parent */
+       mptcp_attach_cgroup(sk, sf->sk);
+-- 
+2.39.0
+
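Two locking details in this fix are general enough to sketch: the helper call has to happen with the owning socket locked, and a second socket lock legitimately taken while the first is held needs a nested lockdep class so it is not flagged as a self-deadlock. In the sketch below, do_setup() is hypothetical; lock_sock()/release_sock()/lock_sock_nested() are the real primitives used by the patch.

#include <linux/lockdep.h>
#include <net/sock.h>

extern int do_setup(struct sock *sk);  /* hypothetical stand-in for __mptcp_nmpc_socket() */

static int setup_under_lock(struct sock *sk)
{
        int ret;

        lock_sock(sk);          /* serialize against concurrent socket users */
        ret = do_setup(sk);
        release_sock(sk);

        return ret;
}

/* Taken while another socket lock is already held: use a nested subclass. */
static void touch_child_socket(struct sock *child)
{
        lock_sock_nested(child, SINGLE_DEPTH_NESTING);
        /* ... configure the child socket ... */
        release_sock(child);
}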
diff --git a/queue-6.1/mptcp-fix-locking-for-setsockopt-corner-case.patch b/queue-6.1/mptcp-fix-locking-for-setsockopt-corner-case.patch
new file mode 100644 (file)
index 0000000..6399ddc
--- /dev/null
@@ -0,0 +1,55 @@
+From fece5c1a01a999db819c9829adf15e07a33069a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Feb 2023 14:04:14 +0100
+Subject: mptcp: fix locking for setsockopt corner-case
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 21e43569685de4ad773fb060c11a15f3fd5e7ac4 ]
+
+We need to call __mptcp_nmpc_socket(), and later access the subflow
+socket, under the msk socket lock, or e.g. a racing connect() could
+change the socket status under the hood, with unexpected results.
+
+Fixes: 54635bd04701 ("mptcp: add TCP_FASTOPEN_CONNECT socket option")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/sockopt.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index 8d3b09d75c3ae..696ba398d699a 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -772,14 +772,21 @@ static int mptcp_setsockopt_sol_tcp_defer(struct mptcp_sock *msk, sockptr_t optv
+ static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
+                                         sockptr_t optval, unsigned int optlen)
+ {
++      struct sock *sk = (struct sock *)msk;
+       struct socket *sock;
++      int ret = -EINVAL;
+       /* Limit to first subflow, before the connection establishment */
++      lock_sock(sk);
+       sock = __mptcp_nmpc_socket(msk);
+       if (!sock)
+-              return -EINVAL;
++              goto unlock;
+-      return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
++      ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
++
++unlock:
++      release_sock(sk);
++      return ret;
+ }
+ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+-- 
+2.39.0
+
diff --git a/queue-6.1/mptcp-sockopt-make-tcp_fastopen_connect-generic.patch b/queue-6.1/mptcp-sockopt-make-tcp_fastopen_connect-generic.patch
new file mode 100644 (file)
index 0000000..e39279f
--- /dev/null
@@ -0,0 +1,66 @@
+From 1e32894f6fab187d542e988e2e426dd4f9abaf42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Oct 2022 17:45:03 -0700
+Subject: mptcp: sockopt: make 'tcp_fastopen_connect' generic
+
+From: Matthieu Baerts <matthieu.baerts@tessares.net>
+
+[ Upstream commit d3d429047cc66ff49780c93e4fccd9527723d385 ]
+
+There are other socket options that need to act only on the first
+subflow, e.g. all TCP_FASTOPEN* socket options.
+
+This is similar to the getsockopt version.
+
+In the next commit, this new mptcp_setsockopt_first_sf_only() helper is
+used by another option.
+
+Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 21e43569685d ("mptcp: fix locking for setsockopt corner-case")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/sockopt.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
+index c7cb68c725b29..8d3b09d75c3ae 100644
+--- a/net/mptcp/sockopt.c
++++ b/net/mptcp/sockopt.c
+@@ -769,17 +769,17 @@ static int mptcp_setsockopt_sol_tcp_defer(struct mptcp_sock *msk, sockptr_t optv
+       return tcp_setsockopt(listener->sk, SOL_TCP, TCP_DEFER_ACCEPT, optval, optlen);
+ }
+-static int mptcp_setsockopt_sol_tcp_fastopen_connect(struct mptcp_sock *msk, sockptr_t optval,
+-                                                   unsigned int optlen)
++static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
++                                        sockptr_t optval, unsigned int optlen)
+ {
+       struct socket *sock;
+-      /* Limit to first subflow */
++      /* Limit to first subflow, before the connection establishment */
+       sock = __mptcp_nmpc_socket(msk);
+       if (!sock)
+               return -EINVAL;
+-      return tcp_setsockopt(sock->sk, SOL_TCP, TCP_FASTOPEN_CONNECT, optval, optlen);
++      return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+ }
+ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+@@ -811,7 +811,8 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
+       case TCP_DEFER_ACCEPT:
+               return mptcp_setsockopt_sol_tcp_defer(msk, optval, optlen);
+       case TCP_FASTOPEN_CONNECT:
+-              return mptcp_setsockopt_sol_tcp_fastopen_connect(msk, optval, optlen);
++              return mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname,
++                                                    optval, optlen);
+       }
+       return -EOPNOTSUPP;
+-- 
+2.39.0
+
diff --git a/queue-6.1/net-ethernet-mtk_eth_soc-avoid-truncating-allocation.patch b/queue-6.1/net-ethernet-mtk_eth_soc-avoid-truncating-allocation.patch
new file mode 100644 (file)
index 0000000..4a16a3a
--- /dev/null
@@ -0,0 +1,72 @@
+From 954e7e1f4863814e856f6b8327bece646351ea96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 14:38:54 -0800
+Subject: net: ethernet: mtk_eth_soc: Avoid truncating allocation
+
+From: Kees Cook <keescook@chromium.org>
+
+[ Upstream commit f3eceaed9edd7c0e0d9fb057613131f92973626f ]
+
+There doesn't appear to be a reason to truncate the allocation used for
+flow_info, so do a full allocation and remove the unused empty struct.
+GCC does not like having a reference to an object that has been
+partially allocated, as bounds checking may become impossible when
+such an object is passed to other code. Seen with GCC 13:
+
+../drivers/net/ethernet/mediatek/mtk_ppe.c: In function 'mtk_foe_entry_commit_subflow':
+../drivers/net/ethernet/mediatek/mtk_ppe.c:623:18: warning: array subscript 'struct mtk_flow_entry[0]' is partly outside array bounds of 'unsigned char[48]' [-Warray-bounds=]
+  623 |         flow_info->l2_data.base_flow = entry;
+      |                  ^~
+
+Cc: Felix Fietkau <nbd@nbd.name>
+Cc: John Crispin <john@phrozen.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Mark Lee <Mark-MC.Lee@mediatek.com>
+Cc: Lorenzo Bianconi <lorenzo@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: netdev@vger.kernel.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-mediatek@lists.infradead.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20230127223853.never.014-kees@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 3 +--
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 1 -
+ 2 files changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 784ecb2dc9fbd..34ea8af48c3d0 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -595,8 +595,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+       u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+       int type;
+-      flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+-                          GFP_ATOMIC);
++      flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
+       if (!flow_info)
+               return;
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
+index a09c32539bcc9..e66283b1bc79e 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -277,7 +277,6 @@ struct mtk_flow_entry {
+               struct {
+                       struct mtk_flow_entry *base_flow;
+                       struct hlist_node list;
+-                      struct {} end;
+               } l2_data;
+       };
+       struct rhash_head node;
+-- 
+2.39.0
+
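The allocation change addresses a general pitfall: allocating only a prefix of a structure via offsetof() hands the compiler and any bounds-checking machinery an object smaller than its declared type. A hedged sketch of the before/after shape, using an illustrative struct rather than the real mtk_flow_entry:

#include <linux/slab.h>
#include <linux/stddef.h>

struct example_flow {
        int type;
        struct {
                void *base;
                /* an empty 'struct {} end;' marker used to sit here */
        } l2_data;
};

static struct example_flow *alloc_flow(void)
{
        /*
         * The truncated form removed by the patch looked like
         *   kzalloc(offsetof(struct example_flow, l2_data.end), GFP_ATOMIC);
         * which allocates less than sizeof(struct example_flow), so any
         * reference to the object as a whole is partly out of bounds.
         */
        return kzalloc(sizeof(struct example_flow), GFP_ATOMIC);
}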
diff --git a/queue-6.1/net-rose-fix-to-not-accept-on-connected-socket.patch b/queue-6.1/net-rose-fix-to-not-accept-on-connected-socket.patch
new file mode 100644 (file)
index 0000000..8858080
--- /dev/null
@@ -0,0 +1,63 @@
+From fb01a063c838d6853528395a7b35e91767e76693 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jan 2023 02:59:44 -0800
+Subject: net/rose: Fix to not accept on connected socket
+
+From: Hyunwoo Kim <v4bel@theori.io>
+
+[ Upstream commit 14caefcf9837a2be765a566005ad82cd0d2a429f ]
+
+If you call listen() and accept() on an already connect()ed
+rose socket, accept() can successfully connect.
+This is because when the peer socket sends data via sendmsg(),
+an skb carrying its own sk is queued on the connected socket's
+sk->sk_receive_queue, and rose_accept() dequeues that waiting
+skb.
+
+This creates a child socket with the sk of the parent
+rose socket, which can cause confusion.
+
+Fix rose_listen() to return -EINVAL if the socket has
+already been successfully connected, and add lock_sock
+to prevent this issue.
+
+Signed-off-by: Hyunwoo Kim <v4bel@theori.io>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20230125105944.GA133314@ubuntu
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rose/af_rose.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 36fefc3957d77..ca2b17f32670d 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -488,6 +488,12 @@ static int rose_listen(struct socket *sock, int backlog)
+ {
+       struct sock *sk = sock->sk;
++      lock_sock(sk);
++      if (sock->state != SS_UNCONNECTED) {
++              release_sock(sk);
++              return -EINVAL;
++      }
++
+       if (sk->sk_state != TCP_LISTEN) {
+               struct rose_sock *rose = rose_sk(sk);
+@@ -497,8 +503,10 @@ static int rose_listen(struct socket *sock, int backlog)
+               memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
+               sk->sk_max_ack_backlog = backlog;
+               sk->sk_state           = TCP_LISTEN;
++              release_sock(sk);
+               return 0;
+       }
++      release_sock(sk);
+       return -EOPNOTSUPP;
+ }
+-- 
+2.39.0
+
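The rose fix is an instance of a broader guard: reject listen() on a socket that has already been connected, and keep both the state check and the state change under the socket lock so a concurrent caller cannot race in between. A minimal sketch of that guard; example_listen() and its error codes mirror the patch, everything else is illustrative.

#include <linux/errno.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/tcp_states.h>

static int example_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int ret = -EOPNOTSUPP;

        lock_sock(sk);
        if (sock->state != SS_UNCONNECTED) {    /* already connect()ed */
                ret = -EINVAL;
                goto out;
        }
        if (sk->sk_state != TCP_LISTEN) {
                sk->sk_max_ack_backlog = backlog;
                sk->sk_state = TCP_LISTEN;
                ret = 0;
        }
out:
        release_sock(sk);
        return ret;
}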
diff --git a/queue-6.1/net-sched-sch-bounds-check-priority.patch b/queue-6.1/net-sched-sch-bounds-check-priority.patch
new file mode 100644 (file)
index 0000000..353ec91
--- /dev/null
@@ -0,0 +1,57 @@
+From d69ba695adee0efc40b5098c940229a6db59df64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 14:40:37 -0800
+Subject: net: sched: sch: Bounds check priority
+
+From: Kees Cook <keescook@chromium.org>
+
+[ Upstream commit de5ca4c3852f896cacac2bf259597aab5e17d9e3 ]
+
+Nothing was explicitly bounds checking the priority index used to access
+clprio[]. WARN and bail out early if it's pathological. Seen with GCC 13:
+
+../net/sched/sch_htb.c: In function 'htb_activate_prios':
+../net/sched/sch_htb.c:437:44: warning: array subscript [0, 31] is outside array bounds of 'struct htb_prio[8]' [-Warray-bounds=]
+  437 |                         if (p->inner.clprio[prio].feed.rb_node)
+      |                             ~~~~~~~~~~~~~~~^~~~~~
+../net/sched/sch_htb.c:131:41: note: while referencing 'clprio'
+  131 |                         struct htb_prio clprio[TC_HTB_NUMPRIO];
+      |                                         ^~~~~~
+
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Cc: Jiri Pirko <jiri@resnulli.us>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Simon Horman <simon.horman@corigine.com>
+Reviewed-by: Cong Wang <cong.wang@bytedance.com>
+Link: https://lore.kernel.org/r/20230127224036.never.561-kees@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 3afac9c21a763..14a202b5a3187 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -427,7 +427,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
+       while (cl->cmode == HTB_MAY_BORROW && p && mask) {
+               m = mask;
+               while (m) {
+-                      int prio = ffz(~m);
++                      unsigned int prio = ffz(~m);
++
++                      if (WARN_ON_ONCE(prio > ARRAY_SIZE(p->inner.clprio)))
++                              break;
+                       m &= ~(1 << prio);
+                       if (p->inner.clprio[prio].feed.rb_node)
+-- 
+2.39.0
+
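The htb change is the standard defensive pattern for an index derived from bit arithmetic: keep it unsigned, compare it against ARRAY_SIZE() of the array it indexes, and warn rather than silently read out of bounds. A generic sketch; the table and lookup() are illustrative.

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int table[8];

static int lookup(unsigned int idx)
{
        /* Refuse pathological indexes instead of reading past the array. */
        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(table)))
                return -EINVAL;

        return table[idx];
}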
diff --git a/queue-6.1/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch b/queue-6.1/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch
new file mode 100644 (file)
index 0000000..51736d9
--- /dev/null
@@ -0,0 +1,67 @@
+From a0a3bfd2ac7670ecc9de8eaaa564705905cbb15f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 00:35:39 +0300
+Subject: net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC
+
+From: Andrey Konovalov <andrey.konovalov@linaro.org>
+
+[ Upstream commit 54aa39a513dbf2164ca462a19f04519b2407a224 ]
+
+Currently in phy_init_eee() the driver unconditionally configures the PHY
+to stop RX_CLK after entering Rx LPI state. This causes an LPI interrupt
+storm on my qcs404-base board.
+
+Change the PHY initialization so that for the "qcom,qcs404-ethqos"
+compatible device RX_CLK continues to run even in the Rx LPI state.
+
+Signed-off-by: Andrey Konovalov <andrey.konovalov@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2 ++
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c       | 3 ++-
+ include/linux/stmmac.h                                  | 1 +
+ 3 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+index 835caa15d55ff..732774645c1a6 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
+       plat_dat->has_gmac4 = 1;
+       plat_dat->pmt = 1;
+       plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
++      if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
++              plat_dat->rx_clk_runs_in_lpi = 1;
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4bba0444c764a..84e1740b12f1b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1077,7 +1077,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
+       stmmac_mac_set(priv, priv->ioaddr, true);
+       if (phy && priv->dma_cap.eee) {
+-              priv->eee_active = phy_init_eee(phy, 1) >= 0;
++              priv->eee_active =
++                      phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+               priv->eee_enabled = stmmac_eee_init(priv);
+               priv->tx_lpi_enabled = priv->eee_enabled;
+               stmmac_set_eee_pls(priv, priv->hw, true);
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index fb2e88614f5d1..313edd19bf545 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -252,6 +252,7 @@ struct plat_stmmacenet_data {
+       int rss_en;
+       int mac_port_sel_speed;
+       bool en_tx_lpi_clockgating;
++      bool rx_clk_runs_in_lpi;
+       int has_xgmac;
+       bool vlan_fail_q_en;
+       u8 vlan_fail_q;
+-- 
+2.39.0
+
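The stmmac change follows the usual platform-quirk pattern: detect the affected SoC from its compatible string at probe time, record a boolean in the platform data, and let the generic code branch on that flag. The sketch below keeps the real flag name and compatible string from the patch; the surrounding struct and helper are trimmed-down illustrations.

#include <linux/of.h>

struct example_plat_data {
        bool rx_clk_runs_in_lpi;        /* quirk flag consumed by generic code */
};

static void example_apply_quirks(struct device_node *np,
                                 struct example_plat_data *plat)
{
        /* Only this SoC keeps RX_CLK running in the Rx LPI state. */
        if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
                plat->rx_clk_runs_in_lpi = true;
}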
diff --git a/queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch b/queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch
new file mode 100644 (file)
index 0000000..2173795
--- /dev/null
@@ -0,0 +1,48 @@
+From 61991049c6efd0478896d0e0a2ce02608ee4ca82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jan 2023 16:42:37 +0100
+Subject: nvme: clear the request_queue pointers on failure in
+ nvme_alloc_admin_tag_set
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit fd62678ab55cb01e11a404d302cdade222bf4022 ]
+
+If nvme_alloc_admin_tag_set() fails, the admin_q and fabrics_q pointers
+are left with an invalid, non-NULL value. Other functions may then check
+the pointers and dereference them, e.g. in
+
+  nvme_probe() -> out_disable: -> nvme_dev_remove_admin().
+
+Fix the bug by setting admin_q and fabrics_q to NULL in case of error.
+
+Also use the set variable to free the tag_set as ctrl->admin_tagset isn't
+initialized yet.
+
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 25ade4ce8e0a7..e189ce17deb3e 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4881,7 +4881,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ out_cleanup_admin_q:
+       blk_mq_destroy_queue(ctrl->admin_q);
+ out_free_tagset:
+-      blk_mq_free_tag_set(ctrl->admin_tagset);
++      blk_mq_free_tag_set(set);
++      ctrl->admin_q = NULL;
++      ctrl->fabrics_q = NULL;
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+-- 
+2.39.0
+
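The underlying rule is worth stating: when an init function stores pointers into a long-lived structure and then fails, its error path must reset those pointers to NULL, otherwise later cleanup code that tests "if (ptr)" will act on a freed or half-initialized object. A hedged sketch with an illustrative context struct and hypothetical helpers:

#include <linux/errno.h>

struct example_ctrl {
        void *admin_q;
};

extern void *example_create_queue(struct example_ctrl *ctrl);  /* hypothetical */
extern int example_start(struct example_ctrl *ctrl);           /* hypothetical */
extern void example_destroy_queue(void *q);                    /* hypothetical */

static int example_init(struct example_ctrl *ctrl)
{
        int ret;

        ctrl->admin_q = example_create_queue(ctrl);
        if (!ctrl->admin_q)
                return -ENOMEM;

        ret = example_start(ctrl);
        if (ret) {
                example_destroy_queue(ctrl->admin_q);
                ctrl->admin_q = NULL;   /* don't leave a dangling pointer behind */
                return ret;
        }

        return 0;
}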
diff --git a/queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch-7477 b/queue-6.1/nvme-clear-the-request_queue-pointers-on-failure-in-.patch-7477
new file mode 100644 (file)
index 0000000..a04ebf2
--- /dev/null
@@ -0,0 +1,36 @@
+From 88c03cf03e3bd634d9c00fe0ef5bf0a8276e1d03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2023 17:38:42 +0100
+Subject: nvme: clear the request_queue pointers on failure in
+ nvme_alloc_io_tag_set
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+[ Upstream commit 6fbf13c0e24fd86ab2e4477cd8484a485b687421 ]
+
+In nvme_alloc_io_tag_set(), the connect_q pointer should be set to NULL
+in case of error to avoid potential invalid pointer dereferences.
+
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index e189ce17deb3e..5acc9ae225df3 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4933,6 +4933,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ out_free_tag_set:
+       blk_mq_free_tag_set(set);
++      ctrl->connect_q = NULL;
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+-- 
+2.39.0
+
diff --git a/queue-6.1/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch b/queue-6.1/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch
new file mode 100644 (file)
index 0000000..14f877a
--- /dev/null
@@ -0,0 +1,41 @@
+From 1b987f1d8461d90eabe89ee3622e368c4eb471d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jan 2023 14:37:28 +0200
+Subject: nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
+
+From: Amit Engel <Amit.Engel@dell.com>
+
+[ Upstream commit 0cab4404874f2de52617de8400c844891c6ea1ce ]
+
+As part of nvmet_fc_ls_create_association there is a case where
+nvmet_fc_alloc_target_queue fails right after a new association with an
+admin queue is created. In this case, no one releases the get taken in
+nvmet_fc_alloc_target_assoc(). This fix adds the missing put.
+
+Signed-off-by: Amit Engel <Amit.Engel@dell.com>
+Reviewed-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index ab2627e17bb97..1ab6601fdd5cf 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+               else {
+                       queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+                                       be16_to_cpu(rqst->assoc_cmd.sqsize));
+-                      if (!queue)
++                      if (!queue) {
+                               ret = VERR_QUEUE_ALLOC_FAIL;
++                              nvmet_fc_tgt_a_put(iod->assoc);
++                      }
+               }
+       }
+-- 
+2.39.0
+
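The invariant behind this fix: every error branch taken after a reference has been acquired must drop exactly the references taken so far on that path. A minimal sketch of the shape using the generic kref API; the association struct and example_alloc_queue() are illustrative, not the nvmet-fc types.

#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_assoc {
        struct kref ref;
};

static void example_assoc_release(struct kref *ref)
{
        kfree(container_of(ref, struct example_assoc, ref));
}

extern int example_alloc_queue(struct example_assoc *assoc);   /* hypothetical */

static int example_attach_queue(struct example_assoc *assoc)
{
        kref_get(&assoc->ref);                  /* reference held by the new queue */

        if (example_alloc_queue(assoc)) {
                /* queue creation failed: drop the reference taken above */
                kref_put(&assoc->ref, example_assoc_release);
                return -ENOMEM;
        }

        return 0;
}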
diff --git a/queue-6.1/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch b/queue-6.1/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch
new file mode 100644 (file)
index 0000000..6c09d27
--- /dev/null
@@ -0,0 +1,43 @@
+From 7b00c145f9a7db2afa1eea7cf02b3154c11b5e41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Feb 2023 11:34:13 +0100
+Subject: platform/x86: touchscreen_dmi: Add Chuwi Vi8 (CWI501) DMI match
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit eecf2acd4a580e9364e5087daf0effca60a240b7 ]
+
+Add a DMI match for the CWI501 version of the Chuwi Vi8 tablet,
+pointing to the same chuwi_vi8_data as the existing CWI506 version
+DMI match.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20230202103413.331459-1-hdegoede@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/touchscreen_dmi.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index f00995390fdfe..13802a3c3591d 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -1097,6 +1097,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+                       DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+               },
+       },
++      {
++              /* Chuwi Vi8 (CWI501) */
++              .driver_data = (void *)&chuwi_vi8_data,
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
++                      DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
++              },
++      },
+       {
+               /* Chuwi Vi8 (CWI506) */
+               .driver_data = (void *)&chuwi_vi8_data,
+-- 
+2.39.0
+
diff --git a/queue-6.1/powerpc-64-fix-perf-profiling-asynchronous-interrupt.patch b/queue-6.1/powerpc-64-fix-perf-profiling-asynchronous-interrupt.patch
new file mode 100644 (file)
index 0000000..fe0127b
--- /dev/null
@@ -0,0 +1,158 @@
+From daae9dca0312a0c654172b34fe48582674461428 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Jan 2023 20:01:56 +1000
+Subject: powerpc/64: Fix perf profiling asynchronous interrupt handlers
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit c28548012ee2bac55772ef7685138bd1124b80c3 ]
+
+Interrupt entry sets the soft mask to IRQS_ALL_DISABLED to match the
+hard irq disabled state. So when should_hard_irq_enable() returns true
+because we want PMI interrupts in irq handlers, MSR[EE] is enabled but
+PMIs just get soft-masked. Fix this by clearing IRQS_PMI_DISABLED before
+enabling MSR[EE].
+
+This also tidies some of the warnings; there is no need to duplicate
+them in both should_hard_irq_enable() and do_hard_irq_enable().
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20230121100156.2824054-1-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/hw_irq.h | 41 ++++++++++++++++++++++---------
+ arch/powerpc/kernel/dbell.c       |  2 +-
+ arch/powerpc/kernel/irq.c         |  2 +-
+ arch/powerpc/kernel/time.c        |  2 +-
+ 4 files changed, 32 insertions(+), 15 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index 0b7d01d408ac8..eb6d094083fd6 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
+       return flags;
+ }
++static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
++{
++      unsigned long flags = irq_soft_mask_return();
++
++      irq_soft_mask_set(flags & ~mask);
++
++      return flags;
++}
++
+ static inline unsigned long arch_local_save_flags(void)
+ {
+       return irq_soft_mask_return();
+@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
+  * is a different soft-masked interrupt pending that requires hard
+  * masking.
+  */
+-static inline bool should_hard_irq_enable(void)
++static inline bool should_hard_irq_enable(struct pt_regs *regs)
+ {
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+-              WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
++              WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++              WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
+               WARN_ON(mfmsr() & MSR_EE);
+       }
+@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
+        *
+        * TODO: Add test for 64e
+        */
+-      if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+-              return false;
++      if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
++              if (!power_pmu_wants_prompt_pmi())
++                      return false;
++              /*
++               * If PMIs are disabled then IRQs should be disabled as well,
++               * so we shouldn't see this condition, check for it just in
++               * case because we are about to enable PMIs.
++               */
++              if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
++                      return false;
++      }
+       if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+               return false;
+@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
+ /*
+  * Do the hard enabling, only call this if should_hard_irq_enable is true.
++ * This allows PMI interrupts to profile irq handlers.
+  */
+ static inline void do_hard_irq_enable(void)
+ {
+-      if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+-              WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+-              WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+-              WARN_ON(mfmsr() & MSR_EE);
+-      }
+       /*
+-       * This allows PMI interrupts (and watchdog soft-NMIs) through.
+-       * There is no other reason to enable this way.
++       * Asynch interrupts come in with IRQS_ALL_DISABLED,
++       * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
+        */
++      if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
++              irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
+       get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+       __hard_irq_enable();
+ }
+@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+       return !(regs->msr & MSR_EE);
+ }
+-static __always_inline bool should_hard_irq_enable(void)
++static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
+ {
+       return false;
+ }
+diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
+index f55c6fb34a3a0..5712dd846263c 100644
+--- a/arch/powerpc/kernel/dbell.c
++++ b/arch/powerpc/kernel/dbell.c
+@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
+       ppc_msgsync();
+-      if (should_hard_irq_enable())
++      if (should_hard_irq_enable(regs))
+               do_hard_irq_enable();
+       kvmppc_clear_host_ipi(smp_processor_id());
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 9ede61a5a469e..55142ff649f3f 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
+       irq = static_call(ppc_get_irq)();
+       /* We can hard enable interrupts now to allow perf interrupts */
+-      if (should_hard_irq_enable())
++      if (should_hard_irq_enable(regs))
+               do_hard_irq_enable();
+       /* And finally process it */
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index a2ab397065c66..f157552d79b38 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -533,7 +533,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
+       }
+       /* Conditionally hard-enable interrupts. */
+-      if (should_hard_irq_enable()) {
++      if (should_hard_irq_enable(regs)) {
+               /*
+                * Ensure a positive value is written to the decrementer, or
+                * else some CPUs will continue to take decrementer exceptions.
+-- 
+2.39.0
+
diff --git a/queue-6.1/s390-decompressor-specify-__decompress-buf-len-to-av.patch b/queue-6.1/s390-decompressor-specify-__decompress-buf-len-to-av.patch
new file mode 100644 (file)
index 0000000..6571990
--- /dev/null
@@ -0,0 +1,47 @@
+From 915f1e1a2d8aca8fa19f00434cd713d5b6eaa1cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 29 Jan 2023 23:47:23 +0100
+Subject: s390/decompressor: specify __decompress() buf len to avoid overflow
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+[ Upstream commit 7ab41c2c08a32132ba8c14624910e2fe8ce4ba4b ]
+
+Historically, calls to __decompress() didn't specify the "out_len"
+parameter on many architectures, including s390, expecting that no writes
+beyond the uncompressed kernel image are performed. This has changed since
+commit 2aa14b1ab2c4 ("zstd: import usptream v1.5.2") which includes zstd library
+commit 6a7ede3dfccb ("Reduce size of dctx by reutilizing dst buffer
+(#2751)"). Now zstd decompression code might store literal buffer in
+the unwritten portion of the destination buffer. Since "out_len" is
+not set, it is considered to be unlimited and hence free to use for
+optimization needs. On s390 this might corrupt initrd or ipl report
+which are often placed right after the decompressor buffer. Luckily the
+size of uncompressed kernel image is already known to the decompressor,
+so to avoid the problem simply specify it in the "out_len" parameter.
+
+Link: https://github.com/facebook/zstd/commit/6a7ede3dfccb
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Tested-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Link: https://lore.kernel.org/r/patch-1.thread-41c676.git-41c676c2d153.your-ad-here.call-01675030179-ext-9637@work.hours
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/boot/decompressor.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
+index e27c2140d6206..623f6775d01d7 100644
+--- a/arch/s390/boot/decompressor.c
++++ b/arch/s390/boot/decompressor.c
+@@ -80,6 +80,6 @@ void *decompress_kernel(void)
+       void *output = (void *)decompress_offset;
+       __decompress(_compressed_start, _compressed_end - _compressed_start,
+-                   NULL, NULL, output, 0, NULL, error);
++                   NULL, NULL, output, vmlinux.image_size, NULL, error);
+       return output;
+ }
+-- 
+2.39.0
+
diff --git a/queue-6.1/selftest-net-improve-ipv6_tclass-ipv6_hoplimit-tests.patch b/queue-6.1/selftest-net-improve-ipv6_tclass-ipv6_hoplimit-tests.patch
new file mode 100644 (file)
index 0000000..77d422c
--- /dev/null
@@ -0,0 +1,46 @@
+From ab2c0f8d375601503c81f60844461590e9b11017 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Jan 2023 16:55:48 +0000
+Subject: selftest: net: Improve IPV6_TCLASS/IPV6_HOPLIMIT tests apparmor
+ compatibility
+
+From: Andrei Gherzan <andrei.gherzan@canonical.com>
+
+[ Upstream commit a6efc42a86c0c87cfe2f1c3d1f09a4c9b13ba890 ]
+
+"tcpdump" is used to capture traffic in these tests while using a random,
+temporary and not suffixed file for it. This can interfere with apparmor
+configuration where the tool is only allowed to read from files with
+'known' extensions.
+
+The MIME type application/vnd.tcpdump.pcap was registered with IANA for
+pcap files, and .pcap is the extension that is both the most common and
+aligned with standard apparmor configurations. See TCPDUMP(8) for more
+details.
+
+This improves compatibility with standard apparmor configurations by
+using ".pcap" as the file extension for the tests' temporary files.
+
+Signed-off-by: Andrei Gherzan <andrei.gherzan@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/cmsg_ipv6.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
+index 2d89cb0ad2889..330d0b1ceced3 100755
+--- a/tools/testing/selftests/net/cmsg_ipv6.sh
++++ b/tools/testing/selftests/net/cmsg_ipv6.sh
+@@ -6,7 +6,7 @@ ksft_skip=4
+ NS=ns
+ IP6=2001:db8:1::1/64
+ TGT6=2001:db8:1::2
+-TMPF=`mktemp`
++TMPF=$(mktemp --suffix ".pcap")
+ cleanup()
+ {
+-- 
+2.39.0
+
diff --git a/queue-6.1/selftests-bpf-verify-copy_register_state-preserves-p.patch b/queue-6.1/selftests-bpf-verify-copy_register_state-preserves-p.patch
new file mode 100644 (file)
index 0000000..c12165a
--- /dev/null
@@ -0,0 +1,68 @@
+From 925b928b3b4e3f3968b01db24e327b09d46cf69c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jan 2023 16:22:14 +0200
+Subject: selftests/bpf: Verify copy_register_state() preserves parent/live
+ fields
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+[ Upstream commit b9fa9bc839291020b362ab5392e5f18ba79657ac ]
+
+A testcase to check that verifier.c:copy_register_state() preserves
+the register parentage chain and liveness information.
+
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20230106142214.1040390-3-eddyz87@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../selftests/bpf/verifier/search_pruning.c   | 36 +++++++++++++++++++
+ 1 file changed, 36 insertions(+)
+
+diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
+index 68b14fdfebdb1..d63fd8991b03a 100644
+--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
+@@ -225,3 +225,39 @@
+       .result_unpriv = ACCEPT,
+       .insn_processed = 15,
+ },
++/* The test performs a conditional 64-bit write to a stack location
++ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
++ * then data is read from fp[-8]. This sequence is unsafe.
++ *
++ * The test would be mistakenly marked as safe w/o dst register parent
++ * preservation in verifier.c:copy_register_state() function.
++ *
++ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
++ * checkpoint state after conditional 64-bit assignment.
++ */
++{
++      "write tracking and register parent chain bug",
++      .insns = {
++      /* r6 = ktime_get_ns() */
++      BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
++      /* r0 = ktime_get_ns() */
++      BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
++      /* if r0 > r6 goto +1 */
++      BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
++      /* *(u64 *)(r10 - 8) = 0xdeadbeef */
++      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
++      /* r1 = 42 */
++      BPF_MOV64_IMM(BPF_REG_1, 42),
++      /* *(u8 *)(r10 - 8) = r1 */
++      BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
++      /* r2 = *(u64 *)(r10 - 8) */
++      BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
++      /* exit(0) */
++      BPF_MOV64_IMM(BPF_REG_0, 0),
++      BPF_EXIT_INSN(),
++      },
++      .flags = BPF_F_TEST_STATE_FREQ,
++      .errstr = "invalid read from stack off -8+1 size 8",
++      .result = REJECT,
++},
+-- 
+2.39.0
+
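For readers less familiar with the BPF_* instruction macros, the snippet below is a plain C model of the stack slot fp[-8] exercised by the new testcase; it is illustrative only (the byte markers and names are invented), not the selftest or the verifier itself. When the conditional 64-bit store is skipped, only one byte of the slot is ever written, so the later 8-byte read has to be rejected.

/* slot_model.c - model of fp[-8] in the testcase above */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char slot[8];                           /* fp[-8] .. fp[-1]            */
        int branch_taken = 1;                   /* models "if r0 > r6 goto +1" */

        memset(slot, '.', sizeof(slot));        /* '.' = byte never written    */

        if (!branch_taken)
                memset(slot, 'W', 8);           /* conditional 64-bit store    */
        slot[0] = 'W';                          /* unconditional 8-bit store   */

        /* the 64-bit read back is only safe if every byte was written */
        printf("slot = %.8s -> 64-bit read %s\n", slot,
               memchr(slot, '.', sizeof(slot)) ?
               "must be rejected" : "is safe");
        return 0;
}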
diff --git a/queue-6.1/series b/queue-6.1/series
new file mode 100644 (file)
index 0000000..48a33a3
--- /dev/null
@@ -0,0 +1,47 @@
+mptcp-sockopt-make-tcp_fastopen_connect-generic.patch
+mptcp-fix-locking-for-setsockopt-corner-case.patch
+mptcp-deduplicate-error-paths-on-endpoint-creation.patch
+mptcp-fix-locking-for-in-kernel-listener-creation.patch
+dma-buf-add-unlocked-variant-of-vmapping-functions.patch
+drm-gem-take-reservation-lock-for-vmap-vunmap-operat.patch
+drm-client-switch-drm_client_buffer_delete-to-unlock.patch
+drm-client-prevent-null-dereference-in-drm_client_bu.patch
+drm-client-fix-circular-reference-counting-issue.patch
+btrfs-move-the-auto-defrag-code-to-defrag.c.patch
+btrfs-lock-the-inode-in-shared-mode-before-starting-.patch
+asoc-amd-yc-add-dmi-support-for-new-acer-emdoor-plat.patch
+asoc-sof-sof-audio-start-with-the-right-widget-type.patch
+alsa-usb-audio-add-fixed_rate-quirk-for-jbl-quantum6.patch
+asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch
+asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch
+asoc-intel-sof_nau8825-always-set-dpcm_capture-for-a.patch
+asoc-intel-sof_ssp_amp-always-set-dpcm_capture-for-a.patch
+selftests-bpf-verify-copy_register_state-preserves-p.patch
+alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch
+asoc-amd-yc-add-xiaomi-redmi-book-pro-15-2022-into-d.patch
+bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch
+asoc-cs42l56-fix-dt-probe.patch
+tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch
+vdpa-ifcvf-do-proper-cleanup-if-ifcvf-init-fails.patch
+net-rose-fix-to-not-accept-on-connected-socket.patch
+selftest-net-improve-ipv6_tclass-ipv6_hoplimit-tests.patch
+net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch
+powerpc-64-fix-perf-profiling-asynchronous-interrupt.patch
+fscache-use-clear_and_wake_up_bit-in-fscache_create_.patch
+drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch
+net-ethernet-mtk_eth_soc-avoid-truncating-allocation.patch
+net-sched-sch-bounds-check-priority.patch
+s390-decompressor-specify-__decompress-buf-len-to-av.patch
+nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch
+nvme-clear-the-request_queue-pointers-on-failure-in-.patch
+nvme-clear-the-request_queue-pointers-on-failure-in-.patch-7477
+drm-amd-display-add-missing-brackets-in-calculation.patch
+drm-amd-display-adjust-downscaling-limits-for-dcn314.patch
+drm-amd-display-unassign-does_plane_fit_in_mall-func.patch
+drm-amd-display-reset-dmub-mailbox-sw-state-after-hw.patch
+drm-amdgpu-enable-hdp-sd-for-gfx-11.0.3.patch
+drm-amdgpu-enable-vclk-dclk-node-for-gc11.0.3.patch
+drm-amd-display-properly-handle-additional-cases-whe.patch
+platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch
+ceph-move-mount-state-enum-to-super.h.patch
+ceph-blocklist-the-kclient-when-receiving-corrupted-.patch
diff --git a/queue-6.1/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch b/queue-6.1/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch
new file mode 100644 (file)
index 0000000..e730c2a
--- /dev/null
@@ -0,0 +1,150 @@
+From 656946277c8b3c9671e6178f7f796f7695351bd9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Jan 2023 12:43:10 +0900
+Subject: tools/virtio: fix the vringh test for virtio ring changes
+
+From: Shunsuke Mie <mie@igel.co.jp>
+
+[ Upstream commit 3f7b75abf41cc4143aa295f62acbb060a012868d ]
+
+Fix the build breakage caused by the missing kmsan_handle_dma() and
+is_power_of_2(), both of which are used in drivers/virtio/virtio_ring.c.
+
+Signed-off-by: Shunsuke Mie <mie@igel.co.jp>
+Message-Id: <20230110034310.779744-1-mie@igel.co.jp>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/virtio/linux/bug.h         |  8 +++-----
+ tools/virtio/linux/build_bug.h   |  7 +++++++
+ tools/virtio/linux/cpumask.h     |  7 +++++++
+ tools/virtio/linux/gfp.h         |  7 +++++++
+ tools/virtio/linux/kernel.h      |  1 +
+ tools/virtio/linux/kmsan.h       | 12 ++++++++++++
+ tools/virtio/linux/scatterlist.h |  1 +
+ tools/virtio/linux/topology.h    |  7 +++++++
+ 8 files changed, 45 insertions(+), 5 deletions(-)
+ create mode 100644 tools/virtio/linux/build_bug.h
+ create mode 100644 tools/virtio/linux/cpumask.h
+ create mode 100644 tools/virtio/linux/gfp.h
+ create mode 100644 tools/virtio/linux/kmsan.h
+ create mode 100644 tools/virtio/linux/topology.h
+
+diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h
+index 813baf13f62a2..51a919083d9b8 100644
+--- a/tools/virtio/linux/bug.h
++++ b/tools/virtio/linux/bug.h
+@@ -1,13 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef BUG_H
+-#define BUG_H
++#ifndef _LINUX_BUG_H
++#define _LINUX_BUG_H
+ #include <asm/bug.h>
+ #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
+-#define BUILD_BUG_ON(x)
+-
+ #define BUG() abort()
+-#endif /* BUG_H */
++#endif /* _LINUX_BUG_H */
+diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h
+new file mode 100644
+index 0000000000000..cdbb75e28a604
+--- /dev/null
++++ b/tools/virtio/linux/build_bug.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_BUILD_BUG_H
++#define _LINUX_BUILD_BUG_H
++
++#define BUILD_BUG_ON(x)
++
++#endif        /* _LINUX_BUILD_BUG_H */
+diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h
+new file mode 100644
+index 0000000000000..307da69d6b26c
+--- /dev/null
++++ b/tools/virtio/linux/cpumask.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_CPUMASK_H
++#define _LINUX_CPUMASK_H
++
++#include <linux/kernel.h>
++
++#endif /* _LINUX_CPUMASK_H */
+diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h
+new file mode 100644
+index 0000000000000..43d146f236f14
+--- /dev/null
++++ b/tools/virtio/linux/gfp.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_GFP_H
++#define __LINUX_GFP_H
++
++#include <linux/topology.h>
++
++#endif
+diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
+index 21593bf977552..8b877167933d1 100644
+--- a/tools/virtio/linux/kernel.h
++++ b/tools/virtio/linux/kernel.h
+@@ -10,6 +10,7 @@
+ #include <stdarg.h>
+ #include <linux/compiler.h>
++#include <linux/log2.h>
+ #include <linux/types.h>
+ #include <linux/overflow.h>
+ #include <linux/list.h>
+diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h
+new file mode 100644
+index 0000000000000..272b5aa285d5a
+--- /dev/null
++++ b/tools/virtio/linux/kmsan.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_KMSAN_H
++#define _LINUX_KMSAN_H
++
++#include <linux/gfp.h>
++
++inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
++                           enum dma_data_direction dir)
++{
++}
++
++#endif /* _LINUX_KMSAN_H */
+diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
+index 369ee308b6686..74d9e1825748e 100644
+--- a/tools/virtio/linux/scatterlist.h
++++ b/tools/virtio/linux/scatterlist.h
+@@ -2,6 +2,7 @@
+ #ifndef SCATTERLIST_H
+ #define SCATTERLIST_H
+ #include <linux/kernel.h>
++#include <linux/bug.h>
+ struct scatterlist {
+       unsigned long   page_link;
+diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h
+new file mode 100644
+index 0000000000000..910794afb993a
+--- /dev/null
++++ b/tools/virtio/linux/topology.h
+@@ -0,0 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_TOPOLOGY_H
++#define _LINUX_TOPOLOGY_H
++
++#include <linux/cpumask.h>
++
++#endif /* _LINUX_TOPOLOGY_H */
+-- 
+2.39.0
+
diff --git a/queue-6.1/vdpa-ifcvf-do-proper-cleanup-if-ifcvf-init-fails.patch b/queue-6.1/vdpa-ifcvf-do-proper-cleanup-if-ifcvf-init-fails.patch
new file mode 100644 (file)
index 0000000..e5d1069
--- /dev/null
@@ -0,0 +1,40 @@
+From c564f9f9f23c084650907b61e06d02f47472d942 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Dec 2022 22:02:16 +0100
+Subject: vdpa: ifcvf: Do proper cleanup if IFCVF init fails
+
+From: Tanmay Bhushan <007047221b@gmail.com>
+
+[ Upstream commit 6b04456e248761cf68f562f2fd7c04e591fcac94 ]
+
+ifcvf_mgmt_dev leaks memory if it is not freed before
+returning. Jump to the error label instead of returning
+directly so the memory does not leak. ifcvf_init_hw()
+does not take care of this, so it has to be done here.
+
+Signed-off-by: Tanmay Bhushan <007047221b@gmail.com>
+Message-Id: <772e9fe133f21fa78fb98a2ebe8969efbbd58e3c.camel@gmail.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Zhu Lingshan <lingshan.zhu@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/ifcvf/ifcvf_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
+index f9c0044c6442e..44b29289aa193 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_main.c
++++ b/drivers/vdpa/ifcvf/ifcvf_main.c
+@@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       ret = ifcvf_init_hw(vf, pdev);
+       if (ret) {
+               IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+-              return ret;
++              goto err;
+       }
+       for (i = 0; i < vf->nr_vring; i++)
+-- 
+2.39.0
+
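The one-line change depends on the usual goto-unwind error-handling idiom. The sketch below shows that idiom in isolation, with a hypothetical probe()/init_hw() pair rather than the real ifcvf driver API: every failure after the allocation jumps to a single error label, so the allocation is released exactly once.

/* goto_unwind.c - sketch of the error-path idiom used by the fix above */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mgmt_dev {
        int id;
};

static int init_hw(int should_fail)
{
        return should_fail ? -1 : 0;
}

static int probe(int should_fail)
{
        struct mgmt_dev *dev;
        int ret;

        dev = malloc(sizeof(*dev));
        if (!dev)
                return -ENOMEM; /* nothing allocated yet, plain return is fine */

        ret = init_hw(should_fail);
        if (ret) {
                fprintf(stderr, "failed to init hw\n");
                goto err;       /* NOT "return ret;" - that would leak dev */
        }

        /* further setup would go here; dev is freed on success only because
         * this demo has no device lifetime to hand it over to */
        free(dev);
        return 0;

err:
        free(dev);              /* single unwind point releases dev */
        return ret;
}

int main(void)
{
        printf("probe(fail) = %d\n", probe(1));
        printf("probe(ok)   = %d\n", probe(0));
        return 0;
}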