From: Sasha Levin Date: Wed, 15 Feb 2023 16:33:44 +0000 (-0500) Subject: Fixes for 5.15 X-Git-Tag: v4.14.306~67 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4ab2fbf2f1e1f656fefad94c56fe2eeeb2355d07;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 5.15 Signed-off-by: Sasha Levin --- diff --git a/queue-5.15/acpi-x86-add-support-for-lps0-callback-handler.patch b/queue-5.15/acpi-x86-add-support-for-lps0-callback-handler.patch new file mode 100644 index 00000000000..4530d0d9d06 --- /dev/null +++ b/queue-5.15/acpi-x86-add-support-for-lps0-callback-handler.patch @@ -0,0 +1,131 @@ +From 461254da71a841d1ed778d02e3165a301407772a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 17 Mar 2022 09:14:42 -0500 +Subject: ACPI / x86: Add support for LPS0 callback handler + +From: Mario Limonciello + +[ Upstream commit 20e1d6402a71dba7ad2b81f332a3c14c7d3b939b ] + +Currenty the latest thing run during a suspend to idle attempt is +the LPS0 `prepare_late` callback and the earliest thing is the +`resume_early` callback. + +There is a desire for the `amd-pmc` driver to suspend later in the +suspend process (ideally the very last thing), so create a callback +that it or any other driver can hook into to do this. + +Signed-off-by: Mario Limonciello +Acked-by: Rafael J. Wysocki +Link: https://lore.kernel.org/r/20220317141445.6498-1-mario.limonciello@amd.com +Reviewed-by: Hans de Goede +Signed-off-by: Hans de Goede +Stable-dep-of: 8e60615e8932 ("platform/x86/amd: pmc: Disable IRQ1 wakeup for RN/CZN") +Signed-off-by: Sasha Levin +--- + drivers/acpi/x86/s2idle.c | 40 +++++++++++++++++++++++++++++++++++++++ + include/linux/acpi.h | 10 +++++++++- + 2 files changed, 49 insertions(+), 1 deletion(-) + +diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c +index 2af1ae1721021..4a11a38764321 100644 +--- a/drivers/acpi/x86/s2idle.c ++++ b/drivers/acpi/x86/s2idle.c +@@ -86,6 +86,8 @@ struct lpi_device_constraint_amd { + int min_dstate; + }; + ++static LIST_HEAD(lps0_s2idle_devops_head); ++ + static struct lpi_constraints *lpi_constraints_table; + static int lpi_constraints_table_size; + static int rev_id; +@@ -434,6 +436,8 @@ static struct acpi_scan_handler lps0_handler = { + + int acpi_s2idle_prepare_late(void) + { ++ struct acpi_s2idle_dev_ops *handler; ++ + if (!lps0_device_handle || sleep_no_lps0) + return 0; + +@@ -464,14 +468,26 @@ int acpi_s2idle_prepare_late(void) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, + lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); + } ++ ++ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { ++ if (handler->prepare) ++ handler->prepare(); ++ } ++ + return 0; + } + + void acpi_s2idle_restore_early(void) + { ++ struct acpi_s2idle_dev_ops *handler; ++ + if (!lps0_device_handle || sleep_no_lps0) + return; + ++ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) ++ if (handler->restore) ++ handler->restore(); ++ + /* Modern standby exit */ + if (lps0_dsm_func_mask_microsoft > 0) + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, +@@ -514,4 +530,28 @@ void acpi_s2idle_setup(void) + s2idle_set_ops(&acpi_s2idle_ops_lps0); + } + ++int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) ++{ ++ if (!lps0_device_handle || sleep_no_lps0) ++ return -ENODEV; ++ ++ lock_system_sleep(); ++ list_add(&arg->list_node, &lps0_s2idle_devops_head); ++ unlock_system_sleep(); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(acpi_register_lps0_dev); ++ ++void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) ++{ ++ if (!lps0_device_handle || 
sleep_no_lps0) ++ return; ++ ++ lock_system_sleep(); ++ list_del(&arg->list_node); ++ unlock_system_sleep(); ++} ++EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev); ++ + #endif /* CONFIG_SUSPEND */ +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 6224b1e32681c..2d7df5cea2494 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -1005,7 +1005,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, + + acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, + u32 val_a, u32 val_b); +- ++#ifdef CONFIG_X86 ++struct acpi_s2idle_dev_ops { ++ struct list_head list_node; ++ void (*prepare)(void); ++ void (*restore)(void); ++}; ++int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); ++void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); ++#endif /* CONFIG_X86 */ + #ifndef CONFIG_IA64 + void arch_reserve_mem_area(acpi_physical_address addr, size_t size); + #else +-- +2.39.0 + diff --git a/queue-5.15/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch b/queue-5.15/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch new file mode 100644 index 00000000000..0924648258f --- /dev/null +++ b/queue-5.15/alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch @@ -0,0 +1,71 @@ +From b40fde804fdc1eee97ec92fa28e1d7dbeb3b288a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 19 Jan 2023 15:32:35 +0100 +Subject: ALSA: hda: Do not unset preset when cleaning up codec + +From: Cezary Rojewski + +[ Upstream commit 87978e6ad45a16835cc58234451111091be3c59a ] + +Several functions that take part in codec's initialization and removal +are re-used by ASoC codec drivers implementations. Drivers mimic the +behavior of hda_codec_driver_probe/remove() found in +sound/pci/hda/hda_bind.c with their component->probe/remove() instead. + +One of the reasons for that is the expectation of +snd_hda_codec_device_new() to receive a valid pointer to an instance of +struct snd_card. This expectation can be met only once sound card +components probing commences. + +As ASoC sound card may be unbound without codec device being actually +removed from the system, unsetting ->preset in +snd_hda_codec_cleanup_for_unbind() interferes with module unload -> load +scenario causing null-ptr-deref. Preset is assigned only once, during +device/driver matching whereas ASoC codec driver's module reloading may +occur several times throughout the lifetime of an audio stack. 
+ +Suggested-by: Takashi Iwai +Signed-off-by: Cezary Rojewski +Link: https://lore.kernel.org/r/20230119143235.1159814-1-cezary.rojewski@intel.com +Signed-off-by: Takashi Iwai +Signed-off-by: Sasha Levin +--- + sound/pci/hda/hda_bind.c | 2 ++ + sound/pci/hda/hda_codec.c | 1 - + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c +index 7af2515735957..8e35009ec25cb 100644 +--- a/sound/pci/hda/hda_bind.c ++++ b/sound/pci/hda/hda_bind.c +@@ -144,6 +144,7 @@ static int hda_codec_driver_probe(struct device *dev) + + error: + snd_hda_codec_cleanup_for_unbind(codec); ++ codec->preset = NULL; + return err; + } + +@@ -166,6 +167,7 @@ static int hda_codec_driver_remove(struct device *dev) + if (codec->patch_ops.free) + codec->patch_ops.free(codec); + snd_hda_codec_cleanup_for_unbind(codec); ++ codec->preset = NULL; + module_put(dev->driver->owner); + return 0; + } +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index f552785d301e0..19be60bb57810 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -791,7 +791,6 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec) + snd_array_free(&codec->cvt_setups); + snd_array_free(&codec->spdif_out); + snd_array_free(&codec->verbs); +- codec->preset = NULL; + codec->follower_dig_outs = NULL; + codec->spdif_status_reset = 0; + snd_array_free(&codec->mixers); +-- +2.39.0 + diff --git a/queue-5.15/asoc-cs42l56-fix-dt-probe.patch b/queue-5.15/asoc-cs42l56-fix-dt-probe.patch new file mode 100644 index 00000000000..63ebd916ec2 --- /dev/null +++ b/queue-5.15/asoc-cs42l56-fix-dt-probe.patch @@ -0,0 +1,56 @@ +From a430e3dd916552c02789f72948cc110c526b4cee Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 26 Jan 2023 17:21:24 +0100 +Subject: ASoC: cs42l56: fix DT probe + +From: Arnd Bergmann + +[ Upstream commit e18c6da62edc780e4f4f3c9ce07bdacd69505182 ] + +While looking through legacy platform data users, I noticed that +the DT probing never uses data from the DT properties, as the +platform_data structure gets overwritten directly after it +is initialized. + +There have never been any boards defining the platform_data in +the mainline kernel either, so this driver so far only worked +with patched kernels or with the default values. + +For the benefit of possible downstream users, fix the DT probe +by no longer overwriting the data. 
+ +Signed-off-by: Arnd Bergmann +Acked-by: Charles Keepax +Link: https://lore.kernel.org/r/20230126162203.2986339-1-arnd@kernel.org +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/codecs/cs42l56.c | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c +index b39c25409c239..f0af8c18e5efa 100644 +--- a/sound/soc/codecs/cs42l56.c ++++ b/sound/soc/codecs/cs42l56.c +@@ -1193,18 +1193,12 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, + if (pdata) { + cs42l56->pdata = *pdata; + } else { +- pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata), +- GFP_KERNEL); +- if (!pdata) +- return -ENOMEM; +- + if (i2c_client->dev.of_node) { + ret = cs42l56_handle_of_data(i2c_client, + &cs42l56->pdata); + if (ret != 0) + return ret; + } +- cs42l56->pdata = *pdata; + } + + if (cs42l56->pdata.gpio_nreset) { +-- +2.39.0 + diff --git a/queue-5.15/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch b/queue-5.15/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch new file mode 100644 index 00000000000..b603b0b746a --- /dev/null +++ b/queue-5.15/asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch @@ -0,0 +1,49 @@ +From 2a188a6217e784f5fffe3d3bc47e20be7b436f7c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 19 Jan 2023 18:34:57 +0200 +Subject: ASoC: Intel: sof_cs42l42: always set dpcm_capture for amplifiers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Pierre-Louis Bossart + +[ Upstream commit e0a52220344ab7defe25b9cdd58fe1dc1122e67c ] + +The amplifier may provide hardware support for I/V feedback, or +alternatively the firmware may generate an echo reference attached to +the SSP and dailink used for the amplifier. + +To avoid any issues with invalid/NULL substreams in the latter case, +always unconditionally set dpcm_capture. 
+ +Link: https://github.com/thesofproject/linux/issues/4083 +Signed-off-by: Pierre-Louis Bossart +Reviewed-by: Ranjani Sridharan +Reviewed-by: Péter Ujfalusi +Reviewed-by: Bard Liao +Signed-off-by: Kai Vehmanen +Link: https://lore.kernel.org/r/20230119163459.2235843-3-kai.vehmanen@linux.intel.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/intel/boards/sof_cs42l42.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/sound/soc/intel/boards/sof_cs42l42.c b/sound/soc/intel/boards/sof_cs42l42.c +index ce78c18798876..8061082d9fbf3 100644 +--- a/sound/soc/intel/boards/sof_cs42l42.c ++++ b/sound/soc/intel/boards/sof_cs42l42.c +@@ -311,6 +311,9 @@ static int create_spk_amp_dai_links(struct device *dev, + links[*id].platforms = platform_component; + links[*id].num_platforms = ARRAY_SIZE(platform_component); + links[*id].dpcm_playback = 1; ++ /* firmware-generated echo reference */ ++ links[*id].dpcm_capture = 1; ++ + links[*id].no_pcm = 1; + links[*id].cpus = &cpus[*id]; + links[*id].num_cpus = 1; +-- +2.39.0 + diff --git a/queue-5.15/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch b/queue-5.15/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch new file mode 100644 index 00000000000..bfed6c9c48b --- /dev/null +++ b/queue-5.15/asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch @@ -0,0 +1,58 @@ +From a64a18d4dd9e634502235d1cb9b9ceafd807e6ab Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 19 Jan 2023 18:34:56 +0200 +Subject: ASoC: Intel: sof_rt5682: always set dpcm_capture for amplifiers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Pierre-Louis Bossart + +[ Upstream commit 324f065cdbaba1b879a63bf07e61ca156b789537 ] + +The amplifier may provide hardware support for I/V feedback, or +alternatively the firmware may generate an echo reference attached to +the SSP and dailink used for the amplifier. + +To avoid any issues with invalid/NULL substreams in the latter case, +always unconditionally set dpcm_capture. 
+ +Link: https://github.com/thesofproject/linux/issues/4083 +Signed-off-by: Pierre-Louis Bossart +Reviewed-by: Ranjani Sridharan +Reviewed-by: Péter Ujfalusi +Reviewed-by: Bard Liao +Signed-off-by: Kai Vehmanen +Link: https://lore.kernel.org/r/20230119163459.2235843-2-kai.vehmanen@linux.intel.com +Signed-off-by: Mark Brown +Signed-off-by: Sasha Levin +--- + sound/soc/intel/boards/sof_rt5682.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c +index f096bd6d69be7..d0ce2f06b30c6 100644 +--- a/sound/soc/intel/boards/sof_rt5682.c ++++ b/sound/soc/intel/boards/sof_rt5682.c +@@ -737,8 +737,6 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev, + links[id].num_codecs = ARRAY_SIZE(max_98373_components); + links[id].init = max_98373_spk_codec_init; + links[id].ops = &max_98373_ops; +- /* feedback stream */ +- links[id].dpcm_capture = 1; + } else if (sof_rt5682_quirk & + SOF_MAX98360A_SPEAKER_AMP_PRESENT) { + max_98360a_dai_link(&links[id]); +@@ -751,6 +749,9 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev, + links[id].platforms = platform_component; + links[id].num_platforms = ARRAY_SIZE(platform_component); + links[id].dpcm_playback = 1; ++ /* feedback stream or firmware-generated echo reference */ ++ links[id].dpcm_capture = 1; ++ + links[id].no_pcm = 1; + links[id].cpus = &cpus[id]; + links[id].num_cpus = 1; +-- +2.39.0 + diff --git a/queue-5.15/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch b/queue-5.15/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch new file mode 100644 index 00000000000..9b15f56498a --- /dev/null +++ b/queue-5.15/bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch @@ -0,0 +1,117 @@ +From 3073d332ecef87d0a632c7c423d13ad2a3f710e3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 21 Jan 2023 13:41:43 +0100 +Subject: bpf, sockmap: Don't let sock_map_{close,destroy,unhash} call itself + +From: Jakub Sitnicki + +[ Upstream commit 5b4a79ba65a1ab479903fff2e604865d229b70a9 ] + +sock_map proto callbacks should never call themselves by design. Protect +against bugs like [1] and break out of the recursive loop to avoid a stack +overflow in favor of a resource leak. 
+ +[1] https://lore.kernel.org/all/00000000000073b14905ef2e7401@google.com/ + +Suggested-by: Eric Dumazet +Signed-off-by: Jakub Sitnicki +Acked-by: John Fastabend +Link: https://lore.kernel.org/r/20230113-sockmap-fix-v2-1-1e0ee7ac2f90@cloudflare.com +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + net/core/sock_map.c | 61 +++++++++++++++++++++++++-------------------- + 1 file changed, 34 insertions(+), 27 deletions(-) + +diff --git a/net/core/sock_map.c b/net/core/sock_map.c +index ae6013a8bce53..86b4e8909ad1e 100644 +--- a/net/core/sock_map.c ++++ b/net/core/sock_map.c +@@ -1514,15 +1514,16 @@ void sock_map_unhash(struct sock *sk) + psock = sk_psock(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); +- if (sk->sk_prot->unhash) +- sk->sk_prot->unhash(sk); +- return; ++ saved_unhash = READ_ONCE(sk->sk_prot)->unhash; ++ } else { ++ saved_unhash = psock->saved_unhash; ++ sock_map_remove_links(sk, psock); ++ rcu_read_unlock(); + } +- +- saved_unhash = psock->saved_unhash; +- sock_map_remove_links(sk, psock); +- rcu_read_unlock(); +- saved_unhash(sk); ++ if (WARN_ON_ONCE(saved_unhash == sock_map_unhash)) ++ return; ++ if (saved_unhash) ++ saved_unhash(sk); + } + EXPORT_SYMBOL_GPL(sock_map_unhash); + +@@ -1535,17 +1536,18 @@ void sock_map_destroy(struct sock *sk) + psock = sk_psock_get(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); +- if (sk->sk_prot->destroy) +- sk->sk_prot->destroy(sk); +- return; ++ saved_destroy = READ_ONCE(sk->sk_prot)->destroy; ++ } else { ++ saved_destroy = psock->saved_destroy; ++ sock_map_remove_links(sk, psock); ++ rcu_read_unlock(); ++ sk_psock_stop(psock); ++ sk_psock_put(sk, psock); + } +- +- saved_destroy = psock->saved_destroy; +- sock_map_remove_links(sk, psock); +- rcu_read_unlock(); +- sk_psock_stop(psock); +- sk_psock_put(sk, psock); +- saved_destroy(sk); ++ if (WARN_ON_ONCE(saved_destroy == sock_map_destroy)) ++ return; ++ if (saved_destroy) ++ saved_destroy(sk); + } + EXPORT_SYMBOL_GPL(sock_map_destroy); + +@@ -1560,16 +1562,21 @@ void sock_map_close(struct sock *sk, long timeout) + if (unlikely(!psock)) { + rcu_read_unlock(); + release_sock(sk); +- return sk->sk_prot->close(sk, timeout); ++ saved_close = READ_ONCE(sk->sk_prot)->close; ++ } else { ++ saved_close = psock->saved_close; ++ sock_map_remove_links(sk, psock); ++ rcu_read_unlock(); ++ sk_psock_stop(psock); ++ release_sock(sk); ++ cancel_work_sync(&psock->work); ++ sk_psock_put(sk, psock); + } +- +- saved_close = psock->saved_close; +- sock_map_remove_links(sk, psock); +- rcu_read_unlock(); +- sk_psock_stop(psock); +- release_sock(sk); +- cancel_work_sync(&psock->work); +- sk_psock_put(sk, psock); ++ /* Make sure we do not recurse. This is a bug. ++ * Leak the socket instead of crashing on a stack overflow. 
++ */ ++ if (WARN_ON_ONCE(saved_close == sock_map_close)) ++ return; + saved_close(sk, timeout); + } + EXPORT_SYMBOL_GPL(sock_map_close); +-- +2.39.0 + diff --git a/queue-5.15/drm-amd-display-properly-handle-additional-cases-whe.patch b/queue-5.15/drm-amd-display-properly-handle-additional-cases-whe.patch new file mode 100644 index 00000000000..f2e4a8df245 --- /dev/null +++ b/queue-5.15/drm-amd-display-properly-handle-additional-cases-whe.patch @@ -0,0 +1,51 @@ +From 6e57ee2652ffb28b7ec0181a76e868bb27a0a84a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 25 Jan 2023 14:35:16 -0500 +Subject: drm/amd/display: Properly handle additional cases where DCN is not + supported + +From: Alex Deucher + +[ Upstream commit 6fc547a5a2ef5ce05b16924106663ab92f8f87a7 ] + +There could be boards with DCN listed in IP discovery, but no +display hardware actually wired up. In this case the vbios +display table will not be populated. Detect this case and +skip loading DM when we detect it. + +v2: Mark DCN as harvested as well so other display checks +elsewhere in the driver are handled properly. + +Cc: Aurabindo Pillai +Reviewed-by: Aurabindo Pillai +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index e9797439bb0eb..3f325e1e440c9 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -4428,6 +4428,17 @@ DEVICE_ATTR_WO(s3_debug); + static int dm_early_init(void *handle) + { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; ++ struct amdgpu_mode_info *mode_info = &adev->mode_info; ++ struct atom_context *ctx = mode_info->atom_context; ++ int index = GetIndexIntoMasterTable(DATA, Object_Header); ++ u16 data_offset; ++ ++ /* if there is no object header, skip DM */ ++ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { ++ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; ++ dev_info(adev->dev, "No object header, skipping DM\n"); ++ return -ENOENT; ++ } + + switch (adev->asic_type) { + #if defined(CONFIG_DRM_AMD_DC_SI) +-- +2.39.0 + diff --git a/queue-5.15/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch b/queue-5.15/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch new file mode 100644 index 00000000000..7d9c4fe746b --- /dev/null +++ b/queue-5.15/drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch @@ -0,0 +1,69 @@ +From d038ed9bb5b4f85f1eb5d1aaf23fdd59fc4a9153 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 31 Jan 2023 08:37:13 +1000 +Subject: drm/nouveau/devinit/tu102-: wait for GFW_BOOT_PROGRESS == COMPLETED + +From: Ben Skeggs + +[ Upstream commit d22915d22ded21fd5b24b60d174775789f173997 ] + +Starting from Turing, the driver is no longer responsible for initiating +DEVINIT when required as the GPU started loading a FW image from ROM and +executing DEVINIT itself after power-on. + +However - we apparently still need to wait for it to complete. + +This should correct some issues with runpm on some systems, where we get +control of the HW before it's been fully reinitialised after resume from +suspend. 
+ +Signed-off-by: Ben Skeggs +Reviewed-by: Lyude Paul +Signed-off-by: Lyude Paul +Link: https://patchwork.freedesktop.org/patch/msgid/20230130223715.1831509-1-bskeggs@redhat.com +Signed-off-by: Sasha Levin +--- + .../drm/nouveau/nvkm/subdev/devinit/tu102.c | 23 +++++++++++++++++++ + 1 file changed, 23 insertions(+) + +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c +index 634f64f88fc8b..81a1ad2c88a7e 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c +@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq) + return ret; + } + ++static int ++tu102_devinit_wait(struct nvkm_device *device) ++{ ++ unsigned timeout = 50 + 2000; ++ ++ do { ++ if (nvkm_rd32(device, 0x118128) & 0x00000001) { ++ if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff) ++ return 0; ++ } ++ ++ usleep_range(1000, 2000); ++ } while (timeout--); ++ ++ return -ETIMEDOUT; ++} ++ + int + tu102_devinit_post(struct nvkm_devinit *base, bool post) + { + struct nv50_devinit *init = nv50_devinit(base); ++ int ret; ++ ++ ret = tu102_devinit_wait(init->base.subdev.device); ++ if (ret) ++ return ret; ++ + gm200_devinit_preos(init, post); + return 0; + } +-- +2.39.0 + diff --git a/queue-5.15/kprobes-treewide-cleanup-the-error-messages-for-kpro.patch b/queue-5.15/kprobes-treewide-cleanup-the-error-messages-for-kpro.patch new file mode 100644 index 00000000000..d2fe100911c --- /dev/null +++ b/queue-5.15/kprobes-treewide-cleanup-the-error-messages-for-kpro.patch @@ -0,0 +1,356 @@ +From 8fd90b6c03598fcb9d027b626c9f71e86c1ef7ec Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Sep 2021 23:39:25 +0900 +Subject: kprobes: treewide: Cleanup the error messages for kprobes + +From: Masami Hiramatsu + +[ Upstream commit 9c89bb8e327203bc27e09ebd82d8f61ac2ae8b24 ] + +This clean up the error/notification messages in kprobes related code. +Basically this defines 'pr_fmt()' macros for each files and update +the messages which describes + + - what happened, + - what is the kernel going to do or not do, + - is the kernel fine, + - what can the user do about it. + +Also, if the message is not needed (e.g. the function returns unique +error code, or other error message is already shown.) remove it, +and replace the message with WARN_*() macros if suitable. + +Link: https://lkml.kernel.org/r/163163036568.489837.14085396178727185469.stgit@devnote2 + +Signed-off-by: Masami Hiramatsu +Signed-off-by: Steven Rostedt (VMware) +Stable-dep-of: eb7423273cc9 ("riscv: kprobe: Fixup misaligned load text") +Signed-off-by: Sasha Levin +--- + arch/arm/probes/kprobes/core.c | 4 +++- + arch/arm64/kernel/probes/kprobes.c | 5 ++++- + arch/csky/kernel/probes/kprobes.c | 10 ++++----- + arch/mips/kernel/kprobes.c | 11 +++++---- + arch/riscv/kernel/probes/kprobes.c | 11 +++++---- + arch/s390/kernel/kprobes.c | 4 +++- + kernel/kprobes.c | 36 +++++++++++++----------------- + 7 files changed, 41 insertions(+), 40 deletions(-) + +diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c +index 9d8634e2f12f7..9bcae72dda440 100644 +--- a/arch/arm/probes/kprobes/core.c ++++ b/arch/arm/probes/kprobes/core.c +@@ -11,6 +11,8 @@ + * Copyright (C) 2007 Marvell Ltd. 
+ */ + ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -278,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) + break; + case KPROBE_REENTER: + /* A nested probe was hit in FIQ, it is a BUG */ +- pr_warn("Unrecoverable kprobe detected.\n"); ++ pr_warn("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + fallthrough; + default: +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c +index b7404dba0d623..2162b6fd7251d 100644 +--- a/arch/arm64/kernel/probes/kprobes.c ++++ b/arch/arm64/kernel/probes/kprobes.c +@@ -7,6 +7,9 @@ + * Copyright (C) 2013 Linaro Limited. + * Author: Sandeepa Prabhu + */ ++ ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -218,7 +221,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: +- pr_warn("Unrecoverable kprobe detected.\n"); ++ pr_warn("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + BUG(); + break; +diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c +index 584ed9f36290f..bd92ac376e157 100644 +--- a/arch/csky/kernel/probes/kprobes.c ++++ b/arch/csky/kernel/probes/kprobes.c +@@ -1,5 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0+ + ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -77,10 +79,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + { + unsigned long probe_addr = (unsigned long)p->addr; + +- if (probe_addr & 0x1) { +- pr_warn("Address not aligned.\n"); +- return -EINVAL; +- } ++ if (probe_addr & 0x1) ++ return -EILSEQ; + + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); +@@ -229,7 +229,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: +- pr_warn("Unrecoverable kprobe detected.\n"); ++ pr_warn("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + BUG(); + break; +diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c +index 75bff0f773198..b0934a0d7aedd 100644 +--- a/arch/mips/kernel/kprobes.c ++++ b/arch/mips/kernel/kprobes.c +@@ -11,6 +11,8 @@ + * Copyright (C) IBM Corporation, 2002, 2004 + */ + ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -80,8 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + insn = p->addr[0]; + + if (insn_has_ll_or_sc(insn)) { +- pr_notice("Kprobes for ll and sc instructions are not" +- "supported\n"); ++ pr_notice("Kprobes for ll and sc instructions are not supported\n"); + ret = -EINVAL; + goto out; + } +@@ -219,7 +220,7 @@ static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, + return 0; + + unaligned: +- pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); ++ pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm); + force_sig(SIGBUS); + return -EFAULT; + +@@ -238,10 +239,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, + regs->cp0_epc = (unsigned long)p->addr; + else if (insn_has_delayslot(p->opcode)) { + ret = evaluate_branch_instruction(p, regs, kcb); +- if (ret < 0) { +- pr_notice("Kprobes: Error in evaluating branch\n"); ++ if (ret < 0) + return; +- } + } + regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; + } +diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c +index 125241ce82d6a..b53aa0209e079 100644 +--- a/arch/riscv/kernel/probes/kprobes.c ++++ 
b/arch/riscv/kernel/probes/kprobes.c +@@ -1,5 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0+ + ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -65,11 +67,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + { + unsigned long probe_addr = (unsigned long)p->addr; + +- if (probe_addr & 0x1) { +- pr_warn("Address not aligned.\n"); +- +- return -EINVAL; +- } ++ if (probe_addr & 0x1) ++ return -EILSEQ; + + if (!arch_check_kprobe(p)) + return -EILSEQ; +@@ -209,7 +208,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: +- pr_warn("Unrecoverable kprobe detected.\n"); ++ pr_warn("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + BUG(); + break; +diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c +index 52d056a5f89fc..952d44b0610b0 100644 +--- a/arch/s390/kernel/kprobes.c ++++ b/arch/s390/kernel/kprobes.c +@@ -7,6 +7,8 @@ + * s390 port, used ppc64 as template. Mike Grundy + */ + ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -259,7 +261,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p) + * is a BUG. The code path resides in the .kprobes.text + * section and is executed with interrupts disabled. + */ +- pr_err("Invalid kprobe detected.\n"); ++ pr_err("Failed to recover from reentered kprobes.\n"); + dump_kprobe(p); + BUG(); + } +diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 23af2f8e8563e..8818f3a89fef3 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -18,6 +18,9 @@ + * and Prasanna S Panchamukhi + * added function-return probes. + */ ++ ++#define pr_fmt(fmt) "kprobes: " fmt ++ + #include + #include + #include +@@ -892,7 +895,7 @@ static void optimize_all_kprobes(void) + optimize_kprobe(p); + } + cpus_read_unlock(); +- printk(KERN_INFO "Kprobes globally optimized\n"); ++ pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); + out: + mutex_unlock(&kprobe_mutex); + } +@@ -925,7 +928,7 @@ static void unoptimize_all_kprobes(void) + + /* Wait for unoptimizing completion */ + wait_for_kprobe_optimizer(); +- printk(KERN_INFO "Kprobes globally unoptimized\n"); ++ pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); + } + + static DEFINE_MUTEX(kprobe_sysctl_mutex); +@@ -1003,7 +1006,7 @@ static int reuse_unused_kprobe(struct kprobe *ap) + * unregistered. + * Thus there should be no chance to reuse unused kprobe. 
+ */ +- printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); ++ WARN_ON_ONCE(1); + return -EINVAL; + } + +@@ -1049,18 +1052,13 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, + int ret = 0; + + ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); +- if (ret) { +- pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n", +- p->addr, ret); ++ if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) + return ret; +- } + + if (*cnt == 0) { + ret = register_ftrace_function(ops); +- if (ret) { +- pr_debug("Failed to init kprobe-ftrace (%d)\n", ret); ++ if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) + goto err_ftrace; +- } + } + + (*cnt)++; +@@ -1092,14 +1090,14 @@ static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, + + if (*cnt == 1) { + ret = unregister_ftrace_function(ops); +- if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret)) ++ if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret)) + return ret; + } + + (*cnt)--; + + ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); +- WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", ++ WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", + p->addr, ret); + return ret; + } +@@ -1894,7 +1892,7 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, + + node = node->next; + } +- pr_err("Oops! Kretprobe fails to find correct return address.\n"); ++ pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n"); + BUG_ON(1); + + found: +@@ -2229,8 +2227,7 @@ EXPORT_SYMBOL_GPL(enable_kprobe); + /* Caller must NOT call this in usual path. This is only for critical case */ + void dump_kprobe(struct kprobe *kp) + { +- pr_err("Dumping kprobe:\n"); +- pr_err("Name: %s\nOffset: %x\nAddress: %pS\n", ++ pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n", + kp->symbol_name, kp->offset, kp->addr); + } + NOKPROBE_SYMBOL(dump_kprobe); +@@ -2493,8 +2490,7 @@ static int __init init_kprobes(void) + err = populate_kprobe_blacklist(__start_kprobe_blacklist, + __stop_kprobe_blacklist); + if (err) { +- pr_err("kprobes: failed to populate blacklist: %d\n", err); +- pr_err("Please take care of using kprobes.\n"); ++ pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err); + } + + if (kretprobe_blacklist_size) { +@@ -2503,7 +2499,7 @@ static int __init init_kprobes(void) + kretprobe_blacklist[i].addr = + kprobe_lookup_name(kretprobe_blacklist[i].name, 0); + if (!kretprobe_blacklist[i].addr) +- printk("kretprobe: lookup failed: %s\n", ++ pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n", + kretprobe_blacklist[i].name); + } + } +@@ -2707,7 +2703,7 @@ static int arm_all_kprobes(void) + } + + if (errors) +- pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", ++ pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n", + errors, total); + else + pr_info("Kprobes globally enabled\n"); +@@ -2750,7 +2746,7 @@ static int disarm_all_kprobes(void) + } + + if (errors) +- pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n", ++ pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. 
Please check which kprobes are kept enabled via debugfs.\n", + errors, total); + else + pr_info("Kprobes globally disabled\n"); +-- +2.39.0 + diff --git a/queue-5.15/mptcp-fix-locking-for-in-kernel-listener-creation.patch b/queue-5.15/mptcp-fix-locking-for-in-kernel-listener-creation.patch new file mode 100644 index 00000000000..97358d1f12e --- /dev/null +++ b/queue-5.15/mptcp-fix-locking-for-in-kernel-listener-creation.patch @@ -0,0 +1,81 @@ +From a2c50dc9e420be8c2e8d8443cfc87b5b206db329 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 7 Feb 2023 14:04:15 +0100 +Subject: mptcp: fix locking for in-kernel listener creation + +From: Paolo Abeni + +[ Upstream commit ad2171009d968104ccda9dc517f5a3ba891515db ] + +For consistency, in mptcp_pm_nl_create_listen_socket(), we need to +call the __mptcp_nmpc_socket() under the msk socket lock. + +Note that as a side effect, mptcp_subflow_create_socket() needs a +'nested' lockdep annotation, as it will acquire the subflow (kernel) +socket lock under the in-kernel listener msk socket lock. + +The current lack of locking is almost harmless, because the relevant +socket is not exposed to the user space, but in future we will add +more complexity to the mentioned helper, let's play safe. + +Fixes: 1729cf186d8a ("mptcp: create the listening socket for new port") +Cc: stable@vger.kernel.org +Signed-off-by: Paolo Abeni +Reviewed-by: Matthieu Baerts +Signed-off-by: Matthieu Baerts +Signed-off-by: David S. Miller +Signed-off-by: Sasha Levin +--- + net/mptcp/pm_netlink.c | 10 ++++++---- + net/mptcp/subflow.c | 2 +- + 2 files changed, 7 insertions(+), 5 deletions(-) + +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index 2b1b40199c617..3a1e8f2388665 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -891,8 +891,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk, + { + int addrlen = sizeof(struct sockaddr_in); + struct sockaddr_storage addr; +- struct mptcp_sock *msk; + struct socket *ssock; ++ struct sock *newsk; + int backlog = 1024; + int err; + +@@ -901,13 +901,15 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk, + if (err) + return err; + +- msk = mptcp_sk(entry->lsk->sk); +- if (!msk) { ++ newsk = entry->lsk->sk; ++ if (!newsk) { + err = -EINVAL; + goto out; + } + +- ssock = __mptcp_nmpc_socket(msk); ++ lock_sock(newsk); ++ ssock = __mptcp_nmpc_socket(mptcp_sk(newsk)); ++ release_sock(newsk); + if (!ssock) { + err = -EINVAL; + goto out; +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 15dbaa202c7cf..b0e9548f00bf1 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -1570,7 +1570,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) + if (err) + return err; + +- lock_sock(sf->sk); ++ lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING); + + /* the newly created socket has to be in the same cgroup as its parent */ + mptcp_attach_cgroup(sk, sf->sk); +-- +2.39.0 + diff --git a/queue-5.15/net-rose-fix-to-not-accept-on-connected-socket.patch b/queue-5.15/net-rose-fix-to-not-accept-on-connected-socket.patch new file mode 100644 index 00000000000..527dedb441f --- /dev/null +++ b/queue-5.15/net-rose-fix-to-not-accept-on-connected-socket.patch @@ -0,0 +1,63 @@ +From fa3d7b68f3d48ef4dfbf8b6e755e3db174601cbc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 25 Jan 2023 02:59:44 -0800 +Subject: net/rose: Fix to not accept on connected socket + +From: Hyunwoo Kim + +[ Upstream commit 14caefcf9837a2be765a566005ad82cd0d2a429f ] + +If you call listen() 
and accept() on an already connect()ed +rose socket, accept() can successfully connect. +This is because when the peer socket sends data to sendmsg, +the skb with its own sk stored in the connected socket's +sk->sk_receive_queue is connected, and rose_accept() dequeues +the skb waiting in the sk->sk_receive_queue. + +This creates a child socket with the sk of the parent +rose socket, which can cause confusion. + +Fix rose_listen() to return -EINVAL if the socket has +already been successfully connected, and add lock_sock +to prevent this issue. + +Signed-off-by: Hyunwoo Kim +Reviewed-by: Kuniyuki Iwashima +Link: https://lore.kernel.org/r/20230125105944.GA133314@ubuntu +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/rose/af_rose.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c +index 29a208ed8fb88..86c93cf1744b0 100644 +--- a/net/rose/af_rose.c ++++ b/net/rose/af_rose.c +@@ -487,6 +487,12 @@ static int rose_listen(struct socket *sock, int backlog) + { + struct sock *sk = sock->sk; + ++ lock_sock(sk); ++ if (sock->state != SS_UNCONNECTED) { ++ release_sock(sk); ++ return -EINVAL; ++ } ++ + if (sk->sk_state != TCP_LISTEN) { + struct rose_sock *rose = rose_sk(sk); + +@@ -496,8 +502,10 @@ static int rose_listen(struct socket *sock, int backlog) + memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); + sk->sk_max_ack_backlog = backlog; + sk->sk_state = TCP_LISTEN; ++ release_sock(sk); + return 0; + } ++ release_sock(sk); + + return -EOPNOTSUPP; + } +-- +2.39.0 + diff --git a/queue-5.15/net-sched-sch-bounds-check-priority.patch b/queue-5.15/net-sched-sch-bounds-check-priority.patch new file mode 100644 index 00000000000..ca0e4de2126 --- /dev/null +++ b/queue-5.15/net-sched-sch-bounds-check-priority.patch @@ -0,0 +1,57 @@ +From 924a3b5ce8cec26861ed8d1fbc2d28ae59f88cfc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Jan 2023 14:40:37 -0800 +Subject: net: sched: sch: Bounds check priority + +From: Kees Cook + +[ Upstream commit de5ca4c3852f896cacac2bf259597aab5e17d9e3 ] + +Nothing was explicitly bounds checking the priority index used to access +clpriop[]. WARN and bail out early if it's pathological. Seen with GCC 13: + +../net/sched/sch_htb.c: In function 'htb_activate_prios': +../net/sched/sch_htb.c:437:44: warning: array subscript [0, 31] is outside array bounds of 'struct htb_prio[8]' [-Warray-bounds=] + 437 | if (p->inner.clprio[prio].feed.rb_node) + | ~~~~~~~~~~~~~~~^~~~~~ +../net/sched/sch_htb.c:131:41: note: while referencing 'clprio' + 131 | struct htb_prio clprio[TC_HTB_NUMPRIO]; + | ^~~~~~ + +Cc: Jamal Hadi Salim +Cc: Cong Wang +Cc: Jiri Pirko +Cc: "David S. 
Miller" +Cc: Eric Dumazet +Cc: Jakub Kicinski +Cc: Paolo Abeni +Cc: netdev@vger.kernel.org +Signed-off-by: Kees Cook +Reviewed-by: Simon Horman +Reviewed-by: Cong Wang +Link: https://lore.kernel.org/r/20230127224036.never.561-kees@kernel.org +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + net/sched/sch_htb.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 45b92e40082ef..7ea8c73ddeff0 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -427,7 +427,10 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) + while (cl->cmode == HTB_MAY_BORROW && p && mask) { + m = mask; + while (m) { +- int prio = ffz(~m); ++ unsigned int prio = ffz(~m); ++ ++ if (WARN_ON_ONCE(prio > ARRAY_SIZE(p->inner.clprio))) ++ break; + m &= ~(1 << prio); + + if (p->inner.clprio[prio].feed.rb_node) +-- +2.39.0 + diff --git a/queue-5.15/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch b/queue-5.15/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch new file mode 100644 index 00000000000..1cfceaa7fb1 --- /dev/null +++ b/queue-5.15/net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch @@ -0,0 +1,67 @@ +From e09e54b9d9a27873840d5303e853e1d4d6f85f23 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Jan 2023 00:35:39 +0300 +Subject: net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC + +From: Andrey Konovalov + +[ Upstream commit 54aa39a513dbf2164ca462a19f04519b2407a224 ] + +Currently in phy_init_eee() the driver unconditionally configures the PHY +to stop RX_CLK after entering Rx LPI state. This causes an LPI interrupt +storm on my qcs404-base board. + +Change the PHY initialization so that for "qcom,qcs404-ethqos" compatible +device RX_CLK continues to run even in Rx LPI state. + +Signed-off-by: Andrey Konovalov +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2 ++ + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++- + include/linux/stmmac.h | 1 + + 3 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +index 6b1d9e8879f46..d0c7f22a4e55a 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +@@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) + plat_dat->has_gmac4 = 1; + plat_dat->pmt = 1; + plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); ++ if (of_device_is_compatible(np, "qcom,qcs404-ethqos")) ++ plat_dat->rx_clk_runs_in_lpi = 1; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 4191502d6472f..d56f65338ea66 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1174,7 +1174,8 @@ static void stmmac_mac_link_up(struct phylink_config *config, + + stmmac_mac_set(priv, priv->ioaddr, true); + if (phy && priv->dma_cap.eee) { +- priv->eee_active = phy_init_eee(phy, 1) >= 0; ++ priv->eee_active = ++ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0; + priv->eee_enabled = stmmac_eee_init(priv); + priv->tx_lpi_enabled = priv->eee_enabled; + stmmac_set_eee_pls(priv, priv->hw, true); +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 48d015ed21752..cc338c6c74954 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -251,6 +251,7 @@ struct plat_stmmacenet_data { + int rss_en; + int mac_port_sel_speed; + bool en_tx_lpi_clockgating; ++ bool rx_clk_runs_in_lpi; + int has_xgmac; + bool vlan_fail_q_en; + u8 vlan_fail_q; +-- +2.39.0 + diff --git a/queue-5.15/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch b/queue-5.15/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch new file mode 100644 index 00000000000..80713c111b1 --- /dev/null +++ b/queue-5.15/nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch @@ -0,0 +1,41 @@ +From 8cfbd6b8d041b67aadd26d72dd03c9de75a756be Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 23 Jan 2023 14:37:28 +0200 +Subject: nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association + +From: Amit Engel + +[ Upstream commit 0cab4404874f2de52617de8400c844891c6ea1ce ] + +As part of nvmet_fc_ls_create_association there is a case where +nvmet_fc_alloc_target_queue fails right after a new association with an +admin queue is created. In this case, no one releases the get taken in +nvmet_fc_alloc_target_assoc. This fix is adding the missing put. 
+ +Signed-off-by: Amit Engel +Reviewed-by: James Smart +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/target/fc.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c +index c43bc5e1c7a28..00a2a591f5c1f 100644 +--- a/drivers/nvme/target/fc.c ++++ b/drivers/nvme/target/fc.c +@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, + else { + queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, + be16_to_cpu(rqst->assoc_cmd.sqsize)); +- if (!queue) ++ if (!queue) { + ret = VERR_QUEUE_ALLOC_FAIL; ++ nvmet_fc_tgt_a_put(iod->assoc); ++ } + } + } + +-- +2.39.0 + diff --git a/queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch b/queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch new file mode 100644 index 00000000000..3b8fc01eddc --- /dev/null +++ b/queue-5.15/nvmem-core-add-error-handling-for-dev_set_name.patch @@ -0,0 +1,58 @@ +From 1d437b95f790f0c620f70e548f1343595b9c989d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 16 Sep 2022 13:20:50 +0100 +Subject: nvmem: core: add error handling for dev_set_name + +From: Gaosheng Cui + +[ Upstream commit 5544e90c81261e82e02bbf7c6015a4b9c8c825ef ] + +The type of return value of dev_set_name is int, which may return +wrong result, so we add error handling for it to reclaim memory +of nvmem resource, and return early when an error occurs. + +Signed-off-by: Gaosheng Cui +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20220916122100.170016-4-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Stable-dep-of: ab3428cfd9aa ("nvmem: core: fix registration vs use race") +Signed-off-by: Sasha Levin +--- + drivers/nvmem/core.c | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index ee86022c4f2b8..51bec9f8a3bf9 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -804,18 +804,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + + switch (config->id) { + case NVMEM_DEVID_NONE: +- dev_set_name(&nvmem->dev, "%s", config->name); ++ rval = dev_set_name(&nvmem->dev, "%s", config->name); + break; + case NVMEM_DEVID_AUTO: +- dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); ++ rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id); + break; + default: +- dev_set_name(&nvmem->dev, "%s%d", ++ rval = dev_set_name(&nvmem->dev, "%s%d", + config->name ? : "nvmem", + config->name ? config->id : nvmem->id); + break; + } + ++ if (rval) { ++ ida_free(&nvmem_ida, nvmem->id); ++ kfree(nvmem); ++ return ERR_PTR(rval); ++ } ++ + nvmem->read_only = device_property_present(config->dev, "read-only") || + config->read_only || !nvmem->reg_write; + +-- +2.39.0 + diff --git a/queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch b/queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch new file mode 100644 index 00000000000..5d0a4f20644 --- /dev/null +++ b/queue-5.15/nvmem-core-fix-cleanup-after-dev_set_name.patch @@ -0,0 +1,97 @@ +From d8cbf2482e5367ebe370d506d58d8f6c5c756419 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Jan 2023 10:40:10 +0000 +Subject: nvmem: core: fix cleanup after dev_set_name() + +From: Russell King (Oracle) + +[ Upstream commit 560181d3ace61825f4ca9dd3481d6c0ee6709fa8 ] + +If dev_set_name() fails, we leak nvmem->wp_gpio as the cleanup does not +put this. 
While a minimal fix for this would be to add the gpiod_put() +call, we can do better if we split device_register(), and use the +tested nvmem_release() cleanup code by initialising the device early, +and putting the device. + +This results in a slightly larger fix, but results in clear code. + +Note: this patch depends on "nvmem: core: initialise nvmem->id early" +and "nvmem: core: remove nvmem_config wp_gpio". + +Fixes: 5544e90c8126 ("nvmem: core: add error handling for dev_set_name") +Cc: stable@vger.kernel.org +Reported-by: kernel test robot +Reported-by: Dan Carpenter +Signed-off-by: Russell King (Oracle) +[Srini: Fixed subject line and error code handing with wp_gpio while applying.] +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20230127104015.23839-6-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Stable-dep-of: ab3428cfd9aa ("nvmem: core: fix registration vs use race") +Signed-off-by: Sasha Levin +--- + drivers/nvmem/core.c | 22 ++++++++++------------ + 1 file changed, 10 insertions(+), 12 deletions(-) + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 51bec9f8a3bf9..f06b65f0d410b 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -768,14 +768,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + + nvmem->id = rval; + ++ nvmem->dev.type = &nvmem_provider_type; ++ nvmem->dev.bus = &nvmem_bus_type; ++ nvmem->dev.parent = config->dev; ++ ++ device_initialize(&nvmem->dev); ++ + if (!config->ignore_wp) + nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", + GPIOD_OUT_HIGH); + if (IS_ERR(nvmem->wp_gpio)) { +- ida_free(&nvmem_ida, nvmem->id); + rval = PTR_ERR(nvmem->wp_gpio); +- kfree(nvmem); +- return ERR_PTR(rval); ++ goto err_put_device; + } + + kref_init(&nvmem->refcnt); +@@ -787,9 +791,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + nvmem->stride = config->stride ?: 1; + nvmem->word_size = config->word_size ?: 1; + nvmem->size = config->size; +- nvmem->dev.type = &nvmem_provider_type; +- nvmem->dev.bus = &nvmem_bus_type; +- nvmem->dev.parent = config->dev; + nvmem->root_only = config->root_only; + nvmem->priv = config->priv; + nvmem->type = config->type; +@@ -816,11 +817,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + break; + } + +- if (rval) { +- ida_free(&nvmem_ida, nvmem->id); +- kfree(nvmem); +- return ERR_PTR(rval); +- } ++ if (rval) ++ goto err_put_device; + + nvmem->read_only = device_property_present(config->dev, "read-only") || + config->read_only || !nvmem->reg_write; +@@ -831,7 +829,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); + +- rval = device_register(&nvmem->dev); ++ rval = device_add(&nvmem->dev); + if (rval) + goto err_put_device; + +-- +2.39.0 + diff --git a/queue-5.15/nvmem-core-fix-registration-vs-use-race.patch b/queue-5.15/nvmem-core-fix-registration-vs-use-race.patch new file mode 100644 index 00000000000..c68e2a6550f --- /dev/null +++ b/queue-5.15/nvmem-core-fix-registration-vs-use-race.patch @@ -0,0 +1,87 @@ +From 1e93229ac7304eeee08f35863d40880bc7452329 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Jan 2023 10:40:11 +0000 +Subject: nvmem: core: fix registration vs use race + +From: Russell King (Oracle) + +[ Upstream commit ab3428cfd9aa2f3463ee4b2909b5bb2193bd0c4a ] + +The i.MX6 CPU frequency driver sometimes fails to register at boot time +due to nvmem_cell_read_u32() sporadically 
returning -ENOENT. + +This happens because there is a window where __nvmem_device_get() in +of_nvmem_cell_get() is able to return the nvmem device, but as cells +have been setup, nvmem_find_cell_entry_by_node() returns NULL. + +The occurs because the nvmem core registration code violates one of the +fundamental principles of kernel programming: do not publish data +structures before their setup is complete. + +Fix this by making nvmem core code conform with this principle. + +Fixes: eace75cfdcf7 ("nvmem: Add a simple NVMEM framework for nvmem providers") +Cc: stable@vger.kernel.org +Signed-off-by: Russell King (Oracle) +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20230127104015.23839-7-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/nvmem/core.c | 18 ++++++++---------- + 1 file changed, 8 insertions(+), 10 deletions(-) + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index f06b65f0d410b..6a74e38746057 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -827,22 +827,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + nvmem->dev.groups = nvmem_dev_groups; + #endif + +- dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); +- +- rval = device_add(&nvmem->dev); +- if (rval) +- goto err_put_device; +- + if (nvmem->nkeepout) { + rval = nvmem_validate_keepouts(nvmem); + if (rval) +- goto err_device_del; ++ goto err_put_device; + } + + if (config->compat) { + rval = nvmem_sysfs_setup_compat(nvmem, config); + if (rval) +- goto err_device_del; ++ goto err_put_device; + } + + if (config->cells) { +@@ -859,6 +853,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + if (rval) + goto err_remove_cells; + ++ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); ++ ++ rval = device_add(&nvmem->dev); ++ if (rval) ++ goto err_remove_cells; ++ + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); + + return nvmem; +@@ -867,8 +867,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + nvmem_device_remove_all_cells(nvmem); + if (config->compat) + nvmem_sysfs_remove_compat(nvmem, config); +-err_device_del: +- device_del(&nvmem->dev); + err_put_device: + put_device(&nvmem->dev); + +-- +2.39.0 + diff --git a/queue-5.15/nvmem-core-fix-return-value.patch b/queue-5.15/nvmem-core-fix-return-value.patch new file mode 100644 index 00000000000..908e8207906 --- /dev/null +++ b/queue-5.15/nvmem-core-fix-return-value.patch @@ -0,0 +1,42 @@ +From befc8232414d9f05bc0f42658a9260a2fc3f6624 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 27 Jan 2023 10:40:14 +0000 +Subject: nvmem: core: fix return value + +From: Russell King (Oracle) + +[ Upstream commit 0c4862b1c1465e473bc961a02765490578bf5c20 ] + +Dan Carpenter points out that the return code was not set in commit +60c8b4aebd8e ("nvmem: core: fix cleanup after dev_set_name()"), but +this is not the only issue - we also need to zero wp_gpio to prevent +gpiod_put() being called on an error value. 
+ +Fixes: 560181d3ace6 ("nvmem: core: fix cleanup after dev_set_name()") +Cc: stable@vger.kernel.org +Reported-by: kernel test robot +Reported-by: Dan Carpenter +Signed-off-by: Russell King (Oracle) +Signed-off-by: Srinivas Kandagatla +Link: https://lore.kernel.org/r/20230127104015.23839-10-srinivas.kandagatla@linaro.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/nvmem/core.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index 6a74e38746057..47c1487dcf8cc 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -779,6 +779,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) + GPIOD_OUT_HIGH); + if (IS_ERR(nvmem->wp_gpio)) { + rval = PTR_ERR(nvmem->wp_gpio); ++ nvmem->wp_gpio = NULL; + goto err_put_device; + } + +-- +2.39.0 + diff --git a/queue-5.15/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch b/queue-5.15/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch new file mode 100644 index 00000000000..d9b093eed54 --- /dev/null +++ b/queue-5.15/platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch @@ -0,0 +1,43 @@ +From b1e97d9c84230c064df59f6d586096f830118ea1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 2 Feb 2023 11:34:13 +0100 +Subject: platform/x86: touchscreen_dmi: Add Chuwi Vi8 (CWI501) DMI match + +From: Hans de Goede + +[ Upstream commit eecf2acd4a580e9364e5087daf0effca60a240b7 ] + +Add a DMI match for the CWI501 version of the Chuwi Vi8 tablet, +pointing to the same chuwi_vi8_data as the existing CWI506 version +DMI match. + +Signed-off-by: Hans de Goede +Link: https://lore.kernel.org/r/20230202103413.331459-1-hdegoede@redhat.com +Signed-off-by: Sasha Levin +--- + drivers/platform/x86/touchscreen_dmi.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c +index 93671037fd598..69ba2c5182610 100644 +--- a/drivers/platform/x86/touchscreen_dmi.c ++++ b/drivers/platform/x86/touchscreen_dmi.c +@@ -1073,6 +1073,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { + DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"), + }, + }, ++ { ++ /* Chuwi Vi8 (CWI501) */ ++ .driver_data = (void *)&chuwi_vi8_data, ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "i86"), ++ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"), ++ }, ++ }, + { + /* Chuwi Vi8 (CWI506) */ + .driver_data = (void *)&chuwi_vi8_data, +-- +2.39.0 + diff --git a/queue-5.15/riscv-kprobe-fixup-misaligned-load-text.patch b/queue-5.15/riscv-kprobe-fixup-misaligned-load-text.patch new file mode 100644 index 00000000000..38749f940d7 --- /dev/null +++ b/queue-5.15/riscv-kprobe-fixup-misaligned-load-text.patch @@ -0,0 +1,58 @@ +From 419b0c116d99bb365588d161d8c47bd12e3573ff Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 4 Feb 2023 01:35:31 -0500 +Subject: riscv: kprobe: Fixup misaligned load text +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Guo Ren + +[ Upstream commit eb7423273cc9922ee2d05bf660c034d7d515bb91 ] + +The current kprobe would cause a misaligned load for the probe point. +This patch fixup it with two half-word loads instead. 
+ +Fixes: c22b0bcb1dd0 ("riscv: Add kprobes supported") +Signed-off-by: Guo Ren +Signed-off-by: Guo Ren +Link: https://lore.kernel.org/linux-riscv/878rhig9zj.fsf@all.your.base.are.belong.to.us/ +Reported-by: Bjorn Topel +Reviewed-by: Björn Töpel +Link: https://lore.kernel.org/r/20230204063531.740220-1-guoren@kernel.org +Cc: stable@vger.kernel.org +Signed-off-by: Palmer Dabbelt +Signed-off-by: Sasha Levin +--- + arch/riscv/kernel/probes/kprobes.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c +index b53aa0209e079..7548b1d62509c 100644 +--- a/arch/riscv/kernel/probes/kprobes.c ++++ b/arch/riscv/kernel/probes/kprobes.c +@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p) + + int __kprobes arch_prepare_kprobe(struct kprobe *p) + { +- unsigned long probe_addr = (unsigned long)p->addr; ++ u16 *insn = (u16 *)p->addr; + +- if (probe_addr & 0x1) ++ if ((unsigned long)insn & 0x1) + return -EILSEQ; + + if (!arch_check_kprobe(p)) + return -EILSEQ; + + /* copy instruction */ +- p->opcode = *p->addr; ++ p->opcode = (kprobe_opcode_t)(*insn++); ++ if (GET_INSN_LENGTH(p->opcode) == 4) ++ p->opcode |= (kprobe_opcode_t)(*insn) << 16; + + /* decode instruction */ + switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { +-- +2.39.0 + diff --git a/queue-5.15/s390-decompressor-specify-__decompress-buf-len-to-av.patch b/queue-5.15/s390-decompressor-specify-__decompress-buf-len-to-av.patch new file mode 100644 index 00000000000..f151d0bd94e --- /dev/null +++ b/queue-5.15/s390-decompressor-specify-__decompress-buf-len-to-av.patch @@ -0,0 +1,47 @@ +From a25bcb25f9ec18e62ea124749616a820d0d74fbd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 29 Jan 2023 23:47:23 +0100 +Subject: s390/decompressor: specify __decompress() buf len to avoid overflow + +From: Vasily Gorbik + +[ Upstream commit 7ab41c2c08a32132ba8c14624910e2fe8ce4ba4b ] + +Historically calls to __decompress() didn't specify "out_len" parameter +on many architectures including s390, expecting that no writes beyond +uncompressed kernel image are performed. This has changed since commit +2aa14b1ab2c4 ("zstd: import usptream v1.5.2") which includes zstd library +commit 6a7ede3dfccb ("Reduce size of dctx by reutilizing dst buffer +(#2751)"). Now zstd decompression code might store literal buffer in +the unwritten portion of the destination buffer. Since "out_len" is +not set, it is considered to be unlimited and hence free to use for +optimization needs. On s390 this might corrupt initrd or ipl report +which are often placed right after the decompressor buffer. Luckily the +size of uncompressed kernel image is already known to the decompressor, +so to avoid the problem simply specify it in the "out_len" parameter. 
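A toy illustration of the contract involved, in ordinary C rather than the zstd code: a decompressor that is told the real output size can reject anything that would spill past the uncompressed image, while a caller that passes no limit gives it licence to treat the memory that follows (initrd, ipl report) as scratch.

#include <stdio.h>
#include <string.h>

static int bounded_expand(const unsigned char *in, size_t in_len,
			  unsigned char *out, size_t out_len)
{
	size_t produced = 0;

	/* Toy input format: pairs of (count, byte). */
	for (size_t i = 0; i + 1 < in_len; i += 2) {
		size_t count = in[i];

		if (produced + count > out_len)
			return -1;	/* would overflow the caller's buffer */
		memset(out + produced, in[i + 1], count);
		produced += count;
	}
	return (int)produced;
}

int main(void)
{
	const unsigned char compressed[] = { 4, 'A', 3, 'B' };
	unsigned char image[8];
	char initrd[4] = "RD!";	/* pretend this sits right after the image */

	int n = bounded_expand(compressed, sizeof(compressed), image, sizeof(image));

	printf("expanded %d bytes, initrd intact: %s\n", n, initrd);
	return n < 0;
}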
+ +Link: https://github.com/facebook/zstd/commit/6a7ede3dfccb +Signed-off-by: Vasily Gorbik +Tested-by: Alexander Egorenkov +Link: https://lore.kernel.org/r/patch-1.thread-41c676.git-41c676c2d153.your-ad-here.call-01675030179-ext-9637@work.hours +Signed-off-by: Heiko Carstens +Signed-off-by: Sasha Levin +--- + arch/s390/boot/compressed/decompressor.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c +index e27c2140d6206..623f6775d01d7 100644 +--- a/arch/s390/boot/compressed/decompressor.c ++++ b/arch/s390/boot/compressed/decompressor.c +@@ -80,6 +80,6 @@ void *decompress_kernel(void) + void *output = (void *)decompress_offset; + + __decompress(_compressed_start, _compressed_end - _compressed_start, +- NULL, NULL, output, 0, NULL, error); ++ NULL, NULL, output, vmlinux.image_size, NULL, error); + return output; + } +-- +2.39.0 + diff --git a/queue-5.15/selftests-bpf-verify-copy_register_state-preserves-p.patch b/queue-5.15/selftests-bpf-verify-copy_register_state-preserves-p.patch new file mode 100644 index 00000000000..b9e19e66270 --- /dev/null +++ b/queue-5.15/selftests-bpf-verify-copy_register_state-preserves-p.patch @@ -0,0 +1,68 @@ +From f282528682e2800b72f422ba10406e913ff5679e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 6 Jan 2023 16:22:14 +0200 +Subject: selftests/bpf: Verify copy_register_state() preserves parent/live + fields + +From: Eduard Zingerman + +[ Upstream commit b9fa9bc839291020b362ab5392e5f18ba79657ac ] + +A testcase to check that verifier.c:copy_register_state() preserves +register parentage chain and livness information. + +Signed-off-by: Eduard Zingerman +Link: https://lore.kernel.org/r/20230106142214.1040390-3-eddyz87@gmail.com +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + .../selftests/bpf/verifier/search_pruning.c | 36 +++++++++++++++++++ + 1 file changed, 36 insertions(+) + +diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c +index 7e50cb80873a5..7e36078f8f482 100644 +--- a/tools/testing/selftests/bpf/verifier/search_pruning.c ++++ b/tools/testing/selftests/bpf/verifier/search_pruning.c +@@ -154,3 +154,39 @@ + .result_unpriv = ACCEPT, + .insn_processed = 15, + }, ++/* The test performs a conditional 64-bit write to a stack location ++ * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], ++ * then data is read from fp[-8]. This sequence is unsafe. ++ * ++ * The test would be mistakenly marked as safe w/o dst register parent ++ * preservation in verifier.c:copy_register_state() function. ++ * ++ * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the ++ * checkpoint state after conditional 64-bit assignment. 
++ */ ++{ ++ "write tracking and register parent chain bug", ++ .insns = { ++ /* r6 = ktime_get_ns() */ ++ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), ++ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), ++ /* r0 = ktime_get_ns() */ ++ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), ++ /* if r0 > r6 goto +1 */ ++ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), ++ /* *(u64 *)(r10 - 8) = 0xdeadbeef */ ++ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), ++ /* r1 = 42 */ ++ BPF_MOV64_IMM(BPF_REG_1, 42), ++ /* *(u8 *)(r10 - 8) = r1 */ ++ BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), ++ /* r2 = *(u64 *)(r10 - 8) */ ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), ++ /* exit(0) */ ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .flags = BPF_F_TEST_STATE_FREQ, ++ .errstr = "invalid read from stack off -8+1 size 8", ++ .result = REJECT, ++}, +-- +2.39.0 + diff --git a/queue-5.15/series b/queue-5.15/series new file mode 100644 index 00000000000..94450643aae --- /dev/null +++ b/queue-5.15/series @@ -0,0 +1,33 @@ +mptcp-fix-locking-for-in-kernel-listener-creation.patch +kprobes-treewide-cleanup-the-error-messages-for-kpro.patch +riscv-kprobe-fixup-misaligned-load-text.patch +acpi-x86-add-support-for-lps0-callback-handler.patch +asoc-intel-sof_rt5682-always-set-dpcm_capture-for-am.patch +asoc-intel-sof_cs42l42-always-set-dpcm_capture-for-a.patch +selftests-bpf-verify-copy_register_state-preserves-p.patch +alsa-hda-do-not-unset-preset-when-cleaning-up-codec.patch +bpf-sockmap-don-t-let-sock_map_-close-destroy-unhash.patch +asoc-cs42l56-fix-dt-probe.patch +tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch +net-rose-fix-to-not-accept-on-connected-socket.patch +net-stmmac-do-not-stop-rx_clk-in-rx-lpi-state-for-qc.patch +drm-nouveau-devinit-tu102-wait-for-gfw_boot_progress.patch +net-sched-sch-bounds-check-priority.patch +s390-decompressor-specify-__decompress-buf-len-to-av.patch +nvme-fc-fix-a-missing-queue-put-in-nvmet_fc_ls_creat.patch +drm-amd-display-properly-handle-additional-cases-whe.patch +platform-x86-touchscreen_dmi-add-chuwi-vi8-cwi501-dm.patch +nvmem-core-add-error-handling-for-dev_set_name.patch +nvmem-core-fix-cleanup-after-dev_set_name.patch +nvmem-core-fix-registration-vs-use-race.patch +nvmem-core-fix-return-value.patch +xfs-zero-inode-fork-buffer-at-allocation.patch +xfs-fix-potential-log-item-leak.patch +xfs-detect-self-referencing-btree-sibling-pointers.patch +xfs-set-xfs_feat_nlink-correctly.patch +xfs-validate-v5-feature-fields.patch +xfs-avoid-unnecessary-runtime-sibling-pointer-endian.patch +xfs-don-t-assert-fail-on-perag-references-on-teardow.patch +xfs-assert-in-xfs_btree_del_cursor-should-take-into-.patch +xfs-purge-dquots-after-inode-walk-fails-during-quota.patch +xfs-don-t-leak-btree-cursor-when-insrec-fails-after-.patch diff --git a/queue-5.15/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch b/queue-5.15/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch new file mode 100644 index 00000000000..b51ae479570 --- /dev/null +++ b/queue-5.15/tools-virtio-fix-the-vringh-test-for-virtio-ring-cha.patch @@ -0,0 +1,150 @@ +From aada6d786a27503d0e2ed4701bc20dd949666370 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 10 Jan 2023 12:43:10 +0900 +Subject: tools/virtio: fix the vringh test for virtio ring changes + +From: Shunsuke Mie + +[ Upstream commit 3f7b75abf41cc4143aa295f62acbb060a012868d ] + +Fix the build caused by missing kmsan_handle_dma() and is_power_of_2() that +are used in drivers/virtio/virtio_ring.c. 
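The shim-header approach can be sketched in isolation (the definitions below are illustrative stand-ins, not the kernel's own, and the kmsan_handle_dma() signature is simplified): tools/virtio satisfies kernel-only helpers with minimal userspace equivalents so the unmodified virtio_ring.c source keeps compiling in the test harness.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the <linux/log2.h> helper the driver calls. */
static inline int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Stand-in for the KMSAN hook: instrumentation is a no-op in userspace. */
static inline void kmsan_handle_dma(void *page, size_t offset, size_t size,
				    int direction)
{
	(void)page; (void)offset; (void)size; (void)direction;
}

int main(void)
{
	kmsan_handle_dma(NULL, 0, 64, 0);
	printf("8 is power of 2: %d\n", is_power_of_2(8));
	printf("6 is power of 2: %d\n", is_power_of_2(6));
	return 0;
}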
+ +Signed-off-by: Shunsuke Mie +Message-Id: <20230110034310.779744-1-mie@igel.co.jp> +Signed-off-by: Michael S. Tsirkin +Signed-off-by: Sasha Levin +--- + tools/virtio/linux/bug.h | 8 +++----- + tools/virtio/linux/build_bug.h | 7 +++++++ + tools/virtio/linux/cpumask.h | 7 +++++++ + tools/virtio/linux/gfp.h | 7 +++++++ + tools/virtio/linux/kernel.h | 1 + + tools/virtio/linux/kmsan.h | 12 ++++++++++++ + tools/virtio/linux/scatterlist.h | 1 + + tools/virtio/linux/topology.h | 7 +++++++ + 8 files changed, 45 insertions(+), 5 deletions(-) + create mode 100644 tools/virtio/linux/build_bug.h + create mode 100644 tools/virtio/linux/cpumask.h + create mode 100644 tools/virtio/linux/gfp.h + create mode 100644 tools/virtio/linux/kmsan.h + create mode 100644 tools/virtio/linux/topology.h + +diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h +index 813baf13f62a2..51a919083d9b8 100644 +--- a/tools/virtio/linux/bug.h ++++ b/tools/virtio/linux/bug.h +@@ -1,13 +1,11 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-#ifndef BUG_H +-#define BUG_H ++#ifndef _LINUX_BUG_H ++#define _LINUX_BUG_H + + #include + + #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond)) + +-#define BUILD_BUG_ON(x) +- + #define BUG() abort() + +-#endif /* BUG_H */ ++#endif /* _LINUX_BUG_H */ +diff --git a/tools/virtio/linux/build_bug.h b/tools/virtio/linux/build_bug.h +new file mode 100644 +index 0000000000000..cdbb75e28a604 +--- /dev/null ++++ b/tools/virtio/linux/build_bug.h +@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_BUILD_BUG_H ++#define _LINUX_BUILD_BUG_H ++ ++#define BUILD_BUG_ON(x) ++ ++#endif /* _LINUX_BUILD_BUG_H */ +diff --git a/tools/virtio/linux/cpumask.h b/tools/virtio/linux/cpumask.h +new file mode 100644 +index 0000000000000..307da69d6b26c +--- /dev/null ++++ b/tools/virtio/linux/cpumask.h +@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_CPUMASK_H ++#define _LINUX_CPUMASK_H ++ ++#include ++ ++#endif /* _LINUX_CPUMASK_H */ +diff --git a/tools/virtio/linux/gfp.h b/tools/virtio/linux/gfp.h +new file mode 100644 +index 0000000000000..43d146f236f14 +--- /dev/null ++++ b/tools/virtio/linux/gfp.h +@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __LINUX_GFP_H ++#define __LINUX_GFP_H ++ ++#include ++ ++#endif +diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h +index 0b493542e61a6..a4beb719d2174 100644 +--- a/tools/virtio/linux/kernel.h ++++ b/tools/virtio/linux/kernel.h +@@ -10,6 +10,7 @@ + #include + + #include ++#include + #include + #include + #include +diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h +new file mode 100644 +index 0000000000000..272b5aa285d5a +--- /dev/null ++++ b/tools/virtio/linux/kmsan.h +@@ -0,0 +1,12 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_KMSAN_H ++#define _LINUX_KMSAN_H ++ ++#include ++ ++inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size, ++ enum dma_data_direction dir) ++{ ++} ++ ++#endif /* _LINUX_KMSAN_H */ +diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h +index 369ee308b6686..74d9e1825748e 100644 +--- a/tools/virtio/linux/scatterlist.h ++++ b/tools/virtio/linux/scatterlist.h +@@ -2,6 +2,7 @@ + #ifndef SCATTERLIST_H + #define SCATTERLIST_H + #include ++#include + + struct scatterlist { + unsigned long page_link; +diff --git a/tools/virtio/linux/topology.h b/tools/virtio/linux/topology.h +new file mode 100644 +index 0000000000000..910794afb993a +--- /dev/null ++++ b/tools/virtio/linux/topology.h 
+@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _LINUX_TOPOLOGY_H ++#define _LINUX_TOPOLOGY_H ++ ++#include ++ ++#endif /* _LINUX_TOPOLOGY_H */ +-- +2.39.0 + diff --git a/queue-5.15/xfs-assert-in-xfs_btree_del_cursor-should-take-into-.patch b/queue-5.15/xfs-assert-in-xfs_btree_del_cursor-should-take-into-.patch new file mode 100644 index 00000000000..c9adf623750 --- /dev/null +++ b/queue-5.15/xfs-assert-in-xfs_btree_del_cursor-should-take-into-.patch @@ -0,0 +1,85 @@ +From d007aa3e5c90235558940db517b15efb6532e8e6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:32 -0800 +Subject: xfs: assert in xfs_btree_del_cursor should take into account error + +From: Dave Chinner + +[ Upstream commit 56486f307100e8fc66efa2ebd8a71941fa10bf6f ] + +xfs/538 on a 1kB block filesystem failed with this assert: + +XFS: Assertion failed: cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 || xfs_is_shutdown(cur->bc_mp), file: fs/xfs/libxfs/xfs_btree.c, line: 448 + +The problem was that an allocation failed unexpectedly in +xfs_bmbt_alloc_block() after roughly 150,000 minlen allocation error +injections, resulting in an EFSCORRUPTED error being returned to +xfs_bmapi_write(). The error occurred on extent-to-btree format +conversion allocating the new root block: + + RIP: 0010:xfs_bmbt_alloc_block+0x177/0x210 + Call Trace: + + xfs_btree_new_iroot+0xdf/0x520 + xfs_btree_make_block_unfull+0x10d/0x1c0 + xfs_btree_insrec+0x364/0x790 + xfs_btree_insert+0xaa/0x210 + xfs_bmap_add_extent_hole_real+0x1fe/0x9a0 + xfs_bmapi_allocate+0x34c/0x420 + xfs_bmapi_write+0x53c/0x9c0 + xfs_alloc_file_space+0xee/0x320 + xfs_file_fallocate+0x36b/0x450 + vfs_fallocate+0x148/0x340 + __x64_sys_fallocate+0x3c/0x70 + do_syscall_64+0x35/0x80 + entry_SYSCALL_64_after_hwframe+0x44/0xa + +Why the allocation failed at this point is unknown, but is likely +that we ran the transaction out of reserved space and filesystem out +of space with bmbt blocks because of all the minlen allocations +being done causing worst case fragmentation of a large allocation. + +Regardless of the cause, we've then called xfs_bmapi_finish() which +calls xfs_btree_del_cursor(cur, error) to tear down the cursor. + +So we have a failed operation, error != 0, cur->bc_ino.allocated > 0 +and the filesystem is still up. The assert fails to take into +account that allocation can fail with an error and the transaction +teardown will shut the filesystem down if necessary. i.e. the +assert needs to check "|| error != 0" as well, because at this point +shutdown is pending because the current transaction is dirty.... + +Signed-off-by: Dave Chinner +Reviewed-by: Darrick J. Wong +Reviewed-by: Christoph Hellwig +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_btree.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index b4b5bf4bfed7f..482a4ccc65682 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -445,8 +445,14 @@ xfs_btree_del_cursor( + break; + } + ++ /* ++ * If we are doing a BMBT update, the number of unaccounted blocks ++ * allocated during this cursor life time should be zero. If it's not ++ * zero, then we should be shut down or on our way to shutdown due to ++ * cancelling a dirty transaction on error. 
++ */ + ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 || +- xfs_is_shutdown(cur->bc_mp)); ++ xfs_is_shutdown(cur->bc_mp) || error != 0); + if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) + kmem_free(cur->bc_ops); + if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag) +-- +2.39.0 + diff --git a/queue-5.15/xfs-avoid-unnecessary-runtime-sibling-pointer-endian.patch b/queue-5.15/xfs-avoid-unnecessary-runtime-sibling-pointer-endian.patch new file mode 100644 index 00000000000..58304ff7248 --- /dev/null +++ b/queue-5.15/xfs-avoid-unnecessary-runtime-sibling-pointer-endian.patch @@ -0,0 +1,165 @@ +From 57912e41110f94f421b4eb33dcba15090e20cf16 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:30 -0800 +Subject: xfs: avoid unnecessary runtime sibling pointer endian conversions + +From: Dave Chinner + +[ Upstream commit 5672225e8f2a872a22b0cecedba7a6644af1fb84 ] + +Commit dc04db2aa7c9 has caused a small aim7 regression, showing a +small increase in CPU usage in __xfs_btree_check_sblock() as a +result of the extra checking. + +This is likely due to the endian conversion of the sibling poitners +being unconditional instead of relying on the compiler to endian +convert the NULL pointer at compile time and avoiding the runtime +conversion for this common case. + +Rework the checks so that endian conversion of the sibling pointers +is only done if they are not null as the original code did. + +.... and these need to be "inline" because the compiler completely +fails to inline them automatically like it should be doing. + +$ size fs/xfs/libxfs/xfs_btree.o* + text data bss dec hex filename + 51874 240 0 52114 cb92 fs/xfs/libxfs/xfs_btree.o.orig + 51562 240 0 51802 ca5a fs/xfs/libxfs/xfs_btree.o.inline + +Just when you think the tools have advanced sufficiently we don't +have to care about stuff like this anymore, along comes a reminder +that *our tools still suck*. + +Fixes: dc04db2aa7c9 ("xfs: detect self referencing btree sibling pointers") +Reported-by: kernel test robot +Signed-off-by: Dave Chinner +Reviewed-by: Darrick J. Wong +Reviewed-by: Christoph Hellwig +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_btree.c | 47 +++++++++++++++++++++++++++------------ + 1 file changed, 33 insertions(+), 14 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index 5bec048343b0c..b4b5bf4bfed7f 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -51,16 +51,31 @@ xfs_btree_magic( + return magic; + } + +-static xfs_failaddr_t ++/* ++ * These sibling pointer checks are optimised for null sibling pointers. This ++ * happens a lot, and we don't need to byte swap at runtime if the sibling ++ * pointer is NULL. ++ * ++ * These are explicitly marked at inline because the cost of calling them as ++ * functions instead of inlining them is about 36 bytes extra code per call site ++ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these ++ * two sibling check functions reduces the compiled code size by over 300 ++ * bytes. 
++ */ ++static inline xfs_failaddr_t + xfs_btree_check_lblock_siblings( + struct xfs_mount *mp, + struct xfs_btree_cur *cur, + int level, + xfs_fsblock_t fsb, +- xfs_fsblock_t sibling) ++ __be64 dsibling) + { +- if (sibling == NULLFSBLOCK) ++ xfs_fsblock_t sibling; ++ ++ if (dsibling == cpu_to_be64(NULLFSBLOCK)) + return NULL; ++ ++ sibling = be64_to_cpu(dsibling); + if (sibling == fsb) + return __this_address; + if (level >= 0) { +@@ -74,17 +89,21 @@ xfs_btree_check_lblock_siblings( + return NULL; + } + +-static xfs_failaddr_t ++static inline xfs_failaddr_t + xfs_btree_check_sblock_siblings( + struct xfs_mount *mp, + struct xfs_btree_cur *cur, + int level, + xfs_agnumber_t agno, + xfs_agblock_t agbno, +- xfs_agblock_t sibling) ++ __be32 dsibling) + { +- if (sibling == NULLAGBLOCK) ++ xfs_agblock_t sibling; ++ ++ if (dsibling == cpu_to_be32(NULLAGBLOCK)) + return NULL; ++ ++ sibling = be32_to_cpu(dsibling); + if (sibling == agbno) + return __this_address; + if (level >= 0) { +@@ -136,10 +155,10 @@ __xfs_btree_check_lblock( + fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); + + fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb, +- be64_to_cpu(block->bb_u.l.bb_leftsib)); ++ block->bb_u.l.bb_leftsib); + if (!fa) + fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb, +- be64_to_cpu(block->bb_u.l.bb_rightsib)); ++ block->bb_u.l.bb_rightsib); + return fa; + } + +@@ -204,10 +223,10 @@ __xfs_btree_check_sblock( + } + + fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno, +- be32_to_cpu(block->bb_u.s.bb_leftsib)); ++ block->bb_u.s.bb_leftsib); + if (!fa) + fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, +- agbno, be32_to_cpu(block->bb_u.s.bb_rightsib)); ++ agbno, block->bb_u.s.bb_rightsib); + return fa; + } + +@@ -4517,10 +4536,10 @@ xfs_btree_lblock_verify( + /* sibling pointer verification */ + fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); + fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb, +- be64_to_cpu(block->bb_u.l.bb_leftsib)); ++ block->bb_u.l.bb_leftsib); + if (!fa) + fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb, +- be64_to_cpu(block->bb_u.l.bb_rightsib)); ++ block->bb_u.l.bb_rightsib); + return fa; + } + +@@ -4574,10 +4593,10 @@ xfs_btree_sblock_verify( + agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp)); + agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp)); + fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno, +- be32_to_cpu(block->bb_u.s.bb_leftsib)); ++ block->bb_u.s.bb_leftsib); + if (!fa) + fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno, +- be32_to_cpu(block->bb_u.s.bb_rightsib)); ++ block->bb_u.s.bb_rightsib); + return fa; + } + +-- +2.39.0 + diff --git a/queue-5.15/xfs-detect-self-referencing-btree-sibling-pointers.patch b/queue-5.15/xfs-detect-self-referencing-btree-sibling-pointers.patch new file mode 100644 index 00000000000..9f8df2bb228 --- /dev/null +++ b/queue-5.15/xfs-detect-self-referencing-btree-sibling-pointers.patch @@ -0,0 +1,241 @@ +From 4aa324ce5fdb8f165716befacd10e5dc8eb8d2c8 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:27 -0800 +Subject: xfs: detect self referencing btree sibling pointers + +From: Dave Chinner + +[ Upstream commit dc04db2aa7c9307e740d6d0e173085301c173b1a ] + +To catch the obvious graph cycle problem and hence potential endless +looping. + +Signed-off-by: Dave Chinner +Reviewed-by: Christoph Hellwig +Reviewed-by: Darrick J. Wong +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. 
Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_btree.c | 140 ++++++++++++++++++++++++++++---------- + 1 file changed, 105 insertions(+), 35 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index 2983954817135..5bec048343b0c 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -51,6 +51,52 @@ xfs_btree_magic( + return magic; + } + ++static xfs_failaddr_t ++xfs_btree_check_lblock_siblings( ++ struct xfs_mount *mp, ++ struct xfs_btree_cur *cur, ++ int level, ++ xfs_fsblock_t fsb, ++ xfs_fsblock_t sibling) ++{ ++ if (sibling == NULLFSBLOCK) ++ return NULL; ++ if (sibling == fsb) ++ return __this_address; ++ if (level >= 0) { ++ if (!xfs_btree_check_lptr(cur, sibling, level + 1)) ++ return __this_address; ++ } else { ++ if (!xfs_verify_fsbno(mp, sibling)) ++ return __this_address; ++ } ++ ++ return NULL; ++} ++ ++static xfs_failaddr_t ++xfs_btree_check_sblock_siblings( ++ struct xfs_mount *mp, ++ struct xfs_btree_cur *cur, ++ int level, ++ xfs_agnumber_t agno, ++ xfs_agblock_t agbno, ++ xfs_agblock_t sibling) ++{ ++ if (sibling == NULLAGBLOCK) ++ return NULL; ++ if (sibling == agbno) ++ return __this_address; ++ if (level >= 0) { ++ if (!xfs_btree_check_sptr(cur, sibling, level + 1)) ++ return __this_address; ++ } else { ++ if (!xfs_verify_agbno(mp, agno, sibling)) ++ return __this_address; ++ } ++ return NULL; ++} ++ + /* + * Check a long btree block header. Return the address of the failing check, + * or NULL if everything is ok. +@@ -65,6 +111,8 @@ __xfs_btree_check_lblock( + struct xfs_mount *mp = cur->bc_mp; + xfs_btnum_t btnum = cur->bc_btnum; + int crc = xfs_has_crc(mp); ++ xfs_failaddr_t fa; ++ xfs_fsblock_t fsb = NULLFSBLOCK; + + if (crc) { + if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) +@@ -83,16 +131,16 @@ __xfs_btree_check_lblock( + if (be16_to_cpu(block->bb_numrecs) > + cur->bc_ops->get_maxrecs(cur, level)) + return __this_address; +- if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && +- !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib), +- level + 1)) +- return __this_address; +- if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && +- !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib), +- level + 1)) +- return __this_address; + +- return NULL; ++ if (bp) ++ fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); ++ ++ fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb, ++ be64_to_cpu(block->bb_u.l.bb_leftsib)); ++ if (!fa) ++ fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb, ++ be64_to_cpu(block->bb_u.l.bb_rightsib)); ++ return fa; + } + + /* Check a long btree block header. 
*/ +@@ -130,6 +178,9 @@ __xfs_btree_check_sblock( + struct xfs_mount *mp = cur->bc_mp; + xfs_btnum_t btnum = cur->bc_btnum; + int crc = xfs_has_crc(mp); ++ xfs_failaddr_t fa; ++ xfs_agblock_t agbno = NULLAGBLOCK; ++ xfs_agnumber_t agno = NULLAGNUMBER; + + if (crc) { + if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) +@@ -146,16 +197,18 @@ __xfs_btree_check_sblock( + if (be16_to_cpu(block->bb_numrecs) > + cur->bc_ops->get_maxrecs(cur, level)) + return __this_address; +- if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) && +- !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib), +- level + 1)) +- return __this_address; +- if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) && +- !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib), +- level + 1)) +- return __this_address; + +- return NULL; ++ if (bp) { ++ agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp)); ++ agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp)); ++ } ++ ++ fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno, ++ be32_to_cpu(block->bb_u.s.bb_leftsib)); ++ if (!fa) ++ fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, ++ agbno, be32_to_cpu(block->bb_u.s.bb_rightsib)); ++ return fa; + } + + /* Check a short btree block header. */ +@@ -4265,6 +4318,21 @@ xfs_btree_visit_block( + if (xfs_btree_ptr_is_null(cur, &rptr)) + return -ENOENT; + ++ /* ++ * We only visit blocks once in this walk, so we have to avoid the ++ * internal xfs_btree_lookup_get_block() optimisation where it will ++ * return the same block without checking if the right sibling points ++ * back to us and creates a cyclic reference in the btree. ++ */ ++ if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { ++ if (be64_to_cpu(rptr.l) == XFS_DADDR_TO_FSB(cur->bc_mp, ++ xfs_buf_daddr(bp))) ++ return -EFSCORRUPTED; ++ } else { ++ if (be32_to_cpu(rptr.s) == xfs_daddr_to_agbno(cur->bc_mp, ++ xfs_buf_daddr(bp))) ++ return -EFSCORRUPTED; ++ } + return xfs_btree_lookup_get_block(cur, level, &rptr, &block); + } + +@@ -4439,20 +4507,21 @@ xfs_btree_lblock_verify( + { + struct xfs_mount *mp = bp->b_mount; + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); ++ xfs_fsblock_t fsb; ++ xfs_failaddr_t fa; + + /* numrecs verification */ + if (be16_to_cpu(block->bb_numrecs) > max_recs) + return __this_address; + + /* sibling pointer verification */ +- if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && +- !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) +- return __this_address; +- if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && +- !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))) +- return __this_address; +- +- return NULL; ++ fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); ++ fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb, ++ be64_to_cpu(block->bb_u.l.bb_leftsib)); ++ if (!fa) ++ fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb, ++ be64_to_cpu(block->bb_u.l.bb_rightsib)); ++ return fa; + } + + /** +@@ -4493,7 +4562,9 @@ xfs_btree_sblock_verify( + { + struct xfs_mount *mp = bp->b_mount; + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); +- xfs_agblock_t agno; ++ xfs_agnumber_t agno; ++ xfs_agblock_t agbno; ++ xfs_failaddr_t fa; + + /* numrecs verification */ + if (be16_to_cpu(block->bb_numrecs) > max_recs) +@@ -4501,14 +4572,13 @@ xfs_btree_sblock_verify( + + /* sibling pointer verification */ + agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp)); +- if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) && +- !xfs_verify_agbno(mp, agno, 
be32_to_cpu(block->bb_u.s.bb_leftsib))) +- return __this_address; +- if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) && +- !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib))) +- return __this_address; +- +- return NULL; ++ agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp)); ++ fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno, ++ be32_to_cpu(block->bb_u.s.bb_leftsib)); ++ if (!fa) ++ fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno, ++ be32_to_cpu(block->bb_u.s.bb_rightsib)); ++ return fa; + } + + /* +-- +2.39.0 + diff --git a/queue-5.15/xfs-don-t-assert-fail-on-perag-references-on-teardow.patch b/queue-5.15/xfs-don-t-assert-fail-on-perag-references-on-teardow.patch new file mode 100644 index 00000000000..8881bdd19e4 --- /dev/null +++ b/queue-5.15/xfs-don-t-assert-fail-on-perag-references-on-teardow.patch @@ -0,0 +1,53 @@ +From 75a93a8ee528a6e30782e3e532d843ef0f56cc6e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:31 -0800 +Subject: xfs: don't assert fail on perag references on teardown + +From: Dave Chinner + +[ Upstream commit 5b55cbc2d72632e874e50d2e36bce608e55aaaea ] + +Not fatal, the assert is there to catch developer attention. I'm +seeing this occasionally during recoveryloop testing after a +shutdown, and I don't want this to stop an overnight recoveryloop +run as it is currently doing. + +Convert the ASSERT to a XFS_IS_CORRUPT() check so it will dump a +corruption report into the log and cause a test failure that way, +but it won't stop the machine dead. + +Signed-off-by: Dave Chinner +Reviewed-by: Darrick J. Wong +Reviewed-by: Christoph Hellwig +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_ag.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c +index 005abfd9fd347..aff6fb5281f63 100644 +--- a/fs/xfs/libxfs/xfs_ag.c ++++ b/fs/xfs/libxfs/xfs_ag.c +@@ -173,7 +173,6 @@ __xfs_free_perag( + struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head); + + ASSERT(!delayed_work_pending(&pag->pag_blockgc_work)); +- ASSERT(atomic_read(&pag->pag_ref) == 0); + kmem_free(pag); + } + +@@ -192,7 +191,7 @@ xfs_free_perag( + pag = radix_tree_delete(&mp->m_perag_tree, agno); + spin_unlock(&mp->m_perag_lock); + ASSERT(pag); +- ASSERT(atomic_read(&pag->pag_ref) == 0); ++ XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0); + + cancel_delayed_work_sync(&pag->pag_blockgc_work); + xfs_iunlink_destroy(pag); +-- +2.39.0 + diff --git a/queue-5.15/xfs-don-t-leak-btree-cursor-when-insrec-fails-after-.patch b/queue-5.15/xfs-don-t-leak-btree-cursor-when-insrec-fails-after-.patch new file mode 100644 index 00000000000..21aa1acf00a --- /dev/null +++ b/queue-5.15/xfs-don-t-leak-btree-cursor-when-insrec-fails-after-.patch @@ -0,0 +1,96 @@ +From 050be05accc05cfe9285957c2a0e1f1aab0b3b85 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:34 -0800 +Subject: xfs: don't leak btree cursor when insrec fails after a split + +From: Darrick J. Wong + +[ Upstream commit a54f78def73d847cb060b18c4e4a3d1d26c9ca6d ] + +The recent patch to improve btree cycle checking caused a regression +when I rebased the in-memory btree branch atop the 5.19 for-next branch, +because in-memory short-pointer btrees do not have AG numbers. 
This +produced the following complaint from kmemleak: + +unreferenced object 0xffff88803d47dde8 (size 264): + comm "xfs_io", pid 4889, jiffies 4294906764 (age 24.072s) + hex dump (first 32 bytes): + 90 4d 0b 0f 80 88 ff ff 00 a0 bd 05 80 88 ff ff .M.............. + e0 44 3a a0 ff ff ff ff 00 df 08 06 80 88 ff ff .D:............. + backtrace: + [] xfbtree_dup_cursor+0x49/0xc0 [xfs] + [] xfs_btree_dup_cursor+0x3b/0x200 [xfs] + [] __xfs_btree_split+0x6ad/0x820 [xfs] + [] xfs_btree_split+0x60/0x110 [xfs] + [] xfs_btree_make_block_unfull+0x19a/0x1f0 [xfs] + [] xfs_btree_insrec+0x3aa/0x810 [xfs] + [] xfs_btree_insert+0xb3/0x240 [xfs] + [] xfs_rmap_insert+0x99/0x200 [xfs] + [] xfs_rmap_map_shared+0x192/0x5f0 [xfs] + [] xfs_rmap_map_raw+0x6b/0x90 [xfs] + [] xrep_rmap_stash+0xd5/0x1d0 [xfs] + [] xrep_rmap_visit_bmbt+0xa0/0xf0 [xfs] + [] xrep_rmap_scan_iext+0x56/0xa0 [xfs] + [] xrep_rmap_scan_ifork+0xd8/0x160 [xfs] + [] xrep_rmap_scan_inode+0x35/0x80 [xfs] + [] xrep_rmap_find_rmaps+0x10e/0x270 [xfs] + +I noticed that xfs_btree_insrec has a bunch of debug code that return +out of the function immediately, without freeing the "new" btree cursor +that can be returned when _make_block_unfull calls xfs_btree_split. Fix +the error return in this function to free the btree cursor. + +Signed-off-by: Darrick J. Wong +Reviewed-by: Christoph Hellwig +Reviewed-by: Dave Chinner +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_btree.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index 482a4ccc65682..dffe4ca584935 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -3266,7 +3266,7 @@ xfs_btree_insrec( + struct xfs_btree_block *block; /* btree block */ + struct xfs_buf *bp; /* buffer for block */ + union xfs_btree_ptr nptr; /* new block ptr */ +- struct xfs_btree_cur *ncur; /* new btree cursor */ ++ struct xfs_btree_cur *ncur = NULL; /* new btree cursor */ + union xfs_btree_key nkey; /* new block key */ + union xfs_btree_key *lkey; + int optr; /* old key/record index */ +@@ -3346,7 +3346,7 @@ xfs_btree_insrec( + #ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) +- return error; ++ goto error0; + #endif + + /* +@@ -3366,7 +3366,7 @@ xfs_btree_insrec( + for (i = numrecs - ptr; i >= 0; i--) { + error = xfs_btree_debug_check_ptr(cur, pp, i, level); + if (error) +- return error; ++ goto error0; + } + + xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); +@@ -3451,6 +3451,8 @@ xfs_btree_insrec( + return 0; + + error0: ++ if (ncur) ++ xfs_btree_del_cursor(ncur, error); + return error; + } + +-- +2.39.0 + diff --git a/queue-5.15/xfs-fix-potential-log-item-leak.patch b/queue-5.15/xfs-fix-potential-log-item-leak.patch new file mode 100644 index 00000000000..1b5bf0ebb07 --- /dev/null +++ b/queue-5.15/xfs-fix-potential-log-item-leak.patch @@ -0,0 +1,119 @@ +From 731405f0d89a890171a318420a5ac2cb22aee365 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:26 -0800 +Subject: xfs: fix potential log item leak + +From: Dave Chinner + +[ Upstream commit c230a4a85bcdbfc1a7415deec6caf04e8fca1301 ] + +Ever since we added shadown format buffers to the log items, log +items need to handle the item being released with shadow buffers +attached. Due to the fact this requirement was added at the same +time we added new rmap/reflink intents, we missed the cleanup of +those items. 
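The shape of the leak can be shown with a generic sketch (plain C with made-up struct log_item/item_release() names, not the XFS types): the item owns an optional shadow allocation, and its final release must free that allocation as well as the item itself, otherwise every cancelled item leaks it.

#include <stdlib.h>
#include <string.h>

struct log_item {
	void	*lv_shadow;	/* optional formatting buffer, may be NULL */
	char	payload[64];
};

static struct log_item *item_alloc(size_t shadow_bytes)
{
	struct log_item *lip = calloc(1, sizeof(*lip));

	if (lip && shadow_bytes)
		lip->lv_shadow = malloc(shadow_bytes);
	return lip;
}

static void item_release(struct log_item *lip)
{
	if (!lip)
		return;
	free(lip->lv_shadow);	/* the easily missed step: free the shadow too */
	free(lip);
}

int main(void)
{
	struct log_item *lip = item_alloc(128);

	if (lip)
		memset(lip->payload, 0, sizeof(lip->payload));
	item_release(lip);
	return 0;
}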
+ +In theory, this means shadow buffers can be leaked in a very small +window when a shutdown is initiated. Testing with KASAN shows this +leak does not happen in practice - we haven't identified a single +leak in several years of shutdown testing since ~v4.8 kernels. + +However, the intent whiteout cleanup mechanism results in every +cancelled intent in exactly the same state as this tiny race window +creates and so if intents down clean up shadow buffers on final +release we will leak the shadow buffer for just about every intent +we create. + +Hence we start with this patch to close this condition off and +ensure that when whiteouts start to be used we don't leak lots of +memory. + +Signed-off-by: Dave Chinner +Reviewed-by: Darrick J. Wong +Reviewed-by: Allison Henderson +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/xfs_bmap_item.c | 2 ++ + fs/xfs/xfs_icreate_item.c | 1 + + fs/xfs/xfs_refcount_item.c | 2 ++ + fs/xfs/xfs_rmap_item.c | 2 ++ + 4 files changed, 7 insertions(+) + +diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c +index 03159970133ff..51ffdec5e4faa 100644 +--- a/fs/xfs/xfs_bmap_item.c ++++ b/fs/xfs/xfs_bmap_item.c +@@ -39,6 +39,7 @@ STATIC void + xfs_bui_item_free( + struct xfs_bui_log_item *buip) + { ++ kmem_free(buip->bui_item.li_lv_shadow); + kmem_cache_free(xfs_bui_zone, buip); + } + +@@ -198,6 +199,7 @@ xfs_bud_item_release( + struct xfs_bud_log_item *budp = BUD_ITEM(lip); + + xfs_bui_release(budp->bud_buip); ++ kmem_free(budp->bud_item.li_lv_shadow); + kmem_cache_free(xfs_bud_zone, budp); + } + +diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c +index 017904a34c023..c265ae20946d5 100644 +--- a/fs/xfs/xfs_icreate_item.c ++++ b/fs/xfs/xfs_icreate_item.c +@@ -63,6 +63,7 @@ STATIC void + xfs_icreate_item_release( + struct xfs_log_item *lip) + { ++ kmem_free(ICR_ITEM(lip)->ic_item.li_lv_shadow); + kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip)); + } + +diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c +index 46904b793bd48..8ef842d17916a 100644 +--- a/fs/xfs/xfs_refcount_item.c ++++ b/fs/xfs/xfs_refcount_item.c +@@ -35,6 +35,7 @@ STATIC void + xfs_cui_item_free( + struct xfs_cui_log_item *cuip) + { ++ kmem_free(cuip->cui_item.li_lv_shadow); + if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS) + kmem_free(cuip); + else +@@ -204,6 +205,7 @@ xfs_cud_item_release( + struct xfs_cud_log_item *cudp = CUD_ITEM(lip); + + xfs_cui_release(cudp->cud_cuip); ++ kmem_free(cudp->cud_item.li_lv_shadow); + kmem_cache_free(xfs_cud_zone, cudp); + } + +diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c +index 5f06959804678..15e7b01740a77 100644 +--- a/fs/xfs/xfs_rmap_item.c ++++ b/fs/xfs/xfs_rmap_item.c +@@ -35,6 +35,7 @@ STATIC void + xfs_rui_item_free( + struct xfs_rui_log_item *ruip) + { ++ kmem_free(ruip->rui_item.li_lv_shadow); + if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS) + kmem_free(ruip); + else +@@ -227,6 +228,7 @@ xfs_rud_item_release( + struct xfs_rud_log_item *rudp = RUD_ITEM(lip); + + xfs_rui_release(rudp->rud_ruip); ++ kmem_free(rudp->rud_item.li_lv_shadow); + kmem_cache_free(xfs_rud_zone, rudp); + } + +-- +2.39.0 + diff --git a/queue-5.15/xfs-purge-dquots-after-inode-walk-fails-during-quota.patch b/queue-5.15/xfs-purge-dquots-after-inode-walk-fails-during-quota.patch new file mode 100644 index 00000000000..fa2b2ceb036 --- /dev/null +++ b/queue-5.15/xfs-purge-dquots-after-inode-walk-fails-during-quota.patch @@ -0,0 
+1,124 @@ +From 62a7ea659d8b91586680396e1c753d9caff1d3f6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:33 -0800 +Subject: xfs: purge dquots after inode walk fails during quotacheck + +From: Darrick J. Wong + +[ Upstream commit 86d40f1e49e9a909d25c35ba01bea80dbcd758cb ] + +xfs/434 and xfs/436 have been reporting occasional memory leaks of +xfs_dquot objects. These tests themselves were the messenger, not the +culprit, since they unload the xfs module, which trips the slub +debugging code while tearing down all the xfs slab caches: + +============================================================================= +BUG xfs_dquot (Tainted: G W ): Objects remaining in xfs_dquot on __kmem_cache_shutdown() +----------------------------------------------------------------------------- + +Slab 0xffffea000606de00 objects=30 used=5 fp=0xffff888181b78a78 flags=0x17ff80000010200(slab|head|node=0|zone=2|lastcpupid=0xfff) +CPU: 0 PID: 3953166 Comm: modprobe Tainted: G W 5.18.0-rc6-djwx #rc6 d5824be9e46a2393677bda868f9b154d917ca6a7 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20171121_152543-x86-ol7-builder-01.us.oracle.com-4.el7.1 04/01/2014 + +Since we don't generally rmmod the xfs module between fstests, this +means that xfs/434 is really just the canary in the coal mine -- +something leaked a dquot, but we don't know who. After days of pounding +on fstests with kmemleak enabled, I finally got it to spit this out: + +unreferenced object 0xffff8880465654c0 (size 536): + comm "u10:4", pid 88, jiffies 4294935810 (age 29.512s) + hex dump (first 32 bytes): + 60 4a 56 46 80 88 ff ff 58 ea e4 5c 80 88 ff ff `JVF....X..\.... + 00 e0 52 49 80 88 ff ff 01 00 01 00 00 00 00 00 ..RI............ + backtrace: + [] xfs_dquot_alloc+0x2c/0x530 [xfs] + [] xfs_qm_dqread+0x6f/0x330 [xfs] + [] xfs_qm_dqget+0x132/0x4e0 [xfs] + [] xfs_qm_quotacheck_dqadjust+0xa0/0x3e0 [xfs] + [] xfs_qm_dqusage_adjust+0x35d/0x4f0 [xfs] + [] xfs_iwalk_ag_recs+0x348/0x5d0 [xfs] + [] xfs_iwalk_run_callbacks+0x273/0x540 [xfs] + [] xfs_iwalk_ag+0x5ed/0x890 [xfs] + [] xfs_iwalk_ag_work+0xff/0x170 [xfs] + [] xfs_pwork_work+0x79/0x130 [xfs] + [] process_one_work+0x672/0x1040 + [] worker_thread+0x59b/0xec0 + [] kthread+0x29e/0x340 + [] ret_from_fork+0x1f/0x30 + +Now we know that quotacheck is at fault, but even this report was +canaryish -- it was triggered by xfs/494, which doesn't actually mount +any filesystems. (kmemleak can be a little slow to notice leaks, even +with fstests repeatedly whacking it to look for them.) Looking at the +*previous* fstest, however, showed that the test run before xfs/494 was +xfs/117. The tipoff to the problem is in this excerpt from dmesg: + +XFS (sda4): Quotacheck needed: Please wait. +XFS (sda4): Metadata corruption detected at xfs_dinode_verify.part.0+0xdb/0x7b0 [xfs], inode 0x119 dinode +XFS (sda4): Unmount and run xfs_repair +XFS (sda4): First 128 bytes of corrupted metadata buffer: +00000000: 49 4e 81 a4 03 02 00 00 00 00 00 00 00 00 00 00 IN.............. +00000010: 00 00 00 01 00 00 00 00 00 90 57 54 54 1a 4c 68 ..........WTT.Lh +00000020: 81 f9 7d e1 6d ee 16 00 34 bd 7d e1 6d ee 16 00 ..}.m...4.}.m... +00000030: 34 bd 7d e1 6d ee 16 00 00 00 00 00 00 00 00 00 4.}.m........... +00000040: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +00000050: 00 00 00 02 00 00 00 00 00 00 00 00 96 80 f3 ab ................ +00000060: ff ff ff ff da 57 7b 11 00 00 00 00 00 00 00 03 .....W{......... 
+00000070: 00 00 00 01 00 00 00 10 00 00 00 00 00 00 00 08 ................ +XFS (sda4): Quotacheck: Unsuccessful (Error -117): Disabling quotas. + +The dinode verifier decided that the inode was corrupt, which causes +iget to return with EFSCORRUPTED. Since this happened during +quotacheck, it is obvious that the kernel aborted the inode walk on +account of the corruption error and disabled quotas. Unfortunately, we +neglect to purge the dquot cache before doing that, which is how the +dquots leaked. + +The problems started 10 years ago in commit b84a3a, when the dquot lists +were converted to a radix tree, but the error handling behavior was not +correctly preserved -- in that commit, if the bulkstat failed and +usrquota was enabled, the bulkstat failure code would be overwritten by +the result of flushing all the dquots to disk. As long as that +succeeds, we'd continue the quota mount as if everything were ok, but +instead we're now operating with a corrupt inode and incorrect quota +usage counts. I didn't notice this bug in 2019 when I wrote commit +ebd126a, which changed quotacheck to skip the dqflush when the scan +doesn't complete due to inode walk failures. + +Introduced-by: b84a3a96751f ("xfs: remove the per-filesystem list of dquots") +Fixes: ebd126a651f8 ("xfs: convert quotacheck to use the new iwalk functions") +Signed-off-by: Darrick J. Wong +Reviewed-by: Christoph Hellwig +Reviewed-by: Dave Chinner +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/xfs_qm.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 5608066d6e539..623244650a2f0 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1317,8 +1317,15 @@ xfs_qm_quotacheck( + + error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true, + NULL); +- if (error) ++ if (error) { ++ /* ++ * The inode walk may have partially populated the dquot ++ * caches. We must purge them before disabling quota and ++ * tearing down the quotainfo, or else the dquots will leak. ++ */ ++ xfs_qm_dqpurge_all(mp); + goto error_return; ++ } + + /* + * We've made all the changes that we need to make incore. Flush them +-- +2.39.0 + diff --git a/queue-5.15/xfs-set-xfs_feat_nlink-correctly.patch b/queue-5.15/xfs-set-xfs_feat_nlink-correctly.patch new file mode 100644 index 00000000000..1d558e3f1bd --- /dev/null +++ b/queue-5.15/xfs-set-xfs_feat_nlink-correctly.patch @@ -0,0 +1,40 @@ +From 677a1c8c2a4a3aba36ea4b1144d5f58d2f55b8a9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:28 -0800 +Subject: xfs: set XFS_FEAT_NLINK correctly + +From: Dave Chinner + +[ Upstream commit dd0d2f9755191690541b09e6385d0f8cd8bc9d8f ] + +While xfs_has_nlink() is not used in kernel, it is used in userspace +(e.g. by xfs_db) so we need to set the XFS_FEAT_NLINK flag correctly +in xfs_sb_version_to_features(). + +Signed-off-by: Dave Chinner +Reviewed-by: Christoph Hellwig +Reviewed-by: Darrick J. Wong +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. 
Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_sb.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c +index e58349be78bd5..72c05485c8706 100644 +--- a/fs/xfs/libxfs/xfs_sb.c ++++ b/fs/xfs/libxfs/xfs_sb.c +@@ -70,6 +70,8 @@ xfs_sb_version_to_features( + /* optional V4 features */ + if (sbp->sb_rblocks > 0) + features |= XFS_FEAT_REALTIME; ++ if (sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT) ++ features |= XFS_FEAT_NLINK; + if (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT) + features |= XFS_FEAT_ATTR; + if (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT) +-- +2.39.0 + diff --git a/queue-5.15/xfs-validate-v5-feature-fields.patch b/queue-5.15/xfs-validate-v5-feature-fields.patch new file mode 100644 index 00000000000..f2873a4140e --- /dev/null +++ b/queue-5.15/xfs-validate-v5-feature-fields.patch @@ -0,0 +1,132 @@ +From 5891dc885a0dbcdd1377f4b9bac3b0403a101783 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:29 -0800 +Subject: xfs: validate v5 feature fields + +From: Dave Chinner + +[ Upstream commit f0f5f658065a5af09126ec892e4c383540a1c77f ] + +We don't check that the v4 feature flags taht v5 requires to be set +are actually set anywhere. Do this check when we see that the +filesystem is a v5 filesystem. + +Signed-off-by: Dave Chinner +Reviewed-by: Christoph Hellwig +Reviewed-by: Darrick J. Wong +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_sb.c | 68 +++++++++++++++++++++++++++++++++++------- + 1 file changed, 58 insertions(+), 10 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c +index 72c05485c8706..04e2a57313fa0 100644 +--- a/fs/xfs/libxfs/xfs_sb.c ++++ b/fs/xfs/libxfs/xfs_sb.c +@@ -30,6 +30,47 @@ + * Physical superblock buffer manipulations. Shared with libxfs in userspace. + */ + ++/* ++ * Check that all the V4 feature bits that the V5 filesystem format requires are ++ * correctly set. ++ */ ++static bool ++xfs_sb_validate_v5_features( ++ struct xfs_sb *sbp) ++{ ++ /* We must not have any unknown V4 feature bits set */ ++ if (sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ++ return false; ++ ++ /* ++ * The CRC bit is considered an invalid V4 flag, so we have to add it ++ * manually to the OKBITS mask. ++ */ ++ if (sbp->sb_features2 & ~(XFS_SB_VERSION2_OKBITS | ++ XFS_SB_VERSION2_CRCBIT)) ++ return false; ++ ++ /* Now check all the required V4 feature flags are set. */ ++ ++#define V5_VERS_FLAGS (XFS_SB_VERSION_NLINKBIT | \ ++ XFS_SB_VERSION_ALIGNBIT | \ ++ XFS_SB_VERSION_LOGV2BIT | \ ++ XFS_SB_VERSION_EXTFLGBIT | \ ++ XFS_SB_VERSION_DIRV2BIT | \ ++ XFS_SB_VERSION_MOREBITSBIT) ++ ++#define V5_FEAT_FLAGS (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ ++ XFS_SB_VERSION2_ATTR2BIT | \ ++ XFS_SB_VERSION2_PROJID32BIT | \ ++ XFS_SB_VERSION2_CRCBIT) ++ ++ if ((sbp->sb_versionnum & V5_VERS_FLAGS) != V5_VERS_FLAGS) ++ return false; ++ if ((sbp->sb_features2 & V5_FEAT_FLAGS) != V5_FEAT_FLAGS) ++ return false; ++ return true; ++} ++ + /* + * We support all XFS versions newer than a v4 superblock with V2 directories. + */ +@@ -37,9 +78,19 @@ bool + xfs_sb_good_version( + struct xfs_sb *sbp) + { +- /* all v5 filesystems are supported */ ++ /* ++ * All v5 filesystems are supported, but we must check that all the ++ * required v4 feature flags are enabled correctly as the code checks ++ * those flags and not for v5 support. 
++ */ + if (xfs_sb_is_v5(sbp)) +- return true; ++ return xfs_sb_validate_v5_features(sbp); ++ ++ /* We must not have any unknown v4 feature bits set */ ++ if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) || ++ ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && ++ (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS))) ++ return false; + + /* versions prior to v4 are not supported */ + if (XFS_SB_VERSION_NUM(sbp) < XFS_SB_VERSION_4) +@@ -51,12 +102,6 @@ xfs_sb_good_version( + if (!(sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT)) + return false; + +- /* And must not have any unknown v4 feature bits set */ +- if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) || +- ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && +- (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS))) +- return false; +- + /* It's a supported v4 filesystem */ + return true; + } +@@ -264,12 +309,15 @@ xfs_validate_sb_common( + bool has_dalign; + + if (!xfs_verify_magic(bp, dsb->sb_magicnum)) { +- xfs_warn(mp, "bad magic number"); ++ xfs_warn(mp, ++"Superblock has bad magic number 0x%x. Not an XFS filesystem?", ++ be32_to_cpu(dsb->sb_magicnum)); + return -EWRONGFS; + } + + if (!xfs_sb_good_version(sbp)) { +- xfs_warn(mp, "bad version"); ++ xfs_warn(mp, ++"Superblock has unknown features enabled or corrupted feature masks."); + return -EWRONGFS; + } + +-- +2.39.0 + diff --git a/queue-5.15/xfs-zero-inode-fork-buffer-at-allocation.patch b/queue-5.15/xfs-zero-inode-fork-buffer-at-allocation.patch new file mode 100644 index 00000000000..82465bff2cb --- /dev/null +++ b/queue-5.15/xfs-zero-inode-fork-buffer-at-allocation.patch @@ -0,0 +1,62 @@ +From f106ff4ed77f23bcedc8170b68b21169f476df2c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 14 Feb 2023 13:25:25 -0800 +Subject: xfs: zero inode fork buffer at allocation + +From: Dave Chinner + +[ Upstream commit cb512c921639613ce03f87e62c5e93ed9fe8c84d ] + +When we first allocate or resize an inline inode fork, we round up +the allocation to 4 byte alingment to make journal alignment +constraints. We don't clear the unused bytes, so we can copy up to +three uninitialised bytes into the journal. Zero those bytes so we +only ever copy zeros into the journal. + +Signed-off-by: Dave Chinner +Reviewed-by: Darrick J. Wong +Reviewed-by: Allison Henderson +Signed-off-by: Dave Chinner +Signed-off-by: Leah Rumancik +Acked-by: Darrick J. Wong +Signed-off-by: Sasha Levin +--- + fs/xfs/libxfs/xfs_inode_fork.c | 12 +++++++++--- + 1 file changed, 9 insertions(+), 3 deletions(-) + +diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c +index 1d174909f9bdf..20095233d7bc0 100644 +--- a/fs/xfs/libxfs/xfs_inode_fork.c ++++ b/fs/xfs/libxfs/xfs_inode_fork.c +@@ -50,8 +50,13 @@ xfs_init_local_fork( + mem_size++; + + if (size) { ++ /* ++ * As we round up the allocation here, we need to ensure the ++ * bytes we don't copy data into are zeroed because the log ++ * vectors still copy them into the journal. ++ */ + real_size = roundup(mem_size, 4); +- ifp->if_u1.if_data = kmem_alloc(real_size, KM_NOFS); ++ ifp->if_u1.if_data = kmem_zalloc(real_size, KM_NOFS); + memcpy(ifp->if_u1.if_data, data, size); + if (zero_terminate) + ifp->if_u1.if_data[size] = '\0'; +@@ -500,10 +505,11 @@ xfs_idata_realloc( + /* + * For inline data, the underlying buffer must be a multiple of 4 bytes + * in size so that it can be logged and stay on word boundaries. +- * We enforce that here. ++ * We enforce that here, and use __GFP_ZERO to ensure that size ++ * extensions always zero the unused roundup area. 
+ */ + ifp->if_u1.if_data = krealloc(ifp->if_u1.if_data, roundup(new_size, 4), +- GFP_NOFS | __GFP_NOFAIL); ++ GFP_NOFS | __GFP_NOFAIL | __GFP_ZERO); + ifp->if_bytes = new_size; + } + +-- +2.39.0 +