From: Sasha Levin Date: Fri, 11 Oct 2024 12:10:03 +0000 (-0400) Subject: Fixes for 6.1 X-Git-Tag: v5.10.227~83 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ee094e2e796644ce8e8a30a3a419adbe1488a225;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.1 Signed-off-by: Sasha Levin --- diff --git a/queue-6.1/alsa-hda-realtek-cs35l41-fix-device-id-model-name.patch b/queue-6.1/alsa-hda-realtek-cs35l41-fix-device-id-model-name.patch new file mode 100644 index 00000000000..6dd36889cac --- /dev/null +++ b/queue-6.1/alsa-hda-realtek-cs35l41-fix-device-id-model-name.patch @@ -0,0 +1,50 @@ +From d8c5176d471ce9163f835a2600d46564377efe98 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Feb 2024 00:42:12 +0100 +Subject: ALSA: hda/realtek: cs35l41: Fix device ID / model name +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jean-Loïc Charroud + +[ Upstream commit b91050448897663b60b6d15525c8c3ecae28a368 ] + +The patch 51d976079976c800ef19ed1b542602fcf63f0edb ("ALSA: hda/realtek: +Add quirks for ASUS Zenbook 2022 Models") modified the entry 1043:1e2e +from "ASUS UM3402" to "ASUS UM6702RA/RC" and added another entry for +"ASUS UM3402" with 104e:1ee2. +The first entry was correct, while the new one corresponds to model +"ASUS UM6702RA/RC" +Fix the model names for both devices. + +Fixes: 51d976079976 ("ALSA: hda/realtek: Add quirks for ASUS Zenbook 2022 Models") +Signed-off-by: Jean-Loïc Charroud +Link: https://lore.kernel.org/r/1656546983.650349575.1707867732866.JavaMail.zimbra@free.fr +Signed-off-by: Takashi Iwai +Signed-off-by: Sasha Levin +--- + sound/pci/hda/patch_realtek.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 279857edc0f70..8c8a3f6499c22 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -9928,11 +9928,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), +- SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), ++ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), + SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), +- SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2), ++ SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), +-- +2.43.0 + diff --git a/queue-6.1/alsa-hda-realtek-cs35l41-fix-order-and-duplicates-in.patch b/queue-6.1/alsa-hda-realtek-cs35l41-fix-order-and-duplicates-in.patch new file mode 100644 index 00000000000..53d4943d2a5 --- /dev/null +++ b/queue-6.1/alsa-hda-realtek-cs35l41-fix-order-and-duplicates-in.patch @@ -0,0 +1,48 @@ +From 771ca3a2060556cf8f5e1580775941eeb9358603 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Feb 2024 00:44:24 +0100 +Subject: ALSA: hda/realtek: cs35l41: Fix order and duplicates 
in quirks table +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Jean-Loïc Charroud + +[ Upstream commit 852d432a14dbcd34e15a3a3910c5c6869a6d1929 ] + +Move entry {0x1043, 0x16a3, "ASUS UX3402VA"} following device ID order. +Remove duplicate entry for device {0x1043, 0x1f62, "ASUS UX7602ZM"}. + +Fixes: 51d976079976 ("ALSA: hda/realtek: Add quirks for ASUS Zenbook 2022 Models") +Signed-off-by: Jean-Loïc Charroud +Link: https://lore.kernel.org/r/1969151851.650354669.1707867864074.JavaMail.zimbra@free.fr +Signed-off-by: Takashi Iwai +Signed-off-by: Sasha Levin +--- + sound/pci/hda/patch_realtek.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 7bb32a98f1761..279857edc0f70 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -9896,6 +9896,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), + SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), ++ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), +@@ -9926,8 +9927,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), + SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2), +- SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2), +- SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), +-- +2.43.0 + diff --git a/queue-6.1/bluetooth-fix-usage-of-__hci_cmd_sync_status.patch b/queue-6.1/bluetooth-fix-usage-of-__hci_cmd_sync_status.patch new file mode 100644 index 00000000000..56bf0d4a0d0 --- /dev/null +++ b/queue-6.1/bluetooth-fix-usage-of-__hci_cmd_sync_status.patch @@ -0,0 +1,83 @@ +From c04df155fbdd3a8396f059abac22935271dbae9d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 1 Jul 2024 12:07:46 -0400 +Subject: Bluetooth: Fix usage of __hci_cmd_sync_status + +From: Luiz Augusto von Dentz + +[ Upstream commit 87be7b189b2c50d4b51512f59e4e97db4eedee8a ] + +__hci_cmd_sync_status shall only be used if hci_req_sync_lock is _not_ +required which is not the case of hci_dev_cmd so it needs to use +hci_cmd_sync_status which uses hci_req_sync_lock internally. 
+ +Fixes: f1a8f402f13f ("Bluetooth: L2CAP: Fix deadlock") +Reported-by: Pauli Virtanen +Signed-off-by: Luiz Augusto von Dentz +Signed-off-by: Sasha Levin +--- + net/bluetooth/hci_core.c | 27 ++++++++++++--------------- + 1 file changed, 12 insertions(+), 15 deletions(-) + +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index dc19a0b1a2f6d..993b98257bc28 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -721,8 +721,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) + + switch (cmd) { + case HCISETAUTH: +- err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, +- 1, &dr.dev_opt, HCI_CMD_TIMEOUT); ++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, ++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + break; + + case HCISETENCRYPT: +@@ -733,23 +733,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) + + if (!test_bit(HCI_AUTH, &hdev->flags)) { + /* Auth must be enabled first */ +- err = __hci_cmd_sync_status(hdev, +- HCI_OP_WRITE_AUTH_ENABLE, +- 1, &dr.dev_opt, +- HCI_CMD_TIMEOUT); ++ err = hci_cmd_sync_status(hdev, ++ HCI_OP_WRITE_AUTH_ENABLE, ++ 1, &dr.dev_opt, ++ HCI_CMD_TIMEOUT); + if (err) + break; + } + +- err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, +- 1, &dr.dev_opt, +- HCI_CMD_TIMEOUT); ++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, ++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + break; + + case HCISETSCAN: +- err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, +- 1, &dr.dev_opt, +- HCI_CMD_TIMEOUT); ++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, ++ 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + + /* Ensure that the connectable and discoverable states + * get correctly modified as this was a non-mgmt change. +@@ -761,9 +759,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) + case HCISETLINKPOL: + policy = cpu_to_le16(dr.dev_opt); + +- err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, +- 2, &policy, +- HCI_CMD_TIMEOUT); ++ err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, ++ 2, &policy, HCI_CMD_TIMEOUT); + break; + + case HCISETLINKMODE: +-- +2.43.0 + diff --git a/queue-6.1/bootconfig-fix-the-kerneldoc-of-_xbc_exit.patch b/queue-6.1/bootconfig-fix-the-kerneldoc-of-_xbc_exit.patch new file mode 100644 index 00000000000..2ac9c484f61 --- /dev/null +++ b/queue-6.1/bootconfig-fix-the-kerneldoc-of-_xbc_exit.patch @@ -0,0 +1,40 @@ +From 1d748703d607017874bcd4f74ed70a4fe725b79c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 16 Apr 2024 06:44:04 +0900 +Subject: bootconfig: Fix the kerneldoc of _xbc_exit() + +From: Masami Hiramatsu (Google) + +[ Upstream commit 298b871cd55a607037ac8af0011b9fdeb54c1e65 ] + +Fix the kerneldoc of _xbc_exit() which is updated to have an @early +argument and the function name is changed. 
+ +Link: https://lore.kernel.org/all/171321744474.599864.13532445969528690358.stgit@devnote2/ + +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202404150036.kPJ3HEFA-lkp@intel.com/ +Fixes: 89f9a1e876b5 ("bootconfig: use memblock_free_late to free xbc memory to buddy") +Signed-off-by: Masami Hiramatsu (Google) +Signed-off-by: Sasha Levin +--- + lib/bootconfig.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/lib/bootconfig.c b/lib/bootconfig.c +index 8841554432d5b..97f8911ea339e 100644 +--- a/lib/bootconfig.c ++++ b/lib/bootconfig.c +@@ -901,7 +901,8 @@ static int __init xbc_parse_tree(void) + } + + /** +- * xbc_exit() - Clean up all parsed bootconfig ++ * _xbc_exit() - Clean up all parsed bootconfig ++ * @early: Set true if this is called before budy system is initialized. + * + * This clears all data structures of parsed bootconfig on memory. + * If you need to reuse xbc_init() with new boot config, you can +-- +2.43.0 + diff --git a/queue-6.1/cpufreq-amd-pstate-ut-convert-nominal_freq-to-khz-du.patch b/queue-6.1/cpufreq-amd-pstate-ut-convert-nominal_freq-to-khz-du.patch new file mode 100644 index 00000000000..b37d0d19fb7 --- /dev/null +++ b/queue-6.1/cpufreq-amd-pstate-ut-convert-nominal_freq-to-khz-du.patch @@ -0,0 +1,73 @@ +From 7c49aa544a7a507b628cf797d693938e74d569a2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 2 Jul 2024 08:14:13 +0000 +Subject: cpufreq/amd-pstate-ut: Convert nominal_freq to khz during comparisons + +From: Dhananjay Ugwekar + +[ Upstream commit f21ab5ed4e8758b06230900f44b9dcbcfdc0c3ae ] + +cpudata->nominal_freq being in MHz whereas other frequencies being in +KHz breaks the amd-pstate-ut frequency sanity check. This fixes it. + +Fixes: e4731baaf294 ("cpufreq: amd-pstate: Fix the inconsistency in max frequency units") +Reported-by: David Arcari +Signed-off-by: Dhananjay Ugwekar +Reviewed-by: Mario Limonciello +Reviewed-by: Gautham R. 
Shenoy +Link: https://lore.kernel.org/r/20240702081413.5688-2-Dhananjay.Ugwekar@amd.com +Signed-off-by: Mario Limonciello +Signed-off-by: Sasha Levin +--- + drivers/cpufreq/amd-pstate-ut.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c +index b448c8d6a16dd..9c1fc386c010f 100644 +--- a/drivers/cpufreq/amd-pstate-ut.c ++++ b/drivers/cpufreq/amd-pstate-ut.c +@@ -201,6 +201,7 @@ static void amd_pstate_ut_check_freq(u32 index) + int cpu = 0; + struct cpufreq_policy *policy = NULL; + struct amd_cpudata *cpudata = NULL; ++ u32 nominal_freq_khz; + + for_each_possible_cpu(cpu) { + policy = cpufreq_cpu_get(cpu); +@@ -208,13 +209,14 @@ static void amd_pstate_ut_check_freq(u32 index) + break; + cpudata = policy->driver_data; + +- if (!((cpudata->max_freq >= cpudata->nominal_freq) && +- (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) && ++ nominal_freq_khz = cpudata->nominal_freq*1000; ++ if (!((cpudata->max_freq >= nominal_freq_khz) && ++ (nominal_freq_khz > cpudata->lowest_nonlinear_freq) && + (cpudata->lowest_nonlinear_freq > cpudata->min_freq) && + (cpudata->min_freq > 0))) { + amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n", +- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq, ++ __func__, cpu, cpudata->max_freq, nominal_freq_khz, + cpudata->lowest_nonlinear_freq, cpudata->min_freq); + goto skip_test; + } +@@ -228,13 +230,13 @@ static void amd_pstate_ut_check_freq(u32 index) + + if (cpudata->boost_supported) { + if ((policy->max == cpudata->max_freq) || +- (policy->max == cpudata->nominal_freq)) ++ (policy->max == nominal_freq_khz)) + amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS; + else { + amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL; + pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n", + __func__, cpu, policy->max, cpudata->max_freq, +- cpudata->nominal_freq); ++ nominal_freq_khz); + goto skip_test; + } + } else { +-- +2.43.0 + diff --git a/queue-6.1/drm-crtc-fix-uninitialized-variable-use-even-harder.patch b/queue-6.1/drm-crtc-fix-uninitialized-variable-use-even-harder.patch new file mode 100644 index 00000000000..0203525373e --- /dev/null +++ b/queue-6.1/drm-crtc-fix-uninitialized-variable-use-even-harder.patch @@ -0,0 +1,40 @@ +From beb1072e3463f5cbdf269f9f0ea0e80037542ecd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Feb 2024 13:55:34 -0800 +Subject: drm/crtc: fix uninitialized variable use even harder + +From: Rob Clark + +[ Upstream commit b6802b61a9d0e99dcfa6fff7c50db7c48a9623d3 ] + +DRM_MODESET_LOCK_ALL_BEGIN() has a hidden trap-door (aka retry loop), +which means we can't rely too much on variable initializers. 
+ +Fixes: 6e455f5dcdd1 ("drm/crtc: fix uninitialized variable use") +Signed-off-by: Rob Clark +Reviewed-by: Daniel Vetter +Reviewed-by: Abhinav Kumar +Reviewed-by: Dmitry Baryshkov +Tested-by: Dmitry Baryshkov # sc7180, sdm845 +Link: https://patchwork.freedesktop.org/patch/msgid/20240212215534.190682-1-robdclark@gmail.com +Signed-off-by: Dmitry Baryshkov +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/drm_crtc.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c +index cb90e70d85e86..65f9f66933bba 100644 +--- a/drivers/gpu/drm/drm_crtc.c ++++ b/drivers/gpu/drm/drm_crtc.c +@@ -904,6 +904,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, + connector_set = NULL; + fb = NULL; + mode = NULL; ++ num_connectors = 0; + + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); + +-- +2.43.0 + diff --git a/queue-6.1/fs-ntfs3-do-not-call-file_modified-if-collapse-range.patch b/queue-6.1/fs-ntfs3-do-not-call-file_modified-if-collapse-range.patch new file mode 100644 index 00000000000..4b26efd2c67 --- /dev/null +++ b/queue-6.1/fs-ntfs3-do-not-call-file_modified-if-collapse-range.patch @@ -0,0 +1,41 @@ +From 7cd8d55410342cffac472f3f655b04a4e5832c35 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 28 Jun 2024 18:29:46 +0300 +Subject: fs/ntfs3: Do not call file_modified if collapse range failed + +From: Konstantin Komarov + +[ Upstream commit 2db86f7995fe6b62a4d6fee9f3cdeba3c6d27606 ] + +Fixes: 4342306f0f0d ("fs/ntfs3: Add file operations and implementation") +Signed-off-by: Konstantin Komarov +Signed-off-by: Sasha Levin +--- + fs/ntfs3/file.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c +index 6f03de747e375..aedd4f5f459e6 100644 +--- a/fs/ntfs3/file.c ++++ b/fs/ntfs3/file.c +@@ -516,7 +516,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size) + } + + /* +- * ntfs_fallocate ++ * ntfs_fallocate - file_operations::ntfs_fallocate + * + * Preallocate space for a file. This implements ntfs's fallocate file + * operation, which gets called from sys_fallocate system call. User +@@ -647,6 +647,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) + ni_lock(ni); + err = attr_collapse_range(ni, vbo, len); + ni_unlock(ni); ++ if (err) ++ goto out; + } else if (mode & FALLOC_FL_INSERT_RANGE) { + /* Check new size. */ + err = inode_newsize_ok(inode, new_size); +-- +2.43.0 + diff --git a/queue-6.1/fs-ntfs3-fix-sparse-warning-in-ni_fiemap.patch b/queue-6.1/fs-ntfs3-fix-sparse-warning-in-ni_fiemap.patch new file mode 100644 index 00000000000..3e03be0d5ae --- /dev/null +++ b/queue-6.1/fs-ntfs3-fix-sparse-warning-in-ni_fiemap.patch @@ -0,0 +1,108 @@ +From b420cb4eaec50283ae18339388761c94927f232a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 19 Aug 2024 16:23:02 +0300 +Subject: fs/ntfs3: Fix sparse warning in ni_fiemap + +From: Konstantin Komarov + +[ Upstream commit 62fea783f96ce825f0ac9e40ce9530ddc1ea2a29 ] + +The interface of fiemap_fill_next_extent_k() was modified +to eliminate the sparse warning. 
+ +Fixes: d57431c6f511 ("fs/ntfs3: Do copy_to_user out of run_lock") +Reported-by: kernel test robot +Closes: https://lore.kernel.org/oe-kbuild-all/202406271920.hndE8N6D-lkp@intel.com/ +Signed-off-by: Konstantin Komarov +Signed-off-by: Sasha Levin +--- + fs/ntfs3/frecord.c | 21 ++++++++------------- + 1 file changed, 8 insertions(+), 13 deletions(-) + +diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c +index b3299cda59622..e19510f977112 100644 +--- a/fs/ntfs3/frecord.c ++++ b/fs/ntfs3/frecord.c +@@ -1901,13 +1901,13 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr, + + /* + * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent +- * but it accepts kernel address for fi_extents_start ++ * but it uses 'fe_k' instead of fieinfo->fi_extents_start + */ + static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo, +- u64 logical, u64 phys, u64 len, u32 flags) ++ struct fiemap_extent *fe_k, u64 logical, ++ u64 phys, u64 len, u32 flags) + { + struct fiemap_extent extent; +- struct fiemap_extent __user *dest = fieinfo->fi_extents_start; + + /* only count the extents */ + if (fieinfo->fi_extents_max == 0) { +@@ -1931,8 +1931,7 @@ static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo, + extent.fe_length = len; + extent.fe_flags = flags; + +- dest += fieinfo->fi_extents_mapped; +- memcpy(dest, &extent, sizeof(extent)); ++ memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent)); + + fieinfo->fi_extents_mapped++; + if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max) +@@ -1950,7 +1949,6 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + __u64 vbo, __u64 len) + { + int err = 0; +- struct fiemap_extent __user *fe_u = fieinfo->fi_extents_start; + struct fiemap_extent *fe_k = NULL; + struct ntfs_sb_info *sbi = ni->mi.sbi; + u8 cluster_bits = sbi->cluster_bits; +@@ -2009,7 +2007,6 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + err = -ENOMEM; + goto out; + } +- fieinfo->fi_extents_start = fe_k; + + end = vbo + len; + alloc_size = le64_to_cpu(attr->nres.alloc_size); +@@ -2099,8 +2096,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + if (vbo + dlen >= end) + flags |= FIEMAP_EXTENT_LAST; + +- err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, dlen, +- flags); ++ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, ++ dlen, flags); + + if (err < 0) + break; +@@ -2121,7 +2118,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + if (vbo + bytes >= end) + flags |= FIEMAP_EXTENT_LAST; + +- err = fiemap_fill_next_extent_k(fieinfo, vbo, lbo, bytes, ++ err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes, + flags); + if (err < 0) + break; +@@ -2138,15 +2135,13 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, + /* + * Copy to user memory out of lock + */ +- if (copy_to_user(fe_u, fe_k, ++ if (copy_to_user(fieinfo->fi_extents_start, fe_k, + fieinfo->fi_extents_max * + sizeof(struct fiemap_extent))) { + err = -EFAULT; + } + + out: +- /* Restore original pointer. 
*/ +- fieinfo->fi_extents_start = fe_u; + kfree(fe_k); + return err; + } +-- +2.43.0 + diff --git a/queue-6.1/fs-ntfs3-refactor-enum_rstbl-to-suppress-static-chec.patch b/queue-6.1/fs-ntfs3-refactor-enum_rstbl-to-suppress-static-chec.patch new file mode 100644 index 00000000000..fdaf38776cd --- /dev/null +++ b/queue-6.1/fs-ntfs3-refactor-enum_rstbl-to-suppress-static-chec.patch @@ -0,0 +1,58 @@ +From f60eebfd26abf1ae114c604e7afd0347efbd6ece Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 23 Jul 2024 16:51:18 +0300 +Subject: fs/ntfs3: Refactor enum_rstbl to suppress static checker + +From: Konstantin Komarov + +[ Upstream commit 56c16d5459d5c050a97a138a00a82b105a8e0a66 ] + +Comments and brief description of function enum_rstbl added. + +Fixes: b46acd6a6a62 ("fs/ntfs3: Add NTFS journal") +Reported-by: Dan Carpenter +Signed-off-by: Konstantin Komarov +Signed-off-by: Sasha Levin +--- + fs/ntfs3/fslog.c | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c +index 8e23bd6cd0f2f..339ce5aa3c75b 100644 +--- a/fs/ntfs3/fslog.c ++++ b/fs/ntfs3/fslog.c +@@ -609,14 +609,29 @@ static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head) + *head = cpu_to_le16(index); + } + ++/* ++ * Enumerate restart table. ++ * ++ * @t - table to enumerate. ++ * @c - current enumerated element. ++ * ++ * enumeration starts with @c == NULL ++ * returns next element or NULL ++ */ + static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c) + { + __le32 *e; + u32 bprt; +- u16 rsize = t ? le16_to_cpu(t->size) : 0; ++ u16 rsize; ++ ++ if (!t) ++ return NULL; ++ ++ rsize = le16_to_cpu(t->size); + + if (!c) { +- if (!t || !t->total) ++ /* start enumeration. */ ++ if (!t->total) + return NULL; + e = Add2Ptr(t, sizeof(struct RESTART_TABLE)); + } else { +-- +2.43.0 + diff --git a/queue-6.1/input-synaptics-rmi4-fix-uaf-of-irq-domain-on-driver.patch b/queue-6.1/input-synaptics-rmi4-fix-uaf-of-irq-domain-on-driver.patch new file mode 100644 index 00000000000..29e1500a9b5 --- /dev/null +++ b/queue-6.1/input-synaptics-rmi4-fix-uaf-of-irq-domain-on-driver.patch @@ -0,0 +1,55 @@ +From d883ea112022e93b19829abe67aebd5d3b16d787 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 9 Oct 2024 05:41:32 +0000 +Subject: Input: synaptics-rmi4 - fix UAF of IRQ domain on driver removal + +From: Mathias Krause + +commit fbf8d71742557abaf558d8efb96742d442720cc2 upstream. + +Calling irq_domain_remove() will lead to freeing the IRQ domain +prematurely. The domain is still referenced and will be attempted to get +used via rmi_free_function_list() -> rmi_unregister_function() -> +irq_dispose_mapping() -> irq_get_irq_data()'s ->domain pointer. + +With PaX's MEMORY_SANITIZE this will lead to an access fault when +attempting to dereference embedded pointers, as in Torsten's report that +was faulting on the 'domain->ops->unmap' test. + +Fix this by releasing the IRQ domain only after all related IRQs have +been deactivated. 
+ +Fixes: 24d28e4f1271 ("Input: synaptics-rmi4 - convert irq distribution to irq_domain") +Reported-by: Torsten Hilbrich +Signed-off-by: Mathias Krause +Link: https://lore.kernel.org/r/20240222142654.856566-1-minipli@grsecurity.net +Signed-off-by: Dmitry Torokhov +Signed-off-by: Tzung-Bi Shih +Signed-off-by: Sasha Levin +--- + drivers/input/rmi4/rmi_driver.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c +index aa32371f04af6..ef9ea295f9e03 100644 +--- a/drivers/input/rmi4/rmi_driver.c ++++ b/drivers/input/rmi4/rmi_driver.c +@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev) + + rmi_disable_irq(rmi_dev, false); + +- irq_domain_remove(data->irqdomain); +- data->irqdomain = NULL; +- + rmi_f34_remove_sysfs(rmi_dev); + rmi_free_function_list(rmi_dev); + ++ irq_domain_remove(data->irqdomain); ++ data->irqdomain = NULL; ++ + return 0; + } + +-- +2.43.0 + diff --git a/queue-6.1/libsubcmd-don-t-free-the-usage-string.patch b/queue-6.1/libsubcmd-don-t-free-the-usage-string.patch new file mode 100644 index 00000000000..8dc88ef253e --- /dev/null +++ b/queue-6.1/libsubcmd-don-t-free-the-usage-string.patch @@ -0,0 +1,182 @@ +From c87496c7ed0b200f35b7a1d56c2136530abf2315 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 4 Sep 2024 11:48:30 +0530 +Subject: libsubcmd: Don't free the usage string + +From: Aditya Gupta + +[ Upstream commit 1a5efc9e13f357abc396dbf445b25d08914c8060 ] + +Currently, commands which depend on 'parse_options_subcommand()' don't +show the usage string, and instead show '(null)' + + $ ./perf sched + Usage: (null) + + -D, --dump-raw-trace dump raw trace in ASCII + -f, --force don't complain, do it + -i, --input input file name + -v, --verbose be more verbose (show symbol address, etc) + +'parse_options_subcommand()' is generally expected to initialise the usage +string, with information in the passed 'subcommands[]' array + +This behaviour was changed in: + + 230a7a71f92212e7 ("libsubcmd: Fix parse-options memory leak") + +Where the generated usage string is deallocated, and usage[0] string is +reassigned as NULL. + +As discussed in [1], free the allocated usage string in the main +function itself, and don't reset usage string to NULL in +parse_options_subcommand + +With this change, the behaviour is restored. 
+ + $ ./perf sched + Usage: perf sched [] {record|latency|map|replay|script|timehist} + + -D, --dump-raw-trace dump raw trace in ASCII + -f, --force don't complain, do it + -i, --input input file name + -v, --verbose be more verbose (show symbol address, etc) + +[1]: https://lore.kernel.org/linux-perf-users/htq5vhx6piet4nuq2mmhk7fs2bhfykv52dbppwxmo3s7du2odf@styd27tioc6e/ + +Fixes: 230a7a71f92212e7 ("libsubcmd: Fix parse-options memory leak") +Suggested-by: Namhyung Kim +Signed-off-by: Aditya Gupta +Acked-by: Namhyung Kim +Tested-by: Arnaldo Carvalho de Melo +Cc: Athira Rajeev +Cc: Disha Goel +Cc: Ian Rogers +Cc: Jiri Olsa +Cc: Kajol Jain +Cc: Madhavan Srinivasan +Cc: Namhyung Kim +Link: https://lore.kernel.org/r/20240904061836.55873-2-adityag@linux.ibm.com +Signed-off-by: Arnaldo Carvalho de Melo +Signed-off-by: Sasha Levin +--- + tools/lib/subcmd/parse-options.c | 8 +++----- + tools/perf/builtin-kmem.c | 2 ++ + tools/perf/builtin-kvm.c | 3 +++ + tools/perf/builtin-kwork.c | 3 +++ + tools/perf/builtin-lock.c | 3 +++ + tools/perf/builtin-mem.c | 3 +++ + tools/perf/builtin-sched.c | 3 +++ + 7 files changed, 20 insertions(+), 5 deletions(-) + +diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c +index d943d78b787ed..9fa75943f2ed1 100644 +--- a/tools/lib/subcmd/parse-options.c ++++ b/tools/lib/subcmd/parse-options.c +@@ -633,10 +633,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o + const char *const subcommands[], const char *usagestr[], int flags) + { + struct parse_opt_ctx_t ctx; +- char *buf = NULL; + + /* build usage string if it's not provided */ + if (subcommands && !usagestr[0]) { ++ char *buf = NULL; ++ + astrcatf(&buf, "%s %s [] {", subcmd_config.exec_name, argv[0]); + + for (int i = 0; subcommands[i]; i++) { +@@ -678,10 +679,7 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o + astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt); + usage_with_options(usagestr, options); + } +- if (buf) { +- usagestr[0] = NULL; +- free(buf); +- } ++ + return parse_options_end(&ctx); + } + +diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c +index 40dd52acc48ae..c74c0da7c8052 100644 +--- a/tools/perf/builtin-kmem.c ++++ b/tools/perf/builtin-kmem.c +@@ -2048,6 +2048,8 @@ int cmd_kmem(int argc, const char **argv) + + out_delete: + perf_session__delete(session); ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)kmem_usage[0]); + + return ret; + } +diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c +index 7d9ec1bac1a25..ba6654862883f 100644 +--- a/tools/perf/builtin-kvm.c ++++ b/tools/perf/builtin-kvm.c +@@ -1651,5 +1651,8 @@ int cmd_kvm(int argc, const char **argv) + else + usage_with_options(kvm_usage, kvm_options); + ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)kvm_usage[0]); ++ + return 0; + } +diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c +index 25cba0d61736c..4b612a41cdc10 100644 +--- a/tools/perf/builtin-kwork.c ++++ b/tools/perf/builtin-kwork.c +@@ -1831,5 +1831,8 @@ int cmd_kwork(int argc, const char **argv) + } else + usage_with_options(kwork_usage, kwork_options); + ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)kwork_usage[0]); ++ + return 0; + } +diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c +index 28fa76ecc0822..27bbc513aeb94 100644 +--- a/tools/perf/builtin-lock.c ++++ b/tools/perf/builtin-lock.c +@@ -2007,6 +2007,9 
@@ int cmd_lock(int argc, const char **argv) + usage_with_options(lock_usage, lock_options); + } + ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)lock_usage[0]); ++ + zfree(&lockhash_table); + return rc; + } +diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c +index fbd05617c2ddd..ac2e17c093941 100644 +--- a/tools/perf/builtin-mem.c ++++ b/tools/perf/builtin-mem.c +@@ -514,5 +514,8 @@ int cmd_mem(int argc, const char **argv) + else + usage_with_options(mem_usage, mem_options); + ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)mem_usage[0]); ++ + return 0; + } +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index b70bc7f3ad5b3..e440a00b1613e 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -3725,5 +3725,8 @@ int cmd_sched(int argc, const char **argv) + usage_with_options(sched_usage, sched_options); + } + ++ /* free usage string allocated by parse_options_subcommand */ ++ free((void *)sched_usage[0]); ++ + return 0; + } +-- +2.43.0 + diff --git a/queue-6.1/net-ethernet-cortina-drop-tso-support.patch b/queue-6.1/net-ethernet-cortina-drop-tso-support.patch new file mode 100644 index 00000000000..e5a0c8691ff --- /dev/null +++ b/queue-6.1/net-ethernet-cortina-drop-tso-support.patch @@ -0,0 +1,88 @@ +From b7de3a043c97ecd9e02cb1ed3591fb1754084a60 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 6 Jan 2024 01:12:22 +0100 +Subject: net: ethernet: cortina: Drop TSO support + +From: Linus Walleij + +[ Upstream commit ac631873c9e7a50d2a8de457cfc4b9f86666403e ] + +The recent change to allow large frames without hardware checksumming +slotted in software checksumming in the driver if hardware could not +do it. + +This will however upset TSO (TCP Segment Offloading). Typical +error dumps includes this: + +skb len=2961 headroom=222 headlen=66 tailroom=0 +(...) +WARNING: CPU: 0 PID: 956 at net/core/dev.c:3259 skb_warn_bad_offload+0x7c/0x108 +gemini-ethernet-port: caps=(0x0000010000154813, 0x00002007ffdd7889) + +And the packets do not go through. + +The TSO implementation is bogus: a TSO enabled driver must propagate +the skb_shinfo(skb)->gso_size value to the TSO engine on the NIC. + +Drop the size check and TSO offloading features for now: this +needs to be fixed up properly. + +After this ethernet works fine on Gemini devices with a direct connected +PHY such as D-Link DNS-313. + +Also tested to still be working with a DSA switch using the Gemini +ethernet as conduit interface. + +Link: https://lore.kernel.org/netdev/CANn89iJLfxng1sYL5Zk0mknXpyYQPCp83m3KgD2KJ2_hKCpEUg@mail.gmail.com/ +Suggested-by: Eric Dumazet +Fixes: d4d0c5b4d279 ("net: ethernet: cortina: Handle large frames") +Signed-off-by: Linus Walleij +Reviewed-by: Eric Dumazet +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/cortina/gemini.c | 15 ++------------- + 1 file changed, 2 insertions(+), 13 deletions(-) + +diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c +index 19fb8c4caab87..e454bbedf29ee 100644 +--- a/drivers/net/ethernet/cortina/gemini.c ++++ b/drivers/net/ethernet/cortina/gemini.c +@@ -79,8 +79,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) + + #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ +- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ +- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) ++ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM) + + /** + * struct gmac_queue_page - page buffer per-page info +@@ -1148,23 +1147,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, + struct gmac_txdesc *txd; + skb_frag_t *skb_frag; + dma_addr_t mapping; +- unsigned short mtu; + void *buffer; + int ret; + +- mtu = ETH_HLEN; +- mtu += netdev->mtu; +- if (skb->protocol == htons(ETH_P_8021Q)) +- mtu += VLAN_HLEN; +- ++ /* TODO: implement proper TSO using MTU in word3 */ + word1 = skb->len; + word3 = SOF_BIT; + +- if (word1 > mtu) { +- word1 |= TSS_MTU_ENABLE_BIT; +- word3 |= mtu; +- } +- + if (skb->len >= ETH_FRAME_LEN) { + /* Hardware offloaded checksumming isn't working on frames + * bigger than 1514 bytes. A hypothesis about this is that the +-- +2.43.0 + diff --git a/queue-6.1/perf-lock-dynamically-allocate-lockhash_table.patch b/queue-6.1/perf-lock-dynamically-allocate-lockhash_table.patch new file mode 100644 index 00000000000..96c1afe38fd --- /dev/null +++ b/queue-6.1/perf-lock-dynamically-allocate-lockhash_table.patch @@ -0,0 +1,119 @@ +From 986d49025563fed11796cfc8c60863c9a2a4b8c9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 26 May 2023 11:33:54 -0700 +Subject: perf lock: Dynamically allocate lockhash_table + +From: Ian Rogers + +[ Upstream commit eef4fee5e52071d563d9a851df1c09869215ee15 ] + +lockhash_table is 32,768 bytes in .bss, make it a memory allocation so +that the space is freed for non-lock perf commands. 
+ +Signed-off-by: Ian Rogers +Link: https://lore.kernel.org/r/20230526183401.2326121-10-irogers@google.com +Cc: K Prateek Nayak +Cc: Ravi Bangoria +Cc: Mark Rutland +Cc: Ross Zwisler +Cc: Steven Rostedt (Google) +Cc: Sean Christopherson +Cc: Yang Jihong +Cc: Peter Zijlstra +Cc: Adrian Hunter +Cc: Arnaldo Carvalho de Melo +Cc: Jiri Olsa +Cc: Masami Hiramatsu (Google) +Cc: Namhyung Kim +Cc: Leo Yan +Cc: Andi Kleen +Cc: Alexander Shishkin +Cc: Kan Liang +Cc: Tiezhu Yang +Cc: Ingo Molnar +Cc: Paolo Bonzini +Cc: linux-kernel@vger.kernel.org +Cc: linux-perf-users@vger.kernel.org +Signed-off-by: Arnaldo Carvalho de Melo +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-lock.c | 20 ++++++++++++++++---- + 1 file changed, 16 insertions(+), 4 deletions(-) + +diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c +index 470106643ed52..28fa76ecc0822 100644 +--- a/tools/perf/builtin-lock.c ++++ b/tools/perf/builtin-lock.c +@@ -45,7 +45,7 @@ static struct target target; + #define LOCKHASH_BITS 12 + #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) + +-static struct hlist_head lockhash_table[LOCKHASH_SIZE]; ++static struct hlist_head *lockhash_table; + + #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS) + #define lockhashentry(key) (lockhash_table + __lockhashfn((key))) +@@ -1645,16 +1645,22 @@ static int __cmd_contention(int argc, const char **argv) + }; + struct lock_contention con = { + .target = &target, +- .result = &lockhash_table[0], + .map_nr_entries = bpf_map_entries, + .max_stack = max_stack_depth, + .stack_skip = stack_skip, + }; + ++ lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); ++ if (!lockhash_table) ++ return -ENOMEM; ++ ++ con.result = &lockhash_table[0]; ++ + session = perf_session__new(use_bpf ? 
NULL : &data, &eops); + if (IS_ERR(session)) { + pr_err("Initializing perf session failed\n"); +- return PTR_ERR(session); ++ err = PTR_ERR(session); ++ goto out_delete; + } + + con.machine = &session->machines.host; +@@ -1755,6 +1761,7 @@ static int __cmd_contention(int argc, const char **argv) + evlist__delete(con.evlist); + lock_contention_finish(); + perf_session__delete(session); ++ zfree(&lockhash_table); + return err; + } + +@@ -1946,6 +1953,10 @@ int cmd_lock(int argc, const char **argv) + unsigned int i; + int rc = 0; + ++ lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table)); ++ if (!lockhash_table) ++ return -ENOMEM; ++ + for (i = 0; i < LOCKHASH_SIZE; i++) + INIT_HLIST_HEAD(lockhash_table + i); + +@@ -1967,7 +1978,7 @@ int cmd_lock(int argc, const char **argv) + rc = __cmd_report(false); + } else if (!strcmp(argv[0], "script")) { + /* Aliased to 'perf script' */ +- return cmd_script(argc, argv); ++ rc = cmd_script(argc, argv); + } else if (!strcmp(argv[0], "info")) { + if (argc) { + argc = parse_options(argc, argv, +@@ -1996,5 +2007,6 @@ int cmd_lock(int argc, const char **argv) + usage_with_options(lock_usage, lock_options); + } + ++ zfree(&lockhash_table); + return rc; + } +-- +2.43.0 + diff --git a/queue-6.1/perf-sched-avoid-large-stack-allocations.patch b/queue-6.1/perf-sched-avoid-large-stack-allocations.patch new file mode 100644 index 00000000000..33f4f317268 --- /dev/null +++ b/queue-6.1/perf-sched-avoid-large-stack-allocations.patch @@ -0,0 +1,91 @@ +From fcd6a547184e367ea0f3fd334772e114fa38de3b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 26 May 2023 20:43:19 -0700 +Subject: perf sched: Avoid large stack allocations + +From: Ian Rogers + +[ Upstream commit 232418a0b2e8b8e72dac003b19352f1b647cdb31 ] + +Commit 5ded57ac1bdb ("perf inject: Remove static variables") moved +static variables to local, however, in this case 3 MAX_CPUS (4096) +sized arrays were moved onto the stack making the stack frame quite +large. Avoid the stack usage by dynamically allocating the arrays. + +Signed-off-by: Ian Rogers +Cc: Adrian Hunter +Cc: Alexander Shishkin +Cc: Ingo Molnar +Cc: Jiri Olsa +Cc: Mark Rutland +Cc: Namhyung Kim +Cc: Peter Zijlstra +Link: https://lore.kernel.org/r/20230527034324.2597593-2-irogers@google.com +Signed-off-by: Arnaldo Carvalho de Melo +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-sched.c | 26 ++++++++++++++++++++++---- + 1 file changed, 22 insertions(+), 4 deletions(-) + +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index d83a7569db0e2..3eff78e7b67a2 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -193,8 +193,8 @@ struct perf_sched { + * weird events, such as a task being switched away that is not current. 
+ */ + struct perf_cpu max_cpu; +- u32 curr_pid[MAX_CPUS]; +- struct thread *curr_thread[MAX_CPUS]; ++ u32 *curr_pid; ++ struct thread **curr_thread; + char next_shortname1; + char next_shortname2; + unsigned int replay_repeat; +@@ -224,7 +224,7 @@ struct perf_sched { + u64 run_avg; + u64 all_runtime; + u64 all_count; +- u64 cpu_last_switched[MAX_CPUS]; ++ u64 *cpu_last_switched; + struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; + struct list_head sort_list, cmp_pid; + bool force; +@@ -3590,7 +3590,22 @@ int cmd_sched(int argc, const char **argv) + + mutex_init(&sched.start_work_mutex); + mutex_init(&sched.work_done_wait_mutex); +- for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) ++ sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread)); ++ if (!sched.curr_thread) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched)); ++ if (!sched.cpu_last_switched) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid)); ++ if (!sched.curr_pid) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ for (i = 0; i < MAX_CPUS; i++) + sched.curr_pid[i] = -1; + + argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, +@@ -3659,6 +3674,9 @@ int cmd_sched(int argc, const char **argv) + } + + out: ++ free(sched.curr_pid); ++ free(sched.cpu_last_switched); ++ free(sched.curr_thread); + mutex_destroy(&sched.start_work_mutex); + mutex_destroy(&sched.work_done_wait_mutex); + +-- +2.43.0 + diff --git a/queue-6.1/perf-sched-fix-memory-leak-in-perf_sched__map.patch b/queue-6.1/perf-sched-fix-memory-leak-in-perf_sched__map.patch new file mode 100644 index 00000000000..867bd7e97c5 --- /dev/null +++ b/queue-6.1/perf-sched-fix-memory-leak-in-perf_sched__map.patch @@ -0,0 +1,101 @@ +From 4596ef986353880163f8eb76177f9687df24addb Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 08:32:25 +0000 +Subject: perf sched: Fix memory leak in perf_sched__map() + +From: Yang Jihong + +[ Upstream commit ef76a5af819743d405674f6de5d0e63320ac653e ] + +perf_sched__map() needs to free memory of map_cpus, color_pids and +color_cpus in normal path and rollback allocated memory in error path. 
+ +Signed-off-by: Yang Jihong +Signed-off-by: Namhyung Kim +Link: https://lore.kernel.org/r/20240206083228.172607-3-yangjihong1@huawei.com +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-sched.c | 41 ++++++++++++++++++++++++-------------- + 1 file changed, 26 insertions(+), 15 deletions(-) + +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index 2d595dde2d121..8abd48a99ec5e 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -3240,8 +3240,6 @@ static int perf_sched__lat(struct perf_sched *sched) + + static int setup_map_cpus(struct perf_sched *sched) + { +- struct perf_cpu_map *map; +- + sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF); + + if (sched->map.comp) { +@@ -3250,16 +3248,15 @@ static int setup_map_cpus(struct perf_sched *sched) + return -1; + } + +- if (!sched->map.cpus_str) +- return 0; +- +- map = perf_cpu_map__new(sched->map.cpus_str); +- if (!map) { +- pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); +- return -1; ++ if (sched->map.cpus_str) { ++ sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str); ++ if (!sched->map.cpus) { ++ pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); ++ zfree(&sched->map.comp_cpus); ++ return -1; ++ } + } + +- sched->map.cpus = map; + return 0; + } + +@@ -3299,20 +3296,34 @@ static int setup_color_cpus(struct perf_sched *sched) + + static int perf_sched__map(struct perf_sched *sched) + { ++ int rc = -1; ++ + if (setup_map_cpus(sched)) +- return -1; ++ return rc; + + if (setup_color_pids(sched)) +- return -1; ++ goto out_put_map_cpus; + + if (setup_color_cpus(sched)) +- return -1; ++ goto out_put_color_pids; + + setup_pager(); + if (perf_sched__read_events(sched)) +- return -1; ++ goto out_put_color_cpus; ++ ++ rc = 0; + print_bad_events(sched); +- return 0; ++ ++out_put_color_cpus: ++ perf_cpu_map__put(sched->map.color_cpus); ++ ++out_put_color_pids: ++ perf_thread_map__put(sched->map.color_pids); ++ ++out_put_map_cpus: ++ zfree(&sched->map.comp_cpus); ++ perf_cpu_map__put(sched->map.cpus); ++ return rc; + } + + static int perf_sched__replay(struct perf_sched *sched) +-- +2.43.0 + diff --git a/queue-6.1/perf-sched-move-curr_pid-and-cpu_last_switched-initi.patch b/queue-6.1/perf-sched-move-curr_pid-and-cpu_last_switched-initi.patch new file mode 100644 index 00000000000..0d61c55a44e --- /dev/null +++ b/queue-6.1/perf-sched-move-curr_pid-and-cpu_last_switched-initi.patch @@ -0,0 +1,309 @@ +From fb1d985c68faebf018783218c533ab2fd4cd802c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 08:32:27 +0000 +Subject: perf sched: Move curr_pid and cpu_last_switched initialization to + perf_sched__{lat|map|replay}() + +From: Yang Jihong + +[ Upstream commit bd2cdf26b9ea000339d54adc82e87fdbf22c21c3 ] + +The curr_pid and cpu_last_switched are used only for the +'perf sched replay/latency/map'. Put their initialization in +perf_sched__{lat|map|replay () to reduce unnecessary actions in other +commands. 
+ +Simple functional testing: + + # perf sched record perf bench sched messaging + # Running 'sched/messaging' benchmark: + # 20 sender and receiver processes per group + # 10 groups == 400 processes run + + Total time: 0.209 [sec] + [ perf record: Woken up 1 times to write data ] + [ perf record: Captured and wrote 16.456 MB perf.data (147907 samples) ] + + # perf sched lat + + ------------------------------------------------------------------------------------------------------------------------------------------- + Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end | + ------------------------------------------------------------------------------------------------------------------------------------------- + sched-messaging:(401) | 2990.699 ms | 38705 | avg: 0.661 ms | max: 67.046 ms | max start: 456532.624830 s | max end: 456532.691876 s + qemu-system-x86:(7) | 179.764 ms | 2191 | avg: 0.152 ms | max: 21.857 ms | max start: 456532.576434 s | max end: 456532.598291 s + sshd:48125 | 0.522 ms | 2 | avg: 0.037 ms | max: 0.046 ms | max start: 456532.514610 s | max end: 456532.514656 s + + ksoftirqd/11:82 | 0.063 ms | 1 | avg: 0.005 ms | max: 0.005 ms | max start: 456532.769366 s | max end: 456532.769371 s + kworker/9:0-mm_:34624 | 0.233 ms | 20 | avg: 0.004 ms | max: 0.007 ms | max start: 456532.690804 s | max end: 456532.690812 s + migration/13:93 | 0.000 ms | 1 | avg: 0.004 ms | max: 0.004 ms | max start: 456532.512669 s | max end: 456532.512674 s + ----------------------------------------------------------------------------------------------------------------- + TOTAL: | 3180.750 ms | 41368 | + --------------------------------------------------- + + # echo $? + 0 + + # perf sched map + *A0 456532.510141 secs A0 => migration/0:15 + *. 456532.510171 secs . => swapper:0 + . *B0 456532.510261 secs B0 => migration/1:21 + . *. 456532.510279 secs + + L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 *L7 . . . . 456532.785979 secs + L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 *L7 . . . 456532.786054 secs + L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 *L7 . . 456532.786127 secs + L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 *L7 . 456532.786197 secs + L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 L7 *L7 456532.786270 secs + # echo $? 
+ 0 + + # perf sched replay + run measurement overhead: 108 nsecs + sleep measurement overhead: 66473 nsecs + the run test took 1000002 nsecs + the sleep test took 1082686 nsecs + nr_run_events: 49334 + nr_sleep_events: 50054 + nr_wakeup_events: 34701 + target-less wakeups: 165 + multi-target wakeups: 766 + task 0 ( swapper: 0), nr_events: 15419 + task 1 ( swapper: 1), nr_events: 1 + task 2 ( swapper: 2), nr_events: 1 + + task 715 ( sched-messaging: 110248), nr_events: 1438 + task 716 ( sched-messaging: 110249), nr_events: 512 + task 717 ( sched-messaging: 110250), nr_events: 500 + task 718 ( sched-messaging: 110251), nr_events: 537 + task 719 ( sched-messaging: 110252), nr_events: 823 + ------------------------------------------------------------ + #1 : 1325.288, ravg: 1325.29, cpu: 7823.35 / 7823.35 + #2 : 1363.606, ravg: 1329.12, cpu: 7655.53 / 7806.56 + #3 : 1349.494, ravg: 1331.16, cpu: 7544.80 / 7780.39 + #4 : 1311.488, ravg: 1329.19, cpu: 7495.13 / 7751.86 + #5 : 1309.902, ravg: 1327.26, cpu: 7266.65 / 7703.34 + #6 : 1309.535, ravg: 1325.49, cpu: 7843.86 / 7717.39 + #7 : 1316.482, ravg: 1324.59, cpu: 7854.41 / 7731.09 + #8 : 1366.604, ravg: 1328.79, cpu: 7955.81 / 7753.57 + #9 : 1326.286, ravg: 1328.54, cpu: 7466.86 / 7724.90 + #10 : 1356.653, ravg: 1331.35, cpu: 7566.60 / 7709.07 + # echo $? + 0 + +Signed-off-by: Yang Jihong +Signed-off-by: Namhyung Kim +Link: https://lore.kernel.org/r/20240206083228.172607-5-yangjihong1@huawei.com +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-sched.c | 94 +++++++++++++++++++++++++------------- + 1 file changed, 61 insertions(+), 33 deletions(-) + +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index 0cdb220229d39..b70bc7f3ad5b3 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -3199,14 +3199,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched) + } + } + ++static int setup_cpus_switch_event(struct perf_sched *sched) ++{ ++ unsigned int i; ++ ++ sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched))); ++ if (!sched->cpu_last_switched) ++ return -1; ++ ++ sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid))); ++ if (!sched->curr_pid) { ++ zfree(&sched->cpu_last_switched); ++ return -1; ++ } ++ ++ for (i = 0; i < MAX_CPUS; i++) ++ sched->curr_pid[i] = -1; ++ ++ return 0; ++} ++ ++static void free_cpus_switch_event(struct perf_sched *sched) ++{ ++ zfree(&sched->curr_pid); ++ zfree(&sched->cpu_last_switched); ++} ++ + static int perf_sched__lat(struct perf_sched *sched) + { ++ int rc = -1; + struct rb_node *next; + + setup_pager(); + ++ if (setup_cpus_switch_event(sched)) ++ return rc; ++ + if (perf_sched__read_events(sched)) +- return -1; ++ goto out_free_cpus_switch_event; + + perf_sched__merge_lat(sched); + perf_sched__sort_lat(sched); +@@ -3235,7 +3265,11 @@ static int perf_sched__lat(struct perf_sched *sched) + print_bad_events(sched); + printf("\n"); + +- return 0; ++ rc = 0; ++ ++out_free_cpus_switch_event: ++ free_cpus_switch_event(sched); ++ return rc; + } + + static int setup_map_cpus(struct perf_sched *sched) +@@ -3302,9 +3336,12 @@ static int perf_sched__map(struct perf_sched *sched) + if (!sched->curr_thread) + return rc; + +- if (setup_map_cpus(sched)) ++ if (setup_cpus_switch_event(sched)) + goto out_free_curr_thread; + ++ if (setup_map_cpus(sched)) ++ goto out_free_cpus_switch_event; ++ + if (setup_color_pids(sched)) + goto out_put_map_cpus; + +@@ -3328,6 
+3365,9 @@ static int perf_sched__map(struct perf_sched *sched) + zfree(&sched->map.comp_cpus); + perf_cpu_map__put(sched->map.cpus); + ++out_free_cpus_switch_event: ++ free_cpus_switch_event(sched); ++ + out_free_curr_thread: + zfree(&sched->curr_thread); + return rc; +@@ -3341,6 +3381,10 @@ static int perf_sched__replay(struct perf_sched *sched) + mutex_init(&sched->start_work_mutex); + mutex_init(&sched->work_done_wait_mutex); + ++ ret = setup_cpus_switch_event(sched); ++ if (ret) ++ goto out_mutex_destroy; ++ + calibrate_run_measurement_overhead(sched); + calibrate_sleep_measurement_overhead(sched); + +@@ -3348,7 +3392,7 @@ static int perf_sched__replay(struct perf_sched *sched) + + ret = perf_sched__read_events(sched); + if (ret) +- goto out_mutex_destroy; ++ goto out_free_cpus_switch_event; + + printf("nr_run_events: %ld\n", sched->nr_run_events); + printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); +@@ -3374,6 +3418,9 @@ static int perf_sched__replay(struct perf_sched *sched) + sched->thread_funcs_exit = true; + destroy_tasks(sched); + ++out_free_cpus_switch_event: ++ free_cpus_switch_event(sched); ++ + out_mutex_destroy: + mutex_destroy(&sched->start_work_mutex); + mutex_destroy(&sched->work_done_wait_mutex); +@@ -3612,21 +3659,7 @@ int cmd_sched(int argc, const char **argv) + .switch_event = replay_switch_event, + .fork_event = replay_fork_event, + }; +- unsigned int i; +- int ret = 0; +- +- sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched)); +- if (!sched.cpu_last_switched) { +- ret = -ENOMEM; +- goto out; +- } +- sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid)); +- if (!sched.curr_pid) { +- ret = -ENOMEM; +- goto out; +- } +- for (i = 0; i < MAX_CPUS; i++) +- sched.curr_pid[i] = -1; ++ int ret; + + argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, + sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); +@@ -3637,9 +3670,9 @@ int cmd_sched(int argc, const char **argv) + * Aliased to 'perf script' for now: + */ + if (!strcmp(argv[0], "script")) { +- ret = cmd_script(argc, argv); ++ return cmd_script(argc, argv); + } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) { +- ret = __cmd_record(argc, argv); ++ return __cmd_record(argc, argv); + } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) { + sched.tp_handler = &lat_ops; + if (argc > 1) { +@@ -3648,7 +3681,7 @@ int cmd_sched(int argc, const char **argv) + usage_with_options(latency_usage, latency_options); + } + setup_sorting(&sched, latency_options, latency_usage); +- ret = perf_sched__lat(&sched); ++ return perf_sched__lat(&sched); + } else if (!strcmp(argv[0], "map")) { + if (argc) { + argc = parse_options(argc, argv, map_options, map_usage, 0); +@@ -3657,7 +3690,7 @@ int cmd_sched(int argc, const char **argv) + } + sched.tp_handler = &map_ops; + setup_sorting(&sched, latency_options, latency_usage); +- ret = perf_sched__map(&sched); ++ return perf_sched__map(&sched); + } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) { + sched.tp_handler = &replay_ops; + if (argc) { +@@ -3665,7 +3698,7 @@ int cmd_sched(int argc, const char **argv) + if (argc) + usage_with_options(replay_usage, replay_options); + } +- ret = perf_sched__replay(&sched); ++ return perf_sched__replay(&sched); + } else if (!strcmp(argv[0], "timehist")) { + if (argc) { + argc = parse_options(argc, argv, timehist_options, +@@ -3681,21 +3714,16 @@ int cmd_sched(int argc, const char **argv) + parse_options_usage(NULL, timehist_options, "w", true); + if 
(sched.show_next) + parse_options_usage(NULL, timehist_options, "n", true); +- ret = -EINVAL; +- goto out; ++ return -EINVAL; + } + ret = symbol__validate_sym_arguments(); + if (ret) +- goto out; ++ return ret; + +- ret = perf_sched__timehist(&sched); ++ return perf_sched__timehist(&sched); + } else { + usage_with_options(sched_usage, sched_options); + } + +-out: +- free(sched.curr_pid); +- free(sched.cpu_last_switched); +- +- return ret; ++ return 0; + } +-- +2.43.0 + diff --git a/queue-6.1/perf-sched-move-curr_thread-initialization-to-perf_s.patch b/queue-6.1/perf-sched-move-curr_thread-initialization-to-perf_s.patch new file mode 100644 index 00000000000..2d92afbc24e --- /dev/null +++ b/queue-6.1/perf-sched-move-curr_thread-initialization-to-perf_s.patch @@ -0,0 +1,111 @@ +From ca276cac1ced64c9d792fcfa4dbf81552b1d8a3d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 08:32:26 +0000 +Subject: perf sched: Move curr_thread initialization to perf_sched__map() + +From: Yang Jihong + +[ Upstream commit 5e895278697c014e95ae7ae5e79a72ef68c5184e ] + +The curr_thread is used only for the 'perf sched map'. Put initialization +in perf_sched__map() to reduce unnecessary actions in other commands. + +Simple functional testing: + + # perf sched record perf bench sched messaging + # Running 'sched/messaging' benchmark: + # 20 sender and receiver processes per group + # 10 groups == 400 processes run + + Total time: 0.197 [sec] + [ perf record: Woken up 1 times to write data ] + [ perf record: Captured and wrote 15.526 MB perf.data (140095 samples) ] + + # perf sched map + *A0 451264.532445 secs A0 => migration/0:15 + *. 451264.532468 secs . => swapper:0 + . *B0 451264.532537 secs B0 => migration/1:21 + . *. 451264.532560 secs + . . *C0 451264.532644 secs C0 => migration/2:27 + . . *. 451264.532668 secs + . . . *D0 451264.532753 secs D0 => migration/3:33 + . . . *. 451264.532778 secs + . . . . *E0 451264.532861 secs E0 => migration/4:39 + . . . . *. 451264.532886 secs + . . . . . *F0 451264.532973 secs F0 => migration/5:45 + + A7 A7 A7 A7 A7 *A7 . . . . . . . . . . 451264.790785 secs + A7 A7 A7 A7 A7 A7 *A7 . . . . . . . . . 451264.790858 secs + A7 A7 A7 A7 A7 A7 A7 *A7 . . . . . . . . 451264.790934 secs + A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . . . . . . 451264.791004 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . . . . . 451264.791075 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . . . . 451264.791143 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . . . 451264.791232 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . . 451264.791336 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . . 451264.791407 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 . 451264.791484 secs + A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 A7 *A7 451264.791553 secs + # echo $? 
+ 0 + +Signed-off-by: Yang Jihong +Signed-off-by: Namhyung Kim +Link: https://lore.kernel.org/r/20240206083228.172607-4-yangjihong1@huawei.com +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-sched.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index 8abd48a99ec5e..0cdb220229d39 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -3298,9 +3298,13 @@ static int perf_sched__map(struct perf_sched *sched) + { + int rc = -1; + +- if (setup_map_cpus(sched)) ++ sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread))); ++ if (!sched->curr_thread) + return rc; + ++ if (setup_map_cpus(sched)) ++ goto out_free_curr_thread; ++ + if (setup_color_pids(sched)) + goto out_put_map_cpus; + +@@ -3323,6 +3327,9 @@ static int perf_sched__map(struct perf_sched *sched) + out_put_map_cpus: + zfree(&sched->map.comp_cpus); + perf_cpu_map__put(sched->map.cpus); ++ ++out_free_curr_thread: ++ zfree(&sched->curr_thread); + return rc; + } + +@@ -3608,11 +3615,6 @@ int cmd_sched(int argc, const char **argv) + unsigned int i; + int ret = 0; + +- sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread)); +- if (!sched.curr_thread) { +- ret = -ENOMEM; +- goto out; +- } + sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched)); + if (!sched.cpu_last_switched) { + ret = -ENOMEM; +@@ -3694,7 +3696,6 @@ int cmd_sched(int argc, const char **argv) + out: + free(sched.curr_pid); + free(sched.cpu_last_switched); +- free(sched.curr_thread); + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.1/perf-sched-move-start_work_mutex-and-work_done_wait_.patch b/queue-6.1/perf-sched-move-start_work_mutex-and-work_done_wait_.patch new file mode 100644 index 00000000000..9696d827edb --- /dev/null +++ b/queue-6.1/perf-sched-move-start_work_mutex-and-work_done_wait_.patch @@ -0,0 +1,130 @@ +From aa8d3b94da47b3df71ce9be9a2d501f8b75a1d4d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 08:32:24 +0000 +Subject: perf sched: Move start_work_mutex and work_done_wait_mutex + initialization to perf_sched__replay() + +From: Yang Jihong + +[ Upstream commit c6907863519cf97ee09653cc8ec338a2328c2b6f ] + +The start_work_mutex and work_done_wait_mutex are used only for the +'perf sched replay'. Put their initialization in perf_sched__replay () to +reduce unnecessary actions in other commands. 
+ +Simple functional testing: + + # perf sched record perf bench sched messaging + # Running 'sched/messaging' benchmark: + # 20 sender and receiver processes per group + # 10 groups == 400 processes run + + Total time: 0.197 [sec] + [ perf record: Woken up 1 times to write data ] + [ perf record: Captured and wrote 14.952 MB perf.data (134165 samples) ] + + # perf sched replay + run measurement overhead: 108 nsecs + sleep measurement overhead: 65658 nsecs + the run test took 999991 nsecs + the sleep test took 1079324 nsecs + nr_run_events: 42378 + nr_sleep_events: 43102 + nr_wakeup_events: 31852 + target-less wakeups: 17 + multi-target wakeups: 712 + task 0 ( swapper: 0), nr_events: 10451 + task 1 ( swapper: 1), nr_events: 3 + task 2 ( swapper: 2), nr_events: 1 + + task 717 ( sched-messaging: 74483), nr_events: 152 + task 718 ( sched-messaging: 74484), nr_events: 1944 + task 719 ( sched-messaging: 74485), nr_events: 73 + task 720 ( sched-messaging: 74486), nr_events: 163 + task 721 ( sched-messaging: 74487), nr_events: 942 + task 722 ( sched-messaging: 74488), nr_events: 78 + task 723 ( sched-messaging: 74489), nr_events: 1090 + ------------------------------------------------------------ + #1 : 1366.507, ravg: 1366.51, cpu: 7682.70 / 7682.70 + #2 : 1410.072, ravg: 1370.86, cpu: 7723.88 / 7686.82 + #3 : 1396.296, ravg: 1373.41, cpu: 7568.20 / 7674.96 + #4 : 1381.019, ravg: 1374.17, cpu: 7531.81 / 7660.64 + #5 : 1393.826, ravg: 1376.13, cpu: 7725.25 / 7667.11 + #6 : 1401.581, ravg: 1378.68, cpu: 7594.82 / 7659.88 + #7 : 1381.337, ravg: 1378.94, cpu: 7371.22 / 7631.01 + #8 : 1373.842, ravg: 1378.43, cpu: 7894.92 / 7657.40 + #9 : 1364.697, ravg: 1377.06, cpu: 7324.91 / 7624.15 + #10 : 1363.613, ravg: 1375.72, cpu: 7209.55 / 7582.69 + # echo $? + 0 + +Signed-off-by: Yang Jihong +Signed-off-by: Namhyung Kim +Link: https://lore.kernel.org/r/20240206083228.172607-2-yangjihong1@huawei.com +Stable-dep-of: 1a5efc9e13f3 ("libsubcmd: Don't free the usage string") +Signed-off-by: Sasha Levin +--- + tools/perf/builtin-sched.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c +index 3eff78e7b67a2..2d595dde2d121 100644 +--- a/tools/perf/builtin-sched.c ++++ b/tools/perf/builtin-sched.c +@@ -3317,15 +3317,20 @@ static int perf_sched__map(struct perf_sched *sched) + + static int perf_sched__replay(struct perf_sched *sched) + { ++ int ret; + unsigned long i; + ++ mutex_init(&sched->start_work_mutex); ++ mutex_init(&sched->work_done_wait_mutex); ++ + calibrate_run_measurement_overhead(sched); + calibrate_sleep_measurement_overhead(sched); + + test_calibrations(sched); + +- if (perf_sched__read_events(sched)) +- return -1; ++ ret = perf_sched__read_events(sched); ++ if (ret) ++ goto out_mutex_destroy; + + printf("nr_run_events: %ld\n", sched->nr_run_events); + printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); +@@ -3350,7 +3355,11 @@ static int perf_sched__replay(struct perf_sched *sched) + + sched->thread_funcs_exit = true; + destroy_tasks(sched); +- return 0; ++ ++out_mutex_destroy: ++ mutex_destroy(&sched->start_work_mutex); ++ mutex_destroy(&sched->work_done_wait_mutex); ++ return ret; + } + + static void setup_sorting(struct perf_sched *sched, const struct option *options, +@@ -3588,8 +3597,6 @@ int cmd_sched(int argc, const char **argv) + unsigned int i; + int ret = 0; + +- mutex_init(&sched.start_work_mutex); +- mutex_init(&sched.work_done_wait_mutex); + sched.curr_thread = calloc(MAX_CPUS, 
sizeof(*sched.curr_thread)); + if (!sched.curr_thread) { + ret = -ENOMEM; +@@ -3677,8 +3684,6 @@ int cmd_sched(int argc, const char **argv) + free(sched.curr_pid); + free(sched.cpu_last_switched); + free(sched.curr_thread); +- mutex_destroy(&sched.start_work_mutex); +- mutex_destroy(&sched.work_done_wait_mutex); + + return ret; + } +-- +2.43.0 + diff --git a/queue-6.1/selftests-net-remove-executable-bits-from-library-sc.patch b/queue-6.1/selftests-net-remove-executable-bits-from-library-sc.patch new file mode 100644 index 00000000000..5c6622f2320 --- /dev/null +++ b/queue-6.1/selftests-net-remove-executable-bits-from-library-sc.patch @@ -0,0 +1,39 @@ +From 76dac3ebdf4dd49215574466fd62770c8876b34a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 09:08:46 -0500 +Subject: selftests: net: Remove executable bits from library scripts + +From: Benjamin Poirier + +[ Upstream commit 9d851dd4dab63e95c1911a2fa847796d1ec5d58d ] + +setup_loopback.sh and net_helper.sh are meant to be sourced from other +scripts, not executed directly. Therefore, remove the executable bits from +those files' permissions. + +This change is similar to commit 49078c1b80b6 ("selftests: forwarding: +Remove executable bits from lib.sh") + +Fixes: 7d1575014a63 ("selftests/net: GRO coalesce test") +Fixes: 3bdd9fd29cb0 ("selftests/net: synchronize udpgro tests' tx and rx connection") +Suggested-by: Paolo Abeni +Signed-off-by: Benjamin Poirier +Link: https://lore.kernel.org/r/20240131140848.360618-4-bpoirier@nvidia.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + tools/testing/selftests/net/net_helper.sh | 0 + tools/testing/selftests/net/setup_loopback.sh | 0 + 2 files changed, 0 insertions(+), 0 deletions(-) + mode change 100755 => 100644 tools/testing/selftests/net/net_helper.sh + mode change 100755 => 100644 tools/testing/selftests/net/setup_loopback.sh + +diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh +old mode 100755 +new mode 100644 +diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh +old mode 100755 +new mode 100644 +-- +2.43.0 + diff --git a/queue-6.1/series b/queue-6.1/series index 19dec4ab650..f5cb2e407c6 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -662,3 +662,25 @@ fsdax-dax_unshare_iter-should-return-a-valid-length.patch clk-imx6ul-fix-failed-to-get-parent-error.patch fsdax-unshare-zero-destination-if-srcmap-is-hole-or-unwritten.patch unicode-don-t-special-case-ignorable-code-points.patch +net-ethernet-cortina-drop-tso-support.patch +tracing-remove-precision-vsnprintf-check-from-print-.patch +alsa-hda-realtek-cs35l41-fix-order-and-duplicates-in.patch +alsa-hda-realtek-cs35l41-fix-device-id-model-name.patch +drm-crtc-fix-uninitialized-variable-use-even-harder.patch +tracing-have-saved_cmdlines-arrays-all-in-one-alloca.patch +bootconfig-fix-the-kerneldoc-of-_xbc_exit.patch +perf-lock-dynamically-allocate-lockhash_table.patch +perf-sched-avoid-large-stack-allocations.patch +perf-sched-move-start_work_mutex-and-work_done_wait_.patch +perf-sched-fix-memory-leak-in-perf_sched__map.patch +perf-sched-move-curr_thread-initialization-to-perf_s.patch +perf-sched-move-curr_pid-and-cpu_last_switched-initi.patch +libsubcmd-don-t-free-the-usage-string.patch +selftests-net-remove-executable-bits-from-library-sc.patch +cpufreq-amd-pstate-ut-convert-nominal_freq-to-khz-du.patch +bluetooth-fix-usage-of-__hci_cmd_sync_status.patch 
+fs-ntfs3-do-not-call-file_modified-if-collapse-range.patch +fs-ntfs3-fix-sparse-warning-in-ni_fiemap.patch +fs-ntfs3-refactor-enum_rstbl-to-suppress-static-chec.patch +virtio_console-fix-misc-probe-bugs.patch +input-synaptics-rmi4-fix-uaf-of-irq-domain-on-driver.patch diff --git a/queue-6.1/tracing-have-saved_cmdlines-arrays-all-in-one-alloca.patch b/queue-6.1/tracing-have-saved_cmdlines-arrays-all-in-one-alloca.patch new file mode 100644 index 00000000000..712fdfbaa1c --- /dev/null +++ b/queue-6.1/tracing-have-saved_cmdlines-arrays-all-in-one-alloca.patch @@ -0,0 +1,104 @@ +From 6a44933800f992ea15698036a559aae660bdb501 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 20 Feb 2024 09:06:14 -0500 +Subject: tracing: Have saved_cmdlines arrays all in one allocation + +From: Steven Rostedt (Google) + +[ Upstream commit 0b18c852cc6fb8284ac0ab97e3e840974a6a8a64 ] + +The saved_cmdlines have three arrays for mapping PIDs to COMMs: + + - map_pid_to_cmdline[] + - map_cmdline_to_pid[] + - saved_cmdlines + +The map_pid_to_cmdline[] is PID_MAX_DEFAULT in size and holds the index +into the other arrays. The map_cmdline_to_pid[] is a mapping back to the +full pid as it can be larger than PID_MAX_DEFAULT. And the +saved_cmdlines[] just holds the COMMs associated to the pids. + +Currently the map_pid_to_cmdline[] and saved_cmdlines[] are allocated +together (in reality the saved_cmdlines is just in the memory of the +rounding of the allocation of the structure as it is always allocated in +powers of two). The map_cmdline_to_pid[] array is allocated separately. + +Since the rounding to a power of two is rather large (it allows for 8000 +elements in saved_cmdlines), also include the map_cmdline_to_pid[] array. +(This drops it to 6000 by default, which is still plenty for most use +cases). This saves even more memory as the map_cmdline_to_pid[] array +doesn't need to be allocated. 
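A rough model of the single-allocation layout described above, with map_cmdline_to_pid[] placed directly after the saved_cmdlines[] text area, so each cmdline slot now costs TASK_COMM_LEN plus sizeof(unsigned) bytes. This is a simplified sketch, not the kernel structure itself:

    /* One allocation, rounded up to a power-of-two number of pages:
     *
     *   | struct saved_cmdlines_buffer | saved_cmdlines            | map_cmdline_to_pid           |
     *   | (incl. map_pid_to_cmdline[]) | val * TASK_COMM_LEN bytes | val * sizeof(unsigned) bytes |
     *
     * val = (size - sizeof(*s)) / (TASK_COMM_LEN + sizeof(unsigned));
     * s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN];
     */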
+ +Link: https://lore.kernel.org/linux-trace-kernel/20240212174011.068211d9@gandalf.local.home/ +Link: https://lore.kernel.org/linux-trace-kernel/20240220140703.182330529@goodmis.org + +Cc: Mark Rutland +Cc: Mathieu Desnoyers +Cc: Andrew Morton +Cc: Tim Chen +Cc: Vincent Donnefort +Cc: Sven Schnelle +Cc: Mete Durlu +Fixes: 44dc5c41b5b1 ("tracing: Fix wasted memory in saved_cmdlines logic") +Acked-by: Masami Hiramatsu (Google) +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + kernel/trace/trace.c | 18 ++++++++---------- + 1 file changed, 8 insertions(+), 10 deletions(-) + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c9b52e920b8f3..96749a6cf1117 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -2254,6 +2254,10 @@ struct saved_cmdlines_buffer { + }; + static struct saved_cmdlines_buffer *savedcmd; + ++/* Holds the size of a cmdline and pid element */ ++#define SAVED_CMDLINE_MAP_ELEMENT_SIZE(s) \ ++ (TASK_COMM_LEN + sizeof((s)->map_cmdline_to_pid[0])) ++ + static inline char *get_saved_cmdlines(int idx) + { + return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; +@@ -2268,7 +2272,6 @@ static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) + { + int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN); + +- kfree(s->map_cmdline_to_pid); + kmemleak_free(s); + free_pages((unsigned long)s, order); + } +@@ -2281,7 +2284,7 @@ static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val) + int order; + + /* Figure out how much is needed to hold the given number of cmdlines */ +- orig_size = sizeof(*s) + val * TASK_COMM_LEN; ++ orig_size = sizeof(*s) + val * SAVED_CMDLINE_MAP_ELEMENT_SIZE(s); + order = get_order(orig_size); + size = 1 << (order + PAGE_SHIFT); + page = alloc_pages(GFP_KERNEL, order); +@@ -2293,16 +2296,11 @@ static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val) + memset(s, 0, sizeof(*s)); + + /* Round up to actual allocation */ +- val = (size - sizeof(*s)) / TASK_COMM_LEN; ++ val = (size - sizeof(*s)) / SAVED_CMDLINE_MAP_ELEMENT_SIZE(s); + s->cmdline_num = val; + +- s->map_cmdline_to_pid = kmalloc_array(val, +- sizeof(*s->map_cmdline_to_pid), +- GFP_KERNEL); +- if (!s->map_cmdline_to_pid) { +- free_saved_cmdlines_buffer(s); +- return NULL; +- } ++ /* Place map_cmdline_to_pid array right after saved_cmdlines */ ++ s->map_cmdline_to_pid = (unsigned *)&s->saved_cmdlines[val * TASK_COMM_LEN]; + + s->cmdline_idx = 0; + memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, +-- +2.43.0 + diff --git a/queue-6.1/tracing-remove-precision-vsnprintf-check-from-print-.patch b/queue-6.1/tracing-remove-precision-vsnprintf-check-from-print-.patch new file mode 100644 index 00000000000..4ea848b4c0f --- /dev/null +++ b/queue-6.1/tracing-remove-precision-vsnprintf-check-from-print-.patch @@ -0,0 +1,69 @@ +From e3b8c436a1a1f4e47e8580da06be7c4f1654b05c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 4 Mar 2024 17:43:41 -0500 +Subject: tracing: Remove precision vsnprintf() check from print event + +From: Steven Rostedt (Google) + +[ Upstream commit 5efd3e2aef91d2d812290dcb25b2058e6f3f532c ] + +This reverts 60be76eeabb3d ("tracing: Add size check when printing +trace_marker output"). The only reason the precision check was added +was because of a bug that miscalculated the write size of the string into +the ring buffer and it truncated it removing the terminating nul byte. On +reading the trace it crashed the kernel. 
But this was due to the bug in +the code that happened during development and should never happen in +practice. If anything, the precision can hide bugs where the string in the +ring buffer isn't nul terminated and it will not be checked. + +Link: https://lore.kernel.org/all/C7E7AF1A-D30F-4D18-B8E5-AF1EF58004F5@linux.ibm.com/ +Link: https://lore.kernel.org/linux-trace-kernel/20240227125706.04279ac2@gandalf.local.home +Link: https://lore.kernel.org/all/20240302111244.3a1674be@gandalf.local.home/ +Link: https://lore.kernel.org/linux-trace-kernel/20240304174341.2a561d9f@gandalf.local.home + +Cc: Masami Hiramatsu +Cc: Linus Torvalds +Fixes: 60be76eeabb3d ("tracing: Add size check when printing trace_marker output") +Reported-by: Sachin Sant +Tested-by: Sachin Sant +Reviewed-by: Mathieu Desnoyers +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Sasha Levin +--- + kernel/trace/trace_output.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c +index bf1965b180992..5cd4fb6563068 100644 +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c +@@ -1445,12 +1445,11 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter, + { + struct print_entry *field; + struct trace_seq *s = &iter->seq; +- int max = iter->ent_size - offsetof(struct print_entry, buf); + + trace_assign_type(field, iter->ent); + + seq_print_ip_sym(s, field->ip, flags); +- trace_seq_printf(s, ": %.*s", max, field->buf); ++ trace_seq_printf(s, ": %s", field->buf); + + return trace_handle_return(s); + } +@@ -1459,11 +1458,10 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, + struct trace_event *event) + { + struct print_entry *field; +- int max = iter->ent_size - offsetof(struct print_entry, buf); + + trace_assign_type(field, iter->ent); + +- trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf); ++ trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf); + + return trace_handle_return(&iter->seq); + } +-- +2.43.0 + diff --git a/queue-6.1/virtio_console-fix-misc-probe-bugs.patch b/queue-6.1/virtio_console-fix-misc-probe-bugs.patch new file mode 100644 index 00000000000..d340cdca085 --- /dev/null +++ b/queue-6.1/virtio_console-fix-misc-probe-bugs.patch @@ -0,0 +1,71 @@ +From f7fdbe56771c8d6d4763e20887cf33be91727582 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 16 Sep 2024 14:16:44 -0400 +Subject: virtio_console: fix misc probe bugs + +From: Michael S. Tsirkin + +[ Upstream commit b9efbe2b8f0177fa97bfab290d60858900aa196b ] + +This fixes the following issue discovered by code review: + +after vqs have been created, a buggy device can send an interrupt. + +A control vq callback will then try to schedule control_work which has +not been initialized yet. Similarly for config interrupt. Further, in +and out vq callbacks invoke find_port_by_vq which attempts to take +ports_lock which also has not been initialized. + +To fix, init all locks and work before creating vqs. + +Message-ID: +Fixes: 17634ba25544 ("virtio: console: Add a new MULTIPORT feature, support for generic ports") +Signed-off-by: Michael S. 
Tsirkin +Signed-off-by: Sasha Levin +--- + drivers/char/virtio_console.c | 18 ++++++++++-------- + 1 file changed, 10 insertions(+), 8 deletions(-) + +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 9fa3c76a267f5..899036ce3802c 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -2055,25 +2055,27 @@ static int virtcons_probe(struct virtio_device *vdev) + multiport = true; + } + +- err = init_vqs(portdev); +- if (err < 0) { +- dev_err(&vdev->dev, "Error %d initializing vqs\n", err); +- goto free_chrdev; +- } +- + spin_lock_init(&portdev->ports_lock); + INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); + +- virtio_device_ready(portdev->vdev); +- + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + + if (multiport) { + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); ++ } + ++ err = init_vqs(portdev); ++ if (err < 0) { ++ dev_err(&vdev->dev, "Error %d initializing vqs\n", err); ++ goto free_chrdev; ++ } ++ ++ virtio_device_ready(portdev->vdev); ++ ++ if (multiport) { + err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + if (err < 0) { + dev_err(&vdev->dev, +-- +2.43.0 +
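To make the ordering rule in the virtio_console fix concrete: everything an interrupt or config callback might touch is initialized before the virtqueues exist, because a buggy device may raise interrupts as soon as they do. The following probe-style sketch uses the identifiers from the quoted diff but is illustrative only, not the actual driver code:

    static int example_console_probe(struct ports_device *portdev)
    {
            int err;

            /* Callbacks use these, so they must be valid before any vq exists. */
            spin_lock_init(&portdev->ports_lock);
            INIT_LIST_HEAD(&portdev->ports);
            INIT_WORK(&portdev->config_work, &config_work_handler);
            INIT_WORK(&portdev->control_work, &control_work_handler);

            /* Only now create the vqs; from this point a (buggy) device may
             * trigger the control and config callbacks at any time. */
            err = init_vqs(portdev);
            if (err < 0)
                    return err;

            /* Finally tell the device it can be driven. */
            virtio_device_ready(portdev->vdev);
            return 0;
    }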