From: Greg Kroah-Hartman
Date: Wed, 22 Nov 2023 21:37:37 +0000 (+0000)
Subject: 5.10-stable patches
X-Git-Tag: v4.14.331~101
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=576d610b699ccf1f7ea058562e4afd2effc7eb22;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
      arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
      btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
      firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
      ima-detect-changes-to-the-backing-overlay-file.patch
      pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
      pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
      rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
---

diff --git a/queue-5.10/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch b/queue-5.10/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
new file mode 100644
index 00000000000..c1826225709
--- /dev/null
+++ b/queue-5.10/arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
@@ -0,0 +1,36 @@
+From 95d97b111e1e184b0c8656137033ed64f2cf21e4 Mon Sep 17 00:00:00 2001
+From: Vignesh Viswanathan
+Date: Mon, 4 Sep 2023 22:55:13 +0530
+Subject: arm64: dts: qcom: ipq6018: Fix hwlock index for SMEM
+
+From: Vignesh Viswanathan
+
+commit 95d97b111e1e184b0c8656137033ed64f2cf21e4 upstream.
+
+SMEM uses lock index 3 of the TCSR Mutex hwlock for allocations
+in SMEM region shared by the Host and FW.
+
+Fix the SMEM hwlock index to 3 for IPQ6018.
+
+Cc: stable@vger.kernel.org
+Fixes: 5bf635621245 ("arm64: dts: ipq6018: Add a few device nodes")
+Signed-off-by: Vignesh Viswanathan
+Acked-by: Konrad Dybcio
+Link: https://lore.kernel.org/r/20230904172516.479866-3-quic_viswanat@quicinc.com
+Signed-off-by: Bjorn Andersson
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm64/boot/dts/qcom/ipq6018.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -175,7 +175,7 @@
+ smem {
+ compatible = "qcom,smem";
+ memory-region = <&smem_region>;
+- hwlocks = <&tcsr_mutex 0>;
++ hwlocks = <&tcsr_mutex 3>;
+ };
+
+ soc: soc {
diff --git a/queue-5.10/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch b/queue-5.10/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
new file mode 100644
index 00000000000..a9234e4efcd
--- /dev/null
+++ b/queue-5.10/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
@@ -0,0 +1,38 @@
+From 11aeb97b45ad2e0040cbb2a589bc403152526345 Mon Sep 17 00:00:00 2001
+From: Josef Bacik
+Date: Mon, 18 Sep 2023 14:15:33 -0400
+Subject: btrfs: don't arbitrarily slow down delalloc if we're committing
+
+From: Josef Bacik
+
+commit 11aeb97b45ad2e0040cbb2a589bc403152526345 upstream.
+
+We have a random schedule_timeout() if the current transaction is
+committing, which seems to be a holdover from the original delalloc
+reservation code.
+
+Remove this, we have the proper flushing stuff, we shouldn't be hoping
+for random timing things to make everything work. This just induces
+latency for no reason.
+
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Josef Bacik
+Reviewed-by: David Sterba
+Signed-off-by: David Sterba
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/btrfs/delalloc-space.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -307,9 +307,6 @@ int btrfs_delalloc_reserve_metadata(stru
+ } else {
+ if (current->journal_info)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+- if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
+ }
+
+ num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
diff --git a/queue-5.10/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch b/queue-5.10/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
new file mode 100644
index 00000000000..61621ade047
--- /dev/null
+++ b/queue-5.10/firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
@@ -0,0 +1,52 @@
+From 3337a6fea25370d3d244ec6bb38c71ee86fcf837 Mon Sep 17 00:00:00 2001
+From: Kathiravan Thirumoorthy
+Date: Mon, 25 Sep 2023 13:59:22 +0530
+Subject: firmware: qcom_scm: use 64-bit calling convention only when client is 64-bit
+
+From: Kathiravan Thirumoorthy
+
+commit 3337a6fea25370d3d244ec6bb38c71ee86fcf837 upstream.
+
+Per the "SMC calling convention specification", the 64-bit calling
+convention can only be used when the client is 64-bit. Whereas the
+32-bit calling convention can be used by either a 32-bit or a 64-bit
+client.
+
+Currently during SCM probe, irrespective of the client, 64-bit calling
+convention is made, which is incorrect and may lead to the undefined
+behaviour when the client is 32-bit. Let's fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: 9a434cee773a ("firmware: qcom_scm: Dynamically support SMCCC and legacy conventions")
+Reviewed-By: Elliot Berman
+Signed-off-by: Kathiravan Thirumoorthy
+Link: https://lore.kernel.org/r/20230925-scm-v3-1-8790dff6a749@quicinc.com
+Signed-off-by: Bjorn Andersson
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/firmware/qcom_scm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -137,6 +137,12 @@ static enum qcom_scm_convention __get_co
+ return qcom_scm_convention;
+
+ /*
++ * Per the "SMC calling convention specification", the 64-bit calling
++ * convention can only be used when the client is 64-bit, otherwise
++ * system will encounter the undefined behaviour.
++ */
++#if IS_ENABLED(CONFIG_ARM64)
++ /*
+ * Device isn't required as there is only one argument - no device
+ * needed to dma_map_single to secure world
+ */
+@@ -156,6 +162,7 @@ static enum qcom_scm_convention __get_co
+ forced = true;
+ goto found;
+ }
++#endif
+
+ probed_convention = SMC_CONVENTION_ARM_32;
+ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
diff --git a/queue-5.10/ima-detect-changes-to-the-backing-overlay-file.patch b/queue-5.10/ima-detect-changes-to-the-backing-overlay-file.patch
new file mode 100644
index 00000000000..ca84e49caee
--- /dev/null
+++ b/queue-5.10/ima-detect-changes-to-the-backing-overlay-file.patch
@@ -0,0 +1,113 @@
+From b836c4d29f2744200b2af41e14bf50758dddc818 Mon Sep 17 00:00:00 2001
+From: Mimi Zohar
+Date: Wed, 18 Oct 2023 14:47:02 -0400
+Subject: ima: detect changes to the backing overlay file
+
+From: Mimi Zohar
+
+commit b836c4d29f2744200b2af41e14bf50758dddc818 upstream.
+
+Commit 18b44bc5a672 ("ovl: Always reevaluate the file signature for
+IMA") forced signature re-evaulation on every file access.
+
+Instead of always re-evaluating the file's integrity, detect a change
+to the backing file, by comparing the cached file metadata with the
+backing file's metadata. Verifying just the i_version has not changed
+is insufficient. In addition save and compare the i_ino and s_dev
+as well.
+
+Reviewed-by: Amir Goldstein
+Tested-by: Eric Snowberg
+Tested-by: Raul E Rangel
+Cc: stable@vger.kernel.org
+Signed-off-by: Mimi Zohar
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/overlayfs/super.c | 2 +-
+ security/integrity/ima/ima_api.c | 5 +++++
+ security/integrity/ima/ima_main.c | 16 +++++++++++++++-
+ security/integrity/integrity.h | 2 ++
+ 4 files changed, 23 insertions(+), 2 deletions(-)
+
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -2028,7 +2028,7 @@ static int ovl_fill_super(struct super_b
+ sb->s_xattr = ovl_xattr_handlers;
+ sb->s_fs_info = ofs;
+ sb->s_flags |= SB_POSIXACL;
+- sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
++ sb->s_iflags |= SB_I_SKIP_SYNC;
+
+ err = -ENOMEM;
+ root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -212,6 +212,7 @@ int ima_collect_measurement(struct integ
+ {
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
++ struct inode *real_inode = d_real_inode(file_dentry(file));
+ const char *filename = file->f_path.dentry->d_name.name;
+ int result = 0;
+ int length;
+@@ -262,6 +263,10 @@ int ima_collect_measurement(struct integ
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
++ if (real_inode != inode) {
++ iint->real_ino = real_inode->i_ino;
++ iint->real_dev = real_inode->i_sb->s_dev;
++ }
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -26,6 +26,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include "ima.h"
+
+@@ -197,7 +198,7 @@ static int process_measurement(struct fi
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+ {
+- struct inode *inode = file_inode(file);
++ struct inode *backing_inode, *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ char *pathbuf = NULL;
+@@ -271,6 +272,19 @@ static int process_measurement(struct fi
+ iint->measured_pcrs = 0;
+ }
+
++ /* Detect and re-evaluate changes made to the backing file. */
++ backing_inode = d_real_inode(file_dentry(file));
++ if (backing_inode != inode &&
++ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
++ if (!IS_I_VERSION(backing_inode) ||
++ backing_inode->i_sb->s_dev != iint->real_dev ||
++ backing_inode->i_ino != iint->real_ino ||
++ !inode_eq_iversion(backing_inode, iint->version)) {
++ iint->flags &= ~IMA_DONE_MASK;
++ iint->measured_pcrs = 0;
++ }
++ }
++
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -131,6 +131,8 @@ struct integrity_iint_cache {
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
++ unsigned long real_ino;
++ dev_t real_dev;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
diff --git a/queue-5.10/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch b/queue-5.10/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
new file mode 100644
index 00000000000..063e070b346
--- /dev/null
+++ b/queue-5.10/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
@@ -0,0 +1,67 @@
+From d08970df1980476f27936e24d452550f3e9e92e1 Mon Sep 17 00:00:00 2001
+From: Brian Geffon
+Date: Fri, 22 Sep 2023 12:07:04 -0400
+Subject: PM: hibernate: Clean up sync_read handling in snapshot_write_next()
+
+From: Brian Geffon
+
+commit d08970df1980476f27936e24d452550f3e9e92e1 upstream.
+
+In snapshot_write_next(), sync_read is set and unset in three different
+spots unnecessiarly. As a result there is a subtle bug where the first
+page after the meta data has been loaded unconditionally sets sync_read
+to 0. If this first PFN was actually a highmem page, then the returned
+buffer will be the global "buffer," and the page needs to be loaded
+synchronously.
+
+That is, I'm not sure we can always assume the following to be safe:
+
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ handle->sync_read = 0;
+
+Because get_buffer() can call get_highmem_page_buffer() which can
+return 'buffer'.
+
+The easiest way to address this is just set sync_read before
+snapshot_write_next() returns if handle->buffer == buffer.
+
+Signed-off-by: Brian Geffon
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/power/snapshot.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2587,8 +2587,6 @@ int snapshot_write_next(struct snapshot_
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ return 0;
+
+- handle->sync_read = 1;
+-
+ if (!handle->cur) {
+ if (!buffer)
+ /* This makes the buffer be freed by swsusp_free() */
+@@ -2624,7 +2622,6 @@ int snapshot_write_next(struct snapshot_
+ memory_bm_position_reset(&orig_bm);
+ restore_pblist = NULL;
+ handle->buffer = get_buffer(&orig_bm, &ca);
+- handle->sync_read = 0;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ }
+@@ -2634,9 +2631,8 @@ int snapshot_write_next(struct snapshot_
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+- if (handle->buffer != buffer)
+- handle->sync_read = 0;
+ }
++ handle->sync_read = (handle->buffer == buffer);
+ handle->cur++;
+ return PAGE_SIZE;
+ }
diff --git a/queue-5.10/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch b/queue-5.10/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
new file mode 100644
index 00000000000..bee277c541e
--- /dev/null
+++ b/queue-5.10/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
@@ -0,0 +1,47 @@
+From f0c7183008b41e92fa676406d87f18773724b48b Mon Sep 17 00:00:00 2001
+From: Brian Geffon
+Date: Thu, 21 Sep 2023 13:00:45 -0400
+Subject: PM: hibernate: Use __get_safe_page() rather than touching the list
+
+From: Brian Geffon
+
+commit f0c7183008b41e92fa676406d87f18773724b48b upstream.
+
+We found at least one situation where the safe pages list was empty and
+get_buffer() would gladly try to use a NULL pointer.
+
+Signed-off-by: Brian Geffon
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/power/snapshot.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2372,8 +2372,9 @@ static void *get_highmem_page_buffer(str
+ pbe->copy_page = tmp;
+ } else {
+ /* Copy of the page will be stored in normal memory */
+- kaddr = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ kaddr = __get_safe_page(ca->gfp_mask);
++ if (!kaddr)
++ return ERR_PTR(-ENOMEM);
+ pbe->copy_page = virt_to_page(kaddr);
+ }
+ pbe->next = highmem_pblist;
+@@ -2553,8 +2554,9 @@ static void *get_buffer(struct memory_bi
+ return ERR_PTR(-ENOMEM);
+ }
+ pbe->orig_address = page_address(page);
+- pbe->address = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ pbe->address = __get_safe_page(ca->gfp_mask);
++ if (!pbe->address)
++ return ERR_PTR(-ENOMEM);
+ pbe->next = restore_pblist;
+ restore_pblist = pbe;
+ return pbe->address;
diff --git a/queue-5.10/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch b/queue-5.10/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
new file mode 100644
index 00000000000..56b371c5da4
--- /dev/null
+++ b/queue-5.10/rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
@@ -0,0 +1,58 @@
+From 5f98fd034ca6fd1ab8c91a3488968a0e9caaabf6 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas
+Date: Sat, 30 Sep 2023 17:46:56 +0000
+Subject: rcu: kmemleak: Ignore kmemleak false positives when RCU-freeing objects
+
+From: Catalin Marinas
+
+commit 5f98fd034ca6fd1ab8c91a3488968a0e9caaabf6 upstream.
+
+Since the actual slab freeing is deferred when calling kvfree_rcu(), so
+is the kmemleak_free() callback informing kmemleak of the object
+deletion. From the perspective of the kvfree_rcu() caller, the object is
+freed and it may remove any references to it. Since kmemleak does not
+scan RCU internal data storing the pointer, it will report such objects
+as leaks during the grace period.
+
+Tell kmemleak to ignore such objects on the kvfree_call_rcu() path. Note
+that the tiny RCU implementation does not have such issue since the
+objects can be tracked from the rcu_ctrlblk structure.
+
+Signed-off-by: Catalin Marinas
+Reported-by: Christoph Paasch
+Closes: https://lore.kernel.org/all/F903A825-F05F-4B77-A2B5-7356282FBA2C@apple.com/
+Cc:
+Tested-by: Christoph Paasch
+Reviewed-by: Paul E. McKenney
+Signed-off-by: Joel Fernandes (Google)
+Signed-off-by: Frederic Weisbecker
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/rcu/tree.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -31,6 +31,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -3547,6 +3548,14 @@ void kvfree_call_rcu(struct rcu_head *he
+
+ WRITE_ONCE(krcp->count, krcp->count + 1);
+
++ /*
++ * The kvfree_rcu() caller considers the pointer freed at this point
++ * and likely removes any references to it. Since the actual slab
++ * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
++ * this object (no scanning or false positives reporting).
++ */
++ kmemleak_ignore(ptr);
++
+ // Set timer to drain after KFREE_DRAIN_JIFFIES.
+ if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
+ !krcp->monitor_todo) {
diff --git a/queue-5.10/series b/queue-5.10/series
index c3d0e127973..2c2d40371e2 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -112,6 +112,13 @@ clk-qcom-ipq6018-drop-the-clk_set_rate_parent-flag-from-pll-clocks.patch
 mmc-vub300-fix-an-error-code.patch
 mmc-sdhci_am654-fix-start-loop-index-for-tap-value-parsing.patch
 pci-aspm-fix-l1-substate-handling-in-aspm_attr_store_common.patch
+arm64-dts-qcom-ipq6018-fix-hwlock-index-for-smem.patch
+pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
+pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
+rcu-kmemleak-ignore-kmemleak-false-positives-when-rcu-freeing-objects.patch
+btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
+firmware-qcom_scm-use-64-bit-calling-convention-only-when-client-is-64-bit.patch
+ima-detect-changes-to-the-backing-overlay-file.patch
 wifi-ath11k-fix-temperature-event-locking.patch
 wifi-ath11k-fix-dfs-radar-event-locking.patch
 wifi-ath11k-fix-htt-pktlog-locking.patch