From: Greg Kroah-Hartman
Date: Wed, 22 Nov 2023 21:36:47 +0000 (+0000)
Subject: 5.4-stable patches
X-Git-Tag: v4.14.331~102
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=aee51d0f5d10b256e75c66d7dd3a448a66d3aaf0;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
      btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
      pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
      pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
---

diff --git a/queue-5.4/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch b/queue-5.4/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
new file mode 100644
index 00000000000..d419bf0ce96
--- /dev/null
+++ b/queue-5.4/btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch
@@ -0,0 +1,38 @@
+From 11aeb97b45ad2e0040cbb2a589bc403152526345 Mon Sep 17 00:00:00 2001
+From: Josef Bacik
+Date: Mon, 18 Sep 2023 14:15:33 -0400
+Subject: btrfs: don't arbitrarily slow down delalloc if we're committing
+
+From: Josef Bacik
+
+commit 11aeb97b45ad2e0040cbb2a589bc403152526345 upstream.
+
+We have a random schedule_timeout() if the current transaction is
+committing, which seems to be a holdover from the original delalloc
+reservation code.
+
+Remove it; we have the proper flushing machinery now and shouldn't be
+relying on random timing to make everything work.  This just induces
+latency for no reason.
+
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Josef Bacik
+Reviewed-by: David Sterba
+Signed-off-by: David Sterba
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/btrfs/delalloc-space.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -324,9 +324,6 @@ int btrfs_delalloc_reserve_metadata(stru
+ 	} else {
+ 		if (current->journal_info)
+ 			flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+-		if (btrfs_transaction_in_commit(fs_info))
+-			schedule_timeout(1);
+ 	}
+ 
+ 	if (delalloc_lock)
diff --git a/queue-5.4/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch b/queue-5.4/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
new file mode 100644
index 00000000000..c9c3ce6e097
--- /dev/null
+++ b/queue-5.4/pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
@@ -0,0 +1,67 @@
+From d08970df1980476f27936e24d452550f3e9e92e1 Mon Sep 17 00:00:00 2001
+From: Brian Geffon
+Date: Fri, 22 Sep 2023 12:07:04 -0400
+Subject: PM: hibernate: Clean up sync_read handling in snapshot_write_next()
+
+From: Brian Geffon
+
+commit d08970df1980476f27936e24d452550f3e9e92e1 upstream.
+
+In snapshot_write_next(), sync_read is set and unset in three different
+spots unnecessarily.  As a result there is a subtle bug where loading
+the first page after the metadata unconditionally sets sync_read to 0.
+If this first PFN was actually a highmem page, then the returned buffer
+will be the global "buffer," and the page needs to be loaded
+synchronously.
+
+That is, I'm not sure we can always assume the following to be safe:
+
+	handle->buffer = get_buffer(&orig_bm, &ca);
+	handle->sync_read = 0;
+
+Because get_buffer() can call get_highmem_page_buffer() which can
+return 'buffer'.
+
+The easiest way to address this is to simply set sync_read before
+snapshot_write_next() returns, based on whether handle->buffer == buffer.
+
+Signed-off-by: Brian Geffon
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/power/snapshot.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2592,8 +2592,6 @@ int snapshot_write_next(struct snapshot_
+ 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ 		return 0;
+ 
+-	handle->sync_read = 1;
+-
+ 	if (!handle->cur) {
+ 		if (!buffer)
+ 			/* This makes the buffer be freed by swsusp_free() */
+@@ -2634,7 +2632,6 @@ int snapshot_write_next(struct snapshot_
+ 		memory_bm_position_reset(&orig_bm);
+ 		restore_pblist = NULL;
+ 		handle->buffer = get_buffer(&orig_bm, &ca);
+-		handle->sync_read = 0;
+ 		if (IS_ERR(handle->buffer))
+ 			return PTR_ERR(handle->buffer);
+ 	}
+@@ -2646,9 +2643,8 @@ int snapshot_write_next(struct snapshot_
+ 		handle->buffer = get_buffer(&orig_bm, &ca);
+ 		if (IS_ERR(handle->buffer))
+ 			return PTR_ERR(handle->buffer);
+-		if (handle->buffer != buffer)
+-			handle->sync_read = 0;
+ 	}
++	handle->sync_read = (handle->buffer == buffer);
+ 	handle->cur++;
+ 	return PAGE_SIZE;
+ }
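
A note on the sync_read change above: the point of the consolidation is that the
caller must treat the read as synchronous exactly when it was handed the shared
bounce buffer, because that buffer is reused for the next page.  The following is
a minimal, self-contained userspace sketch of that invariant, not kernel code;
struct handle_model, shared_bounce and finish_step() are invented names used only
for illustration.

#include <stdbool.h>

/* Hypothetical stand-ins for the kernel objects; names are illustrative only. */
struct handle_model {
	void *buffer;     /* destination handed back to the caller                   */
	bool  sync_read;  /* data must be consumed before the next page is requested */
};

static char shared_bounce[4096];  /* models the global "buffer" used for highmem pages */

/* Models the tail of snapshot_write_next() after the patch: a single assignment
 * covers the metadata page, the highmem-via-bounce case and the normal-page case. */
static void finish_step(struct handle_model *h)
{
	h->sync_read = (h->buffer == shared_bounce);
}

int main(void)
{
	struct handle_model h = { .buffer = shared_bounce };

	finish_step(&h);
	return h.sync_read ? 0 : 1;  /* bounce buffer => synchronous read required */
}
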
diff --git a/queue-5.4/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch b/queue-5.4/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
new file mode 100644
index 00000000000..e60a7df063d
--- /dev/null
+++ b/queue-5.4/pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
@@ -0,0 +1,47 @@
+From f0c7183008b41e92fa676406d87f18773724b48b Mon Sep 17 00:00:00 2001
+From: Brian Geffon
+Date: Thu, 21 Sep 2023 13:00:45 -0400
+Subject: PM: hibernate: Use __get_safe_page() rather than touching the list
+
+From: Brian Geffon
+
+commit f0c7183008b41e92fa676406d87f18773724b48b upstream.
+
+We found at least one situation where the safe pages list was empty and
+get_buffer() would gladly try to use a NULL pointer.
+
+Signed-off-by: Brian Geffon
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/power/snapshot.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2377,8 +2377,9 @@ static void *get_highmem_page_buffer(str
+ 		pbe->copy_page = tmp;
+ 	} else {
+ 		/* Copy of the page will be stored in normal memory */
+-		kaddr = safe_pages_list;
+-		safe_pages_list = safe_pages_list->next;
++		kaddr = __get_safe_page(ca->gfp_mask);
++		if (!kaddr)
++			return ERR_PTR(-ENOMEM);
+ 		pbe->copy_page = virt_to_page(kaddr);
+ 	}
+ 	pbe->next = highmem_pblist;
+@@ -2558,8 +2559,9 @@ static void *get_buffer(struct memory_bi
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	pbe->orig_address = page_address(page);
+-	pbe->address = safe_pages_list;
+-	safe_pages_list = safe_pages_list->next;
++	pbe->address = __get_safe_page(ca->gfp_mask);
++	if (!pbe->address)
++		return ERR_PTR(-ENOMEM);
+ 	pbe->next = restore_pblist;
+ 	restore_pblist = pbe;
+ 	return pbe->address;
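
A note on the pattern used in the hunks above: the fix replaces open-coded popping
of safe_pages_list with __get_safe_page() plus an explicit NULL check whose failure
is reported through ERR_PTR(-ENOMEM).  Below is a minimal userspace sketch of that
error-propagation idiom under stated assumptions: ERR_PTR()/IS_ERR()/PTR_ERR() are
re-implemented here purely for illustration (in the kernel they come from
<linux/err.h>), and get_safe_page_model()/get_buffer_model() are invented names,
not the kernel functions.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the kernel's ERR_PTR()/IS_ERR() pointer encoding. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Models __get_safe_page(): an allocation helper that may legitimately fail. */
static void *get_safe_page_model(void)
{
	return malloc(4096);
}

/* Models the fixed get_buffer(): check the helper's result instead of
 * blindly dereferencing a possibly empty free list. */
static void *get_buffer_model(void)
{
	void *addr = get_safe_page_model();

	if (!addr)
		return ERR_PTR(-ENOMEM);
	return addr;
}

int main(void)
{
	void *buf = get_buffer_model();

	if (IS_ERR(buf)) {
		fprintf(stderr, "allocation failed: %ld\n", PTR_ERR(buf));
		return 1;
	}
	free(buf);
	return 0;
}
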
diff --git a/queue-5.4/series b/queue-5.4/series
index 21ee3c2dccd..e72daa3fcaa 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -84,3 +84,6 @@ parisc-pdc-add-width-field-to-struct-pdc_model.patch
 parisc-power-add-power-soft-off-when-running-on-qemu.patch
 clk-qcom-ipq8074-drop-the-clk_set_rate_parent-flag-from-pll-clocks.patch
 mmc-vub300-fix-an-error-code.patch
+pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
+pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
+btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch