--- /dev/null
+From 11aeb97b45ad2e0040cbb2a589bc403152526345 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Mon, 18 Sep 2023 14:15:33 -0400
+Subject: btrfs: don't arbitrarily slow down delalloc if we're committing
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 11aeb97b45ad2e0040cbb2a589bc403152526345 upstream.
+
+We have a random schedule_timeout() if the current transaction is
+committing, which seems to be a holdover from the original delalloc
+reservation code.
+
+Remove it; we have proper flushing infrastructure in place and should
+not rely on arbitrary timing to make everything work. This only
+induces latency for no reason.
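+
+After the change, the branch touched by the hunk below reduces to the
+limited-flush selection alone (a condensed sketch, not the complete
+function):
+
+	} else {
+		/* Inside a transaction, only limited flushing is allowed. */
+		if (current->journal_info)
+			flush = BTRFS_RESERVE_FLUSH_LIMIT;
+
+		/* No schedule_timeout(1) on a committing transaction anymore. */
+	}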
+
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/delalloc-space.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/btrfs/delalloc-space.c
++++ b/fs/btrfs/delalloc-space.c
+@@ -324,9 +324,6 @@ int btrfs_delalloc_reserve_metadata(stru
+ } else {
+ if (current->journal_info)
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
+-
+- if (btrfs_transaction_in_commit(fs_info))
+- schedule_timeout(1);
+ }
+
+ if (delalloc_lock)
--- /dev/null
+From d08970df1980476f27936e24d452550f3e9e92e1 Mon Sep 17 00:00:00 2001
+From: Brian Geffon <bgeffon@google.com>
+Date: Fri, 22 Sep 2023 12:07:04 -0400
+Subject: PM: hibernate: Clean up sync_read handling in snapshot_write_next()
+
+From: Brian Geffon <bgeffon@google.com>
+
+commit d08970df1980476f27936e24d452550f3e9e92e1 upstream.
+
+In snapshot_write_next(), sync_read is set and unset in three different
+spots unnecessarily. As a result there is a subtle bug where, for the
+first page after the metadata has been loaded, sync_read is
+unconditionally set to 0. If that first PFN was actually a highmem
+page, then the returned buffer will be the global "buffer," and the
+page needs to be loaded synchronously.
+
+That is, I'm not sure we can always assume the following to be safe:
+
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ handle->sync_read = 0;
+
+Because get_buffer() can call get_highmem_page_buffer() which can
+return 'buffer'.
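+
+For context, the highmem path in get_highmem_page_buffer() looks
+roughly like this (a paraphrased sketch of the surrounding code in
+kernel/power/snapshot.c, not part of this patch):
+
+	if (safe_highmem_pages > 0) {
+		/*
+		 * The data is staged in the global "buffer" and copied
+		 * into the highmem page later, so the read into
+		 * "buffer" has to be synchronous.
+		 */
+		kaddr = buffer;
+		...
+		last_highmem_page = page;
+	}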
+
+The easiest way to address this is to just set sync_read before
+snapshot_write_next() returns, based on whether handle->buffer == buffer.
+
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable <stable@vger.kernel.org>
+[ rjw: Subject and changelog edits ]
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/snapshot.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2592,8 +2592,6 @@ int snapshot_write_next(struct snapshot_
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ return 0;
+
+- handle->sync_read = 1;
+-
+ if (!handle->cur) {
+ if (!buffer)
+ /* This makes the buffer be freed by swsusp_free() */
+@@ -2634,7 +2632,6 @@ int snapshot_write_next(struct snapshot_
+ memory_bm_position_reset(&orig_bm);
+ restore_pblist = NULL;
+ handle->buffer = get_buffer(&orig_bm, &ca);
+- handle->sync_read = 0;
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+ }
+@@ -2646,9 +2643,8 @@ int snapshot_write_next(struct snapshot_
+ handle->buffer = get_buffer(&orig_bm, &ca);
+ if (IS_ERR(handle->buffer))
+ return PTR_ERR(handle->buffer);
+- if (handle->buffer != buffer)
+- handle->sync_read = 0;
+ }
++ handle->sync_read = (handle->buffer == buffer);
+ handle->cur++;
+ return PAGE_SIZE;
+ }
--- /dev/null
+From f0c7183008b41e92fa676406d87f18773724b48b Mon Sep 17 00:00:00 2001
+From: Brian Geffon <bgeffon@google.com>
+Date: Thu, 21 Sep 2023 13:00:45 -0400
+Subject: PM: hibernate: Use __get_safe_page() rather than touching the list
+
+From: Brian Geffon <bgeffon@google.com>
+
+commit f0c7183008b41e92fa676406d87f18773724b48b upstream.
+
+We found at least one situation where the safe pages list was empty and
+get_buffer() would gladly try to use a NULL pointer.
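+
+For reference, __get_safe_page() copes with an empty list by falling
+back to allocating a fresh safe page, and it can return NULL, which
+the callers changed below now check (a paraphrased sketch of the
+existing helper in kernel/power/snapshot.c, not part of this patch):
+
+	static void *__get_safe_page(gfp_t gfp_mask)
+	{
+		if (safe_pages_list) {
+			void *ret = safe_pages_list;
+
+			safe_pages_list = safe_pages_list->next;
+			memset(ret, 0, PAGE_SIZE);
+			return ret;
+		}
+		/* The list is empty: try to allocate a new safe page. */
+		return get_image_page(gfp_mask, PG_SAFE);
+	}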
+
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Fixes: 8357376d3df2 ("[PATCH] swsusp: Improve handling of highmem")
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/power/snapshot.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -2377,8 +2377,9 @@ static void *get_highmem_page_buffer(str
+ pbe->copy_page = tmp;
+ } else {
+ /* Copy of the page will be stored in normal memory */
+- kaddr = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ kaddr = __get_safe_page(ca->gfp_mask);
++ if (!kaddr)
++ return ERR_PTR(-ENOMEM);
+ pbe->copy_page = virt_to_page(kaddr);
+ }
+ pbe->next = highmem_pblist;
+@@ -2558,8 +2559,9 @@ static void *get_buffer(struct memory_bi
+ return ERR_PTR(-ENOMEM);
+ }
+ pbe->orig_address = page_address(page);
+- pbe->address = safe_pages_list;
+- safe_pages_list = safe_pages_list->next;
++ pbe->address = __get_safe_page(ca->gfp_mask);
++ if (!pbe->address)
++ return ERR_PTR(-ENOMEM);
+ pbe->next = restore_pblist;
+ restore_pblist = pbe;
+ return pbe->address;
parisc-power-add-power-soft-off-when-running-on-qemu.patch
clk-qcom-ipq8074-drop-the-clk_set_rate_parent-flag-from-pll-clocks.patch
mmc-vub300-fix-an-error-code.patch
+pm-hibernate-use-__get_safe_page-rather-than-touching-the-list.patch
+pm-hibernate-clean-up-sync_read-handling-in-snapshot_write_next.patch
+btrfs-don-t-arbitrarily-slow-down-delalloc-if-we-re-committing.patch