--- /dev/null
+From 1b151e2435fc3a9b10c8946c6aebe9f3e1938c55 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Mon, 14 Aug 2023 15:41:00 +0100
+Subject: block: Remove special-casing of compound pages
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 1b151e2435fc3a9b10c8946c6aebe9f3e1938c55 upstream.
+
+The special casing was originally added in pre-git history; reproducing
+the commit log here:
+
+> commit a318a92567d77
+> Author: Andrew Morton <akpm@osdl.org>
+> Date:   Sun Sep 21 01:42:22 2003 -0700
+>
+>     [PATCH] Speed up direct-io hugetlbpage handling
+>
+>     This patch short-circuits all the direct-io page dirtying logic for
+>     higher-order pages.  Without this, we pointlessly bounce BIOs up to
+>     keventd all the time.
+
+In the last twenty years, compound pages have come to be used for more
+than just hugetlb. Rewrite these functions to operate on folios instead
+of pages and remove the special case for hugetlbfs; I don't think
+it's needed any more (and if it is, we can put it back in as a call
+to folio_test_hugetlb()).
+
+This was found by inspection; as far as I can tell, this bug can lead
+to pages used as the destination of a direct I/O read not being marked
+as dirty. If those pages are then reclaimed by the MM without being
+dirtied for some other reason, they won't be written out. Then when
+they're faulted back in, they will not contain the data they should.
+It'll take a pretty unusual setup to produce this problem with several
+races all going the wrong way.
+
+This problem predates the folio work; it could for example have been
+triggered by mmapping a THP in tmpfs and using that as the target of an
+O_DIRECT read.
+
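+A minimal sketch of such a reproducer, for illustration only (the paths,
+the 2MB mapping size and the madvise() call are assumptions, and error
+handling is omitted):
+
+	#define _GNU_SOURCE		/* for O_DIRECT */
+	#include <fcntl.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		size_t len = 2 * 1024 * 1024;	/* one PMD-sized THP */
+		/* buffer file on tmpfs; MADV_HUGEPAGE requests a THP */
+		int mfd = open("/dev/shm/thp-buf", O_RDWR | O_CREAT, 0600);
+		/* data file on a disk filesystem that supports O_DIRECT */
+		int dfd = open("/var/tmp/data", O_RDONLY | O_DIRECT);
+		void *buf;
+
+		ftruncate(mfd, len);
+		buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+			   mfd, 0);
+		madvise(buf, len, MADV_HUGEPAGE);
+
+		/*
+		 * O_DIRECT read targeting the THP: before this fix, the
+		 * compound destination pages were never marked dirty, so
+		 * reclaim could drop the freshly read data instead of
+		 * writing it back.
+		 */
+		read(dfd, buf, len);
+		return 0;
+	}
+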
+Fixes: 800d8c63b2e98 ("shmem: add huge pages support")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bio.c | 38 +++++++++++++++++++++++---------------
+ 1 file changed, 23 insertions(+), 15 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1109,13 +1109,22 @@ bool bio_add_folio(struct bio *bio, stru
+
+ void __bio_release_pages(struct bio *bio, bool mark_dirty)
+ {
+-	struct bvec_iter_all iter_all;
+-	struct bio_vec *bvec;
++	struct folio_iter fi;
+
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (mark_dirty && !PageCompound(bvec->bv_page))
+-			set_page_dirty_lock(bvec->bv_page);
+-		put_page(bvec->bv_page);
++	bio_for_each_folio_all(fi, bio) {
++		struct page *page;
++		size_t done = 0;
++
++		if (mark_dirty) {
++			folio_lock(fi.folio);
++			folio_mark_dirty(fi.folio);
++			folio_unlock(fi.folio);
++		}
++		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
++		do {
++			put_page(page++);
++			done += PAGE_SIZE;
++		} while (done < fi.length);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(__bio_release_pages);
+@@ -1414,12 +1423,12 @@ EXPORT_SYMBOL(bio_free_pages);
+ */
+ void bio_set_pages_dirty(struct bio *bio)
+ {
+-	struct bio_vec *bvec;
+-	struct bvec_iter_all iter_all;
++	struct folio_iter fi;
+
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (!PageCompound(bvec->bv_page))
+-			set_page_dirty_lock(bvec->bv_page);
++	bio_for_each_folio_all(fi, bio) {
++		folio_lock(fi.folio);
++		folio_mark_dirty(fi.folio);
++		folio_unlock(fi.folio);
+ 	}
+ }
+
+@@ -1462,12 +1471,11 @@ static void bio_dirty_fn(struct work_str
+
+ void bio_check_pages_dirty(struct bio *bio)
+ {
+-	struct bio_vec *bvec;
++	struct folio_iter fi;
+ 	unsigned long flags;
+-	struct bvec_iter_all iter_all;
+
+-	bio_for_each_segment_all(bvec, bio, iter_all) {
+-		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
++	bio_for_each_folio_all(fi, bio) {
++		if (!folio_test_dirty(fi.folio))
+ 			goto defer;
+ 	}
+