git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
Revert "readahead: properly shorten readahead when falling back to do_page_cache_ra()"
authorJan Kara <jack@suse.cz>
Tue, 26 Nov 2024 14:52:08 +0000 (15:52 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 14 Dec 2024 19:03:26 +0000 (20:03 +0100)
commit a220d6b95b1ae12c7626283d7609f0a1438e6437 upstream.

This reverts commit 7c877586da3178974a8a94577b6045a48377ff25.

Anders and Philippe have reported that recent kernels occasionally hang
when used with NFS in readahead code.  The problem has been bisected to
7c877586da3 ("readahead: properly shorten readahead when falling back to
do_page_cache_ra()").  The cause of the problem is that ra->size can be
shrunk by read_pages() call and subsequently we end up calling
do_page_cache_ra() with negative (read huge positive) number of pages.
Let's revert 7c877586da3 for now until we can find a proper way for the
logic in read_pages() and page_cache_ra_order() to coexist.  This can
lead to reduced readahead throughput due to readahead window confusion but
that's better than outright hangs.

Link: https://lkml.kernel.org/r/20241126145208.985-1-jack@suse.cz
Fixes: 7c877586da31 ("readahead: properly shorten readahead when falling back to do_page_cache_ra()")
Reported-by: Anders Blomdell <anders.blomdell@gmail.com>
Reported-by: Philippe Troin <phil@fifi.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Tested-by: Philippe Troin <phil@fifi.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/readahead.c

index 3dc6c7a128dd35bfeda20ecb165a852f4b46eeaf..99fdb2b5b568623c5e05f3d7bebaebb1751c2cfd 100644 (file)
@@ -453,8 +453,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
                struct file_ra_state *ra, unsigned int new_order)
 {
        struct address_space *mapping = ractl->mapping;
-       pgoff_t start = readahead_index(ractl);
-       pgoff_t index = start;
+       pgoff_t index = readahead_index(ractl);
        unsigned int min_order = mapping_min_folio_order(mapping);
        pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
        pgoff_t mark = index + ra->size - ra->async_size;
@@ -517,7 +516,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
        if (!err)
                return;
 fallback:
-       do_page_cache_ra(ractl, ra->size - (index - start), ra->async_size);
+       do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
 static unsigned long ractl_max_pages(struct readahead_control *ractl,