]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge tag 'erofs-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Jun 2020 03:16:55 +0000 (20:16 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Jun 2020 03:16:55 +0000 (20:16 -0700)
Pull erofs updates from Gao Xiang:
 "The most interesting part is the new mount api conversion, which is
   actually an old patch already pending for several cycles. And the
  others are recent trivial cleanups here.

  Summary:

   - Convert to use the new mount apis

   - Some random cleanup patches"

* tag 'erofs-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: suppress false positive last_block warning
  erofs: convert to use the new mount fs_context api
  erofs: code cleanup by removing ifdef macro surrounding

1  2 
fs/erofs/data.c
fs/erofs/zdata.c

diff --cc fs/erofs/data.c
index d0542151e8c4acab509e60d30d3405bd05f9122e,2812645b361ec73b3a411680d15b202183fb3043..64b56c7df02359f5a6a2b5365c1b5b19c6176637
@@@ -280,32 -280,42 +280,32 @@@ static int erofs_raw_access_readpage(st
        return 0;
  }
  
 -static int erofs_raw_access_readpages(struct file *filp,
 -                                    struct address_space *mapping,
 -                                    struct list_head *pages,
 -                                    unsigned int nr_pages)
 +static void erofs_raw_access_readahead(struct readahead_control *rac)
  {
-       erofs_off_t last_block;
+       erofs_off_t uninitialized_var(last_block);
        struct bio *bio = NULL;
 -      gfp_t gfp = readahead_gfp_mask(mapping);
 -      struct page *page = list_last_entry(pages, struct page, lru);
 -
 -      trace_erofs_readpages(mapping->host, page, nr_pages, true);
 +      struct page *page;
  
 -      for (; nr_pages; --nr_pages) {
 -              page = list_entry(pages->prev, struct page, lru);
 +      trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
 +                      readahead_count(rac), true);
  
 +      while ((page = readahead_page(rac))) {
                prefetchw(&page->flags);
 -              list_del(&page->lru);
  
 -              if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
 -                      bio = erofs_read_raw_page(bio, mapping, page,
 -                                                &last_block, nr_pages, true);
 +              bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
 +                              readahead_count(rac), true);
  
 -                      /* all the page errors are ignored when readahead */
 -                      if (IS_ERR(bio)) {
 -                              pr_err("%s, readahead error at page %lu of nid %llu\n",
 -                                     __func__, page->index,
 -                                     EROFS_I(mapping->host)->nid);
 +              /* all the page errors are ignored when readahead */
 +              if (IS_ERR(bio)) {
 +                      pr_err("%s, readahead error at page %lu of nid %llu\n",
 +                             __func__, page->index,
 +                             EROFS_I(rac->mapping->host)->nid);
  
 -                              bio = NULL;
 -                      }
 +                      bio = NULL;
                }
  
 -              /* pages could still be locked */
                put_page(page);
        }
 -      DBG_BUGON(!list_empty(pages));
  
        /* the rare case (end in gaps) */
        if (bio)
index 187f93b4900e121c12d93f9c987d7ff15f842456,5086b1218aac7f8359afa5cb0aba9aef5aabf00f..be50a4d9d273e838cf596da5bd8560ee017c8c1d
@@@ -1302,26 -1302,31 +1302,26 @@@ static int z_erofs_readpage(struct fil
  static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
                                            unsigned int nr)
  {
-       return nr <= sbi->max_sync_decompress_pages;
+       return nr <= sbi->ctx.max_sync_decompress_pages;
  }
  
 -static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
 -                           struct list_head *pages, unsigned int nr_pages)
 +static void z_erofs_readahead(struct readahead_control *rac)
  {
 -      struct inode *const inode = mapping->host;
 +      struct inode *const inode = rac->mapping->host;
        struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
  
 -      bool sync = should_decompress_synchronously(sbi, nr_pages);
 +      bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
        struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
 -      gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 -      struct page *head = NULL;
 +      struct page *page, *head = NULL;
        LIST_HEAD(pagepool);
  
 -      trace_erofs_readpages(mapping->host, lru_to_page(pages),
 -                            nr_pages, false);
 +      trace_erofs_readpages(inode, readahead_index(rac),
 +                      readahead_count(rac), false);
  
 -      f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
 -
 -      for (; nr_pages; --nr_pages) {
 -              struct page *page = lru_to_page(pages);
 +      f.headoffset = readahead_pos(rac);
  
 +      while ((page = readahead_page(rac))) {
                prefetchw(&page->flags);
 -              list_del(&page->lru);
  
                /*
                 * A pure asynchronous readahead is indicated if