mm,fs: Remove aops->readpage
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Fri, 29 Apr 2022 15:53:28 +0000 (11:53 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 9 May 2022 20:28:36 +0000 (16:28 -0400)
With all implementations of aops->readpage converted to aops->read_folio,
we can stop checking whether it's set and remove the member from aops.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
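
For reference, a minimal sketch of the per-filesystem conversion this
series performed before the member could be removed (the foofs_* names
are hypothetical; only the aop signatures are taken from this patch):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical read implementation shared by both variants. */
static int foofs_do_read(struct file *file, struct folio *folio);

/* Old style; no longer registrable, since the member is gone. */
static int foofs_readpage(struct file *file, struct page *page)
{
	return foofs_do_read(file, page_folio(page));
}

/* New style: the aop receives the struct folio directly. */
static int foofs_read_folio(struct file *file, struct folio *folio)
{
	return foofs_do_read(file, folio);
}

static const struct address_space_operations foofs_aops = {
	.read_folio	= foofs_read_folio,
};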
fs/btrfs/file.c
fs/buffer.c
fs/ceph/addr.c
include/linux/fs.h
kernel/events/uprobes.c
mm/filemap.c
mm/memory.c
mm/readahead.c
mm/shmem.c
mm/swapfile.c

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 373df5ebaf8d18c522e3adf8df51af352688d57e..57fba5abb05990d2ba3b56d088e42e47108254b7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2402,7 +2402,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct address_space *mapping = filp->f_mapping;
 
-       if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
+       if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
 
        file_accessed(filp);
diff --git a/fs/buffer.c b/fs/buffer.c
index ec0c52c8848e2426bd2b463fb0b5d42d8b432e0a..786ef5b98c807aaa1e26e0473d99f6f2da196627 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2827,10 +2827,7 @@ int nobh_truncate_page(struct address_space *mapping,
 
        /* Ok, it's mapped. Make sure it's up-to-date */
        if (!folio_test_uptodate(folio)) {
-               if (mapping->a_ops->read_folio)
-                       err = mapping->a_ops->read_folio(NULL, folio);
-               else
-                       err = mapping->a_ops->readpage(NULL, &folio->page);
+               err = mapping->a_ops->read_folio(NULL, folio);
                if (err) {
                        folio_put(folio);
                        goto out;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index be3e47784f08da25bc7521e84e816eb61c791992..e040b92bb17cd24da87d988f26d224982940b015 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct address_space *mapping = file->f_mapping;
 
-       if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
+       if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ceph_vmops;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5ad942183a2c5d3b58c7f11a687debbff849e846..f812f5aa07ddb6897b4601581f147f70826bf13e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -262,7 +262,7 @@ struct iattr {
  *                     trying again.  The aop will be taking reasonable
  *                     precautions not to livelock.  If the caller held a page
  *                     reference, it should drop it before retrying.  Returned
- *                     by readpage().
+ *                     by read_folio().
  *
  * address_space_operation functions return these large constants to indicate
  * special semantics to the caller.  These are much larger than the bytes in a
@@ -335,7 +335,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
-       int (*readpage)(struct file *, struct page *);
        int (*read_folio)(struct file *, struct folio *);
 
        /* Write back some dirty pages from this mapping. */
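
The AOP_TRUNCATED_PAGE contract documented in the hunk above can be
illustrated with a caller-side sketch (read_one_folio() is a
hypothetical helper, not part of this patch; folio lookup and locking
are elided):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int read_one_folio(struct file *file, struct address_space *mapping,
			  struct folio *folio)
{
	int err = mapping->a_ops->read_folio(file, folio);

	if (err == AOP_TRUNCATED_PAGE) {
		/* Truncation raced with the read: drop our reference
		 * and tell the caller to look the folio up again. */
		folio_put(folio);
		return -EAGAIN;
	}
	return err;
}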
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2c7815d20038fa26e6372802289200b18f8dd0f9..a9bc3c98f76a1bb1d0c1f174c5a4bb47d8fa1a94 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -787,10 +787,10 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
        struct page *page;
        /*
         * Ensure that the page that has the original instruction is populated
-        * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
+        * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
         * see uprobe_register().
         */
-       if (mapping->a_ops->read_folio || mapping->a_ops->readpage)
+       if (mapping->a_ops->read_folio)
                page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
        else
                page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
@@ -1144,7 +1144,6 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
 
        /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
        if (!inode->i_mapping->a_ops->read_folio &&
-           !inode->i_mapping->a_ops->readpage &&
            !shmem_mapping(inode->i_mapping))
                return -EIO;
        /* Racy, just to catch the obvious mistakes */
diff --git a/mm/filemap.c b/mm/filemap.c
index 96e3d7ffd98e196b1e6252845522ea23bab0a639..079f8cca7959d7ef3b55cd41518fc7b72c33043c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2414,15 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
 
        /*
         * A previous I/O error may have been due to temporary failures,
-        * eg. multipath errors.  PG_error will be set again if readpage
+        * eg. multipath errors.  PG_error will be set again if read_folio
         * fails.
         */
        folio_clear_error(folio);
        /* Start the actual read. The read will unlock the page. */
-       if (mapping->a_ops->read_folio)
-               error = mapping->a_ops->read_folio(file, folio);
-       else
-               error = mapping->a_ops->readpage(file, &folio->page);
+       error = mapping->a_ops->read_folio(file, folio);
        if (error)
                return error;
 
@@ -2639,7 +2636,7 @@ err:
  * @already_read: Number of bytes already read by the caller.
  *
  * Copies data from the page cache.  If the data is not currently present,
- * uses the readahead and readpage address_space operations to fetch it.
+ * uses the readahead and read_folio address_space operations to fetch it.
  *
  * Return: Total number of bytes copied, including those already read by
  * the caller.  If an error happens before any bytes are copied, returns
@@ -3450,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct address_space *mapping = file->f_mapping;
 
-       if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
+       if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
@@ -3508,10 +3505,8 @@ repeat:
 filler:
                if (filler)
                        err = filler(data, &folio->page);
-               else if (mapping->a_ops->read_folio)
-                       err = mapping->a_ops->read_folio(data, folio);
                else
-                       err = mapping->a_ops->readpage(data, &folio->page);
+                       err = mapping->a_ops->read_folio(data, folio);
 
                if (err < 0) {
                        folio_put(folio);
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d93fa34993693725aac4350febf07d..2a12028a374997b2b812fac3847abb0810493b44 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                dump_page(page, "bad pte");
        pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
                 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
-       pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
+       pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
                 vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->fault : NULL,
                 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
-                mapping ? mapping->a_ops->readpage : NULL);
+                mapping ? mapping->a_ops->read_folio : NULL);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
diff --git a/mm/readahead.c b/mm/readahead.c
index 76024c20a5a5b4401683157a8e916b864fd1b2cd..39983a3a93f04ef2cfd34b4d2273c21a72e47e70 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -170,12 +170,9 @@ static void read_pages(struct readahead_control *rac)
                        }
                        folio_unlock(folio);
                }
-       } else if (aops->read_folio) {
-               while ((folio = readahead_folio(rac)) != NULL)
-                       aops->read_folio(rac->file, folio);
        } else {
                while ((folio = readahead_folio(rac)) != NULL)
-                       aops->readpage(rac->file, &folio->page);
+                       aops->read_folio(rac->file, folio);
        }
 
        blk_finish_plug(&plug);
@@ -256,8 +253,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
        }
 
        /*
-        * Now start the IO.  We ignore I/O errors - if the page is not
-        * uptodate then the caller will launch readpage again, and
+        * Now start the IO.  We ignore I/O errors - if the folio is not
+        * uptodate then the caller will launch read_folio again, and
         * will then handle the error.
         */
        read_pages(ractl);
@@ -305,8 +302,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages, index;
 
-       if (unlikely(!mapping->a_ops->read_folio &&
-                    !mapping->a_ops->readpage && !mapping->a_ops->readahead))
+       if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
                return;
 
        /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 0f557a512171f7d843f69916a547b11a94303bab..f3e8de8ff75c0f9ef8dfe5dd24c7fcd17f59c3e2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
  *
  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
  * with any new page allocations done using the specified allocation flags.
- * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * But read_cache_page_gfp() uses the ->read_folio() method: which does not
  * suit tmpfs, since it may have pages in swapcache, and needs to find those
  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
  *
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7c19098b8b45fdde8247b14da19b417e801fd702..ecd45bdbad9bd761afa5a78487dfa9a66babdd97 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        /*
         * Read the swap header.
         */
-       if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
+       if (!mapping->a_ops->read_folio) {
                error = -EINVAL;
                goto bad_swap_unlock_inode;
        }