mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
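
The rename is purely mechanical: the PAGE_CACHE_* names were meant to let the
page-cache granularity diverge from the VM page size, but that distinction was
never realized, so the macros had always been one-to-one aliases. From
include/linux/pagemap.h before this change:

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

Every hunk below therefore substitutes the underlying name and changes no
generated code.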
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614acf3f8ea77a81c484032e3a9d6f8026d2b1..4c7250ab54e6166610e3c34238a99cd3049c2475 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
                 */
                io->ci_lockreq = CILR_NEVER;
 
-               pos = vmpage->index << PAGE_CACHE_SHIFT;
+               pos = vmpage->index << PAGE_SHIFT;
 
                /* Create a temp IO to serve write. */
-               result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+               result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
                if (result == 0) {
                        cio->cui_fd = LUSTRE_FPRIVATE(file);
                        cio->cui_iter = NULL;
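
The first hunk is the index-to-offset pattern: shifting a page-cache index by
PAGE_SHIFT gives the file byte position the page backs. A minimal sketch of
the arithmetic, assuming the usual 4 KiB pages (PAGE_SHIFT == 12); the loff_t
cast is illustrative, not part of this patch:

static inline loff_t index_to_pos(pgoff_t index)
{
	/* index 3 -> byte offset 3 << 12 == 12288; the page then
	 * covers bytes [pos, pos + PAGE_SIZE - 1] of the file.
	 * Casting before the shift avoids 32-bit overflow.
	 */
	return (loff_t)index << PAGE_SHIFT;
}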
@@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
                }
                if (rc != 1)
                        unlock_page(vmpage);
-               page_cache_release(vmpage);
+               put_page(vmpage);
        } else {
                which = RA_STAT_FAILED_GRAB_PAGE;
                msg   = "g_c_p_n failed";
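
page_cache_release() was an alias for put_page(), which drops one reference to
the page. The surrounding code follows the usual grab/put pairing (the
"g_c_p_n" in the message is grab_cache_page_nowait); a hedged sketch of that
pairing, not taken from this file:

struct page *vmpage = grab_cache_page_nowait(mapping, index);

if (vmpage) {
	/* the page comes back locked, with one reference held */
	/* ... issue or queue the read ... */
	unlock_page(vmpage);
	put_page(vmpage);	/* was page_cache_release(vmpage) */
}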
@@ -527,7 +527,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
  * up quickly which will affect read performance significantly. See LU-2816
  */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
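
RAS_INCREASE_STEP keeps the same value after the rename; the macro is
byte-size-to-page-count arithmetic. A worked sketch, assuming ONE_MB_BRW_SIZE
is exactly 1 MiB (as the name suggests):

static inline unsigned long ras_increase_step(void)
{
	/* (1 << 20) >> 12 == 256 with 4 KiB pages; with 64 KiB pages
	 * it is (1 << 20) >> 16 == 16, still 1 MiB per step, so the
	 * window grows in 1 MiB increments independent of PAGE_SIZE.
	 */
	return ONE_MB_BRW_SIZE >> PAGE_SHIFT;
}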
@@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                        end = rpc_boundary;
 
                /* Truncate RA window to end of file */
-               end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+               end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
                ras->ras_next_readahead = max(end, end + 1);
                RAS_CDEBUG(ras);
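
(kms - 1) >> PAGE_SHIFT is the index of the last page holding data for a
kms-byte file (kms is Lustre's "known minimum size"). A sketch of the
arithmetic, assuming 4 KiB pages and kms > 0:

static inline unsigned long kms_last_index(__u64 kms)
{
	/* kms = 8192 (two full pages)  -> 8191 >> 12 == 1
	 * kms = 8193 (two pages + 1 B) -> 8192 >> 12 == 2
	 */
	return (unsigned long)((kms - 1) >> PAGE_SHIFT);
}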
@@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
        if (reserved != 0)
                ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-       if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+       if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
                ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
        /* if we didn't get to the end of the region we reserved from
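
Reading just this hunk against the clamp above: end was truncated to
(kms - 1) >> PAGE_SHIFT, so ra_end == end + 1 can equal kms >> PAGE_SHIFT only
when kms is page-aligned (kms = 8192 gives end = 1 and kms >> PAGE_SHIFT = 2);
the RA_STAT_EOF counter thus fires when readahead ran right through the file's
last page.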
@@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
        if (ras->ras_requests == 2 && !ras->ras_request_index) {
                __u64 kms_pages;
 
-               kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                           PAGE_CACHE_SHIFT;
+               kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+                           PAGE_SHIFT;
 
                CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
                       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
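
The kms_pages computation is the standard round-up-to-pages idiom: adding
PAGE_SIZE - 1 before the shift makes any partial final page count as a whole
page, the same value DIV_ROUND_UP(size, PAGE_SIZE) would give. Worked with
4 KiB pages:

static inline pgoff_t size_to_pages(loff_t size)
{
	/* size = 8192 -> (8192 + 4095) >> 12 == 2 pages
	 * size = 8193 -> (8193 + 4095) >> 12 == 3 pages
	 */
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}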
@@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
                 * PageWriteback or clean the page.
                 */
                result = cl_sync_file_range(inode, offset,
-                                           offset + PAGE_CACHE_SIZE - 1,
+                                           offset + PAGE_SIZE - 1,
                                            CL_FSYNC_LOCAL, 1);
                if (result > 0) {
                        /* actually we may have written more than one page.
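
ll_writepage syncs exactly the page's byte range: [offset, offset +
PAGE_SIZE - 1] is an inclusive range covering one page. A sketch of the
bounds, with a hypothetical helper and 4 KiB pages assumed for the numbers:

static inline void page_byte_range(pgoff_t index, loff_t *start, loff_t *end)
{
	*start = (loff_t)index << PAGE_SHIFT;	/* index 3 -> 12288 */
	*end   = *start + PAGE_SIZE - 1;	/* inclusive -> 16383 */
}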
@@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
        int ignore_layout = 0;
 
        if (wbc->range_cyclic) {
-               start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+               start = mapping->writeback_index << PAGE_SHIFT;
                end = OBD_OBJECT_EOF;
        } else {
                start = wbc->range_start;
@@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
                if (end == OBD_OBJECT_EOF)
                        end = i_size_read(inode);
-               mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+               mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
        }
        return result;
 }
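
The last two hunks are the cyclic-writeback bookkeeping round trip: a pass
resumes from mapping->writeback_index converted to a byte offset, and once the
pass is done the next starting page is stored back. A hedged sketch of the
pair; both helper names are hypothetical, and 4 KiB pages are assumed for the
numbers:

static inline loff_t wb_resume_pos(struct address_space *mapping)
{
	/* writeback_index 5 -> byte offset 5 << 12 == 20480 */
	return (loff_t)mapping->writeback_index << PAGE_SHIFT;
}

static inline void wb_store_resume(struct address_space *mapping, loff_t end)
{
	/* end = 20991 -> next pass starts at page (20991 >> 12) + 1 == 6 */
	mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}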