// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	size_t account = 0;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}
	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		loff_t pg_end;
		bool pg_failed = false;

		if (xas_retry(&xas, folio))
			continue;

		pg_end = folio_pos(folio) + folio_size(folio) - 1;

		for (;;) {
			loff_t sreq_end;

			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
				folio_start_fscache(folio);
			pg_failed |= subreq_failed;
			sreq_end = subreq->start + subreq->len - 1;
			if (pg_end < sreq_end)
				break;

			account += subreq->transferred;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}

			if (pg_end == sreq_end)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}
		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();
	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}
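
/*
 * Illustrative note (an addition, not from the original source): the walk
 * above pairs each folio with the subrequests that overlap it.  For example,
 * if a 16KiB folio was satisfied by two 8KiB subrequests, the folio is only
 * marked uptodate when neither subrequest reported an error, and it has
 * PG_fscache set (via folio_start_fscache()) if a covering subrequest was
 * flagged NETFS_SREQ_COPY_TO_CACHE, so that the pending copy to the cache is
 * tracked until writeback to the cache completes.
 */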
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}
static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of those.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}
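
/*
 * Illustrative example (the figures are assumptions, not taken from this
 * file): if the cache stores data in 256KiB granules, a 16KiB readahead at
 * file offset 260KiB may be expanded by the cache to cover 256KiB-512KiB so
 * that a whole granule can be read and stored.  readahead_expand() is then
 * asked to grow the VM's readahead window to match, and rreq->start and
 * rreq->len are re-read from the ractl afterwards because the VM may not
 * have been able to expand the window all the way.
 */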
/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
	 */
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
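
/*
 * Usage sketch (illustrative addition; "myfs" is a hypothetical filesystem
 * and not part of this file): a network filesystem normally plugs these
 * helpers straight into its address_space_operations, e.g.:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		.read_folio	= netfs_read_folio,
 *	};
 */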
/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_read_folio);
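
/*
 * Illustrative sketch (an assumption added for documentation, not part of
 * this file): the "netfs context contiguous to the vfs inode" requirement
 * means the filesystem embeds struct netfs_inode in its own inode structure,
 * along the lines of:
 *
 *	struct myfs_inode {
 *		struct netfs_inode netfs;	(wraps the VFS inode)
 *		unsigned long myfs_flags;
 *	};
 *
 * and calls netfs_inode_init() on &mi->netfs when the inode is set up, so
 * that netfs_inode(inode) can recover the context from the VFS inode.
 */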
/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				  bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}
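
/*
 * Worked example (the numbers are illustrative assumptions): with a 4KiB
 * folio at file offset 8192 and i_size of 4000, any write into that folio
 * starts beyond EOF, so there is nothing to read; the unwritten parts of the
 * folio are zeroed and the function returns true.  By contrast, a 100-byte
 * write at offset 8292 into a file of size 16384 leaves valid data on either
 * side of the write that must be read first, so the function returns false.
 */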
/**
 * netfs_write_begin - Helper to prepare for writing
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct netfs_inode *ctx,
		      struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, struct folio **_folio,
		      void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct folio *folio;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			goto error;
		}
		if (!folio)
			goto retry;
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}
	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
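
/*
 * Usage sketch (illustrative addition; the "myfs" names are assumptions and
 * not part of this file): a filesystem's ->write_begin() implementation is
 * typically a thin wrapper around this helper:
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned int len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(netfs_inode(mapping->host), file,
 *					mapping, pos, len, &folio, fsdata);
 *		if (ret == 0)
 *			*pagep = &folio->page;
 *		return ret;
 *	}
 */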