// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/* if we just extended the file size, any portion not in
	 * cache won't be on the server and is zeroes */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}

/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null.
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

	return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
	return -ENOBUFS;
#endif
}

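/* Operations handed to the netfs library to drive buffered reads */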
static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq		= v9fs_init_rreq,
	.is_cache_enabled	= v9fs_is_cache_enabled,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_op		= v9fs_req_issue_op,
	.cleanup		= v9fs_req_cleanup,
};

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: file being read
 * @page: the page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}

/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 * @page: The page to be invalidated
 * @offset: offset of the invalidated region
 * @length: length of the invalidated region
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

	folio_wait_fscache(folio);
}

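/*
 * Completion handler for a write to the cache: if the write failed with
 * anything other than -ENOBUFS, invalidate the cached data so that a
 * stale copy is not read back later.
 */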
static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

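/*
 * Write a single folio back to the server over 9P using the inode's
 * writeback fid, and start a copy to the cache when caching is enabled.
 * The caller is expected to hold the folio lock.
 */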
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

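/*
 * Write back a dirty page: -EAGAIN from the locked write redirties the
 * page for a later attempt; any other error is recorded on the mapping.
 */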
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file. We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

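/*
 * Conclude a write started by v9fs_write_begin: mark the folio up to date
 * and dirty, and advance the recorded inode size if the write extended
 * the file.
 */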
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback. We also
 * need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page)
{
	struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);

	return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#endif

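/*
 * Address space operations for 9P: reads are driven through the netfs
 * library, and writeback goes to the server and, when enabled, to fscache.
 */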
const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = v9fs_set_page_dirty,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};