// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

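/*
 * Completion callback for raw read bios: propagate the bio status to every
 * page in the bio, unlock each page so that waiting readers can proceed,
 * then drop the bio reference.
 */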
static inline void read_endio(struct bio *bio)
{
        int i;
        struct bio_vec *bvec;
        const blk_status_t err = bio->bi_status;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, i, iter_all) {
                struct page *page = bvec->bv_page;

                /* page is already locked */
                DBG_BUGON(PageUptodate(page));

                if (unlikely(err))
                        SetPageError(page);
                else
                        SetPageUptodate(page);

                unlock_page(page);
                /* page could be reclaimed now */
        }
        bio_put(bio);
}

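/*
 * Read the metadata block at @blkaddr through the block device page cache
 * and return the page locked and uptodate, or an ERR_PTR on failure.
 * With @nofail set, the page allocation cannot fail and failed reads are
 * retried up to EROFS_IO_MAX_RETRIES_NOFAIL times.
 */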
/* prio -- true is used for dir */
struct page *__erofs_get_meta_page(struct super_block *sb,
                                   erofs_blk_t blkaddr, bool prio, bool nofail)
{
        struct inode *const bd_inode = sb->s_bdev->bd_inode;
        struct address_space *const mapping = bd_inode->i_mapping;
        /* prefer retrying in the allocator to blindly looping below */
        const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
                (nofail ? __GFP_NOFAIL : 0);
        unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
        struct page *page;
        int err;

repeat:
        page = find_or_create_page(mapping, blkaddr, gfp);
        if (unlikely(!page)) {
                DBG_BUGON(nofail);
                return ERR_PTR(-ENOMEM);
        }
        DBG_BUGON(!PageLocked(page));

        if (!PageUptodate(page)) {
                struct bio *bio;

                bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
                if (IS_ERR(bio)) {
                        DBG_BUGON(nofail);
                        err = PTR_ERR(bio);
                        goto err_out;
                }

                err = bio_add_page(bio, page, PAGE_SIZE, 0);
                if (unlikely(err != PAGE_SIZE)) {
                        err = -EFAULT;
                        goto err_out;
                }

                __submit_bio(bio, REQ_OP_READ,
                             REQ_META | (prio ? REQ_PRIO : 0));

                lock_page(page);

                /* this page has been truncated by others */
                if (unlikely(page->mapping != mapping)) {
unlock_repeat:
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }

                /* more likely a read error */
                if (unlikely(!PageUptodate(page))) {
                        if (io_retries) {
                                --io_retries;
                                goto unlock_repeat;
                        }
                        err = -EIO;
                        goto err_out;
                }
        }
        return page;

err_out:
        unlock_page(page);
        put_page(page);
        return ERR_PTR(err);
}

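/*
 * In the flat (uncompressed) layout, data blocks are stored consecutively
 * starting at vi->raw_blkaddr, so a logical offset maps linearly onto a
 * physical address. The only exception is an inode with an inline last
 * block, whose tail data lives right after the inode metadata and xattrs
 * and is reported with EROFS_MAP_META.
 */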
static int erofs_map_blocks_flatmode(struct inode *inode,
                                     struct erofs_map_blocks *map,
                                     int flags)
{
        int err = 0;
        erofs_blk_t nblocks, lastblk;
        u64 offset = map->m_la;
        struct erofs_vnode *vi = EROFS_V(inode);

        trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

        nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
        lastblk = nblocks - is_inode_layout_inline(inode);

        if (unlikely(offset >= inode->i_size)) {
                /* leave out-of-bounds access unmapped */
                map->m_flags = 0;
                map->m_plen = 0;
                goto out;
        }

        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;

        if (offset < blknr_to_addr(lastblk)) {
                map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
                map->m_plen = blknr_to_addr(lastblk) - offset;
        } else if (is_inode_layout_inline(inode)) {
                /* 2 - inode inline B: inode, [xattrs], inline last blk... */
                struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

                map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
                        vi->xattr_isize + erofs_blkoff(map->m_la);
                map->m_plen = inode->i_size - offset;

                /* inline data should be located in one meta block */
                if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
                        DBG_BUGON(1);
                        err = -EIO;
                        goto err_out;
                }

                map->m_flags |= EROFS_MAP_META;
        } else {
                errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
                      vi->nid, inode->i_size, map->m_la);
                DBG_BUGON(1);
                err = -EIO;
                goto err_out;
        }

out:
        map->m_llen = map->m_plen;

err_out:
        trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
        return err;
}

#ifdef CONFIG_EROFS_FS_ZIP
extern int z_erofs_map_blocks_iter(struct inode *,
                                   struct erofs_map_blocks *,
                                   struct page **, int);
#endif

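/*
 * Iterator-style mapping used by the compressed path: the metadata page
 * used for the lookup is cached in *mpage_ret across calls so that
 * consecutive lookups can reuse it. Plain (uncompressed) inodes fall
 * back to erofs_map_blocks() and drop any cached page.
 */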
int erofs_map_blocks_iter(struct inode *inode,
                          struct erofs_map_blocks *map,
                          struct page **mpage_ret, int flags)
{
        /* by default, reading raw data never uses erofs_map_blocks_iter */
        if (unlikely(!is_inode_layout_compression(inode))) {
                if (*mpage_ret)
                        put_page(*mpage_ret);
                *mpage_ret = NULL;

                return erofs_map_blocks(inode, map, flags);
        }

#ifdef CONFIG_EROFS_FS_ZIP
        return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
#else
        /* data compression is not available */
        return -ENOTSUPP;
#endif
}

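/*
 * Non-iterator entry point: dispatch to the compressed iterator (dropping
 * its cached metadata page right away) or to the flat mapping.
 */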
int erofs_map_blocks(struct inode *inode,
                     struct erofs_map_blocks *map, int flags)
{
        if (unlikely(is_inode_layout_compression(inode))) {
                struct page *mpage = NULL;
                int err;

                err = erofs_map_blocks_iter(inode, map, &mpage, flags);
                if (mpage)
                        put_page(mpage);
                return err;
        }
        return erofs_map_blocks_flatmode(inode, map, flags);
}

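/*
 * Read one uncompressed page, merging physically contiguous pages into a
 * single bio whenever possible. Holes are zeroed directly, inline tails
 * are copied from the inode's metadata block, and the (possibly still
 * unsubmitted) bio is returned so the caller can keep appending to it;
 * otherwise NULL or an ERR_PTR is returned.
 */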
static inline struct bio *erofs_read_raw_page(struct bio *bio,
                                              struct address_space *mapping,
                                              struct page *page,
                                              erofs_off_t *last_block,
                                              unsigned int nblocks,
                                              bool ra)
{
        struct inode *inode = mapping->host;
        erofs_off_t current_block = (erofs_off_t)page->index;
        int err;

        DBG_BUGON(!nblocks);

        if (PageUptodate(page)) {
                err = 0;
                goto has_updated;
        }

        if (cleancache_get_page(page) == 0) {
                err = 0;
                SetPageUptodate(page);
                goto has_updated;
        }

        /* note that for the readpage case, bio is also NULL */
        if (bio &&
            /* not continuous */
            *last_block + 1 != current_block) {
submit_bio_retry:
                __submit_bio(bio, REQ_OP_READ, 0);
                bio = NULL;
        }

        if (!bio) {
                struct erofs_map_blocks map = {
                        .m_la = blknr_to_addr(current_block),
                };
                erofs_blk_t blknr;
                unsigned int blkoff;

                err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
                if (unlikely(err))
                        goto err_out;

                /* zero out the unmapped (hole) page */
                if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
                        zero_user_segment(page, 0, PAGE_SIZE);
                        SetPageUptodate(page);

                        /* implies err = 0, see erofs_map_blocks */
                        goto has_updated;
                }

                /* for RAW access mode, m_plen must be equal to m_llen */
                DBG_BUGON(map.m_plen != map.m_llen);

                blknr = erofs_blknr(map.m_pa);
                blkoff = erofs_blkoff(map.m_pa);

                /* deal with the inline page */
                if (map.m_flags & EROFS_MAP_META) {
                        void *vsrc, *vto;
                        struct page *ipage;

                        DBG_BUGON(map.m_plen > PAGE_SIZE);

                        ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

                        if (IS_ERR(ipage)) {
                                err = PTR_ERR(ipage);
                                goto err_out;
                        }

                        vsrc = kmap_atomic(ipage);
                        vto = kmap_atomic(page);
                        memcpy(vto, vsrc + blkoff, map.m_plen);
                        memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
                        kunmap_atomic(vto);
                        kunmap_atomic(vsrc);
                        flush_dcache_page(page);

                        SetPageUptodate(page);
                        /* TODO: could we unlock the page earlier? */
                        unlock_page(ipage);
                        put_page(ipage);

                        /* implies err = 0, see erofs_map_blocks */
                        goto has_updated;
                }

                /* pa must be block-aligned for raw reading */
                DBG_BUGON(erofs_blkoff(map.m_pa));

                /* max # of continuous pages */
                if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
                        nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
                if (nblocks > BIO_MAX_PAGES)
                        nblocks = BIO_MAX_PAGES;

                bio = erofs_grab_bio(inode->i_sb,
                                     blknr, nblocks, read_endio, false);

                if (IS_ERR(bio)) {
                        err = PTR_ERR(bio);
                        bio = NULL;
                        goto err_out;
                }
        }

        err = bio_add_page(bio, page, PAGE_SIZE, 0);
        /* out of the extent or the bio is full */
        if (err < PAGE_SIZE)
                goto submit_bio_retry;

        *last_block = current_block;

        /* submit in advance in case it is followed by too many gaps */
        if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
                /* err should be reset to 0 after submitting */
                err = 0;
                goto submit_bio_out;
        }

        return bio;

err_out:
        /* for sync reading, set the page error immediately */
        if (!ra) {
                SetPageError(page);
                ClearPageUptodate(page);
        }
has_updated:
        unlock_page(page);

        /* if updated manually, the continuous pages have a gap */
        if (bio)
submit_bio_out:
                __submit_bio(bio, REQ_OP_READ, 0);

        return unlikely(err) ? ERR_PTR(err) : NULL;
}

/*
 * since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
        erofs_off_t last_block;
        struct bio *bio;

        trace_erofs_readpage(page, true);

        bio = erofs_read_raw_page(NULL, page->mapping,
                                  page, &last_block, 1, false);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
        return 0;
}

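/*
 * Readahead: pull pages off the LRU list, add them to the page cache and
 * chain them into as few bios as possible; read errors are only logged
 * since readahead is best-effort.
 */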
static int erofs_raw_access_readpages(struct file *filp,
                                      struct address_space *mapping,
                                      struct list_head *pages,
                                      unsigned int nr_pages)
{
        erofs_off_t last_block;
        struct bio *bio = NULL;
        gfp_t gfp = readahead_gfp_mask(mapping);
        struct page *page = list_last_entry(pages, struct page, lru);

        trace_erofs_readpages(mapping->host, page, nr_pages, true);

        for (; nr_pages; --nr_pages) {
                page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);

                if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
                        bio = erofs_read_raw_page(bio, mapping, page,
                                                  &last_block, nr_pages, true);

                        /* all page errors are ignored during readahead */
                        if (IS_ERR(bio)) {
                                pr_err("%s, readahead error at page %lu of nid %llu\n",
                                       __func__, page->index,
                                       EROFS_V(mapping->host)->nid);

                                bio = NULL;
                        }
                }

                /* pages could still be locked */
                put_page(page);
        }
        DBG_BUGON(!list_empty(pages));

        /* the rare case (ends in gaps) */
        if (unlikely(bio))
                __submit_bio(bio, REQ_OP_READ, 0);
        return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
        .readpage = erofs_raw_access_readpage,
        .readpages = erofs_raw_access_readpages,
};