// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
static inline void read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	const blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;
	unsigned int i;

	bio_for_each_segment_all(bvec, bio, i, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}
/* prio -- true is used for dir */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
			  (nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (unlikely(!page)) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (unlikely(err != PAGE_SIZE)) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* this page has been truncated by others */
		if (unlikely(page->mapping != mapping)) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}
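
/*
 * Descriptive note on the helper above: metadata blocks are read through the
 * backing block device's own page cache, so repeated lookups of the same
 * blkaddr simply hit find_or_create_page().  With "nofail" set, allocation
 * uses __GFP_NOFAIL and transient read failures are retried up to
 * EROFS_IO_MAX_RETRIES_NOFAIL times before -EIO is reported.
 */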
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_layout_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_layout_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			DBG_BUGON(1);
			err = -EIO;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
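
/*
 * Illustrative example (assuming 4KiB blocks): an inline (tail-packed) inode
 * with i_size = 9000 has nblocks = 3 and lastblk = 2.  An access at
 * m_la = 4096 falls below blknr_to_addr(lastblk) = 8192, so it maps to
 * m_pa = blknr_to_addr(raw_blkaddr) + 4096 with m_plen = 8192 - 4096 = 4096.
 * An access at m_la = 8192 hits the tail block instead: it maps into the
 * inode metadata area with m_plen = 9000 - 8192 = 808 bytes and
 * EROFS_MAP_META set.
 */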
#ifdef CONFIG_EROFS_FS_ZIP
extern int z_erofs_map_blocks_iter(struct inode *,
				   struct erofs_map_blocks *,
				   struct page **, int);
#endif
int erofs_map_blocks_iter(struct inode *inode,
			  struct erofs_map_blocks *map,
			  struct page **mpage_ret, int flags)
{
	/* by default, reading raw data never uses erofs_map_blocks_iter */
	if (unlikely(!is_inode_layout_compression(inode))) {
		if (*mpage_ret)
			put_page(*mpage_ret);
		*mpage_ret = NULL;

		return erofs_map_blocks(inode, map, flags);
	}

#ifdef CONFIG_EROFS_FS_ZIP
	return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
#else
	/* data compression is not available */
	return -ENOTSUPP;
#endif
}
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		struct page *mpage = NULL;
		int err;

		err = erofs_map_blocks_iter(inode, map, &mpage, flags);
		if (mpage)
			put_page(mpage);
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}
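
/*
 * Note: erofs_map_blocks() is the layout dispatcher -- compressed inodes are
 * handled by erofs_map_blocks_iter() (and z_erofs_map_blocks_iter() when
 * CONFIG_EROFS_FS_ZIP is enabled), while uncompressed inodes use the
 * flat-mode mapping above.
 */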
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *inode = mapping->host;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	if (cleancache_get_page(page) == 0) {
		err = 0;
		SetPageUptodate(page);
		goto has_updated;
	}

	/* note that for readpage case, bio is also NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the holed page */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(inode->i_sb,
				     blknr, nblocks, read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case it is followed by too many gaps */
	if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
		/* err should reassign to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}
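
/*
 * Note on the raw read path above: the caller threads "bio" and "*last_block"
 * through consecutive calls, so physically contiguous pages accumulate into a
 * single bio.  A gap in the block sequence, a bio_add_page() failure, or a
 * full bio (bi_vcnt reaching bi_max_vecs) forces submission; holed and inline
 * pages are completed in place without issuing any I/O.
 */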
/*
 * since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (unlikely(bio))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
};