// SPDX-License-Identifier: GPL-2.0+
#include "internal.h"

static int z_erofs_do_map_blocks(struct erofs_inode *vi,
                                 struct erofs_map_blocks *map,
                                 int flags);

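/*
 * Fast path: if the superblock carries none of the big-pcluster,
 * ztailpacking or fragment features and the inode uses full (non-compact)
 * compressed indexes, the per-inode z_* fields can be initialized without
 * reading the on-disk map header.
 */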
int z_erofs_fill_inode(struct erofs_inode *vi)
{
        if (!erofs_sb_has_big_pcluster() &&
            !erofs_sb_has_ztailpacking() && !erofs_sb_has_fragments() &&
            vi->datalayout == EROFS_INODE_COMPRESSED_FULL) {
                vi->z_advise = 0;
                vi->z_algorithmtype[0] = 0;
                vi->z_algorithmtype[1] = 0;
                vi->z_logical_clusterbits = sbi.blkszbits;

                vi->flags |= EROFS_I_Z_INITED;
        }
        return 0;
}

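/*
 * Parse the on-disk z_erofs_map_header, which sits 8-byte aligned after
 * the inode base and xattrs, and cache the compression parameters in the
 * in-memory inode. Done lazily, once per inode, before the first mapping.
 */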
static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
{
        int ret;
        erofs_off_t pos;
        struct z_erofs_map_header *h;
        char buf[sizeof(struct z_erofs_map_header)];

        if (vi->flags & EROFS_I_Z_INITED)
                return 0;

        pos = round_up(iloc(vi->nid) + vi->inode_isize + vi->xattr_isize, 8);
        ret = erofs_dev_read(0, buf, pos, sizeof(buf));
        if (ret < 0)
                return -EIO;

        h = (struct z_erofs_map_header *)buf;
        /*
         * If the highest bit of the 8-byte map header is set, the whole file
         * is stored in the packed inode. The remaining bits keep z_fragmentoff.
         */
        if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
                vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
                vi->fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
                vi->z_tailextent_headlcn = 0;
                goto out;
        }

        vi->z_advise = le16_to_cpu(h->h_advise);
        vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
        vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

        if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
                erofs_err("unknown compression format %u for nid %llu",
                          vi->z_algorithmtype[0], (unsigned long long)vi->nid);
                return -EOPNOTSUPP;
        }

        vi->z_logical_clusterbits = sbi.blkszbits + (h->h_clusterbits & 7);
        if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
                erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
                          vi->nid * 1ULL);
                return -EFSCORRUPTED;
        }

        if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
                struct erofs_map_blocks map = { .index = UINT_MAX };

                vi->idata_size = le16_to_cpu(h->h_idata_size);
                ret = z_erofs_do_map_blocks(vi, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                if (!map.m_plen ||
                    erofs_blkoff(map.m_pa) + map.m_plen > erofs_blksiz()) {
                        erofs_err("invalid tail-packing pclustersize %llu",
                                  map.m_plen | 0ULL);
                        return -EFSCORRUPTED;
                }
                if (ret < 0)
                        return ret;
        }
        if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
            !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
                struct erofs_map_blocks map = { .index = UINT_MAX };

                vi->fragmentoff = le32_to_cpu(h->h_fragmentoff);
                ret = z_erofs_do_map_blocks(vi, &map,
                                            EROFS_GET_BLOCKS_FINDTAIL);
                if (ret < 0)
                        return ret;
        }
out:
        vi->flags |= EROFS_I_Z_INITED;
        return 0;
}

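/*
 * State carried across one logical-to-physical mapping pass: the lcluster
 * most recently decoded from disk (type, cluster offset, deltas) plus the
 * physical block address and compressed-block count gathered so far.
 */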
struct z_erofs_maprecorder {
        struct erofs_inode *inode;
        struct erofs_map_blocks *map;
        void *kaddr;

        unsigned long lcn;
        /* compression extent information gathered */
        u8 type, headtype;
        u16 clusterofs;
        u16 delta[2];
        erofs_blk_t pblk, compressedblks;
        erofs_off_t nextpackoff;
        bool partialref;
};

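/*
 * Read the metadata block @eblk into map->mpage unless it is already
 * cached there (map->index tracks which block is currently loaded).
 */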
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
                                  erofs_blk_t eblk)
{
        int ret;
        struct erofs_map_blocks *const map = m->map;
        char *mpage = map->mpage;

        if (map->index == eblk)
                return 0;

        ret = erofs_blk_read(mpage, eblk, 1);
        if (ret < 0)
                return -EIO;

        map->index = eblk;

        return 0;
}

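/*
 * Decode one full (non-compact) lcluster index: every logical cluster has
 * its own fixed-size struct z_erofs_lcluster_index, so the on-disk
 * position is a simple array lookup by @lcn past the inode metadata.
 */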
static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                         unsigned long lcn)
{
        struct erofs_inode *const vi = m->inode;
        const erofs_off_t ibase = iloc(vi->nid);
        const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(ibase +
                        vi->inode_isize + vi->xattr_isize) +
                lcn * sizeof(struct z_erofs_lcluster_index);
        struct z_erofs_lcluster_index *di;
        unsigned int advise, type;
        int err;

        err = z_erofs_reload_indexes(m, erofs_blknr(pos));
        if (err)
                return err;

        m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
        m->lcn = lcn;
        di = m->kaddr + erofs_blkoff(pos);

        advise = le16_to_cpu(di->di_advise);
        type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
                ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
        switch (type) {
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                m->clusterofs = 1 << vi->z_logical_clusterbits;
                m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
                if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = m->delta[0] &
                                ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                }
                m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
                break;
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                if (advise & Z_EROFS_LI_PARTIAL_REF)
                        m->partialref = true;
                m->clusterofs = le16_to_cpu(di->di_clusterofs);
                m->pblk = le32_to_cpu(di->di_u.blkaddr);
                break;
        default:
                DBG_BUGON(1);
                return -EOPNOTSUPP;
        }
        m->type = type;
        return 0;
}

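/*
 * Extract one encoded lcluster field from a compacted pack: the low
 * @lobits bits hold either the cluster offset or a delta, and the two
 * bits above them hold the lcluster type.
 */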
static unsigned int decode_compactedbits(unsigned int lobits,
                                         unsigned int lomask,
                                         u8 *in, unsigned int pos, u8 *type)
{
        const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
        const unsigned int lo = v & lomask;

        *type = (v >> lobits) & 3;
        return lo;
}

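/*
 * Compute the lookahead distance (delta[1]) for the lcluster at slot @i of
 * a compacted pack by counting the NONHEAD slots that follow it; full
 * indexes store this delta explicitly, compacted packs derive it here.
 */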
static int get_compacted_la_distance(unsigned int lclusterbits,
                                     unsigned int encodebits,
                                     unsigned int vcnt, u8 *in, int i)
{
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int lo, d1 = 0;
        u8 type;

        DBG_BUGON(i >= vcnt);

        do {
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * i, &type);

                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        return d1;
                ++d1;
        } while (++i < vcnt);

        /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) items */
        if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
                d1 += lo - 1;
        return d1;
}

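/*
 * Compacted indexes group either 2 lclusters into an 8-byte pack
 * (4B-amortized) or 16 lclusters into a 32-byte pack (2B-amortized).
 * The last 4 bytes of each pack hold a base block address; the remaining
 * bits are shared by the per-lcluster type/offset fields decoded below.
 */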
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
                                  unsigned int amortizedshift,
                                  erofs_off_t pos, bool lookahead)
{
        struct erofs_inode *const vi = m->inode;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        const unsigned int lomask = (1 << lclusterbits) - 1;
        unsigned int vcnt, base, lo, encodebits, nblk, eofs;
        int i;
        u8 *in, type;
        bool big_pcluster;

        if (1 << amortizedshift == 4)
                vcnt = 2;
        else if (1 << amortizedshift == 2 && lclusterbits == 12)
                vcnt = 16;
        else
                return -EOPNOTSUPP;

        /* this is not the same as round_up(..) */
        m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
                         (vcnt << amortizedshift);
        big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
        encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
        eofs = erofs_blkoff(pos);
        base = round_down(eofs, vcnt << amortizedshift);
        in = m->kaddr + base;

        i = (eofs - base) >> amortizedshift;

        lo = decode_compactedbits(lclusterbits, lomask,
                                  in, encodebits * i, &type);
        m->type = type;
        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                m->clusterofs = 1 << lclusterbits;

                /* figure out lookahead_distance: delta[1] if needed */
                if (lookahead)
                        m->delta[1] = get_compacted_la_distance(lclusterbits,
                                                encodebits, vcnt, in, i);
                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                        if (!big_pcluster) {
                                DBG_BUGON(1);
                                return -EFSCORRUPTED;
                        }
                        m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
                        m->delta[0] = 1;
                        return 0;
                } else if (i + 1 != (int)vcnt) {
                        m->delta[0] = lo;
                        return 0;
                }
                /*
                 * The last lcluster in the pack is special: its lo field
                 * saves delta[1] rather than delta[0], so derive delta[0]
                 * from the previous lcluster instead.
                 */
                lo = decode_compactedbits(lclusterbits, lomask,
                                          in, encodebits * (i - 1), &type);
                if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                        lo = 0;
                else if (lo & Z_EROFS_LI_D0_CBLKCNT)
                        lo = 1;
                m->delta[0] = lo + 1;
                return 0;
        }
        m->clusterofs = lo;
        m->delta[0] = 0;
        /* figure out blkaddr (pblk) for HEAD lclusters */
        if (!big_pcluster) {
                nblk = 1;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
                                i -= lo;

                        if (i >= 0)
                                ++nblk;
                }
        } else {
                nblk = 0;
                while (i > 0) {
                        --i;
                        lo = decode_compactedbits(lclusterbits, lomask,
                                                  in, encodebits * i, &type);
                        if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                                if (lo & Z_EROFS_LI_D0_CBLKCNT) {
                                        --i;
                                        nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
                                        continue;
                                }
                                if (lo <= 1) {
                                        DBG_BUGON(1);
                                        /* --i; ++nblk; continue; */
                                        return -EFSCORRUPTED;
                                }
                                i -= lo - 2;
                                continue;
                        }
                        ++nblk;
                }
        }
        in += (vcnt << amortizedshift) - sizeof(__le32);
        m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
        return 0;
}

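/*
 * Locate which compacted pack holds @lcn: an optional run of 4B-amortized
 * entries first brings the index area up to 32-byte alignment, then (if
 * advised) the bulk of the indexes are 2B-amortized, with any remainder
 * falling back to 4B-amortized packs.
 */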
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                            unsigned long lcn, bool lookahead)
{
        struct erofs_inode *const vi = m->inode;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        const erofs_off_t ebase = round_up(iloc(vi->nid) + vi->inode_isize +
                                           vi->xattr_isize, 8) +
                sizeof(struct z_erofs_map_header);
        const unsigned int totalidx = BLK_ROUND_UP(vi->i_size);
        unsigned int compacted_4b_initial, compacted_2b;
        unsigned int amortizedshift;
        erofs_off_t pos;
        int err;

        if (lclusterbits != 12)
                return -EOPNOTSUPP;

        if (lcn >= totalidx)
                return -EINVAL;

        m->lcn = lcn;
        /* used to align up to the 32-byte (compacted_2b) boundary */
        compacted_4b_initial = (32 - ebase % 32) / 4;
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;

        if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
            compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;

        pos = ebase;
        if (lcn < compacted_4b_initial) {
                amortizedshift = 2;
                goto out;
        }
        pos += compacted_4b_initial * 4;
        lcn -= compacted_4b_initial;

        if (lcn < compacted_2b) {
                amortizedshift = 1;
                goto out;
        }
        pos += compacted_2b * 2;
        lcn -= compacted_2b;
        amortizedshift = 2;
out:
        pos += lcn * (1 << amortizedshift);
        err = z_erofs_reload_indexes(m, erofs_blknr(pos));
        if (err)
                return err;
        return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

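/* Dispatch on the inode layout: full (legacy) vs. compacted indexes. */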
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
                                          unsigned int lcn, bool lookahead)
{
        const unsigned int datamode = m->inode->datalayout;

        if (datamode == EROFS_INODE_COMPRESSED_FULL)
                return legacy_load_cluster_from_disk(m, lcn);

        if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
                return compacted_load_cluster_from_disk(m, lcn, lookahead);

        return -EINVAL;
}

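/*
 * Walk backwards by delta[0] hops from a NONHEAD lcluster until the
 * PLAIN/HEAD lcluster that starts the extent is found, then record its
 * logical start address in the map.
 */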
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
                                   unsigned int lookback_distance)
{
        struct erofs_inode *const vi = m->inode;
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn = m->lcn;
        int err;

        if (lcn < lookback_distance) {
                erofs_err("bogus lookback distance @ nid %llu",
                          (unsigned long long)vi->nid);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }

        /* load extent head logical cluster if needed */
        lcn -= lookback_distance;
        err = z_erofs_load_cluster_from_disk(m, lcn, false);
        if (err)
                return err;

        switch (m->type) {
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                if (!m->delta[0]) {
                        erofs_err("invalid lookback distance 0 @ nid %llu",
                                  (unsigned long long)vi->nid);
                        DBG_BUGON(1);
                        return -EFSCORRUPTED;
                }
                return z_erofs_extent_lookback(m, m->delta[0]);
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                m->headtype = m->type;
                map->m_la = (lcn << lclusterbits) | m->clusterofs;
                break;
        default:
                erofs_err("unknown type %u @ lcn %lu of nid %llu",
                          m->type, lcn, (unsigned long long)vi->nid);
                DBG_BUGON(1);
                return -EOPNOTSUPP;
        }
        return 0;
}

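/*
 * Determine the compressed (physical) length of the current extent. For
 * big pclusters the block count is encoded as a CBLKCNT value in the first
 * NONHEAD lcluster after the head; otherwise the pcluster is exactly one
 * lcluster long.
 */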
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
                                            unsigned int initial_lcn)
{
        struct erofs_inode *const vi = m->inode;
        struct erofs_map_blocks *const map = m->map;
        const unsigned int lclusterbits = vi->z_logical_clusterbits;
        unsigned long lcn;
        int err;

        DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
                  m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1);

        if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
            !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
                map->m_plen = 1 << lclusterbits;
                return 0;
        }

        lcn = m->lcn + 1;
        if (m->compressedblks)
                goto out;

        err = z_erofs_load_cluster_from_disk(m, lcn, false);
        if (err)
                return err;

        /*
         * If the 1st NONHEAD lcluster was already handled initially without
         * valid compressedblks, it cannot be a CBLKCNT lcluster; otherwise an
         * internal implementation error has been detected.
         *
         * The following code can handle that case anyway, but BUG_ON in
         * debugging mode so developers notice it.
         */
        DBG_BUGON(lcn == initial_lcn &&
                  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

        switch (m->type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                /*
                 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
                 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
                 */
                m->compressedblks = 1 << (lclusterbits - sbi.blkszbits);
                break;
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                if (m->delta[0] != 1)
                        goto err_bonus_cblkcnt;
                if (m->compressedblks)
                        break;
                /* fallthrough */
        default:
                erofs_err("cannot find CBLKCNT @ lcn %lu of nid %llu",
                          lcn, vi->nid | 0ULL);
                DBG_BUGON(1);
                return -EFSCORRUPTED;
        }
out:
        map->m_plen = m->compressedblks << sbi.blkszbits;
        return 0;
err_bonus_cblkcnt:
        erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
                  lcn, vi->nid | 0ULL);
        DBG_BUGON(1);
        return -EFSCORRUPTED;
}

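/*
 * Extend m_llen forward (used for FIEMAP): keep following NONHEAD deltas
 * until the next HEAD lcluster or EOF so the map covers the whole
 * decompressed extent rather than just the requested lcluster.
 */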
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
        struct erofs_inode *const vi = m->inode;
        struct erofs_map_blocks *map = m->map;
        unsigned int lclusterbits = vi->z_logical_clusterbits;
        u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
        int err;

        do {
                /* handle the last EOF pcluster (no next HEAD lcluster) */
                if ((lcn << lclusterbits) >= vi->i_size) {
                        map->m_llen = vi->i_size - map->m_la;
                        return 0;
                }

                err = z_erofs_load_cluster_from_disk(m, lcn, true);
                if (err)
                        return err;

                if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
                        DBG_BUGON(!m->delta[1] &&
                                  m->clusterofs != 1 << lclusterbits);
                } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
                           m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1) {
                        /* go on until the next HEAD lcluster */
                        if (lcn != headlcn)
                                break;
                        m->delta[1] = 1;
                } else {
                        erofs_err("unknown type %u @ lcn %llu of nid %llu",
                                  m->type, lcn | 0ULL,
                                  (unsigned long long)vi->nid);
                        DBG_BUGON(1);
                        return -EOPNOTSUPP;
                }
                lcn += m->delta[1];
        } while (m->delta[1]);

        map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
        return 0;
}

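/*
 * Core mapping routine: load the lcluster covering map->m_la (or the last
 * lcluster when called with EROFS_GET_BLOCKS_FINDTAIL), look back to the
 * extent head if needed, then fill in the physical address, compressed
 * length and algorithm format. Tail-packed and fragment extents are
 * special-cased since their data lives in metadata or the packed inode.
 */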
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
                                 struct erofs_map_blocks *map,
                                 int flags)
{
        bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
        bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
        struct z_erofs_maprecorder m = {
                .inode = vi,
                .map = map,
                .kaddr = map->mpage,
        };
        int err = 0;
        unsigned int lclusterbits, endoff;
        unsigned long initial_lcn;
        unsigned long long ofs, end;

        lclusterbits = vi->z_logical_clusterbits;
        ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? vi->i_size - 1 : map->m_la;
        initial_lcn = ofs >> lclusterbits;
        endoff = ofs & ((1 << lclusterbits) - 1);

        err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
        if (err)
                goto out;

        if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
                vi->z_idataoff = m.nextpackoff;

        map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
        end = (m.lcn + 1ULL) << lclusterbits;
        switch (m.type) {
        case Z_EROFS_LCLUSTER_TYPE_PLAIN:
        case Z_EROFS_LCLUSTER_TYPE_HEAD1:
                if (endoff >= m.clusterofs) {
                        m.headtype = m.type;
                        map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
                        break;
                }
                /* m.lcn should be >= 1 if endoff < m.clusterofs */
                if (!m.lcn) {
                        erofs_err("invalid logical cluster 0 at nid %llu",
                                  (unsigned long long)vi->nid);
                        err = -EFSCORRUPTED;
                        goto out;
                }
                end = (m.lcn << lclusterbits) | m.clusterofs;
                map->m_flags |= EROFS_MAP_FULL_MAPPED;
                m.delta[0] = 1;
                /* fallthrough */
        case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
                /* get the corresponding first chunk */
                err = z_erofs_extent_lookback(&m, m.delta[0]);
                if (err)
                        goto out;
                break;
        default:
                erofs_err("unknown type %u @ offset %llu of nid %llu",
                          m.type, ofs, (unsigned long long)vi->nid);
                err = -EOPNOTSUPP;
                goto out;
        }
        if (m.partialref)
                map->m_flags |= EROFS_MAP_PARTIAL_REF;
        map->m_llen = end - map->m_la;
        if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
                vi->z_tailextent_headlcn = m.lcn;
                /* for non-compact indexes, fragmentoff is 64 bits */
                if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
                        vi->fragmentoff |= (u64)m.pblk << 32;
        }
        if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_META;
                map->m_pa = vi->z_idataoff;
                map->m_plen = vi->z_idata_size;
        } else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
                map->m_flags |= EROFS_MAP_FRAGMENT;
        } else {
                map->m_pa = erofs_pos(m.pblk);
                err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
                if (err)
                        goto out;
        }

        if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
                if (map->m_llen > map->m_plen) {
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto out;
                }
                if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
                        map->m_algorithmformat =
                                Z_EROFS_COMPRESSION_INTERLACED;
                else
                        map->m_algorithmformat =
                                Z_EROFS_COMPRESSION_SHIFTED;
        } else {
                map->m_algorithmformat = vi->z_algorithmtype[0];
        }

        if (flags & EROFS_GET_BLOCKS_FIEMAP) {
                err = z_erofs_get_extent_decompressedlen(&m);
                if (!err)
                        map->m_flags |= EROFS_MAP_FULL_MAPPED;
        }

out:
        erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
                  map->m_la, map->m_pa,
                  map->m_llen, map->m_plen, map->m_flags);
        return err;
}

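/*
 * Entry point used by the read path: clamp out-of-range requests, make
 * sure the z_* fields are initialized, handle the whole-file-fragment
 * case, then delegate to z_erofs_do_map_blocks().
 *
 * A typical caller loop (sketch only; the surrounding read helper shown
 * here is hypothetical) would look like:
 *
 *	struct erofs_map_blocks map = { .index = UINT_MAX };
 *
 *	while (offset < inode->i_size) {
 *		map.m_la = offset;
 *		err = z_erofs_map_blocks_iter(inode, &map, 0);
 *		if (err)
 *			break;
 *		// decompress map.m_plen bytes at map.m_pa into
 *		// map.m_llen bytes at logical offset map.m_la
 *		offset = map.m_la + map.m_llen;
 *	}
 */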
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
                            struct erofs_map_blocks *map,
                            int flags)
{
        int err = 0;

        /* when trying to read beyond EOF, leave it unmapped */
        if (map->m_la >= vi->i_size) {
                map->m_llen = map->m_la + 1 - vi->i_size;
                map->m_la = vi->i_size;
                map->m_flags = 0;
                goto out;
        }

        err = z_erofs_fill_inode_lazy(vi);
        if (err)
                goto out;

        if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
            !vi->z_tailextent_headlcn) {
                map->m_la = 0;
                map->m_llen = vi->i_size;
                map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
                               EROFS_MAP_FRAGMENT;
                goto out;
        }

        err = z_erofs_do_map_blocks(vi, map, flags);
out:
        DBG_BUGON(err < 0 && err != -ENOMEM);
        return err;
}