1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2016 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 */
6 #include "libxfs.h"
7 #include "btree.h"
8 #include "err_protos.h"
9 #include "libxlog.h"
10 #include "incore.h"
11 #include "globals.h"
12 #include "dinode.h"
13 #include "slab.h"
14 #include "rmap.h"
15 #include "libfrog/bitmap.h"
16
17 #undef RMAP_DEBUG
18
19 #ifdef RMAP_DEBUG
20 # define dbg_printf(f, a...) do {printf(f, ## a); fflush(stdout); } while (0)
21 #else
22 # define dbg_printf(f, a...)
23 #endif
24
25 /* per-AG rmap object anchor */
26 struct xfs_ag_rmap {
27 struct xfs_slab *ar_rmaps; /* rmap observations, p4 */
28 struct xfs_slab *ar_raw_rmaps; /* unmerged rmaps */
29 int ar_flcount; /* agfl entries from leftover */
30 /* agbt allocations */
31 struct xfs_rmap_irec ar_last_rmap; /* last rmap seen */
32 struct xfs_slab *ar_refcount_items; /* refcount items, p4-5 */
33 };
34
35 static struct xfs_ag_rmap *ag_rmaps;
36 static bool rmapbt_suspect;
37 static bool refcbt_suspect;
38
39 static inline int rmap_compare(const void *a, const void *b)
40 {
41 return libxfs_rmap_compare(a, b);
42 }
43
44 /*
45 * Returns true if we must reconstruct either the reference count or reverse
46 * mapping trees.
47 */
48 bool
49 rmap_needs_work(
50 struct xfs_mount *mp)
51 {
52 return xfs_sb_version_hasreflink(&mp->m_sb) ||
53 xfs_sb_version_hasrmapbt(&mp->m_sb);
54 }
55
56 /*
57 * Initialize per-AG reverse map data.
58 */
59 void
60 rmaps_init(
61 struct xfs_mount *mp)
62 {
63 xfs_agnumber_t i;
64 int error;
65
66 if (!rmap_needs_work(mp))
67 return;
68
69 ag_rmaps = calloc(mp->m_sb.sb_agcount, sizeof(struct xfs_ag_rmap));
70 if (!ag_rmaps)
71 do_error(_("couldn't allocate per-AG reverse map roots\n"));
72
73 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
74 error = init_slab(&ag_rmaps[i].ar_rmaps,
75 sizeof(struct xfs_rmap_irec));
76 if (error)
77 do_error(
78 _("Insufficient memory while allocating reverse mapping slabs."));
79 error = init_slab(&ag_rmaps[i].ar_raw_rmaps,
80 sizeof(struct xfs_rmap_irec));
81 if (error)
82 do_error(
83 _("Insufficient memory while allocating raw metadata reverse mapping slabs."));
84 ag_rmaps[i].ar_last_rmap.rm_owner = XFS_RMAP_OWN_UNKNOWN;
85 error = init_slab(&ag_rmaps[i].ar_refcount_items,
86 sizeof(struct xfs_refcount_irec));
87 if (error)
88 do_error(
89 _("Insufficient memory while allocating refcount item slabs."));
90 }
91 }
92
93 /*
94 * Free the per-AG reverse-mapping data.
95 */
96 void
97 rmaps_free(
98 struct xfs_mount *mp)
99 {
100 xfs_agnumber_t i;
101
102 if (!rmap_needs_work(mp))
103 return;
104
105 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
106 free_slab(&ag_rmaps[i].ar_rmaps);
107 free_slab(&ag_rmaps[i].ar_raw_rmaps);
108 free_slab(&ag_rmaps[i].ar_refcount_items);
109 }
110 free(ag_rmaps);
111 ag_rmaps = NULL;
112 }
113
114 /*
115 * Decide if two reverse-mapping records can be merged.
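 * For example (purely illustrative numbers), two data-fork rmaps for the
 * same inode, (agbno 100, len 2, off 0) and (agbno 102, len 3, off 2),
 * satisfy every check below and merge into (agbno 100, len 5, off 0);
 * records with different owners, a gap in the physical range, or (for
 * inode owners) a gap in the logical offset do not merge.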
116 */
117 bool
118 rmaps_are_mergeable(
119 struct xfs_rmap_irec *r1,
120 struct xfs_rmap_irec *r2)
121 {
122 if (r1->rm_owner != r2->rm_owner)
123 return false;
124 if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
125 return false;
126 if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
127 XFS_RMAP_LEN_MAX)
128 return false;
129 if (XFS_RMAP_NON_INODE_OWNER(r2->rm_owner))
130 return true;
131 /* must be an inode owner below here */
132 if (r1->rm_flags != r2->rm_flags)
133 return false;
134 if (r1->rm_flags & XFS_RMAP_BMBT_BLOCK)
135 return true;
136 return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
137 }
138
139 /*
140 * Add an observation about a block mapping in an inode's data or attribute
141 * fork for later btree reconstruction.
142 */
143 int
144 rmap_add_rec(
145 struct xfs_mount *mp,
146 xfs_ino_t ino,
147 int whichfork,
148 struct xfs_bmbt_irec *irec)
149 {
150 struct xfs_rmap_irec rmap;
151 xfs_agnumber_t agno;
152 xfs_agblock_t agbno;
153 struct xfs_rmap_irec *last_rmap;
154 int error = 0;
155
156 if (!rmap_needs_work(mp))
157 return 0;
158
159 agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
160 agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
161 ASSERT(agno != NULLAGNUMBER);
162 ASSERT(agno < mp->m_sb.sb_agcount);
163 ASSERT(agbno + irec->br_blockcount <= mp->m_sb.sb_agblocks);
164 ASSERT(ino != NULLFSINO);
165 ASSERT(whichfork == XFS_DATA_FORK || whichfork == XFS_ATTR_FORK);
166
167 rmap.rm_owner = ino;
168 rmap.rm_offset = irec->br_startoff;
169 rmap.rm_flags = 0;
170 if (whichfork == XFS_ATTR_FORK)
171 rmap.rm_flags |= XFS_RMAP_ATTR_FORK;
172 rmap.rm_startblock = agbno;
173 rmap.rm_blockcount = irec->br_blockcount;
174 if (irec->br_state == XFS_EXT_UNWRITTEN)
175 rmap.rm_flags |= XFS_RMAP_UNWRITTEN;
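	/*
	 * Rather than adding every observation directly, buffer the most
	 * recently seen rmap for this AG: merge the new record into it when
	 * the two are contiguous, otherwise flush the buffered record to the
	 * rmap slab and start buffering the new one.  The final buffered
	 * record is flushed by rmap_finish_collecting_fork_recs().
	 */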
176 last_rmap = &ag_rmaps[agno].ar_last_rmap;
177 if (last_rmap->rm_owner == XFS_RMAP_OWN_UNKNOWN)
178 *last_rmap = rmap;
179 else if (rmaps_are_mergeable(last_rmap, &rmap))
180 last_rmap->rm_blockcount += rmap.rm_blockcount;
181 else {
182 error = slab_add(ag_rmaps[agno].ar_rmaps, last_rmap);
183 if (error)
184 return error;
185 *last_rmap = rmap;
186 }
187
188 return error;
189 }
190
191 /* Finish collecting inode data/attr fork rmaps. */
192 int
193 rmap_finish_collecting_fork_recs(
194 struct xfs_mount *mp,
195 xfs_agnumber_t agno)
196 {
197 if (!rmap_needs_work(mp) ||
198 ag_rmaps[agno].ar_last_rmap.rm_owner == XFS_RMAP_OWN_UNKNOWN)
199 return 0;
200 return slab_add(ag_rmaps[agno].ar_rmaps, &ag_rmaps[agno].ar_last_rmap);
201 }
202
203 /* add a raw rmap; these will be merged later */
204 static int
205 __rmap_add_raw_rec(
206 struct xfs_mount *mp,
207 xfs_agnumber_t agno,
208 xfs_agblock_t agbno,
209 xfs_extlen_t len,
210 uint64_t owner,
211 bool is_attr,
212 bool is_bmbt)
213 {
214 struct xfs_rmap_irec rmap;
215
216 ASSERT(len != 0);
217 rmap.rm_owner = owner;
218 rmap.rm_offset = 0;
219 rmap.rm_flags = 0;
220 if (is_attr)
221 rmap.rm_flags |= XFS_RMAP_ATTR_FORK;
222 if (is_bmbt)
223 rmap.rm_flags |= XFS_RMAP_BMBT_BLOCK;
224 rmap.rm_startblock = agbno;
225 rmap.rm_blockcount = len;
226 return slab_add(ag_rmaps[agno].ar_raw_rmaps, &rmap);
227 }
228
229 /*
230 * Add a reverse mapping for an inode fork's block mapping btree block.
231 */
232 int
233 rmap_add_bmbt_rec(
234 struct xfs_mount *mp,
235 xfs_ino_t ino,
236 int whichfork,
237 xfs_fsblock_t fsbno)
238 {
239 xfs_agnumber_t agno;
240 xfs_agblock_t agbno;
241
242 if (!rmap_needs_work(mp))
243 return 0;
244
245 agno = XFS_FSB_TO_AGNO(mp, fsbno);
246 agbno = XFS_FSB_TO_AGBNO(mp, fsbno);
247 ASSERT(agno != NULLAGNUMBER);
248 ASSERT(agno < mp->m_sb.sb_agcount);
249 ASSERT(agbno + 1 <= mp->m_sb.sb_agblocks);
250
251 return __rmap_add_raw_rec(mp, agno, agbno, 1, ino,
252 whichfork == XFS_ATTR_FORK, true);
253 }
254
255 /*
256 * Add a reverse mapping for a per-AG fixed metadata extent.
257 */
258 int
259 rmap_add_ag_rec(
260 struct xfs_mount *mp,
261 xfs_agnumber_t agno,
262 xfs_agblock_t agbno,
263 xfs_extlen_t len,
264 uint64_t owner)
265 {
266 if (!rmap_needs_work(mp))
267 return 0;
268
269 ASSERT(agno != NULLAGNUMBER);
270 ASSERT(agno < mp->m_sb.sb_agcount);
271 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
272
273 return __rmap_add_raw_rec(mp, agno, agbno, len, owner, false, false);
274 }
275
276 /*
277 * Merge adjacent raw rmaps and add them to the main rmap list.
278 */
279 int
280 rmap_fold_raw_recs(
281 struct xfs_mount *mp,
282 xfs_agnumber_t agno)
283 {
284 struct xfs_slab_cursor *cur = NULL;
285 struct xfs_rmap_irec *prev, *rec;
286 size_t old_sz;
287 int error = 0;
288
289 old_sz = slab_count(ag_rmaps[agno].ar_rmaps);
290 if (slab_count(ag_rmaps[agno].ar_raw_rmaps) == 0)
291 goto no_raw;
292 qsort_slab(ag_rmaps[agno].ar_raw_rmaps, rmap_compare);
293 error = init_slab_cursor(ag_rmaps[agno].ar_raw_rmaps, rmap_compare,
294 &cur);
295 if (error)
296 goto err;
297
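	/*
	 * Walk the sorted raw rmaps pairwise, merging adjacent records that
	 * rmaps_are_mergeable() allows and moving the results to the main
	 * per-AG rmap list.
	 */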
298 prev = pop_slab_cursor(cur);
299 rec = pop_slab_cursor(cur);
300 while (prev && rec) {
301 if (rmaps_are_mergeable(prev, rec)) {
302 prev->rm_blockcount += rec->rm_blockcount;
303 rec = pop_slab_cursor(cur);
304 continue;
305 }
306 error = slab_add(ag_rmaps[agno].ar_rmaps, prev);
307 if (error)
308 goto err;
309 prev = rec;
310 rec = pop_slab_cursor(cur);
311 }
312 if (prev) {
313 error = slab_add(ag_rmaps[agno].ar_rmaps, prev);
314 if (error)
315 goto err;
316 }
317 free_slab(&ag_rmaps[agno].ar_raw_rmaps);
318 error = init_slab(&ag_rmaps[agno].ar_raw_rmaps,
319 sizeof(struct xfs_rmap_irec));
320 if (error)
321 do_error(
322 _("Insufficient memory while allocating raw metadata reverse mapping slabs."));
323 no_raw:
324 if (old_sz)
325 qsort_slab(ag_rmaps[agno].ar_rmaps, rmap_compare);
326 err:
327 free_slab_cursor(&cur);
328 return error;
329 }
330
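/* Return the index of the lowest clear bit in the mask. */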
331 static int
332 find_first_zero_bit(
333 uint64_t mask)
334 {
335 int n;
336 int b = 0;
337
338 for (n = 0; n < sizeof(mask) * NBBY && (mask & 1); n++, mask >>= 1)
339 b++;
340
341 return b;
342 }
343
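/* Return the number of set bits in the mask. */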
344 static int
345 popcnt(
346 uint64_t mask)
347 {
348 int n;
349 int b = 0;
350
351 if (mask == 0)
352 return 0;
353
354 for (n = 0; n < sizeof(mask) * NBBY; n++, mask >>= 1)
355 if (mask & 1)
356 b++;
357
358 return b;
359 }
360
361 /*
362 * Add an allocation group's fixed metadata to the rmap list. This includes
363 * sb/agi/agf/agfl headers, inode chunks, and the log.
364 */
365 int
366 rmap_add_fixed_ag_rec(
367 struct xfs_mount *mp,
368 xfs_agnumber_t agno)
369 {
370 xfs_fsblock_t fsbno;
371 xfs_agblock_t agbno;
372 ino_tree_node_t *ino_rec;
373 xfs_agino_t agino;
374 int error;
375 int startidx;
376 int nr;
377
378 if (!rmap_needs_work(mp))
379 return 0;
380
381 /* sb/agi/agf/agfl headers */
382 error = rmap_add_ag_rec(mp, agno, 0, XFS_BNO_BLOCK(mp),
383 XFS_RMAP_OWN_FS);
384 if (error)
385 goto out;
386
387 /* inodes */
388 ino_rec = findfirst_inode_rec(agno);
389 for (; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec)) {
390 if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
391 startidx = find_first_zero_bit(ino_rec->ir_sparse);
392 nr = XFS_INODES_PER_CHUNK - popcnt(ino_rec->ir_sparse);
393 } else {
394 startidx = 0;
395 nr = XFS_INODES_PER_CHUNK;
396 }
397 nr /= mp->m_sb.sb_inopblock;
398 if (nr == 0)
399 nr = 1;
400 agino = ino_rec->ino_startnum + startidx;
401 agbno = XFS_AGINO_TO_AGBNO(mp, agino);
402 if (XFS_AGINO_TO_OFFSET(mp, agino) == 0) {
403 error = rmap_add_ag_rec(mp, agno, agbno, nr,
404 XFS_RMAP_OWN_INODES);
405 if (error)
406 goto out;
407 }
408 }
409
410 /* log */
411 fsbno = mp->m_sb.sb_logstart;
412 if (fsbno && XFS_FSB_TO_AGNO(mp, fsbno) == agno) {
413 agbno = XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart);
414 error = rmap_add_ag_rec(mp, agno, agbno, mp->m_sb.sb_logblocks,
415 XFS_RMAP_OWN_LOG);
416 if (error)
417 goto out;
418 }
419 out:
420 return error;
421 }
422
423 /*
424 * Copy the per-AG btree reverse-mapping data into the rmapbt.
425 *
426 * At rmapbt reconstruction time, the rmapbt will be populated _only_ with
427 * rmaps for file extents, inode chunks, AG headers, and bmbt blocks. While
428 * building the AG btrees we can record all the blocks allocated for each
429 * btree, but we cannot resolve the conflict between the fact that one has to
430 * finish allocating the space for the rmapbt before building the bnobt and the
431 * fact that allocating blocks for the bnobt requires adding rmapbt entries.
432 * Therefore we record in-core the rmaps for each btree and here use the
433 * libxfs rmap functions to finish building the rmap btree.
434 *
435 * During AGF/AGFL reconstruction in phase 5, rmaps for the AG btrees are
436 * recorded in memory. The rmapbt has not been set up yet, so we need to be
437 * able to "expand" the AGFL without updating the rmapbt. After we've written
438 * out the new AGF header the new rmapbt is available, so this function reads
439 * each AGFL to generate rmap entries. These entries are merged with the AG
440 * btree rmap entries, and then we use libxfs' rmap functions to add them to
441 * the rmapbt, after which it is fully regenerated.
442 */
443 int
444 rmap_store_ag_btree_rec(
445 struct xfs_mount *mp,
446 xfs_agnumber_t agno)
447 {
448 struct xfs_slab_cursor *rm_cur;
449 struct xfs_rmap_irec *rm_rec = NULL;
450 struct xfs_buf *agbp = NULL;
451 struct xfs_buf *agflbp = NULL;
452 struct xfs_trans *tp;
453 __be32 *agfl_bno, *b;
454 struct xfs_ag_rmap *ag_rmap = &ag_rmaps[agno];
455 struct bitmap *own_ag_bitmap = NULL;
456 int error = 0;
457
458 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
459 return 0;
460
461 /* Release the ar_rmaps; they were put into the rmapbt during p5. */
462 free_slab(&ag_rmap->ar_rmaps);
463 error = init_slab(&ag_rmap->ar_rmaps, sizeof(struct xfs_rmap_irec));
464 if (error)
465 goto err;
466
467 /* Add the AGFL blocks to the rmap list */
468 error = -libxfs_trans_read_buf(
469 mp, NULL, mp->m_ddev_targp,
470 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
471 XFS_FSS_TO_BB(mp, 1), 0, &agflbp, &xfs_agfl_buf_ops);
472 if (error)
473 goto err;
474
475 /*
476 * Sometimes, the blocks at the beginning of the AGFL are there
477 * because we overestimated how many blocks we needed to rebuild
478 * the freespace btrees. ar_flcount records the number of
479 * blocks in this situation. Since those blocks already have an
480 * rmap, we only need to add rmap records for AGFL blocks past
481 * that point in the AGFL because those blocks are a result of a
482 * no-rmap no-shrink freelist fixup that we did earlier.
483 *
484 * However, some blocks end up on the AGFL because the free space
485 * btrees shed blocks as a result of allocating space to fix the
486 * freelist. We already created in-core rmap records for the free
487 * space btree blocks, so we must be careful not to create those
488 * records again. Create a bitmap of already-recorded OWN_AG rmaps.
489 */
490 error = init_slab_cursor(ag_rmap->ar_raw_rmaps, rmap_compare, &rm_cur);
491 if (error)
492 goto err;
493 error = -bitmap_alloc(&own_ag_bitmap);
494 if (error)
495 goto err_slab;
496 while ((rm_rec = pop_slab_cursor(rm_cur)) != NULL) {
497 if (rm_rec->rm_owner != XFS_RMAP_OWN_AG)
498 continue;
499 error = -bitmap_set(own_ag_bitmap, rm_rec->rm_startblock,
500 rm_rec->rm_blockcount);
501 if (error) {
502 /*
503 * If this range is already set, then the incore rmap
504 * records for the AG free space btrees overlap and
505 * we're toast because that is not allowed.
506 */
507 if (error == EEXIST)
508 error = EFSCORRUPTED;
509 goto err_slab;
510 }
511 }
512 free_slab_cursor(&rm_cur);
513
514 /* Create rmaps for any AGFL blocks that aren't already rmapped. */
515 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
516 b = agfl_bno + ag_rmap->ar_flcount;
517 while (*b != cpu_to_be32(NULLAGBLOCK) &&
518 b - agfl_bno < libxfs_agfl_size(mp)) {
519 xfs_agblock_t agbno;
520
521 agbno = be32_to_cpu(*b);
522 if (!bitmap_test(own_ag_bitmap, agbno, 1)) {
523 error = rmap_add_ag_rec(mp, agno, agbno, 1,
524 XFS_RMAP_OWN_AG);
525 if (error)
526 goto err;
527 }
528 b++;
529 }
530 libxfs_buf_relse(agflbp);
531 agflbp = NULL;
532 bitmap_free(&own_ag_bitmap);
533
534 /* Merge all the raw rmaps into the main list */
535 error = rmap_fold_raw_recs(mp, agno);
536 if (error)
537 goto err;
538
539 	/* Create a cursor to walk the merged rmap records */
540 error = init_slab_cursor(ag_rmap->ar_rmaps, rmap_compare, &rm_cur);
541 if (error)
542 goto err;
543
544 /* Insert rmaps into the btree one at a time */
545 rm_rec = pop_slab_cursor(rm_cur);
546 while (rm_rec) {
547 struct xfs_owner_info oinfo = {};
548 struct xfs_perag *pag;
549
550 error = -libxfs_trans_alloc_rollable(mp, 16, &tp);
551 if (error)
552 goto err_slab;
553
554 error = -libxfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
555 if (error)
556 goto err_trans;
557
558 ASSERT(XFS_RMAP_NON_INODE_OWNER(rm_rec->rm_owner));
559 oinfo.oi_owner = rm_rec->rm_owner;
560 pag = libxfs_perag_get(mp, agno);
561 error = -libxfs_rmap_alloc(tp, agbp, pag, rm_rec->rm_startblock,
562 rm_rec->rm_blockcount, &oinfo);
563 libxfs_perag_put(pag);
564 if (error)
565 goto err_trans;
566
567 error = -libxfs_trans_commit(tp);
568 if (error)
569 goto err_slab;
570
571 fix_freelist(mp, agno, false);
572
573 rm_rec = pop_slab_cursor(rm_cur);
574 }
575
576 free_slab_cursor(&rm_cur);
577 return 0;
578
579 err_trans:
580 libxfs_trans_cancel(tp);
581 err_slab:
582 free_slab_cursor(&rm_cur);
583 err:
584 if (agflbp)
585 libxfs_buf_relse(agflbp);
586 if (own_ag_bitmap)
587 bitmap_free(&own_ag_bitmap);
588 return error;
589 }
590
591 #ifdef RMAP_DEBUG
592 static void
593 rmap_dump(
594 const char *msg,
595 xfs_agnumber_t agno,
596 struct xfs_rmap_irec *rmap)
597 {
598 printf("%s: %p agno=%u pblk=%llu own=%lld lblk=%llu len=%u flags=0x%x\n",
599 msg, rmap,
600 (unsigned int)agno,
601 (unsigned long long)rmap->rm_startblock,
602 (unsigned long long)rmap->rm_owner,
603 (unsigned long long)rmap->rm_offset,
604 (unsigned int)rmap->rm_blockcount,
605 (unsigned int)rmap->rm_flags);
606 }
607 #else
608 # define rmap_dump(m, a, r)
609 #endif
610
611 /*
612 * Rebuilding the Reference Count & Reverse Mapping Btrees
613 *
614 * The reference count (refcnt) and reverse mapping (rmap) btrees are
615 * rebuilt during phase 5, like all other AG btrees. Therefore, reverse
616 * mappings must be processed into reference counts at the end of phase
617 * 4, and the rmaps must be recorded during phase 4. There is a need to
618 * access the rmaps in physical block order, but no particular need for
619 * random access, so the slab.c code provides a big logical array
620 * (consisting of smaller slabs) and some inorder iterator functions.
621 *
622 * Once we've recorded all the reverse mappings, we're ready to
623 * translate the rmaps into refcount entries. Imagine the rmap entries
624 * as rectangles representing extents of physical blocks, and that the
625 * rectangles can be laid down to allow them to overlap each other; then
626 * we know that we must emit a refcnt btree entry wherever the amount of
627 * overlap changes, i.e. the emission stimulus is level-triggered:
628 *
629 * - ---
630 * -- ----- ---- --- ------
631 * -- ---- ----------- ---- ---------
632 * -------------------------------- -----------
633 * ^ ^ ^^ ^^ ^ ^^ ^^^ ^^^^ ^ ^^ ^ ^ ^
634 * 2 1 23 21 3 43 234 2123 1 01 2 3 0
635 *
636 * For our purposes, a rmap is a tuple (startblock, len, fileoff, owner).
637 *
638 * Note that in the actual refcnt btree we don't store the refcount < 2
639 * cases because the bnobt tells us which blocks are free; single-use
640 * blocks aren't recorded in the bnobt or the refcntbt. If the rmapbt
641 * supports storing multiple entries covering a given block we could
642 * theoretically dispense with the refcntbt and simply count rmaps, but
643 * that's inefficient in the (hot) write path, so we'll take the cost of
644 * the extra tree to save time. Also there's no guarantee that rmap
645 * will be enabled.
646 *
647 * Given an array of rmaps sorted by physical block number, a starting
648 * physical block (sp), a bag to hold rmaps that cover sp, and the next
649 * physical block where the level changes (np), we can reconstruct the
650 * refcount btree as follows:
651 *
652 * While there are still unprocessed rmaps in the array,
653 * - Set sp to the physical block (pblk) of the next unprocessed rmap.
654 * - Add to the bag all rmaps in the array where startblock == sp.
655 * - Set np to the physical block where the bag size will change. This
656 * is the minimum of (the pblk of the next unprocessed rmap) and
657 * (startblock + len of each rmap in the bag).
658 * - Record the bag size as old_bag_size.
659 *
660 * - While the bag isn't empty,
661 * - Remove from the bag all rmaps where startblock + len == np.
662 * - Add to the bag all rmaps in the array where startblock == np.
663 * - If the bag size isn't old_bag_size, store the refcount entry
664 * (sp, np - sp, bag_size) in the refcnt btree.
665 * - If the bag is empty, break out of the inner loop.
666 * - Set old_bag_size to the bag size
667 * - Set sp = np.
668 * - Set np to the physical block where the bag size will change.
669 * This is the minimum of (the pblk of the next unprocessed rmap)
670 * and (startblock + len of each rmap in the bag).
671 *
672 * An implementation detail is that because this processing happens
673 * during phase 4, the refcount entries are stored in an array so that
674 * phase 5 can load them into the refcount btree. The rmaps can be
675 * loaded directly into the rmap btree during phase 5 as well.
676 */
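
/*
 * A hypothetical worked example of the algorithm above: take three rmaps
 * (startblock, blockcount) A = (10, 5), B = (10, 3) and C = (16, 2).
 *
 *  - sp = 10; the bag holds {A, B}; np = min(16, 15, 13) = 13; the old
 *    bag size is 2.
 *  - At np = 13, B ends and nothing new starts, so the bag size drops to
 *    1 and we emit the refcount record (startblock 10, len 3, refcount 2).
 *  - At np = 15, A ends; the old bag size was 1, so nothing is emitted
 *    (singly-owned blocks aren't recorded) and the now-empty bag makes us
 *    move on to the next unprocessed rmap.
 *  - C covers (16, 2) alone and therefore produces no record either.
 *
 * The only refcntbt entry generated here is (10, 3, 2), covering the
 * blocks mapped by both A and B.
 */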
677
678 /*
679 * Mark all inodes in the reverse-mapping observation stack as requiring the
680 * reflink inode flag, if the stack depth is greater than 1.
681 */
682 static void
683 mark_inode_rl(
684 struct xfs_mount *mp,
685 struct xfs_bag *rmaps)
686 {
687 xfs_agnumber_t iagno;
688 struct xfs_rmap_irec *rmap;
689 struct ino_tree_node *irec;
690 int off;
691 size_t idx;
692 xfs_agino_t ino;
693
694 if (bag_count(rmaps) < 2)
695 return;
696
697 /* Reflink flag accounting */
698 foreach_bag_ptr(rmaps, idx, rmap) {
699 ASSERT(!XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner));
700 iagno = XFS_INO_TO_AGNO(mp, rmap->rm_owner);
701 ino = XFS_INO_TO_AGINO(mp, rmap->rm_owner);
702 pthread_mutex_lock(&ag_locks[iagno].lock);
703 irec = find_inode_rec(mp, iagno, ino);
704 off = get_inode_offset(mp, rmap->rm_owner, irec);
705 /* lock here because we might go outside this ag */
706 set_inode_is_rl(irec, off);
707 pthread_mutex_unlock(&ag_locks[iagno].lock);
708 }
709 }
710
711 /*
712 * Emit a refcount object for refcntbt reconstruction during phase 5.
713 */
714 #define REFCOUNT_CLAMP(nr) ((nr) > MAXREFCOUNT ? MAXREFCOUNT : (nr))
715 static void
716 refcount_emit(
717 struct xfs_mount *mp,
718 xfs_agnumber_t agno,
719 xfs_agblock_t agbno,
720 xfs_extlen_t len,
721 size_t nr_rmaps)
722 {
723 struct xfs_refcount_irec rlrec;
724 int error;
725 struct xfs_slab *rlslab;
726
727 rlslab = ag_rmaps[agno].ar_refcount_items;
728 ASSERT(nr_rmaps > 0);
729
730 dbg_printf("REFL: agno=%u pblk=%u, len=%u -> refcount=%zu\n",
731 agno, agbno, len, nr_rmaps);
732 rlrec.rc_startblock = agbno;
733 rlrec.rc_blockcount = len;
734 rlrec.rc_refcount = REFCOUNT_CLAMP(nr_rmaps);
735 error = slab_add(rlslab, &rlrec);
736 if (error)
737 do_error(
738 _("Insufficient memory while recreating refcount tree."));
739 }
740 #undef REFCOUNT_CLAMP
741
742 /*
743 * Transform a pile of physical block mapping observations into refcount data
744 * for eventual rebuilding of the btrees.
745 */
746 #define RMAP_END(r) ((r)->rm_startblock + (r)->rm_blockcount)
747 int
748 compute_refcounts(
749 struct xfs_mount *mp,
750 xfs_agnumber_t agno)
751 {
752 struct xfs_bag *stack_top = NULL;
753 struct xfs_slab *rmaps;
754 struct xfs_slab_cursor *rmaps_cur;
755 struct xfs_rmap_irec *array_cur;
756 struct xfs_rmap_irec *rmap;
757 xfs_agblock_t sbno; /* first bno of this rmap set */
758 xfs_agblock_t cbno; /* first bno of this refcount set */
759 xfs_agblock_t nbno; /* next bno where rmap set changes */
760 size_t n, idx;
761 size_t old_stack_nr;
762 int error;
763
764 if (!xfs_sb_version_hasreflink(&mp->m_sb))
765 return 0;
766
767 rmaps = ag_rmaps[agno].ar_rmaps;
768
769 error = init_slab_cursor(rmaps, rmap_compare, &rmaps_cur);
770 if (error)
771 return error;
772
773 error = init_bag(&stack_top);
774 if (error)
775 goto err;
776
777 /* While there are rmaps to be processed... */
778 n = 0;
779 while (n < slab_count(rmaps)) {
780 array_cur = peek_slab_cursor(rmaps_cur);
781 sbno = cbno = array_cur->rm_startblock;
782 /* Push all rmaps with pblk == sbno onto the stack */
783 for (;
784 array_cur && array_cur->rm_startblock == sbno;
785 array_cur = peek_slab_cursor(rmaps_cur)) {
786 advance_slab_cursor(rmaps_cur); n++;
787 rmap_dump("push0", agno, array_cur);
788 error = bag_add(stack_top, array_cur);
789 if (error)
790 goto err;
791 }
792 mark_inode_rl(mp, stack_top);
793
794 /* Set nbno to the bno of the next refcount change */
795 if (n < slab_count(rmaps) && array_cur)
796 nbno = array_cur->rm_startblock;
797 else
798 nbno = NULLAGBLOCK;
799 foreach_bag_ptr(stack_top, idx, rmap) {
800 nbno = min(nbno, RMAP_END(rmap));
801 }
802
803 		/* The next change point must lie beyond this rmap set's start */
804 ASSERT(nbno > sbno);
805 old_stack_nr = bag_count(stack_top);
806
807 /* While stack isn't empty... */
808 while (bag_count(stack_top)) {
809 /* Pop all rmaps that end at nbno */
810 foreach_bag_ptr_reverse(stack_top, idx, rmap) {
811 if (RMAP_END(rmap) != nbno)
812 continue;
813 rmap_dump("pop", agno, rmap);
814 error = bag_remove(stack_top, idx);
815 if (error)
816 goto err;
817 }
818
819 /* Push array items that start at nbno */
820 for (;
821 array_cur && array_cur->rm_startblock == nbno;
822 array_cur = peek_slab_cursor(rmaps_cur)) {
823 advance_slab_cursor(rmaps_cur); n++;
824 rmap_dump("push1", agno, array_cur);
825 error = bag_add(stack_top, array_cur);
826 if (error)
827 goto err;
828 }
829 mark_inode_rl(mp, stack_top);
830
831 /* Emit refcount if necessary */
832 ASSERT(nbno > cbno);
833 if (bag_count(stack_top) != old_stack_nr) {
834 if (old_stack_nr > 1) {
835 refcount_emit(mp, agno, cbno,
836 nbno - cbno,
837 old_stack_nr);
838 }
839 cbno = nbno;
840 }
841
842 /* Stack empty, go find the next rmap */
843 if (bag_count(stack_top) == 0)
844 break;
845 old_stack_nr = bag_count(stack_top);
846 sbno = nbno;
847
848 /* Set nbno to the bno of the next refcount change */
849 if (n < slab_count(rmaps))
850 nbno = array_cur->rm_startblock;
851 else
852 nbno = NULLAGBLOCK;
853 foreach_bag_ptr(stack_top, idx, rmap) {
854 nbno = min(nbno, RMAP_END(rmap));
855 }
856
857 			/* The next change point must lie beyond the current position */
858 ASSERT(nbno > sbno);
859 }
860 }
861 err:
862 free_bag(&stack_top);
863 free_slab_cursor(&rmaps_cur);
864
865 return error;
866 }
867 #undef RMAP_END
868
869 /*
870 * Return the number of rmap objects for an AG.
871 */
872 size_t
873 rmap_record_count(
874 struct xfs_mount *mp,
875 xfs_agnumber_t agno)
876 {
877 return slab_count(ag_rmaps[agno].ar_rmaps);
878 }
879
880 /*
881 * Return a slab cursor that will return rmap objects in order.
882 */
883 int
884 rmap_init_cursor(
885 xfs_agnumber_t agno,
886 struct xfs_slab_cursor **cur)
887 {
888 return init_slab_cursor(ag_rmaps[agno].ar_rmaps, rmap_compare, cur);
889 }
890
891 /*
892  * Disable the rmap btree check.
893 */
894 void
895 rmap_avoid_check(void)
896 {
897 rmapbt_suspect = true;
898 }
899
900 /* Look for an rmap in the rmapbt that matches a given rmap. */
901 static int
902 rmap_lookup(
903 struct xfs_btree_cur *bt_cur,
904 struct xfs_rmap_irec *rm_rec,
905 struct xfs_rmap_irec *tmp,
906 int *have)
907 {
908 int error;
909
910 /* Use the regular btree retrieval routine. */
911 error = -libxfs_rmap_lookup_le(bt_cur, rm_rec->rm_startblock,
912 rm_rec->rm_blockcount,
913 rm_rec->rm_owner, rm_rec->rm_offset,
914 rm_rec->rm_flags, have);
915 if (error)
916 return error;
917 if (*have == 0)
918 return error;
919 return -libxfs_rmap_get_rec(bt_cur, tmp, have);
920 }
921
922 /* Look for an rmap in the rmapbt that matches a given rmap. */
923 static int
924 rmap_lookup_overlapped(
925 struct xfs_btree_cur *bt_cur,
926 struct xfs_rmap_irec *rm_rec,
927 struct xfs_rmap_irec *tmp,
928 int *have)
929 {
930 /* Have to use our fancy version for overlapped */
931 return -libxfs_rmap_lookup_le_range(bt_cur, rm_rec->rm_startblock,
932 rm_rec->rm_owner, rm_rec->rm_offset,
933 rm_rec->rm_flags, tmp, have);
934 }
935
936 /* Does the btree rmap cover the observed rmap? */
937 #define NEXTP(x) ((x)->rm_startblock + (x)->rm_blockcount)
938 #define NEXTL(x) ((x)->rm_offset + (x)->rm_blockcount)
939 static bool
940 rmap_is_good(
941 struct xfs_rmap_irec *observed,
942 struct xfs_rmap_irec *btree)
943 {
944 /* Can't have mismatches in the flags or the owner. */
945 if (btree->rm_flags != observed->rm_flags ||
946 btree->rm_owner != observed->rm_owner)
947 return false;
948
949 /*
950 * Btree record can't physically start after the observed
951 * record, nor can it end before the observed record.
952 */
953 if (btree->rm_startblock > observed->rm_startblock ||
954 NEXTP(btree) < NEXTP(observed))
955 return false;
956
957 /* If this is metadata or bmbt, we're done. */
958 if (XFS_RMAP_NON_INODE_OWNER(observed->rm_owner) ||
959 (observed->rm_flags & XFS_RMAP_BMBT_BLOCK))
960 return true;
961 /*
962 * Btree record can't logically start after the observed
963 * record, nor can it end before the observed record.
964 */
965 if (btree->rm_offset > observed->rm_offset ||
966 NEXTL(btree) < NEXTL(observed))
967 return false;
968
969 return true;
970 }
971 #undef NEXTP
972 #undef NEXTL
973
974 /*
975 * Compare the observed reverse mappings against what's in the ag btree.
976 */
977 int
978 rmaps_verify_btree(
979 struct xfs_mount *mp,
980 xfs_agnumber_t agno)
981 {
982 struct xfs_rmap_irec tmp;
983 struct xfs_slab_cursor *rm_cur;
984 struct xfs_btree_cur *bt_cur = NULL;
985 struct xfs_buf *agbp = NULL;
986 struct xfs_rmap_irec *rm_rec;
987 struct xfs_perag *pag = NULL;
988 int have;
989 int error;
990
991 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
992 return 0;
993 if (rmapbt_suspect) {
994 if (no_modify && agno == 0)
995 do_warn(_("would rebuild corrupt rmap btrees.\n"));
996 return 0;
997 }
998
999 	/* Create a cursor to the observed rmap records */
1000 error = rmap_init_cursor(agno, &rm_cur);
1001 if (error)
1002 return error;
1003
1004 error = -libxfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
1005 if (error)
1006 goto err;
1007
1008 /* Leave the per-ag data "uninitialized" since we rewrite it later */
1009 pag = libxfs_perag_get(mp, agno);
1010 pag->pagf_init = 0;
1011
1012 bt_cur = libxfs_rmapbt_init_cursor(mp, NULL, agbp, pag);
1013 if (!bt_cur) {
1014 error = -ENOMEM;
1015 goto err;
1016 }
1017
1018 rm_rec = pop_slab_cursor(rm_cur);
1019 while (rm_rec) {
1020 error = rmap_lookup(bt_cur, rm_rec, &tmp, &have);
1021 if (error)
1022 goto err;
1023 /*
1024 * Using the range query is expensive, so only do it if
1025 * the regular lookup doesn't find anything or if it doesn't
1026 * match the observed rmap.
1027 */
1028 if (xfs_sb_version_hasreflink(&bt_cur->bc_mp->m_sb) &&
1029 (!have || !rmap_is_good(rm_rec, &tmp))) {
1030 error = rmap_lookup_overlapped(bt_cur, rm_rec,
1031 &tmp, &have);
1032 if (error)
1033 goto err;
1034 }
1035 if (!have) {
1036 do_warn(
1037 _("Missing reverse-mapping record for (%u/%u) %slen %u owner %"PRId64" \
1038 %s%soff %"PRIu64"\n"),
1039 agno, rm_rec->rm_startblock,
1040 (rm_rec->rm_flags & XFS_RMAP_UNWRITTEN) ?
1041 _("unwritten ") : "",
1042 rm_rec->rm_blockcount,
1043 rm_rec->rm_owner,
1044 (rm_rec->rm_flags & XFS_RMAP_ATTR_FORK) ?
1045 _("attr ") : "",
1046 (rm_rec->rm_flags & XFS_RMAP_BMBT_BLOCK) ?
1047 _("bmbt ") : "",
1048 rm_rec->rm_offset);
1049 goto next_loop;
1050 }
1051
1052 		/* Compare each rmap observation against the btree's */
1053 if (!rmap_is_good(rm_rec, &tmp)) {
1054 do_warn(
1055 _("Incorrect reverse-mapping: saw (%u/%u) %slen %u owner %"PRId64" %s%soff \
1056 %"PRIu64"; should be (%u/%u) %slen %u owner %"PRId64" %s%soff %"PRIu64"\n"),
1057 agno, tmp.rm_startblock,
1058 (tmp.rm_flags & XFS_RMAP_UNWRITTEN) ?
1059 _("unwritten ") : "",
1060 tmp.rm_blockcount,
1061 tmp.rm_owner,
1062 (tmp.rm_flags & XFS_RMAP_ATTR_FORK) ?
1063 _("attr ") : "",
1064 (tmp.rm_flags & XFS_RMAP_BMBT_BLOCK) ?
1065 _("bmbt ") : "",
1066 tmp.rm_offset,
1067 agno, rm_rec->rm_startblock,
1068 (rm_rec->rm_flags & XFS_RMAP_UNWRITTEN) ?
1069 _("unwritten ") : "",
1070 rm_rec->rm_blockcount,
1071 rm_rec->rm_owner,
1072 (rm_rec->rm_flags & XFS_RMAP_ATTR_FORK) ?
1073 _("attr ") : "",
1074 (rm_rec->rm_flags & XFS_RMAP_BMBT_BLOCK) ?
1075 _("bmbt ") : "",
1076 rm_rec->rm_offset);
1077 goto next_loop;
1078 }
1079 next_loop:
1080 rm_rec = pop_slab_cursor(rm_cur);
1081 }
1082
1083 err:
1084 if (bt_cur)
1085 libxfs_btree_del_cursor(bt_cur, XFS_BTREE_NOERROR);
1086 if (pag)
1087 libxfs_perag_put(pag);
1088 if (agbp)
1089 libxfs_buf_relse(agbp);
1090 free_slab_cursor(&rm_cur);
1091 return 0;
1092 }
1093
1094 /*
1095 * Compare the key fields of two rmap records -- positive if key1 > key2,
1096 * negative if key1 < key2, and zero if equal.
1097 */
1098 int64_t
1099 rmap_diffkeys(
1100 struct xfs_rmap_irec *kp1,
1101 struct xfs_rmap_irec *kp2)
1102 {
1103 __u64 oa;
1104 __u64 ob;
1105 int64_t d;
1106 struct xfs_rmap_irec tmp;
1107
1108 tmp = *kp1;
1109 tmp.rm_flags &= ~XFS_RMAP_REC_FLAGS;
1110 oa = libxfs_rmap_irec_offset_pack(&tmp);
1111 tmp = *kp2;
1112 tmp.rm_flags &= ~XFS_RMAP_REC_FLAGS;
1113 ob = libxfs_rmap_irec_offset_pack(&tmp);
1114
1115 d = (int64_t)kp1->rm_startblock - kp2->rm_startblock;
1116 if (d)
1117 return d;
1118
1119 if (kp1->rm_owner > kp2->rm_owner)
1120 return 1;
1121 else if (kp2->rm_owner > kp1->rm_owner)
1122 return -1;
1123
1124 if (oa > ob)
1125 return 1;
1126 else if (ob > oa)
1127 return -1;
1128 return 0;
1129 }
1130
1131 /* Compute the high key of an rmap record. */
1132 void
1133 rmap_high_key_from_rec(
1134 struct xfs_rmap_irec *rec,
1135 struct xfs_rmap_irec *key)
1136 {
1137 int adj;
1138
1139 adj = rec->rm_blockcount - 1;
1140
1141 key->rm_startblock = rec->rm_startblock + adj;
1142 key->rm_owner = rec->rm_owner;
1143 key->rm_offset = rec->rm_offset;
1144 key->rm_flags = rec->rm_flags & XFS_RMAP_KEY_FLAGS;
1145 if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) ||
1146 (rec->rm_flags & XFS_RMAP_BMBT_BLOCK))
1147 return;
1148 key->rm_offset += adj;
1149 }
1150
1151 /*
1152 * Record that an inode had the reflink flag set when repair started. The
1153 * inode reflink flag will be adjusted as necessary.
1154 */
1155 void
1156 record_inode_reflink_flag(
1157 struct xfs_mount *mp,
1158 struct xfs_dinode *dino,
1159 xfs_agnumber_t agno,
1160 xfs_agino_t ino,
1161 xfs_ino_t lino)
1162 {
1163 struct ino_tree_node *irec;
1164 int off;
1165
1166 ASSERT(XFS_AGINO_TO_INO(mp, agno, ino) == be64_to_cpu(dino->di_ino));
1167 if (!(be64_to_cpu(dino->di_flags2) & XFS_DIFLAG2_REFLINK))
1168 return;
1169 irec = find_inode_rec(mp, agno, ino);
1170 off = get_inode_offset(mp, lino, irec);
1171 ASSERT(!inode_was_rl(irec, off));
1172 set_inode_was_rl(irec, off);
1173 dbg_printf("set was_rl lino=%llu was=0x%llx\n",
1174 (unsigned long long)lino, (unsigned long long)irec->ino_was_rl);
1175 }
1176
1177 /*
1178 * Inform the user that we're clearing the reflink flag on an inode that
1179 * doesn't actually share any blocks. This is an optimization (the kernel
1180 * skips refcount checks for non-reflink files) and not a corruption repair,
1181 * so we don't need to log every time we clear a flag unless verbose mode is
1182 * enabled.
1183 */
1184 static void
1185 warn_clearing_reflink(
1186 xfs_ino_t ino)
1187 {
1188 static bool warned = false;
1189 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
1190
1191 if (verbose) {
1192 do_warn(_("clearing reflink flag on inode %"PRIu64"\n"), ino);
1193 return;
1194 }
1195
1196 if (warned)
1197 return;
1198
1199 pthread_mutex_lock(&lock);
1200 if (!warned) {
1201 do_warn(_("clearing reflink flag on inodes when possible\n"));
1202 warned = true;
1203 }
1204 pthread_mutex_unlock(&lock);
1205 }
1206
1207 /*
1208 * Fix an inode's reflink flag.
1209 */
1210 static int
1211 fix_inode_reflink_flag(
1212 struct xfs_mount *mp,
1213 xfs_agnumber_t agno,
1214 xfs_agino_t agino,
1215 bool set)
1216 {
1217 struct xfs_dinode *dino;
1218 struct xfs_buf *buf;
1219
1220 if (set)
1221 do_warn(
1222 _("setting reflink flag on inode %"PRIu64"\n"),
1223 XFS_AGINO_TO_INO(mp, agno, agino));
1224 else if (!no_modify) /* && !set */
1225 warn_clearing_reflink(XFS_AGINO_TO_INO(mp, agno, agino));
1226 if (no_modify)
1227 return 0;
1228
1229 buf = get_agino_buf(mp, agno, agino, &dino);
1230 if (!buf)
1231 return 1;
1232 ASSERT(XFS_AGINO_TO_INO(mp, agno, agino) == be64_to_cpu(dino->di_ino));
1233 if (set)
1234 dino->di_flags2 |= cpu_to_be64(XFS_DIFLAG2_REFLINK);
1235 else
1236 dino->di_flags2 &= cpu_to_be64(~XFS_DIFLAG2_REFLINK);
1237 libxfs_dinode_calc_crc(mp, dino);
1238 libxfs_buf_mark_dirty(buf);
1239 libxfs_buf_relse(buf);
1240
1241 return 0;
1242 }
1243
1244 /*
1245 * Fix discrepancies between the state of the inode reflink flag and our
1246 * observations as to whether or not the inode really needs it.
1247 */
1248 int
1249 fix_inode_reflink_flags(
1250 struct xfs_mount *mp,
1251 xfs_agnumber_t agno)
1252 {
1253 struct ino_tree_node *irec;
1254 int bit;
1255 uint64_t was;
1256 uint64_t is;
1257 uint64_t diff;
1258 uint64_t mask;
1259 int error = 0;
1260 xfs_agino_t agino;
1261
1262 /*
1263 * Update the reflink flag for any inode where there's a discrepancy
1264 * between the inode flag and whether or not we found any reflinked
1265 * extents.
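	 * For instance (made-up masks): was = 0b0110 and is = 0b0011 give
	 * diff = 0b0101, so the inode at bit 0 gains the reflink flag, the
	 * inode at bit 2 loses it, and bit 1 (set in both) is left alone.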
1266 */
1267 for (irec = findfirst_inode_rec(agno);
1268 irec != NULL;
1269 irec = next_ino_rec(irec)) {
1270 ASSERT((irec->ino_was_rl & irec->ir_free) == 0);
1271 ASSERT((irec->ino_is_rl & irec->ir_free) == 0);
1272 was = irec->ino_was_rl;
1273 is = irec->ino_is_rl;
1274 if (was == is)
1275 continue;
1276 diff = was ^ is;
1277 dbg_printf("mismatch ino=%llu was=0x%lx is=0x%lx dif=0x%lx\n",
1278 (unsigned long long)XFS_AGINO_TO_INO(mp, agno,
1279 irec->ino_startnum),
1280 was, is, diff);
1281
1282 for (bit = 0, mask = 1; bit < 64; bit++, mask <<= 1) {
1283 agino = bit + irec->ino_startnum;
1284 if (!(diff & mask))
1285 continue;
1286 else if (was & mask)
1287 error = fix_inode_reflink_flag(mp, agno, agino,
1288 false);
1289 else if (is & mask)
1290 error = fix_inode_reflink_flag(mp, agno, agino,
1291 true);
1292 else
1293 ASSERT(0);
1294 if (error)
1295 do_error(
1296 _("Unable to fix reflink flag on inode %"PRIu64".\n"),
1297 XFS_AGINO_TO_INO(mp, agno, agino));
1298 }
1299 }
1300
1301 return error;
1302 }
1303
1304 /*
1305 * Return the number of refcount objects for an AG.
1306 */
1307 size_t
1308 refcount_record_count(
1309 struct xfs_mount *mp,
1310 xfs_agnumber_t agno)
1311 {
1312 return slab_count(ag_rmaps[agno].ar_refcount_items);
1313 }
1314
1315 /*
1316 * Return a slab cursor that will return refcount objects in order.
1317 */
1318 int
1319 init_refcount_cursor(
1320 xfs_agnumber_t agno,
1321 struct xfs_slab_cursor **cur)
1322 {
1323 return init_slab_cursor(ag_rmaps[agno].ar_refcount_items, NULL, cur);
1324 }
1325
1326 /*
1327 * Disable the refcount btree check.
1328 */
1329 void
1330 refcount_avoid_check(void)
1331 {
1332 refcbt_suspect = true;
1333 }
1334
1335 /*
1336 * Compare the observed reference counts against what's in the ag btree.
1337 */
1338 int
1339 check_refcounts(
1340 struct xfs_mount *mp,
1341 xfs_agnumber_t agno)
1342 {
1343 struct xfs_refcount_irec tmp;
1344 struct xfs_slab_cursor *rl_cur;
1345 struct xfs_btree_cur *bt_cur = NULL;
1346 struct xfs_buf *agbp = NULL;
1347 struct xfs_perag *pag = NULL;
1348 struct xfs_refcount_irec *rl_rec;
1349 int have;
1350 int i;
1351 int error;
1352
1353 if (!xfs_sb_version_hasreflink(&mp->m_sb))
1354 return 0;
1355 if (refcbt_suspect) {
1356 if (no_modify && agno == 0)
1357 do_warn(_("would rebuild corrupt refcount btrees.\n"));
1358 return 0;
1359 }
1360
1361 /* Create cursors to refcount structures */
1362 error = init_refcount_cursor(agno, &rl_cur);
1363 if (error)
1364 return error;
1365
1366 error = -libxfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
1367 if (error)
1368 goto err;
1369
1370 /* Leave the per-ag data "uninitialized" since we rewrite it later */
1371 pag = libxfs_perag_get(mp, agno);
1372 pag->pagf_init = 0;
1373
1374 bt_cur = libxfs_refcountbt_init_cursor(mp, NULL, agbp, pag);
1375 if (!bt_cur) {
1376 error = -ENOMEM;
1377 goto err;
1378 }
1379
1380 rl_rec = pop_slab_cursor(rl_cur);
1381 while (rl_rec) {
1382 /* Look for a refcount record in the btree */
1383 error = -libxfs_refcount_lookup_le(bt_cur,
1384 rl_rec->rc_startblock, &have);
1385 if (error)
1386 goto err;
1387 if (!have) {
1388 do_warn(
1389 _("Missing reference count record for (%u/%u) len %u count %u\n"),
1390 agno, rl_rec->rc_startblock,
1391 rl_rec->rc_blockcount, rl_rec->rc_refcount);
1392 goto next_loop;
1393 }
1394
1395 error = -libxfs_refcount_get_rec(bt_cur, &tmp, &i);
1396 if (error)
1397 goto err;
1398 if (!i) {
1399 do_warn(
1400 _("Missing reference count record for (%u/%u) len %u count %u\n"),
1401 agno, rl_rec->rc_startblock,
1402 rl_rec->rc_blockcount, rl_rec->rc_refcount);
1403 goto next_loop;
1404 }
1405
1406 /* Compare each refcount observation against the btree's */
1407 if (tmp.rc_startblock != rl_rec->rc_startblock ||
1408 tmp.rc_blockcount != rl_rec->rc_blockcount ||
1409 tmp.rc_refcount != rl_rec->rc_refcount)
1410 do_warn(
1411 _("Incorrect reference count: saw (%u/%u) len %u nlinks %u; should be (%u/%u) len %u nlinks %u\n"),
1412 agno, tmp.rc_startblock, tmp.rc_blockcount,
1413 tmp.rc_refcount, agno, rl_rec->rc_startblock,
1414 rl_rec->rc_blockcount, rl_rec->rc_refcount);
1415 next_loop:
1416 rl_rec = pop_slab_cursor(rl_cur);
1417 }
1418
1419 err:
1420 if (bt_cur)
1421 libxfs_btree_del_cursor(bt_cur, error ? XFS_BTREE_ERROR :
1422 XFS_BTREE_NOERROR);
1423 if (pag)
1424 libxfs_perag_put(pag);
1425 if (agbp)
1426 libxfs_buf_relse(agbp);
1427 free_slab_cursor(&rl_cur);
1428 return 0;
1429 }
1430
1431 /*
1432 * Regenerate the AGFL so that we don't run out of it while rebuilding the
1433 * rmap btree. If skip_rmapbt is true, don't update the rmapbt (most probably
1434 * because we're updating the rmapbt).
1435 */
1436 void
1437 fix_freelist(
1438 struct xfs_mount *mp,
1439 xfs_agnumber_t agno,
1440 bool skip_rmapbt)
1441 {
1442 xfs_alloc_arg_t args;
1443 xfs_trans_t *tp;
1444 int flags;
1445 int error;
1446
1447 memset(&args, 0, sizeof(args));
1448 args.mp = mp;
1449 args.agno = agno;
1450 args.alignment = 1;
1451 args.pag = libxfs_perag_get(mp, agno);
1452 error = -libxfs_trans_alloc_rollable(mp, 0, &tp);
1453 if (error)
1454 do_error(_("failed to fix AGFL on AG %d, error %d\n"),
1455 agno, error);
1456 args.tp = tp;
1457
1458 /*
1459 * Prior to rmapbt, all we had to do to fix the freelist is "expand"
1460 * the fresh AGFL header from empty to full. That hasn't changed. For
1461 * rmapbt, however, things change a bit.
1462 *
1463 * When we're stuffing the rmapbt with the AG btree rmaps the tree can
1464 * expand, so we need to keep the AGFL well-stocked for the expansion.
1465 * However, this expansion can cause the bnobt/cntbt to shrink, which
1466 * can make the AGFL eligible for shrinking. Shrinking involves
1467 * freeing rmapbt entries, but since we haven't finished loading the
1468 * rmapbt with the btree rmaps it's possible for the remove operation
1469 * to fail. The AGFL block is large enough at this point to absorb any
1470 * blocks freed from the bnobt/cntbt, so we can disable shrinking.
1471 *
1472 * During the initial AGFL regeneration during AGF generation in phase5
1473 * we must also disable rmapbt modifications because the AGF that
1474 * libxfs reads does not yet point to the new rmapbt. These initial
1475 * AGFL entries are added just prior to adding the AG btree block rmaps
1476 * to the rmapbt. It's ok to pass NOSHRINK here too, since the AGFL is
1477 * empty and cannot shrink.
1478 */
1479 flags = XFS_ALLOC_FLAG_NOSHRINK;
1480 if (skip_rmapbt)
1481 flags |= XFS_ALLOC_FLAG_NORMAP;
1482 error = -libxfs_alloc_fix_freelist(&args, flags);
1483 libxfs_perag_put(args.pag);
1484 if (error) {
1485 do_error(_("failed to fix AGFL on AG %d, error %d\n"),
1486 agno, error);
1487 }
1488 error = -libxfs_trans_commit(tp);
1489 if (error)
1490 do_error(_("%s: commit failed, error %d\n"), __func__, error);
1491 }
1492
1493 /*
1494 * Remember how many AGFL entries came from excess AG btree allocations and
1495 * therefore already have rmap entries.
1496 */
1497 void
1498 rmap_store_agflcount(
1499 struct xfs_mount *mp,
1500 xfs_agnumber_t agno,
1501 int count)
1502 {
1503 if (!rmap_needs_work(mp))
1504 return;
1505
1506 ag_rmaps[agno].ar_flcount = count;
1507 }