/* NOTE(review): blame-table export residue removed; lines below retain the
 * original annotated source until it can be regenerated cleanly. */
79f86c9d DW |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* | |
3 | * Copyright (C) 2020 Oracle. All Rights Reserved. | |
4 | * Author: Darrick J. Wong <darrick.wong@oracle.com> | |
5 | */ | |
6 | #include <libxfs.h> | |
7 | #include "err_protos.h" | |
c94d40ce | 8 | #include "libfrog/bitmap.h" |
79f86c9d DW |
9 | #include "slab.h" |
10 | #include "rmap.h" | |
11 | #include "incore.h" | |
12 | #include "bulkload.h" | |
13 | #include "agbtree.h" | |
14 | ||
15 | /* Initialize a btree rebuild context. */ | |
16 | static void | |
17 | init_rebuild( | |
18 | struct repair_ctx *sc, | |
19 | const struct xfs_owner_info *oinfo, | |
20 | xfs_agblock_t free_space, | |
21 | struct bt_rebuild *btr) | |
22 | { | |
23 | memset(btr, 0, sizeof(struct bt_rebuild)); | |
24 | ||
25 | bulkload_init_ag(&btr->newbt, sc, oinfo); | |
26 | bulkload_estimate_ag_slack(sc, &btr->bload, free_space); | |
27 | } | |
28 | ||
29 | /* | |
30 | * Update this free space record to reflect the blocks we stole from the | |
31 | * beginning of the record. | |
32 | */ | |
33 | static void | |
34 | consume_freespace( | |
35 | xfs_agnumber_t agno, | |
36 | struct extent_tree_node *ext_ptr, | |
37 | uint32_t len) | |
38 | { | |
39 | struct extent_tree_node *bno_ext_ptr; | |
40 | xfs_agblock_t new_start = ext_ptr->ex_startblock + len; | |
41 | xfs_extlen_t new_len = ext_ptr->ex_blockcount - len; | |
42 | ||
43 | /* Delete the used-up extent from both extent trees. */ | |
44 | #ifdef XR_BLD_FREE_TRACE | |
45 | fprintf(stderr, "releasing extent: %u [%u %u]\n", agno, | |
46 | ext_ptr->ex_startblock, ext_ptr->ex_blockcount); | |
47 | #endif | |
48 | bno_ext_ptr = find_bno_extent(agno, ext_ptr->ex_startblock); | |
49 | ASSERT(bno_ext_ptr != NULL); | |
50 | get_bno_extent(agno, bno_ext_ptr); | |
51 | release_extent_tree_node(bno_ext_ptr); | |
52 | ||
53 | ext_ptr = get_bcnt_extent(agno, ext_ptr->ex_startblock, | |
54 | ext_ptr->ex_blockcount); | |
55 | release_extent_tree_node(ext_ptr); | |
56 | ||
57 | /* | |
58 | * If we only used part of this last extent, then we must reinsert the | |
59 | * extent to maintain proper sorting order. | |
60 | */ | |
61 | if (new_len > 0) { | |
62 | add_bno_extent(agno, new_start, new_len); | |
63 | add_bcnt_extent(agno, new_start, new_len); | |
64 | } | |
65 | } | |
66 | ||
41865980 DW |
67 | /* |
68 | * Reserve blocks for the new per-AG structures. Returns true if all blocks | |
69 | * were allocated, and false if we ran out of space. | |
70 | */ | |
71 | static bool | |
72 | reserve_agblocks( | |
79f86c9d DW |
73 | struct xfs_mount *mp, |
74 | xfs_agnumber_t agno, | |
75 | struct bt_rebuild *btr, | |
76 | uint32_t nr_blocks) | |
77 | { | |
78 | struct extent_tree_node *ext_ptr; | |
79 | uint32_t blocks_allocated = 0; | |
80 | uint32_t len; | |
81 | int error; | |
82 | ||
83 | while (blocks_allocated < nr_blocks) { | |
84 | xfs_fsblock_t fsbno; | |
85 | ||
86 | /* | |
87 | * Grab the smallest extent and use it up, then get the | |
88 | * next smallest. This mimics the init_*_cursor code. | |
89 | */ | |
90 | ext_ptr = findfirst_bcnt_extent(agno); | |
91 | if (!ext_ptr) | |
41865980 | 92 | break; |
79f86c9d DW |
93 | |
94 | /* Use up the extent we've got. */ | |
95 | len = min(ext_ptr->ex_blockcount, nr_blocks - blocks_allocated); | |
96 | fsbno = XFS_AGB_TO_FSB(mp, agno, ext_ptr->ex_startblock); | |
97 | error = bulkload_add_blocks(&btr->newbt, fsbno, len); | |
98 | if (error) | |
99 | do_error(_("could not set up btree reservation: %s\n"), | |
100 | strerror(-error)); | |
101 | ||
102 | error = rmap_add_ag_rec(mp, agno, ext_ptr->ex_startblock, len, | |
103 | btr->newbt.oinfo.oi_owner); | |
104 | if (error) | |
105 | do_error(_("could not set up btree rmaps: %s\n"), | |
106 | strerror(-error)); | |
107 | ||
108 | consume_freespace(agno, ext_ptr, len); | |
109 | blocks_allocated += len; | |
110 | } | |
111 | #ifdef XR_BLD_FREE_TRACE | |
112 | fprintf(stderr, "blocks_allocated = %d\n", | |
113 | blocks_allocated); | |
114 | #endif | |
41865980 DW |
115 | return blocks_allocated == nr_blocks; |
116 | } | |
117 | ||
118 | static inline void | |
119 | reserve_btblocks( | |
120 | struct xfs_mount *mp, | |
121 | xfs_agnumber_t agno, | |
122 | struct bt_rebuild *btr, | |
123 | uint32_t nr_blocks) | |
124 | { | |
125 | if (!reserve_agblocks(mp, agno, btr, nr_blocks)) | |
126 | do_error( | |
127 | _("error - not enough free space in filesystem, AG %u\n"), | |
128 | agno); | |
79f86c9d DW |
129 | } |
130 | ||
131 | /* Feed one of the new btree blocks to the bulk loader. */ | |
132 | static int | |
133 | rebuild_claim_block( | |
134 | struct xfs_btree_cur *cur, | |
135 | union xfs_btree_ptr *ptr, | |
136 | void *priv) | |
137 | { | |
138 | struct bt_rebuild *btr = priv; | |
139 | ||
140 | return bulkload_claim_block(cur, &btr->newbt, ptr); | |
141 | } | |
142 | ||
143 | /* | |
144 | * Scoop up leftovers from a rebuild cursor for later freeing, then free the | |
145 | * rebuild context. | |
146 | */ | |
147 | void | |
148 | finish_rebuild( | |
149 | struct xfs_mount *mp, | |
150 | struct bt_rebuild *btr, | |
c94d40ce | 151 | struct bitmap *lost_blocks) |
79f86c9d DW |
152 | { |
153 | struct bulkload_resv *resv, *n; | |
c94d40ce | 154 | int error; |
79f86c9d DW |
155 | |
156 | for_each_bulkload_reservation(&btr->newbt, resv, n) { | |
c94d40ce DW |
157 | if (resv->used == resv->len) |
158 | continue; | |
159 | ||
160 | error = bitmap_set(lost_blocks, resv->fsbno + resv->used, | |
161 | resv->len - resv->used); | |
162 | if (error) | |
163 | do_error( | |
164 | _("Insufficient memory saving lost blocks, err=%d.\n"), error); | |
165 | resv->used = resv->len; | |
79f86c9d DW |
166 | } |
167 | ||
168 | bulkload_destroy(&btr->newbt, 0); | |
169 | } | |
7e5ec4e4 DW |
170 | |
171 | /* | |
172 | * Free Space Btrees | |
173 | * | |
174 | * We need to leave some free records in the tree for the corner case of | |
175 | * setting up the AGFL. This may require allocation of blocks, and as | |
176 | * such can require insertion of new records into the tree (e.g. moving | |
177 | * a record in the by-count tree when a long extent is shortened). If we | |
178 | * pack the records into the leaves with no slack space, this requires a | |
179 | * leaf split to occur and a block to be allocated from the free list. | |
180 | * If we don't have any blocks on the free list (because we are setting | |
181 | * it up!), then we fail, and the filesystem will fail with the same | |
182 | * failure at runtime. Hence leave a couple of records slack space in | |
183 | * each block to allow immediate modification of the tree without | |
184 | * requiring splits to be done. | |
185 | */ | |
186 | ||
187 | /* | |
188 | * Return the next free space extent tree record from the previous value we | |
189 | * saw. | |
190 | */ | |
191 | static inline struct extent_tree_node * | |
192 | get_bno_rec( | |
193 | struct xfs_btree_cur *cur, | |
194 | struct extent_tree_node *prev_value) | |
195 | { | |
196 | xfs_agnumber_t agno = cur->bc_ag.agno; | |
197 | ||
198 | if (cur->bc_btnum == XFS_BTNUM_BNO) { | |
199 | if (!prev_value) | |
200 | return findfirst_bno_extent(agno); | |
201 | return findnext_bno_extent(prev_value); | |
202 | } | |
203 | ||
204 | /* cnt btree */ | |
205 | if (!prev_value) | |
206 | return findfirst_bcnt_extent(agno); | |
207 | return findnext_bcnt_extent(agno, prev_value); | |
208 | } | |
209 | ||
210 | /* Grab one bnobt record and put it in the btree cursor. */ | |
211 | static int | |
212 | get_bnobt_record( | |
213 | struct xfs_btree_cur *cur, | |
214 | void *priv) | |
215 | { | |
216 | struct bt_rebuild *btr = priv; | |
217 | struct xfs_alloc_rec_incore *arec = &cur->bc_rec.a; | |
218 | ||
219 | btr->bno_rec = get_bno_rec(cur, btr->bno_rec); | |
220 | arec->ar_startblock = btr->bno_rec->ex_startblock; | |
221 | arec->ar_blockcount = btr->bno_rec->ex_blockcount; | |
222 | btr->freeblks += btr->bno_rec->ex_blockcount; | |
223 | return 0; | |
224 | } | |
225 | ||
226 | void | |
227 | init_freespace_cursors( | |
228 | struct repair_ctx *sc, | |
ecb44e84 | 229 | struct xfs_perag *pag, |
7e5ec4e4 DW |
230 | unsigned int free_space, |
231 | unsigned int *nr_extents, | |
232 | int *extra_blocks, | |
233 | struct bt_rebuild *btr_bno, | |
234 | struct bt_rebuild *btr_cnt) | |
235 | { | |
ecb44e84 | 236 | xfs_agnumber_t agno = pag->pag_agno; |
41865980 | 237 | unsigned int agfl_goal; |
7e5ec4e4 DW |
238 | int error; |
239 | ||
41865980 DW |
240 | agfl_goal = libxfs_alloc_min_freelist(sc->mp, NULL); |
241 | ||
7e5ec4e4 DW |
242 | init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_bno); |
243 | init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr_cnt); | |
244 | ||
245 | btr_bno->cur = libxfs_allocbt_stage_cursor(sc->mp, | |
ecb44e84 | 246 | &btr_bno->newbt.afake, pag, XFS_BTNUM_BNO); |
7e5ec4e4 | 247 | btr_cnt->cur = libxfs_allocbt_stage_cursor(sc->mp, |
ecb44e84 | 248 | &btr_cnt->newbt.afake, pag, XFS_BTNUM_CNT); |
7e5ec4e4 DW |
249 | |
250 | btr_bno->bload.get_record = get_bnobt_record; | |
251 | btr_bno->bload.claim_block = rebuild_claim_block; | |
252 | ||
253 | btr_cnt->bload.get_record = get_bnobt_record; | |
254 | btr_cnt->bload.claim_block = rebuild_claim_block; | |
255 | ||
256 | /* | |
257 | * Now we need to allocate blocks for the free space btrees using the | |
258 | * free space records we're about to put in them. Every record we use | |
259 | * can change the shape of the free space trees, so we recompute the | |
260 | * btree shape until we stop needing /more/ blocks. If we have any | |
261 | * left over we'll stash them in the AGFL when we're done. | |
262 | */ | |
263 | do { | |
264 | unsigned int num_freeblocks; | |
6ffc9523 | 265 | int delta_bno, delta_cnt; |
41865980 | 266 | int agfl_wanted; |
7e5ec4e4 DW |
267 | |
268 | /* Compute how many bnobt blocks we'll need. */ | |
269 | error = -libxfs_btree_bload_compute_geometry(btr_bno->cur, | |
270 | &btr_bno->bload, *nr_extents); | |
271 | if (error) | |
272 | do_error( | |
273 | _("Unable to compute free space by block btree geometry, error %d.\n"), -error); | |
274 | ||
275 | /* Compute how many cntbt blocks we'll need. */ | |
276 | error = -libxfs_btree_bload_compute_geometry(btr_cnt->cur, | |
277 | &btr_cnt->bload, *nr_extents); | |
278 | if (error) | |
279 | do_error( | |
280 | _("Unable to compute free space by length btree geometry, error %d.\n"), -error); | |
281 | ||
6ffc9523 DW |
282 | /* |
283 | * Compute the deficit between the number of blocks reserved | |
284 | * and the number of blocks we think we need for the btree. | |
285 | */ | |
286 | delta_bno = (int)btr_bno->newbt.nr_reserved - | |
287 | btr_bno->bload.nr_blocks; | |
288 | delta_cnt = (int)btr_cnt->newbt.nr_reserved - | |
289 | btr_cnt->bload.nr_blocks; | |
290 | ||
7e5ec4e4 | 291 | /* We don't need any more blocks, so we're done. */ |
41865980 DW |
292 | if (delta_bno >= 0 && delta_cnt >= 0 && |
293 | delta_bno + delta_cnt >= agfl_goal) { | |
6ffc9523 | 294 | *extra_blocks = delta_bno + delta_cnt; |
7e5ec4e4 | 295 | break; |
6ffc9523 | 296 | } |
7e5ec4e4 DW |
297 | |
298 | /* Allocate however many more blocks we need this time. */ | |
41865980 | 299 | if (delta_bno < 0) { |
6ffc9523 | 300 | reserve_btblocks(sc->mp, agno, btr_bno, -delta_bno); |
41865980 DW |
301 | delta_bno = 0; |
302 | } | |
303 | if (delta_cnt < 0) { | |
6ffc9523 | 304 | reserve_btblocks(sc->mp, agno, btr_cnt, -delta_cnt); |
41865980 DW |
305 | delta_cnt = 0; |
306 | } | |
307 | ||
308 | /* | |
309 | * Try to fill the bnobt cursor with extra blocks to populate | |
310 | * the AGFL. If we don't get all the blocks we want, stop | |
311 | * trying to fill the AGFL because the AG is totally out of | |
312 | * space. | |
313 | */ | |
314 | agfl_wanted = agfl_goal - (delta_bno + delta_cnt); | |
315 | if (agfl_wanted > 0 && | |
316 | !reserve_agblocks(sc->mp, agno, btr_bno, agfl_wanted)) | |
317 | agfl_goal = 0; | |
7e5ec4e4 DW |
318 | |
319 | /* Ok, now how many free space records do we have? */ | |
320 | *nr_extents = count_bno_extents_blocks(agno, &num_freeblocks); | |
321 | } while (1); | |
7e5ec4e4 DW |
322 | } |
323 | ||
324 | /* Rebuild the free space btrees. */ | |
325 | void | |
326 | build_freespace_btrees( | |
327 | struct repair_ctx *sc, | |
328 | xfs_agnumber_t agno, | |
329 | struct bt_rebuild *btr_bno, | |
330 | struct bt_rebuild *btr_cnt) | |
331 | { | |
332 | int error; | |
333 | ||
334 | /* Add all observed bnobt records. */ | |
335 | error = -libxfs_btree_bload(btr_bno->cur, &btr_bno->bload, btr_bno); | |
336 | if (error) | |
337 | do_error( | |
338 | _("Error %d while creating bnobt btree for AG %u.\n"), error, agno); | |
339 | ||
340 | /* Add all observed cntbt records. */ | |
341 | error = -libxfs_btree_bload(btr_cnt->cur, &btr_cnt->bload, btr_cnt); | |
342 | if (error) | |
343 | do_error( | |
344 | _("Error %d while creating cntbt btree for AG %u.\n"), error, agno); | |
345 | ||
346 | /* Since we're not writing the AGF yet, no need to commit the cursor */ | |
347 | libxfs_btree_del_cursor(btr_bno->cur, 0); | |
348 | libxfs_btree_del_cursor(btr_cnt->cur, 0); | |
349 | } | |
7a21223c DW |
350 | |
351 | /* Inode Btrees */ | |
352 | ||
353 | static inline struct ino_tree_node * | |
354 | get_ino_rec( | |
355 | struct xfs_btree_cur *cur, | |
356 | struct ino_tree_node *prev_value) | |
357 | { | |
358 | xfs_agnumber_t agno = cur->bc_ag.agno; | |
359 | ||
360 | if (cur->bc_btnum == XFS_BTNUM_INO) { | |
361 | if (!prev_value) | |
362 | return findfirst_inode_rec(agno); | |
363 | return next_ino_rec(prev_value); | |
364 | } | |
365 | ||
366 | /* finobt */ | |
367 | if (!prev_value) | |
368 | return findfirst_free_inode_rec(agno); | |
369 | return next_free_ino_rec(prev_value); | |
370 | } | |
371 | ||
372 | /* Grab one inobt record. */ | |
373 | static int | |
374 | get_inobt_record( | |
375 | struct xfs_btree_cur *cur, | |
376 | void *priv) | |
377 | { | |
378 | struct bt_rebuild *btr = priv; | |
379 | struct xfs_inobt_rec_incore *irec = &cur->bc_rec.i; | |
380 | struct ino_tree_node *ino_rec; | |
381 | int inocnt = 0; | |
382 | int finocnt = 0; | |
383 | int k; | |
384 | ||
385 | btr->ino_rec = ino_rec = get_ino_rec(cur, btr->ino_rec); | |
386 | ||
387 | /* Transform the incore record into an on-disk record. */ | |
388 | irec->ir_startino = ino_rec->ino_startnum; | |
389 | irec->ir_free = ino_rec->ir_free; | |
390 | ||
391 | for (k = 0; k < sizeof(xfs_inofree_t) * NBBY; k++) { | |
392 | ASSERT(is_inode_confirmed(ino_rec, k)); | |
393 | ||
394 | if (is_inode_sparse(ino_rec, k)) | |
395 | continue; | |
396 | if (is_inode_free(ino_rec, k)) | |
397 | finocnt++; | |
398 | inocnt++; | |
399 | } | |
400 | ||
401 | irec->ir_count = inocnt; | |
402 | irec->ir_freecount = finocnt; | |
403 | ||
404 | if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) { | |
405 | uint64_t sparse; | |
406 | int spmask; | |
407 | uint16_t holemask; | |
408 | ||
409 | /* | |
410 | * Convert the 64-bit in-core sparse inode state to the | |
411 | * 16-bit on-disk holemask. | |
412 | */ | |
413 | holemask = 0; | |
414 | spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1; | |
415 | sparse = ino_rec->ir_sparse; | |
416 | for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) { | |
417 | if (sparse & spmask) { | |
418 | ASSERT((sparse & spmask) == spmask); | |
419 | holemask |= (1 << k); | |
420 | } else | |
421 | ASSERT((sparse & spmask) == 0); | |
422 | sparse >>= XFS_INODES_PER_HOLEMASK_BIT; | |
423 | } | |
424 | ||
425 | irec->ir_holemask = holemask; | |
426 | } else { | |
427 | irec->ir_holemask = 0; | |
428 | } | |
429 | ||
430 | if (btr->first_agino == NULLAGINO) | |
431 | btr->first_agino = ino_rec->ino_startnum; | |
432 | btr->freecount += finocnt; | |
433 | btr->count += inocnt; | |
434 | return 0; | |
435 | } | |
436 | ||
437 | /* Initialize both inode btree cursors as needed. */ | |
438 | void | |
439 | init_ino_cursors( | |
440 | struct repair_ctx *sc, | |
441 | xfs_agnumber_t agno, | |
442 | unsigned int free_space, | |
443 | uint64_t *num_inos, | |
444 | uint64_t *num_free_inos, | |
445 | struct bt_rebuild *btr_ino, | |
446 | struct bt_rebuild *btr_fino) | |
447 | { | |
448 | struct ino_tree_node *ino_rec; | |
449 | unsigned int ino_recs = 0; | |
450 | unsigned int fino_recs = 0; | |
451 | bool finobt; | |
452 | int error; | |
453 | ||
454 | finobt = xfs_sb_version_hasfinobt(&sc->mp->m_sb); | |
455 | init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, free_space, btr_ino); | |
456 | ||
457 | /* Compute inode statistics. */ | |
458 | *num_free_inos = 0; | |
459 | *num_inos = 0; | |
460 | for (ino_rec = findfirst_inode_rec(agno); | |
461 | ino_rec != NULL; | |
462 | ino_rec = next_ino_rec(ino_rec)) { | |
463 | unsigned int rec_ninos = 0; | |
464 | unsigned int rec_nfinos = 0; | |
465 | int i; | |
466 | ||
467 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) { | |
468 | ASSERT(is_inode_confirmed(ino_rec, i)); | |
469 | /* | |
470 | * sparse inodes are not factored into superblock (free) | |
471 | * inode counts | |
472 | */ | |
473 | if (is_inode_sparse(ino_rec, i)) | |
474 | continue; | |
475 | if (is_inode_free(ino_rec, i)) | |
476 | rec_nfinos++; | |
477 | rec_ninos++; | |
478 | } | |
479 | ||
480 | *num_free_inos += rec_nfinos; | |
481 | *num_inos += rec_ninos; | |
482 | ino_recs++; | |
483 | ||
484 | /* finobt only considers records with free inodes */ | |
485 | if (rec_nfinos) | |
486 | fino_recs++; | |
487 | } | |
488 | ||
489 | btr_ino->cur = libxfs_inobt_stage_cursor(sc->mp, &btr_ino->newbt.afake, | |
490 | agno, XFS_BTNUM_INO); | |
491 | ||
492 | btr_ino->bload.get_record = get_inobt_record; | |
493 | btr_ino->bload.claim_block = rebuild_claim_block; | |
494 | btr_ino->first_agino = NULLAGINO; | |
495 | ||
496 | /* Compute how many inobt blocks we'll need. */ | |
497 | error = -libxfs_btree_bload_compute_geometry(btr_ino->cur, | |
498 | &btr_ino->bload, ino_recs); | |
499 | if (error) | |
500 | do_error( | |
501 | _("Unable to compute inode btree geometry, error %d.\n"), error); | |
502 | ||
503 | reserve_btblocks(sc->mp, agno, btr_ino, btr_ino->bload.nr_blocks); | |
504 | ||
505 | if (!finobt) | |
506 | return; | |
507 | ||
508 | init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, free_space, btr_fino); | |
509 | btr_fino->cur = libxfs_inobt_stage_cursor(sc->mp, | |
510 | &btr_fino->newbt.afake, agno, XFS_BTNUM_FINO); | |
511 | ||
512 | btr_fino->bload.get_record = get_inobt_record; | |
513 | btr_fino->bload.claim_block = rebuild_claim_block; | |
514 | btr_fino->first_agino = NULLAGINO; | |
515 | ||
516 | /* Compute how many finobt blocks we'll need. */ | |
517 | error = -libxfs_btree_bload_compute_geometry(btr_fino->cur, | |
518 | &btr_fino->bload, fino_recs); | |
519 | if (error) | |
520 | do_error( | |
521 | _("Unable to compute free inode btree geometry, error %d.\n"), error); | |
522 | ||
523 | reserve_btblocks(sc->mp, agno, btr_fino, btr_fino->bload.nr_blocks); | |
524 | } | |
525 | ||
526 | /* Rebuild the inode btrees. */ | |
527 | void | |
528 | build_inode_btrees( | |
529 | struct repair_ctx *sc, | |
530 | xfs_agnumber_t agno, | |
531 | struct bt_rebuild *btr_ino, | |
532 | struct bt_rebuild *btr_fino) | |
533 | { | |
534 | int error; | |
535 | ||
536 | /* Add all observed inobt records. */ | |
537 | error = -libxfs_btree_bload(btr_ino->cur, &btr_ino->bload, btr_ino); | |
538 | if (error) | |
539 | do_error( | |
540 | _("Error %d while creating inobt btree for AG %u.\n"), error, agno); | |
541 | ||
542 | /* Since we're not writing the AGI yet, no need to commit the cursor */ | |
543 | libxfs_btree_del_cursor(btr_ino->cur, 0); | |
544 | ||
545 | if (!xfs_sb_version_hasfinobt(&sc->mp->m_sb)) | |
546 | return; | |
547 | ||
548 | /* Add all observed finobt records. */ | |
549 | error = -libxfs_btree_bload(btr_fino->cur, &btr_fino->bload, btr_fino); | |
550 | if (error) | |
551 | do_error( | |
552 | _("Error %d while creating finobt btree for AG %u.\n"), error, agno); | |
553 | ||
554 | /* Since we're not writing the AGI yet, no need to commit the cursor */ | |
555 | libxfs_btree_del_cursor(btr_fino->cur, 0); | |
556 | } | |
dc9f4f5e DW |
557 | |
558 | /* rebuild the rmap tree */ | |
559 | ||
560 | /* Grab one rmap record. */ | |
561 | static int | |
562 | get_rmapbt_record( | |
563 | struct xfs_btree_cur *cur, | |
564 | void *priv) | |
565 | { | |
566 | struct xfs_rmap_irec *rec; | |
567 | struct bt_rebuild *btr = priv; | |
568 | ||
569 | rec = pop_slab_cursor(btr->slab_cursor); | |
570 | memcpy(&cur->bc_rec.r, rec, sizeof(struct xfs_rmap_irec)); | |
571 | return 0; | |
572 | } | |
573 | ||
574 | /* Set up the rmap rebuild parameters. */ | |
575 | void | |
576 | init_rmapbt_cursor( | |
577 | struct repair_ctx *sc, | |
195c248c | 578 | struct xfs_perag *pag, |
dc9f4f5e DW |
579 | unsigned int free_space, |
580 | struct bt_rebuild *btr) | |
581 | { | |
195c248c | 582 | xfs_agnumber_t agno = pag->pag_agno; |
dc9f4f5e DW |
583 | int error; |
584 | ||
585 | if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb)) | |
586 | return; | |
587 | ||
588 | init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr); | |
195c248c | 589 | btr->cur = libxfs_rmapbt_stage_cursor(sc->mp, &btr->newbt.afake, pag); |
dc9f4f5e DW |
590 | |
591 | btr->bload.get_record = get_rmapbt_record; | |
592 | btr->bload.claim_block = rebuild_claim_block; | |
593 | ||
594 | /* Compute how many blocks we'll need. */ | |
595 | error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload, | |
596 | rmap_record_count(sc->mp, agno)); | |
597 | if (error) | |
598 | do_error( | |
599 | _("Unable to compute rmap btree geometry, error %d.\n"), error); | |
600 | ||
601 | reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks); | |
602 | } | |
603 | ||
604 | /* Rebuild a rmap btree. */ | |
605 | void | |
606 | build_rmap_tree( | |
607 | struct repair_ctx *sc, | |
608 | xfs_agnumber_t agno, | |
609 | struct bt_rebuild *btr) | |
610 | { | |
611 | int error; | |
612 | ||
613 | error = rmap_init_cursor(agno, &btr->slab_cursor); | |
614 | if (error) | |
615 | do_error( | |
616 | _("Insufficient memory to construct rmap cursor.\n")); | |
617 | ||
618 | /* Add all observed rmap records. */ | |
619 | error = -libxfs_btree_bload(btr->cur, &btr->bload, btr); | |
620 | if (error) | |
621 | do_error( | |
622 | _("Error %d while creating rmap btree for AG %u.\n"), error, agno); | |
623 | ||
624 | /* Since we're not writing the AGF yet, no need to commit the cursor */ | |
625 | libxfs_btree_del_cursor(btr->cur, 0); | |
626 | free_slab_cursor(&btr->slab_cursor); | |
627 | } | |
3c1ce0fc DW |
628 | |
629 | /* rebuild the refcount tree */ | |
630 | ||
631 | /* Grab one refcount record. */ | |
632 | static int | |
633 | get_refcountbt_record( | |
634 | struct xfs_btree_cur *cur, | |
635 | void *priv) | |
636 | { | |
637 | struct xfs_refcount_irec *rec; | |
638 | struct bt_rebuild *btr = priv; | |
639 | ||
640 | rec = pop_slab_cursor(btr->slab_cursor); | |
641 | memcpy(&cur->bc_rec.rc, rec, sizeof(struct xfs_refcount_irec)); | |
642 | return 0; | |
643 | } | |
644 | ||
645 | /* Set up the refcount rebuild parameters. */ | |
646 | void | |
647 | init_refc_cursor( | |
648 | struct repair_ctx *sc, | |
971fceb4 | 649 | struct xfs_perag *pag, |
3c1ce0fc DW |
650 | unsigned int free_space, |
651 | struct bt_rebuild *btr) | |
652 | { | |
971fceb4 | 653 | xfs_agnumber_t agno = pag->pag_agno; |
3c1ce0fc DW |
654 | int error; |
655 | ||
656 | if (!xfs_sb_version_hasreflink(&sc->mp->m_sb)) | |
657 | return; | |
658 | ||
659 | init_rebuild(sc, &XFS_RMAP_OINFO_REFC, free_space, btr); | |
660 | btr->cur = libxfs_refcountbt_stage_cursor(sc->mp, &btr->newbt.afake, | |
971fceb4 | 661 | pag); |
3c1ce0fc DW |
662 | |
663 | btr->bload.get_record = get_refcountbt_record; | |
664 | btr->bload.claim_block = rebuild_claim_block; | |
665 | ||
666 | /* Compute how many blocks we'll need. */ | |
667 | error = -libxfs_btree_bload_compute_geometry(btr->cur, &btr->bload, | |
668 | refcount_record_count(sc->mp, agno)); | |
669 | if (error) | |
670 | do_error( | |
671 | _("Unable to compute refcount btree geometry, error %d.\n"), error); | |
672 | ||
673 | reserve_btblocks(sc->mp, agno, btr, btr->bload.nr_blocks); | |
674 | } | |
675 | ||
676 | /* Rebuild a refcount btree. */ | |
677 | void | |
678 | build_refcount_tree( | |
679 | struct repair_ctx *sc, | |
680 | xfs_agnumber_t agno, | |
681 | struct bt_rebuild *btr) | |
682 | { | |
683 | int error; | |
684 | ||
685 | error = init_refcount_cursor(agno, &btr->slab_cursor); | |
686 | if (error) | |
687 | do_error( | |
688 | _("Insufficient memory to construct refcount cursor.\n")); | |
689 | ||
690 | /* Add all observed refcount records. */ | |
691 | error = -libxfs_btree_bload(btr->cur, &btr->bload, btr); | |
692 | if (error) | |
693 | do_error( | |
694 | _("Error %d while creating refcount btree for AG %u.\n"), error, agno); | |
695 | ||
696 | /* Since we're not writing the AGF yet, no need to commit the cursor */ | |
697 | libxfs_btree_del_cursor(btr->cur, 0); | |
698 | free_slab_cursor(&btr->slab_cursor); | |
699 | } |