// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"

/*
 * Reverse map btree.
 *
 * This is a per-ag tree used to track the owner(s) of a given extent. With
 * reflink it is possible for there to be multiple owners, which is a departure
 * from classic XFS. Owner records for data extents are inserted when the
 * extent is mapped and removed when an extent is unmapped. Owner records for
 * all other block types (i.e. metadata) are inserted when an extent is
 * allocated and removed when an extent is freed. There can only be one owner
 * of a metadata extent, usually an inode or some other metadata structure like
 * an AG btree.
 *
 * The rmap btree is part of the free space management, so blocks for the tree
 * are sourced from the agfl. Hence we need transaction reservation support for
 * this tree so that the freelist is always large enough. This also impacts on
 * the minimum space we need to leave free in the AG.
 *
 * The tree is ordered by [ag block, owner, offset]. This is a large key size,
 * but it is the only way to enforce unique keys when a block can be owned by
 * multiple files at any offset. There's no need to order/search by extent
 * size for online updating/management of the tree. It is intended that most
 * reverse lookups will be to find the owner(s) of a particular block, or to
 * try to recover tree and file data from corrupt primary metadata.
 */

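/*
 * Duplicate a cursor: same mount, transaction, AGF buffer, and AG as the
 * original, so callers can walk the tree from two positions at once.
 */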
static struct xfs_btree_cur *
xfs_rmapbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_ag.agbp, cur->bc_ag.agno);
}

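/*
 * Point the AGF at a new btree root, adjust the recorded tree height by
 * @inc, and log the change so it survives a crash.
 */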
STATIC void
xfs_rmapbt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr,
        int                     inc)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        struct xfs_agf          *agf = agbp->b_addr;
        xfs_agnumber_t          seqno = be32_to_cpu(agf->agf_seqno);
        int                     btnum = cur->bc_btnum;
        struct xfs_perag        *pag = xfs_perag_get(cur->bc_mp, seqno);

        ASSERT(ptr->s != 0);

        agf->agf_roots[btnum] = ptr->s;
        be32_add_cpu(&agf->agf_levels[btnum], inc);
        pag->pagf_levels[btnum] += inc;
        xfs_perag_put(pag);

        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

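/*
 * Allocate a block for the btree from the AG free list and account for it
 * in the AGF and in the per-AG rmapbt block reservation.
 */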
STATIC int
xfs_rmapbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        struct xfs_agf          *agf = agbp->b_addr;
        int                     error;
        xfs_agblock_t           bno;

        /* Allocate the new block from the freelist. If we can't, give up. */
        error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
                        &bno, 1);
        if (error)
                return error;

        trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
                        bno, 1);
        if (bno == NULLAGBLOCK) {
                *stat = 0;
                return 0;
        }

        xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
                        false);

        xfs_trans_agbtree_delta(cur->bc_tp, 1);
        new->s = cpu_to_be32(bno);
        be32_add_cpu(&agf->agf_rmap_blocks, 1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);

        xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);

        *stat = 1;
        return 0;
}

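/*
 * Return a btree block to the AG free list, marking the extent busy (but
 * exempt from discard) and releasing its per-AG reservation.
 */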
STATIC int
xfs_rmapbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        struct xfs_agf          *agf = agbp->b_addr;
        xfs_agblock_t           bno;
        int                     error;

        bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
        trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
                        bno, 1);
        be32_add_cpu(&agf->agf_rmap_blocks, -1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
        error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
        if (error)
                return error;

        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                        XFS_EXTENT_BUSY_SKIP_DISCARD);
        xfs_trans_agbtree_delta(cur->bc_tp, -1);

        xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_ag.agno);

        return 0;
}

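/* Minimum records per block; index 0 is for leaf blocks, 1 for node blocks. */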
STATIC int
xfs_rmapbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_rmap_mnr[level != 0];
}

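/* Maximum records per block; index 0 is for leaf blocks, 1 for node blocks. */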
STATIC int
xfs_rmapbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_rmap_mxr[level != 0];
}

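/* Construct the low key (startblock, owner, offset) from a record. */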
STATIC void
xfs_rmapbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->rmap.rm_startblock = rec->rmap.rm_startblock;
        key->rmap.rm_owner = rec->rmap.rm_owner;
        key->rmap.rm_offset = rec->rmap.rm_offset;
}

/*
 * The high key for a reverse mapping record can be computed by shifting
 * the startblock and offset to the highest value that would still map
 * to that record. In practice this means that we add blockcount-1 to
 * the startblock for all records, and if the record is for a data/attr
 * fork mapping, we add blockcount-1 to the offset too.
 */
STATIC void
xfs_rmapbt_init_high_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        uint64_t                off;
        int                     adj;

        adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;

        key->rmap.rm_startblock = rec->rmap.rm_startblock;
        be32_add_cpu(&key->rmap.rm_startblock, adj);
        key->rmap.rm_owner = rec->rmap.rm_owner;
        key->rmap.rm_offset = rec->rmap.rm_offset;
        if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
            XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
                return;
        off = be64_to_cpu(key->rmap.rm_offset);
        off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
        key->rmap.rm_offset = cpu_to_be64(off);
}

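/* Fill an on-disk record from the incore record cached in the cursor. */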
STATIC void
xfs_rmapbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
        rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
        rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
        rec->rmap.rm_offset = cpu_to_be64(
                        xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}

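/* Load the root pointer of this AG's rmap btree from the AGF. */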
STATIC void
xfs_rmapbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agf          *agf = cur->bc_ag.agbp->b_addr;

        ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_roots[cur->bc_btnum];
}

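/*
 * Return the difference between the key and the cursor's lookup record,
 * comparing startblock, then owner, then offset.
 */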
STATIC int64_t
xfs_rmapbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        struct xfs_rmap_irec    *rec = &cur->bc_rec.r;
        struct xfs_rmap_key     *kp = &key->rmap;
        __u64                   x, y;
        int64_t                 d;

        d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
        if (d)
                return d;

        x = be64_to_cpu(kp->rm_owner);
        y = rec->rm_owner;
        if (x > y)
                return 1;
        else if (y > x)
                return -1;

        x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
        y = rec->rm_offset;
        if (x > y)
                return 1;
        else if (y > x)
                return -1;
        return 0;
}

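/* Return the sign of k1 - k2, comparing startblock, owner, and offset. */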
STATIC int64_t
xfs_rmapbt_diff_two_keys(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        struct xfs_rmap_key     *kp1 = &k1->rmap;
        struct xfs_rmap_key     *kp2 = &k2->rmap;
        int64_t                 d;
        __u64                   x, y;

        d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
                     be32_to_cpu(kp2->rm_startblock);
        if (d)
                return d;

        x = be64_to_cpu(kp1->rm_owner);
        y = be64_to_cpu(kp2->rm_owner);
        if (x > y)
                return 1;
        else if (y > x)
                return -1;

        x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
        y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
        if (x > y)
                return 1;
        else if (y > x)
                return -1;
        return 0;
}

static xfs_failaddr_t
xfs_rmapbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag        *pag = bp->b_pag;
        xfs_failaddr_t          fa;
        unsigned int            level;

        /*
         * magic number and level verification
         *
         * During growfs operations, we can't verify the exact level or owner
         * as the perag is not fully initialised and hence not attached to the
         * buffer. In this case, check against the maximum tree depth.
         *
         * Similarly, during log recovery we will have a perag structure
         * attached, but the agf information will not yet have been initialised
         * from the on disk AGF. Again, we can only check against maximum
         * limits in this case.
         */
        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return __this_address;
        fa = xfs_btree_sblock_v5hdr_verify(bp);
        if (fa)
                return fa;

        level = be16_to_cpu(block->bb_level);
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
                        return __this_address;
        } else if (level >= mp->m_rmap_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}

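/* Check the CRC, then the structure, of an rmapbt block read from disk. */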
static void
xfs_rmapbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_rmapbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

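/* Verify an rmapbt block and compute its CRC before it is written out. */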
static void
xfs_rmapbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_rmapbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
        .name                   = "xfs_rmapbt",
        .magic                  = { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
        .verify_read            = xfs_rmapbt_read_verify,
        .verify_write           = xfs_rmapbt_write_verify,
        .verify_struct          = xfs_rmapbt_verify,
};

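/* Return 1 if k1 sorts at or before k2 in [startblock, owner, offset] order. */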
STATIC int
xfs_rmapbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        uint32_t                x;
        uint32_t                y;
        uint64_t                a;
        uint64_t                b;

        x = be32_to_cpu(k1->rmap.rm_startblock);
        y = be32_to_cpu(k2->rmap.rm_startblock);
        if (x < y)
                return 1;
        else if (x > y)
                return 0;
        a = be64_to_cpu(k1->rmap.rm_owner);
        b = be64_to_cpu(k2->rmap.rm_owner);
        if (a < b)
                return 1;
        else if (a > b)
                return 0;
        a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
        b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
        if (a <= b)
                return 1;
        return 0;
}

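/* Return 1 if r1 sorts at or before r2 in [startblock, owner, offset] order. */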
STATIC int
xfs_rmapbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        uint32_t                x;
        uint32_t                y;
        uint64_t                a;
        uint64_t                b;

        x = be32_to_cpu(r1->rmap.rm_startblock);
        y = be32_to_cpu(r2->rmap.rm_startblock);
        if (x < y)
                return 1;
        else if (x > y)
                return 0;
        a = be64_to_cpu(r1->rmap.rm_owner);
        b = be64_to_cpu(r2->rmap.rm_owner);
        if (a < b)
                return 1;
        else if (a > b)
                return 0;
        a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
        b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
        if (a <= b)
                return 1;
        return 0;
}

static const struct xfs_btree_ops xfs_rmapbt_ops = {
        .rec_len                = sizeof(struct xfs_rmap_rec),
        .key_len                = 2 * sizeof(struct xfs_rmap_key),

        .dup_cursor             = xfs_rmapbt_dup_cursor,
        .set_root               = xfs_rmapbt_set_root,
        .alloc_block            = xfs_rmapbt_alloc_block,
        .free_block             = xfs_rmapbt_free_block,
        .get_minrecs            = xfs_rmapbt_get_minrecs,
        .get_maxrecs            = xfs_rmapbt_get_maxrecs,
        .init_key_from_rec      = xfs_rmapbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_rmapbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_rmapbt_init_ptr_from_cur,
        .key_diff               = xfs_rmapbt_key_diff,
        .buf_ops                = &xfs_rmapbt_buf_ops,
        .diff_two_keys          = xfs_rmapbt_diff_two_keys,
        .keys_inorder           = xfs_rmapbt_keys_inorder,
        .recs_inorder           = xfs_rmapbt_recs_inorder,
};

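/* Allocate a cursor and initialize the parts shared by all creation paths. */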
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno)
{
        struct xfs_btree_cur    *cur;

        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
        cur->bc_tp = tp;
        cur->bc_mp = mp;
        /* Overlapping btree; 2 keys per pointer. */
        cur->bc_btnum = XFS_BTNUM_RMAP;
        cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
        cur->bc_ag.agno = agno;
        cur->bc_ops = &xfs_rmapbt_ops;

        return cur;
}

/* Create a new reverse mapping btree cursor. */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp,
        xfs_agnumber_t          agno)
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xfs_btree_cur    *cur;

        cur = xfs_rmapbt_init_common(mp, tp, agno);
        cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
        cur->bc_ag.agbp = agbp;
        return cur;
}

/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
        struct xfs_mount        *mp,
        struct xbtree_afakeroot *afake,
        xfs_agnumber_t          agno)
{
        struct xfs_btree_cur    *cur;

        cur = xfs_rmapbt_init_common(mp, NULL, agno);
        xfs_btree_stage_afakeroot(cur, afake);
        return cur;
}

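/*
 * A staging cursor is typically used by repair code roughly as follows.
 * This is a sketch, not a complete sequence; error handling and the block
 * reservation step required by the bulk loader are omitted:
 *
 *	cur = xfs_rmapbt_stage_cursor(mp, &afake, agno);
 *	error = xfs_btree_bload_compute_geometry(cur, &bload, nr_records);
 *	(reserve bload.nr_blocks blocks, then...)
 *	error = xfs_btree_bload(cur, &bload, priv);
 *	xfs_rmapbt_commit_staged_btree(cur, tp, agbp);
 *	xfs_btree_del_cursor(cur, error);
 */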
/*
 * Install a new reverse mapping btree root. Caller is responsible for
 * invalidating and freeing the old btree blocks.
 */
void
xfs_rmapbt_commit_staged_btree(
        struct xfs_btree_cur    *cur,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp)
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xbtree_afakeroot *afake = cur->bc_ag.afake;

        ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

        agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
        agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
        agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
        xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
                                    XFS_AGF_RMAP_BLOCKS);
        xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}

/*
 * Calculate number of records in an rmap btree block.
 */
int
xfs_rmapbt_maxrecs(
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_RMAP_BLOCK_LEN;

        if (leaf)
                return blocklen / sizeof(struct xfs_rmap_rec);
        return blocklen /
                (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
}

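/*
 * Worked example, assuming 4096-byte blocks and the 56-byte CRC-enabled
 * short block header (XFS_RMAP_BLOCK_LEN): a 24-byte record means a leaf
 * holds (4096 - 56) / 24 = 168 records; a 20-byte key plus 4-byte pointer,
 * doubled for the low/high keys of this overlapping btree, means a node
 * holds (4096 - 56) / (2 * 20 + 4) = 91 keyptrs.
 */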
/* Compute the maximum height of an rmap btree. */
void
xfs_rmapbt_compute_maxlevels(
        struct xfs_mount        *mp)
{
        /*
         * On a non-reflink filesystem, the maximum number of rmap
         * records is the number of blocks in the AG, hence the max
         * rmapbt height is log_$maxrecs($agblocks). However, with
         * reflink each AG block can have up to 2^32 (per the refcount
         * record format) owners, which means that theoretically we
         * could face up to 2^64 rmap records.
         *
         * That effectively means that the max rmapbt height must be
         * XFS_BTREE_MAXLEVELS. "Fortunately" we'll run out of AG
         * blocks to feed the rmapbt long before the rmapbt reaches
         * maximum height. The reflink code uses ag_resv_critical to
         * disallow reflinking when less than 10% of the per-AG metadata
         * block reservation remains free, since the fallback is a
         * regular file copy.
         */
        if (xfs_sb_version_hasreflink(&mp->m_sb))
                mp->m_rmap_maxlevels = XFS_BTREE_MAXLEVELS;
        else
                mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(
                                mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the rmap btree size for some records. */
xfs_extlen_t
xfs_rmapbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_rmap_mnr, len);
}

/*
 * Calculate the maximum rmap btree size.
 */
xfs_extlen_t
xfs_rmapbt_max_size(
        struct xfs_mount        *mp,
        xfs_agblock_t           agblocks)
{
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_rmap_mxr[0] == 0)
                return 0;

        return xfs_rmapbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_rmapbt_calc_reserves(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        xfs_extlen_t            *ask,
        xfs_extlen_t            *used)
{
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
        xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;

        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return 0;

        error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
        if (error)
                return error;

        agf = agbp->b_addr;
        agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_rmap_blocks);
        xfs_trans_brelse(tp, agbp);

        /*
         * The log is permanently allocated, so the space it occupies will
         * never be available for the kinds of things that would require btree
         * expansion. We therefore can pretend the space isn't there.
         */
        if (mp->m_sb.sb_logstart &&
            XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
                agblocks -= mp->m_sb.sb_logblocks;

        /* Reserve 1% of the AG or enough for 1 block per record. */
        *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
        *used += tree_len;

        return error;
}