/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

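/* Duplicate a refcount btree cursor for the same AG, transaction, and dfops. */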
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_private.a.dfops);
}

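/*
 * Point the AGF at a new refcount btree root block and adjust the recorded
 * tree height, both on disk and in the in-core perag.
 */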
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

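/*
 * Allocate a single block for the refcount btree from the AG's metadata
 * reservation, using a near-block allocation hinted at xfs_refc_block().
 */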
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out_error:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

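/*
 * Free a refcount btree block back to the AG metadata reservation and
 * decrement the block count recorded in the AGF.
 */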
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_METADATA);
	return error;
}

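/* Return the minimum number of records per block, for leaf or node blocks. */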
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

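/* Return the maximum number of records per block, for leaf or node blocks. */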
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}

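/* The low key of a refcount record is its starting AG block. */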
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

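/* The high key is the last AG block covered by the record. */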
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}

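/* Fill an on-disk refcount record from the incore record in the cursor. */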
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

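/* Load the root block pointer of this AG's refcount btree from the AGF. */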
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_refcount_root != 0);

	ptr->s = agf->agf_refcount_root;
}

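/*
 * Return the start block of the key minus the start block of the incore
 * record in the cursor.
 */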
STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

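/* Return the difference between the start blocks of two keys. */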
STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			be32_to_cpu(k2->refc.rc_startblock);
}

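/*
 * Check that a refcount btree block looks sane: correct magic, a valid v5
 * short-form btree header, and a level below the bound recorded in the
 * perag (or the per-mount maximum if the perag hasn't been initialized).
 */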
STATIC bool
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return false;
	} else if (level >= mp->m_refc_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

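/* Verify the CRC and then the structure of a refcount btree block on read. */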
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_refcountbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

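/* Check the block structure and recompute the CRC before the block is written. */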
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_refcountbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
};

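/* Two keys are in order if the first starts strictly before the second. */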
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

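/* Two records are in order if the first ends at or before the second begins. */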
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.dfops = dfops;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}
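
/*
 * Worked example (editorial, not from the original source): assuming
 * 4096-byte filesystem blocks, a 56-byte short-form CRC btree block header,
 * 12-byte records, and 4-byte keys and pointers, a leaf block would hold
 * (4096 - 56) / 12 = 336 records and a node block would hold
 * (4096 - 56) / (4 + 4) = 505 key/pointer pairs.
 */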

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount		*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_buf_relse(agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);
	*used += tree_len;

	return error;
}