/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"

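/*
 * Duplicate an existing cursor: same AG buffer, AG number and btree
 * type, attached to the same transaction.
 */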
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

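/*
 * Point the AGF at a new btree root and adjust the recorded tree
 * height, keeping the cached per-AG copy in sync and logging the
 * change.
 */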
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

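/*
 * Allocate a block for the btree from the AG free list.  Any pending
 * busy extent record covering the block has to be resolved before it
 * can be reused, hence the xfs_extent_busy_reuse() call below.
 */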
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
			      false);

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}

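/*
 * Return a freed btree block to the AG free list, marking it busy so
 * it cannot be reused for user data before the free commits to disk.
 */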
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
	if (error)
		return error;

	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);
	return 0;
}

/*
 * Update the longest extent in the AGF.  Called by the generic btree
 * code whenever the last record of the by-size (cnt) btree changes.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

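/*
 * Min/max records per block are precomputed at mount time; index 0 is
 * for leaf blocks, index 1 for interior node blocks.
 */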
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

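/*
 * The "high" key of a bnobt record is its last block (start + count - 1);
 * the generic range query code compares against it when walking the tree.
 */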
STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}

STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}

STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

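/*
 * Key comparison helpers.  Each returns a value less than, equal to or
 * greater than zero as the given key sorts before, equal to or after
 * the record held in the cursor.  The cntbt compares by extent length
 * first, using the start block only to break ties.
 */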
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	int64_t			diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}

STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	int64_t			diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
		be32_to_cpu(k2->alloc.ar_startblock);
}

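/*
 * Structure verifier shared by the read and write paths, also exported
 * via ->verify_struct for callers that want to recheck a buffer.
 */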
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner
	 * as the perag is not fully initialised and hence not attached to
	 * the buffer.  In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been
	 * initialised from the on disk AGF.  Again, we can only check
	 * against maximum limits in this case.
	 */
	level = be16_to_cpu(block->bb_level);
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_ABTB_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
				return __this_address;
		} else if (level >= mp->m_ag_maxlevels)
			return __this_address;
		break;
	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_ABTC_MAGIC):
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
				return __this_address;
		} else if (level >= mp->m_ag_maxlevels)
			return __this_address;
		break;
	default:
		return __this_address;
	}

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}

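/*
 * On read, check the CRC before the structure; a bad CRC means the
 * block contents cannot be trusted at all.
 */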
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_allocbt_buf_ops = {
	.name = "xfs_allocbt",
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
	.verify_struct = xfs_allocbt_verify,
};

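/*
 * Debug-mode ordering checks.  bnobt records must be sorted by start
 * block and must not overlap; cntbt records sort by length, with the
 * start block breaking ties.
 */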
STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}

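/*
 * The two free space btrees index the same records in two different
 * ways: the bnobt by starting block, the cntbt by extent length.  The
 * ops tables share everything except the key comparison and ordering
 * methods.
 */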
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_allocbt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_allocbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

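/*
 * Example use (a hypothetical caller, for illustration only): cursors
 * are driven through the generic xfs_btree code and released with
 * xfs_btree_del_cursor():
 *
 *	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
 *	error = ...;		(lookups and updates via xfs_btree_*)
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */
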
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}
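
/*
 * For example, assuming a 4096 byte block and the 16 byte short-form
 * header of a non-CRC filesystem: (4096 - 16) / 8 = 510 records fit in
 * a leaf, and (4096 - 16) / 12 = 340 key/pointer pairs fit in a node.
 */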

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}