/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_rmap.h"

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock and xfs_bmbt_get_blockcount.
 */
STATIC void
__xfs_bmbt_get_all(
	uint64_t		l0,
	uint64_t		l1,
	xfs_bmbt_irec_t		*s)
{
	int			ext_flag;
	xfs_exntst_t		st;

	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
	s->br_startoff = ((xfs_fileoff_t)l0 &
			  xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
			   (((xfs_fsblock_t)l1) >> 21);
	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
	/* This is xfs_extent_state() in-line */
	if (ext_flag) {
		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
		st = XFS_EXT_UNWRITTEN;
	} else
		st = XFS_EXT_NORM;
	s->br_state = st;
}

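/*
 * Bit layout of a packed bmbt extent record, as decoded above (128 bits
 * split across the two 64-bit words l0 and l1):
 *
 *	l0: | extent flag (1) |    startoff (54)    | startblock high (9) |
 *	l1: |       startblock low (43)       |     blockcount (21)       |
 *
 * so a record can describe a 54-bit file offset, a 52-bit start block
 * and a 21-bit extent length, with one bit marking unwritten extents.
 */
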
void
xfs_bmbt_get_all(
	xfs_bmbt_rec_host_t	*r,
	xfs_bmbt_irec_t		*s)
{
	__xfs_bmbt_get_all(r->l0, r->l1, s);
}

/*
 * Extract the blockcount field from an in memory bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_get_blockcount(
	xfs_bmbt_rec_host_t	*r)
{
	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}

/*
 * Extract the startblock field from an in memory bmap extent record.
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
	xfs_bmbt_rec_host_t	*r)
{
	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
	       (((xfs_fsblock_t)r->l1) >> 21);
}

/*
 * Extract the startoff field from an in memory bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_get_startoff(
	xfs_bmbt_rec_host_t	*r)
{
	return ((xfs_fileoff_t)r->l0 &
		xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
	xfs_bmbt_rec_t	*r)
{
	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
	xfs_bmbt_rec_t	*r)
{
	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
		xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_set_all(
	struct xfs_bmbt_rec_host *r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		((xfs_bmbt_rec_base_t)s->br_startblock >> 43);
	r->l1 = ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		((xfs_bmbt_rec_base_t)s->br_blockcount &
		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
}

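/*
 * Worked example of the packing above: a written (XFS_EXT_NORM) extent
 * with br_startoff = 100, br_startblock = 200 and br_blockcount = 3
 * packs to:
 *
 *	l0 = (0 << 63) | (100 << 9) | (200 >> 43) = 51200
 *	l1 = (200 << 21) | 3 = 419430403
 *
 * and __xfs_bmbt_get_all() decodes those two words back to the same
 * incore record.
 */
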
/*
 * Set all the fields in a disk format bmap extent record from the
 * uncompressed form.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		((xfs_bmbt_rec_base_t)s->br_blockcount &
		 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}

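/*
 * Note the put_unaligned_be64() stores above: the destination record is
 * written bytewise in big-endian order, so callers may pass a pointer to
 * an extent record that is not naturally 64-bit aligned.
 */
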
/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

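/*
 * In both root conversions above, the pointer array is located via the
 * maximum record count for the destination block size (dmxr), not the
 * current record count: keys and pointers are stored as two separate
 * arrays, so the pointer array always begins at the slot after the last
 * possible key.  That is why dmxr is recomputed from bb_numrecs only
 * after the source and target addresses have been taken.
 */
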
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the firstblock, dfops, and flags values,
	 * since init cursor doesn't get them.
	 */
	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
	new->bc_private.b.dfops = cur->bc_private.b.dfops;
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
	ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

	src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_private.b.dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if successful
		 * activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.dfops->dop_low = true;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

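/*
 * Allocation policy of xfs_bmbt_alloc_block() in brief: if nothing has
 * been allocated yet (firstblock == NULLFSBLOCK), target the parent
 * block's location and require enough free space (minleft) in the AG for
 * the rest of the tree split; once low space mode is active, search
 * onwards from firstblock; otherwise allocate near firstblock so the
 * btree blocks end up close together.  If the first attempt cannot leave
 * minleft blocks free, retry against the first AG with space and flip
 * the dfops into low space mode.
 */
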
STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}

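/*
 * Note that the minimum record count for the root level is derived from
 * the space the root actually occupies in the inode fork
 * (if_broot_bytes) rather than from the precomputed per-level limits,
 * and is half the corresponding maximum: the usual b+tree half-full
 * invariant.
 */
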
int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

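/*
 * The low key of a record is its starting file offset; the high key is
 * the last file offset the record maps (startoff + blockcount - 1), so
 * a record's key range covers exactly the offsets it maps.
 */
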
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
			cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
			be64_to_cpu(k2->bmbt.br_startoff);
}

static bool
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	unsigned int		level;

	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
			return false;
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return false;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return false;
	if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.l.bb_leftsib ||
	    (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
		return false;
	if (!block->bb_u.l.bb_rightsib ||
	    (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
	     !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
		return false;

	return true;
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_bmbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

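/*
 * Verifier ordering: on read the CRC is checked before the structural
 * checks, so a media error reports -EFSBADCRC rather than a misleading
 * -EFSCORRUPTED; on write the structure is checked first and the CRC is
 * recalculated last, once all modifications are complete.
 */
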
static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_bmbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
};

STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
	       be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
	       xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
	       xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

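/*
 * Keys must be strictly increasing, but adjacent records only need to be
 * non-overlapping: the first offset beyond r1 may equal the start of r2,
 * hence the <= in xfs_bmbt_recs_inorder() versus the < for keys.
 */
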
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.firstblock = NULLFSBLOCK;
	cur->bc_private.b.dfops = NULL;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}

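/*
 * Typical cursor lifecycle, as seen in xfs_bmbt_change_owner() below:
 * the caller creates a cursor, runs one or more btree operations against
 * it, then destroys it with xfs_btree_del_cursor(), passing
 * XFS_BTREE_ERROR or XFS_BTREE_NOERROR depending on the outcome:
 *
 *	cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 *	error = ...;	// btree lookups/inserts/deletes through cur
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */
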
/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}

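/*
 * Example of the arithmetic above: with 4096 byte blocks and 16 byte
 * records (two __be64 words), a non-CRC leaf block with a 24 byte
 * long-format header holds (4096 - 24) / 16 = 254 records, and an
 * interior block holds (4096 - 24) / (8 + 8) = 254 key/pointer pairs.
 * (The 24 byte header is illustrative; XFS_BMBT_BLOCK_LEN() is the
 * authoritative size and is larger on v5/CRC filesystems.)
 */
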
/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int		blocklen,
	int		leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change
 * it to the owner that is passed in so that we can change owners before or
 * after we switch forks between inodes.  The operation that the caller is
 * doing will determine whether it needs to change owner before or after the
 * switch.
 *
 * For demand paged transactional modification, the fork switch should be
 * done after reading in all the blocks, modifying them and pinning them in
 * the transaction.  For modification when the buffers are already pinned in
 * memory, the fork switch can be done before changing the owner as we won't
 * need to validate the owner until the btree buffers are unpinned and writes
 * can occur again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}