/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
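
/*
 * Illustrative usage note (added for clarity, not part of the original
 * source): callers in the directory and attribute code typically bracket a
 * btree operation with the helpers above, along the lines of:
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);
 *
 * The exact setup of the state structure varies by caller; this sketch only
 * shows the alloc/free pairing.
 */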
static xfs_failaddr_t
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return __this_address;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
			return __this_address;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return __this_address;
	}
	if (ichdr.level == 0)
		return __this_address;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return __this_address;
	if (ichdr.count == 0)
		return __this_address;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return __this_address;

	/* XXX: hash order check? */

	return NULL;
}
static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_da3_node_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}
/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
		case XFS_DA3_NODE_MAGIC:
			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
				xfs_verifier_error(bp, -EFSBADCRC,
						__this_address);
				break;
			}
			/* fall through */
		case XFS_DA_NODE_MAGIC:
			fa = xfs_da3_node_verify(bp);
			if (fa)
				xfs_verifier_error(bp, -EFSCORRUPTED, fa);
			return;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			bp->b_ops = &xfs_attr3_leaf_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			bp->b_ops = &xfs_dir3_leafn_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		default:
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
			break;
	}
}
/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
	case XFS_DA_NODE_MAGIC:
		return xfs_da3_node_verify(bp);
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		return bp->b_ops->verify_struct(bp);
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		return bp->b_ops->verify_struct(bp);
	default:
		return __this_address;
	}
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};
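
/*
 * Note (summary added for clarity, not in the original source): this ops
 * vector is attached to da btree node buffers so that .verify_read runs when
 * a buffer is read in, .verify_write runs as it is written back, and
 * .verify_struct allows structure checks without any I/O.
 */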
int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp && *bpp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return error;	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same physical
	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
	 * which version of the xfs_da_intnode structure we use here as the
	 * result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly here so that the buffer and block headers stay in sync.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}
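
/*
 * Worked example (illustrative, added for clarity): with a geometry of 256
 * entries per node, a block holding more than 128 entries (over 50% full) is
 * left alone, and a sibling is accepted as a merge candidate only when the
 * combined count of the two blocks is at most 192, i.e. at least 25% of the
 * resulting block would still be free.
 */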
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}
/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			return -EFSCORRUPTED;

		/* Check the level from the root. */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			return -EFSCORRUPTED;
		else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip this loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}

		/* We can't point back to the root. */
		if (blkno == args->geo->leafblk)
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		return -EFSCORRUPTED;

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode *dp,
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
STATIC int						/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't set
		 * a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));


		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
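
/*
 * Worked example (illustrative, added for clarity): for the five byte name
 * "abcde" the unrolled loop above folds "abcd" in a single pass, leaving
 * namelen == 1, so the final value is (('e' << 0) ^ rol32(hash_of_abcd, 7)).
 */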
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}
const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
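
/*
 * Example (illustrative only, not from the original source): code with no
 * filesystem-specific name policy can resolve a hash through the default ops
 * table, e.g.
 *
 *	struct xfs_name	xname = { .name = (const unsigned char *)"foo",
 *				  .len = 3 };
 *	xfs_dahash_t	hash = xfs_default_nameops.hashname(&xname);
 *
 * which is equivalent to calling xfs_da_hashname(xname.name, xname.len)
 * directly.
 */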
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->dfops);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->dfops);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
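/*
 * Outline of the swap above (a summary of the existing control flow, not
 * extra behaviour):
 *   1. Locate the last da block in the fork and copy it over the block
 *      being freed, logging the whole buffer.
 *   2. Repoint the moved block's left and right siblings at dead_blkno.
 *   3. Walk down from the root to the parent node whose entry referenced
 *      last_blkno and repoint that entry at dead_blkno.
 *   4. Return the now-unused last block to the caller through *dead_blknop
 *      and *dead_bufp so that block can be unmapped instead.
 */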
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, args->firstblock,
				    args->dfops, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
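/*
 * Note that the ENOSPC fallback above is only taken for the data fork;
 * xfs_da3_swap_lastblock() asserts XFS_DATA_FORK, so an attr fork caller
 * simply gets the -ENOSPC back from xfs_bunmapi().
 */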
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
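/*
 * Worked example with hypothetical values: for bno = 100 and count = 4,
 * two real extents covering file offsets [100,102) and [102,104) pass the
 * check; a mapping of [100,102) followed by [103,104) fails because the
 * running offset (102) does not match the next br_startoff (103), and any
 * hole or delalloc extent fails immediately.
 */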
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}

	return 0;
}
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	<0 - negative errno if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
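/*
 * mappedbno conventions used by the callers below: -1 means "no known
 * mapping, look it up via the bmap btree"; -2 means the same but landing in
 * a hole is not an error (the -1 return above); any other value is used
 * directly as the disk address of a single known mapping.
 */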
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
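/*
 * Minimal usage sketch (variable names are illustrative; the caller supplies
 * its own transaction, inode, block number and verifier ops):
 *
 *	struct xfs_buf	*bp;
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK, ops);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr ...
 *	xfs_trans_brelse(tp, bp);
 */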
/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);