// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */
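
/*
 * Orientation, hedged because the on-disk definitions live in
 * xfs_da_format.h rather than here: each interior ("da node") block
 * carries an array of entries sorted by hash value,
 *
 *	struct xfs_da_node_entry {
 *		__be32	hashval;	- hash covering the child subtree
 *		__be32	before;		- da block number of the child
 *	};
 *
 * A lookup hashes the name (see xfs_da_hashname()) and then walks one
 * such entry per level, descending through ->before until it reaches a
 * dir or attr leaf block; xfs_da3_node_lookup_int() below is the full
 * implementation of that walk.
 */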

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
                              xfs_da_state_blk_t *existing_root,
                              xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
                              xfs_da_state_blk_t *existing_blk,
                              xfs_da_state_blk_t *split_blk,
                              xfs_da_state_blk_t *blk_to_add,
                              int treelevel,
                              int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
                                   xfs_da_state_blk_t *node_blk_1,
                                   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
                             xfs_da_state_blk_t *old_node_blk,
                             xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
                             xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
                                xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
                                   xfs_da_state_blk_t *src_node_blk,
                                   xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
                              xfs_da_state_blk_t *drop_blk,
                              xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
        return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
        int i;

        for (i = 0; i < state->altpath.active; i++)
                state->altpath.blk[i].bp = NULL;
        state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
        xfs_da_state_kill_altpath(state);
#ifdef DEBUG
        memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
        kmem_zone_free(xfs_da_state_zone, state);
}
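
/*
 * Typical lifecycle, sketched here as a hedged illustration (the real
 * callers live in the attr and dir2 node code): allocate a state, point
 * it at the args, run an operation, then free it:
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...
 *	xfs_da_state_free(state);
 */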

/*
 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
 * accessible on v5 filesystems. This header format is common across da node,
 * attr leaf and dir leaf blocks.
 */
xfs_failaddr_t
xfs_da3_blkinfo_verify(
        struct xfs_buf *bp,
        struct xfs_da3_blkinfo *hdr3)
{
        struct xfs_mount *mp = bp->b_mount;
        struct xfs_da_blkinfo *hdr = &hdr3->hdr;

        if (!xfs_verify_magic16(bp, hdr->magic))
                return __this_address;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
                        return __this_address;
                if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
                        return __this_address;
                if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
                        return __this_address;
        }

        return NULL;
}

static xfs_failaddr_t
xfs_da3_node_verify(
        struct xfs_buf *bp)
{
        struct xfs_mount *mp = bp->b_mount;
        struct xfs_da_intnode *hdr = bp->b_addr;
        struct xfs_da3_icnode_hdr ichdr;
        const struct xfs_dir_ops *ops;
        xfs_failaddr_t fa;

        ops = xfs_dir_get_ops(mp, NULL);

        ops->node_hdr_from_disk(&ichdr, hdr);

        fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
        if (fa)
                return fa;

        if (ichdr.level == 0)
                return __this_address;
        if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
                return __this_address;
        if (ichdr.count == 0)
                return __this_address;

        /*
         * we don't know if the node is for an attribute or directory tree,
         * so only fail if the count is outside both bounds
         */
        if (ichdr.count > mp->m_dir_geo->node_ents &&
            ichdr.count > mp->m_attr_geo->node_ents)
                return __this_address;

        /* XXX: hash order check? */

        return NULL;
}

static void
xfs_da3_node_write_verify(
        struct xfs_buf *bp)
{
        struct xfs_mount *mp = bp->b_mount;
        struct xfs_buf_log_item *bip = bp->b_log_item;
        struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
        xfs_failaddr_t fa;

        fa = xfs_da3_node_verify(bp);
        if (fa) {
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }

        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;

        if (bip)
                hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

        xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
        struct xfs_buf *bp)
{
        struct xfs_da_blkinfo *info = bp->b_addr;
        xfs_failaddr_t fa;

        switch (be16_to_cpu(info->magic)) {
        case XFS_DA3_NODE_MAGIC:
                if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
                        xfs_verifier_error(bp, -EFSBADCRC,
                                        __this_address);
                        break;
                }
                /* fall through */
        case XFS_DA_NODE_MAGIC:
                fa = xfs_da3_node_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        case XFS_ATTR_LEAF_MAGIC:
        case XFS_ATTR3_LEAF_MAGIC:
                bp->b_ops = &xfs_attr3_leaf_buf_ops;
                bp->b_ops->verify_read(bp);
                return;
        case XFS_DIR2_LEAFN_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
                bp->b_ops = &xfs_dir3_leafn_buf_ops;
                bp->b_ops->verify_read(bp);
                return;
        default:
                xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
                break;
        }
}

/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct(
        struct xfs_buf *bp)
{
        struct xfs_da_blkinfo *info = bp->b_addr;

        switch (be16_to_cpu(info->magic)) {
        case XFS_DA3_NODE_MAGIC:
        case XFS_DA_NODE_MAGIC:
                return xfs_da3_node_verify(bp);
        case XFS_ATTR_LEAF_MAGIC:
        case XFS_ATTR3_LEAF_MAGIC:
                bp->b_ops = &xfs_attr3_leaf_buf_ops;
                return bp->b_ops->verify_struct(bp);
        case XFS_DIR2_LEAFN_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
                bp->b_ops = &xfs_dir3_leafn_buf_ops;
                return bp->b_ops->verify_struct(bp);
        default:
                return __this_address;
        }
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
        .name = "xfs_da3_node",
        .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
                     cpu_to_be16(XFS_DA3_NODE_MAGIC) },
        .verify_read = xfs_da3_node_read_verify,
        .verify_write = xfs_da3_node_write_verify,
        .verify_struct = xfs_da3_node_verify_struct,
};

int
xfs_da3_node_read(
        struct xfs_trans *tp,
        struct xfs_inode *dp,
        xfs_dablk_t bno,
        xfs_daddr_t mappedbno,
        struct xfs_buf **bpp,
        int which_fork)
{
        int err;

        err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
                              which_fork, &xfs_da3_node_buf_ops);
        if (!err && tp && *bpp) {
                struct xfs_da_blkinfo *info = (*bpp)->b_addr;
                int type;

                switch (be16_to_cpu(info->magic)) {
                case XFS_DA_NODE_MAGIC:
                case XFS_DA3_NODE_MAGIC:
                        type = XFS_BLFT_DA_NODE_BUF;
                        break;
                case XFS_ATTR_LEAF_MAGIC:
                case XFS_ATTR3_LEAF_MAGIC:
                        type = XFS_BLFT_ATTR_LEAF_BUF;
                        break;
                case XFS_DIR2_LEAFN_MAGIC:
                case XFS_DIR3_LEAFN_MAGIC:
                        type = XFS_BLFT_DIR_LEAFN_BUF;
                        break;
                default:
                        XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
                                             tp->t_mountp, info, sizeof(*info));
                        xfs_trans_brelse(tp, *bpp);
                        *bpp = NULL;
                        return -EFSCORRUPTED;
                }
                xfs_trans_buf_set_type(tp, *bpp, type);
        }
        return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
        struct xfs_da_args *args,
        xfs_dablk_t blkno,
        int level,
        struct xfs_buf **bpp,
        int whichfork)
{
        struct xfs_da_intnode *node;
        struct xfs_trans *tp = args->trans;
        struct xfs_mount *mp = tp->t_mountp;
        struct xfs_da3_icnode_hdr ichdr = {0};
        struct xfs_buf *bp;
        int error;
        struct xfs_inode *dp = args->dp;

        trace_xfs_da_node_create(args);
        ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

        error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
        if (error)
                return error;
        bp->b_ops = &xfs_da3_node_buf_ops;
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
        node = bp->b_addr;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

                memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
                ichdr.magic = XFS_DA3_NODE_MAGIC;
                hdr3->info.blkno = cpu_to_be64(bp->b_bn);
                hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
                uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
        } else {
                ichdr.magic = XFS_DA_NODE_MAGIC;
        }
        ichdr.level = level;

        dp->d_ops->node_hdr_to_disk(node, &ichdr);
        xfs_trans_log_buf(tp, bp,
                XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

        *bpp = bp;
        return 0;
}
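
/*
 * A hedged note on the logging idiom used throughout this file:
 * XFS_DA_LOGRANGE (defined in xfs_da_btree.h) turns a base pointer plus a
 * field pointer and byte count into the (first byte, last byte) pair that
 * xfs_trans_log_buf() expects, roughly
 *
 *	XFS_DA_LOGRANGE(node, ptr, bytes)
 *		==> (char *)(ptr) - (char *)(node),
 *		    ((char *)(ptr) - (char *)(node)) + (bytes) - 1
 *
 * so the call in xfs_da3_node_create() above dirties only the on-disk
 * node header portion of the buffer.
 */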

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int /* error */
xfs_da3_split(
        struct xfs_da_state *state)
{
        struct xfs_da_state_blk *oldblk;
        struct xfs_da_state_blk *newblk;
        struct xfs_da_state_blk *addblk;
        struct xfs_da_intnode *node;
        int max;
        int action = 0;
        int error;
        int i;

        trace_xfs_da_split(state->args);

        /*
         * Walk back up the tree splitting/inserting/adjusting as necessary.
         * If we need to insert and there isn't room, split the node, then
         * decide which fragment to insert the new block from below into.
         * Note that we may split the root this way, but we need more fixup.
         */
        max = state->path.active - 1;
        ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
        ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
               state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

        addblk = &state->path.blk[max]; /* initial dummy value */
        for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
                oldblk = &state->path.blk[i];
                newblk = &state->altpath.blk[i];

                /*
                 * If a leaf node then
                 *     Allocate a new leaf node, then rebalance across them.
                 * else if an intermediate node then
                 *     We split on the last layer, must we split the node?
                 */
                switch (oldblk->magic) {
                case XFS_ATTR_LEAF_MAGIC:
                        error = xfs_attr3_leaf_split(state, oldblk, newblk);
                        if ((error != 0) && (error != -ENOSPC)) {
                                return error; /* GROT: attr is inconsistent */
                        }
                        if (!error) {
                                addblk = newblk;
                                break;
                        }
                        /*
                         * Entry wouldn't fit, split the leaf again. The new
                         * extrablk will be consumed by xfs_da3_node_split if
                         * the node is split.
                         */
                        state->extravalid = 1;
                        if (state->inleaf) {
                                state->extraafter = 0; /* before newblk */
                                trace_xfs_attr_leaf_split_before(state->args);
                                error = xfs_attr3_leaf_split(state, oldblk,
                                                             &state->extrablk);
                        } else {
                                state->extraafter = 1; /* after newblk */
                                trace_xfs_attr_leaf_split_after(state->args);
                                error = xfs_attr3_leaf_split(state, newblk,
                                                             &state->extrablk);
                        }
                        if (error)
                                return error; /* GROT: attr inconsistent */
                        addblk = newblk;
                        break;
                case XFS_DIR2_LEAFN_MAGIC:
                        error = xfs_dir2_leafn_split(state, oldblk, newblk);
                        if (error)
                                return error;
                        addblk = newblk;
                        break;
                case XFS_DA_NODE_MAGIC:
                        error = xfs_da3_node_split(state, oldblk, newblk, addblk,
                                                   max - i, &action);
                        addblk->bp = NULL;
                        if (error)
                                return error; /* GROT: dir is inconsistent */
                        /*
                         * Record the newly split block for the next time thru?
                         */
                        if (action)
                                addblk = newblk;
                        else
                                addblk = NULL;
                        break;
                }

                /*
                 * Update the btree to show the new hashval for this child.
                 */
                xfs_da3_fixhashpath(state, &state->path);
        }
        if (!addblk)
                return 0;

        /*
         * xfs_da3_node_split() should have consumed any extra blocks we added
         * during a double leaf split in the attr fork. This is guaranteed as
         * we can't be here if the attr fork only has a single leaf block.
         */
        ASSERT(state->extravalid == 0 ||
               state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

        /*
         * Split the root node.
         */
        ASSERT(state->path.active == 0);
        oldblk = &state->path.blk[0];
        error = xfs_da3_root_split(state, oldblk, addblk);
        if (error) {
                addblk->bp = NULL;
                return error; /* GROT: dir is inconsistent */
        }

        /*
         * Update pointers to the node which used to be block 0 and just got
         * bumped because of the addition of a new root node. Note that the
         * original block 0 could be at any position in the list of blocks in
         * the tree.
         *
         * Note: the magic numbers and sibling pointers are in the same physical
         * place for both v2 and v3 headers (by design). Hence it doesn't matter
         * which version of the xfs_da_intnode structure we use here as the
         * result will be the same using either structure.
         */
        node = oldblk->bp->b_addr;
        if (node->hdr.info.forw) {
                ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
                node = addblk->bp->b_addr;
                node->hdr.info.back = cpu_to_be32(oldblk->blkno);
                xfs_trans_log_buf(state->args->trans, addblk->bp,
                                  XFS_DA_LOGRANGE(node, &node->hdr.info,
                                  sizeof(node->hdr.info)));
        }
        node = oldblk->bp->b_addr;
        if (node->hdr.info.back) {
                ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
                node = addblk->bp->b_addr;
                node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
                xfs_trans_log_buf(state->args->trans, addblk->bp,
                                  XFS_DA_LOGRANGE(node, &node->hdr.info,
                                  sizeof(node->hdr.info)));
        }
        addblk->bp = NULL;
        return 0;
}

/*
 * Split the root. We have to create a new root and point to the two
 * parts (the split old root) that we just created. Copy block zero to
 * the EOF, extending the inode in the process.
 */
STATIC int /* error */
xfs_da3_root_split(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *blk1,
        struct xfs_da_state_blk *blk2)
{
        struct xfs_da_intnode *node;
        struct xfs_da_intnode *oldroot;
        struct xfs_da_node_entry *btree;
        struct xfs_da3_icnode_hdr nodehdr;
        struct xfs_da_args *args;
        struct xfs_buf *bp;
        struct xfs_inode *dp;
        struct xfs_trans *tp;
        struct xfs_dir2_leaf *leaf;
        xfs_dablk_t blkno;
        int level;
        int error;
        int size;

        trace_xfs_da_root_split(state->args);

        /*
         * Copy the existing (incorrect) block from the root node position
         * to a free space somewhere.
         */
        args = state->args;
        error = xfs_da_grow_inode(args, &blkno);
        if (error)
                return error;

        dp = args->dp;
        tp = args->trans;
        error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
        if (error)
                return error;
        node = bp->b_addr;
        oldroot = blk1->bp->b_addr;
        if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
            oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
                struct xfs_da3_icnode_hdr icnodehdr;

                dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
                btree = dp->d_ops->node_tree_p(oldroot);
                size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
                level = icnodehdr.level;

                /*
                 * we are about to copy oldroot to bp, so set up the type
                 * of bp while we know exactly what it will be.
                 */
                xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
        } else {
                struct xfs_dir3_icleaf_hdr leafhdr;
                struct xfs_dir2_leaf_entry *ents;

                leaf = (xfs_dir2_leaf_t *)oldroot;
                dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
                ents = dp->d_ops->leaf_ents_p(leaf);

                ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
                       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
                size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
                level = 0;

                /*
                 * we are about to copy oldroot to bp, so set up the type
                 * of bp while we know exactly what it will be.
                 */
                xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
        }

        /*
         * we can copy most of the information in the node from one block to
         * another, but for CRC enabled headers we have to make sure that the
         * block specific identifiers are kept intact. We update the buffer
         * directly for this.
         */
        memcpy(node, oldroot, size);
        if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
            oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
                struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

                node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
        }
        xfs_trans_log_buf(tp, bp, 0, size - 1);

        bp->b_ops = blk1->bp->b_ops;
        xfs_trans_buf_copy_type(bp, blk1->bp);
        blk1->bp = bp;
        blk1->blkno = blkno;

        /*
         * Set up the new root node.
         */
        error = xfs_da3_node_create(args,
                (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
                level + 1, &bp, args->whichfork);
        if (error)
                return error;

        node = bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        btree = dp->d_ops->node_tree_p(node);
        btree[0].hashval = cpu_to_be32(blk1->hashval);
        btree[0].before = cpu_to_be32(blk1->blkno);
        btree[1].hashval = cpu_to_be32(blk2->hashval);
        btree[1].before = cpu_to_be32(blk2->blkno);
        nodehdr.count = 2;
        dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
        if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
            oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
                ASSERT(blk1->blkno >= args->geo->leafblk &&
                       blk1->blkno < args->geo->freeblk);
                ASSERT(blk2->blkno >= args->geo->leafblk &&
                       blk2->blkno < args->geo->freeblk);
        }
#endif

        /* Header is already logged by xfs_da_node_create */
        xfs_trans_log_buf(tp, bp,
                XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

        return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int /* error */
xfs_da3_node_split(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *oldblk,
        struct xfs_da_state_blk *newblk,
        struct xfs_da_state_blk *addblk,
        int treelevel,
        int *result)
{
        struct xfs_da_intnode *node;
        struct xfs_da3_icnode_hdr nodehdr;
        xfs_dablk_t blkno;
        int newcount;
        int error;
        int useextra;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_split(state->args);

        node = oldblk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);

        /*
         * With V2 dirs the extra block is data or freespace.
         */
        useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
        newcount = 1 + useextra;
        /*
         * Do we have to split the node?
         */
        if (nodehdr.count + newcount > state->args->geo->node_ents) {
                /*
                 * Allocate a new node, add to the doubly linked chain of
                 * nodes, then move some of our excess entries into it.
                 */
                error = xfs_da_grow_inode(state->args, &blkno);
                if (error)
                        return error; /* GROT: dir is inconsistent */

                error = xfs_da3_node_create(state->args, blkno, treelevel,
                                            &newblk->bp, state->args->whichfork);
                if (error)
                        return error; /* GROT: dir is inconsistent */
                newblk->blkno = blkno;
                newblk->magic = XFS_DA_NODE_MAGIC;
                xfs_da3_node_rebalance(state, oldblk, newblk);
                error = xfs_da3_blk_link(state, oldblk, newblk);
                if (error)
                        return error;
                *result = 1;
        } else {
                *result = 0;
        }

        /*
         * Insert the new entry(s) into the correct block
         * (updating last hashval in the process).
         *
         * xfs_da3_node_add() inserts BEFORE the given index,
         * and as a result of using node_lookup_int() we always
         * point to a valid entry (not after one), but a split
         * operation always results in a new block whose hashvals
         * FOLLOW the current block.
         *
         * If we had double-split op below us, then add the extra block too.
         */
        node = oldblk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        if (oldblk->index <= nodehdr.count) {
                oldblk->index++;
                xfs_da3_node_add(state, oldblk, addblk);
                if (useextra) {
                        if (state->extraafter)
                                oldblk->index++;
                        xfs_da3_node_add(state, oldblk, &state->extrablk);
                        state->extravalid = 0;
                }
        } else {
                newblk->index++;
                xfs_da3_node_add(state, newblk, addblk);
                if (useextra) {
                        if (state->extraafter)
                                newblk->index++;
                        xfs_da3_node_add(state, newblk, &state->extrablk);
                        state->extravalid = 0;
                }
        }

        return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
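/*
 * A hedged worked example of the rebalance arithmetic below: count is
 * (count1 - count2) / 2, i.e. half the imbalance moves. With a freshly
 * split node of, say, 96 entries and an empty sibling, count is 48 and
 * blk2 receives the upper 48 entries; with counts of 10 and 8 only one
 * entry moves. A negative count moves entries the other way, from the
 * low end of node2 to the high end of node1.
 */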
STATIC void
xfs_da3_node_rebalance(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *blk1,
        struct xfs_da_state_blk *blk2)
{
        struct xfs_da_intnode *node1;
        struct xfs_da_intnode *node2;
        struct xfs_da_intnode *tmpnode;
        struct xfs_da_node_entry *btree1;
        struct xfs_da_node_entry *btree2;
        struct xfs_da_node_entry *btree_s;
        struct xfs_da_node_entry *btree_d;
        struct xfs_da3_icnode_hdr nodehdr1;
        struct xfs_da3_icnode_hdr nodehdr2;
        struct xfs_trans *tp;
        int count;
        int tmp;
        int swap = 0;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_rebalance(state->args);

        node1 = blk1->bp->b_addr;
        node2 = blk2->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
        dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
        btree1 = dp->d_ops->node_tree_p(node1);
        btree2 = dp->d_ops->node_tree_p(node2);

        /*
         * Figure out how many entries need to move, and in which direction.
         * Swap the nodes around if that makes it simpler.
         */
        if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
            ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
             (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
              be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
                tmpnode = node1;
                node1 = node2;
                node2 = tmpnode;
                dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
                dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
                btree1 = dp->d_ops->node_tree_p(node1);
                btree2 = dp->d_ops->node_tree_p(node2);
                swap = 1;
        }

        count = (nodehdr1.count - nodehdr2.count) / 2;
        if (count == 0)
                return;
        tp = state->args->trans;
        /*
         * Two cases: high-to-low and low-to-high.
         */
        if (count > 0) {
                /*
                 * Move elements in node2 up to make a hole.
                 */
                tmp = nodehdr2.count;
                if (tmp > 0) {
                        tmp *= (uint)sizeof(xfs_da_node_entry_t);
                        btree_s = &btree2[0];
                        btree_d = &btree2[count];
                        memmove(btree_d, btree_s, tmp);
                }

                /*
                 * Move the req'd B-tree elements from high in node1 to
                 * low in node2.
                 */
                nodehdr2.count += count;
                tmp = count * (uint)sizeof(xfs_da_node_entry_t);
                btree_s = &btree1[nodehdr1.count - count];
                btree_d = &btree2[0];
                memcpy(btree_d, btree_s, tmp);
                nodehdr1.count -= count;
        } else {
                /*
                 * Move the req'd B-tree elements from low in node2 to
                 * high in node1.
                 */
                count = -count;
                tmp = count * (uint)sizeof(xfs_da_node_entry_t);
                btree_s = &btree2[0];
                btree_d = &btree1[nodehdr1.count];
                memcpy(btree_d, btree_s, tmp);
                nodehdr1.count += count;

                xfs_trans_log_buf(tp, blk1->bp,
                        XFS_DA_LOGRANGE(node1, btree_d, tmp));

                /*
                 * Move elements in node2 down to fill the hole.
                 */
                tmp = nodehdr2.count - count;
                tmp *= (uint)sizeof(xfs_da_node_entry_t);
                btree_s = &btree2[count];
                btree_d = &btree2[0];
                memmove(btree_d, btree_s, tmp);
                nodehdr2.count -= count;
        }

        /*
         * Log header of node 1 and all current bits of node 2.
         */
        dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
        xfs_trans_log_buf(tp, blk1->bp,
                XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

        dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
        xfs_trans_log_buf(tp, blk2->bp,
                XFS_DA_LOGRANGE(node2, &node2->hdr,
                                dp->d_ops->node_hdr_size +
                                (sizeof(btree2[0]) * nodehdr2.count)));

        /*
         * Record the last hashval from each block for upward propagation.
         * (note: don't use the swapped node pointers)
         */
        if (swap) {
                node1 = blk1->bp->b_addr;
                node2 = blk2->bp->b_addr;
                dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
                dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
                btree1 = dp->d_ops->node_tree_p(node1);
                btree2 = dp->d_ops->node_tree_p(node2);
        }
        blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
        blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

        /*
         * Adjust the expected index for insertion.
         */
        if (blk1->index >= nodehdr1.count) {
                blk2->index = blk1->index - nodehdr1.count;
                blk1->index = nodehdr1.count + 1; /* make it invalid */
        }
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *oldblk,
        struct xfs_da_state_blk *newblk)
{
        struct xfs_da_intnode *node;
        struct xfs_da3_icnode_hdr nodehdr;
        struct xfs_da_node_entry *btree;
        int tmp;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_add(state->args);

        node = oldblk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        btree = dp->d_ops->node_tree_p(node);

        ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
        ASSERT(newblk->blkno != 0);
        if (state->args->whichfork == XFS_DATA_FORK)
                ASSERT(newblk->blkno >= state->args->geo->leafblk &&
                       newblk->blkno < state->args->geo->freeblk);

        /*
         * We may need to make some room before we insert the new node.
         */
        tmp = 0;
        if (oldblk->index < nodehdr.count) {
                tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
                memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
        }
        btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
        btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
        xfs_trans_log_buf(state->args->trans, oldblk->bp,
                XFS_DA_LOGRANGE(node, &btree[oldblk->index],
                                tmp + sizeof(*btree)));

        nodehdr.count += 1;
        dp->d_ops->node_hdr_to_disk(node, &nodehdr);
        xfs_trans_log_buf(state->args->trans, oldblk->bp,
                XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

        /*
         * Copy the last hash value from the oldblk to propagate upwards.
         */
        oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
        struct xfs_da_state *state)
{
        struct xfs_da_state_blk *drop_blk;
        struct xfs_da_state_blk *save_blk;
        int action = 0;
        int error;

        trace_xfs_da_join(state->args);

        drop_blk = &state->path.blk[ state->path.active-1 ];
        save_blk = &state->altpath.blk[ state->path.active-1 ];
        ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
        ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
               drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

        /*
         * Walk back up the tree joining/deallocating as necessary.
         * When we stop dropping blocks, break out.
         */
        for ( ; state->path.active >= 2; drop_blk--, save_blk--,
                state->path.active--) {
                /*
                 * See if we can combine the block with a neighbor.
                 *   (action == 0) => no options, just leave
                 *   (action == 1) => coalesce, then unlink
                 *   (action == 2) => block empty, unlink it
                 */
                switch (drop_blk->magic) {
                case XFS_ATTR_LEAF_MAGIC:
                        error = xfs_attr3_leaf_toosmall(state, &action);
                        if (error)
                                return error;
                        if (action == 0)
                                return 0;
                        xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
                        break;
                case XFS_DIR2_LEAFN_MAGIC:
                        error = xfs_dir2_leafn_toosmall(state, &action);
                        if (error)
                                return error;
                        if (action == 0)
                                return 0;
                        xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
                        break;
                case XFS_DA_NODE_MAGIC:
                        /*
                         * Remove the offending node, fixup hashvals,
                         * check for a toosmall neighbor.
                         */
                        xfs_da3_node_remove(state, drop_blk);
                        xfs_da3_fixhashpath(state, &state->path);
                        error = xfs_da3_node_toosmall(state, &action);
                        if (error)
                                return error;
                        if (action == 0)
                                return 0;
                        xfs_da3_node_unbalance(state, drop_blk, save_blk);
                        break;
                }
                xfs_da3_fixhashpath(state, &state->altpath);
                error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
                xfs_da_state_kill_altpath(state);
                if (error)
                        return error;
                error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
                                            drop_blk->bp);
                drop_blk->bp = NULL;
                if (error)
                        return error;
        }
        /*
         * We joined all the way to the top. If it turns out that
         * we only have one entry in the root, make the child block
         * the new root.
         */
        xfs_da3_node_remove(state, drop_blk);
        xfs_da3_fixhashpath(state, &state->path);
        error = xfs_da3_root_join(state, &state->path.blk[0]);
        return error;
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
        __be16 magic = blkinfo->magic;

        if (level == 1) {
                ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
                       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
                       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
                       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
        } else {
                ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
                       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
        }
        ASSERT(!blkinfo->forw);
        ASSERT(!blkinfo->back);
}
#else /* !DEBUG */
#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif /* !DEBUG */

/*
 * We have only one entry in the root. Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *root_blk)
{
        struct xfs_da_intnode *oldroot;
        struct xfs_da_args *args;
        xfs_dablk_t child;
        struct xfs_buf *bp;
        struct xfs_da3_icnode_hdr oldroothdr;
        struct xfs_da_node_entry *btree;
        int error;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_root_join(state->args);

        ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

        args = state->args;
        oldroot = root_blk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
        ASSERT(oldroothdr.forw == 0);
        ASSERT(oldroothdr.back == 0);

        /*
         * If the root has more than one child, then don't do anything.
         */
        if (oldroothdr.count > 1)
                return 0;

        /*
         * Read in the (only) child block, then copy those bytes into
         * the root block's buffer and free the original child block.
         */
        btree = dp->d_ops->node_tree_p(oldroot);
        child = be32_to_cpu(btree[0].before);
        ASSERT(child != 0);
        error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
                                  args->whichfork);
        if (error)
                return error;
        xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

        /*
         * This could be copying a leaf back into the root block in the case of
         * there only being a single leaf block left in the tree. Hence we have
         * to update the b_ops pointer as well to match the buffer type change
         * that could occur. For dir3 blocks we also need to update the block
         * number in the buffer header.
         */
        memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
        root_blk->bp->b_ops = bp->b_ops;
        xfs_trans_buf_copy_type(root_blk->bp, bp);
        if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
                struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
                da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
        }
        xfs_trans_log_buf(args->trans, root_blk->bp, 0,
                          args->geo->blksize - 1);
        error = xfs_da_shrink_inode(args, child, bp);
        return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor. Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
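/*
 * Hedged arithmetic note for the "25% to spare" test below: the merge
 * budget is node_ents - node_ents/4 - our count. For example, with
 * node_ents == 128 and 40 live entries in this block, a sibling holding
 * up to 128 - 32 - 40 = 56 entries can be absorbed while still leaving
 * a quarter of the combined block free. (128 is an invented geometry,
 * purely for illustration.)
 */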
STATIC int
xfs_da3_node_toosmall(
        struct xfs_da_state *state,
        int *action)
{
        struct xfs_da_intnode *node;
        struct xfs_da_state_blk *blk;
        struct xfs_da_blkinfo *info;
        xfs_dablk_t blkno;
        struct xfs_buf *bp;
        struct xfs_da3_icnode_hdr nodehdr;
        int count;
        int forward;
        int error;
        int retval;
        int i;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_toosmall(state->args);

        /*
         * Check for the degenerate case of the block being over 50% full.
         * If so, it's not worth even looking to see if we might be able
         * to coalesce with a sibling.
         */
        blk = &state->path.blk[ state->path.active-1 ];
        info = blk->bp->b_addr;
        node = (xfs_da_intnode_t *)info;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
                *action = 0; /* blk over 50%, don't try to join */
                return 0; /* blk over 50%, don't try to join */
        }

        /*
         * Check for the degenerate case of the block being empty.
         * If the block is empty, we'll simply delete it, no need to
         * coalesce it with a sibling block. We choose (arbitrarily)
         * to merge with the forward block unless it is NULL.
         */
        if (nodehdr.count == 0) {
                /*
                 * Make altpath point to the block we want to keep and
                 * path point to the block we want to drop (this one).
                 */
                forward = (info->forw != 0);
                memcpy(&state->altpath, &state->path, sizeof(state->path));
                error = xfs_da3_path_shift(state, &state->altpath, forward,
                                           0, &retval);
                if (error)
                        return error;
                if (retval) {
                        *action = 0;
                } else {
                        *action = 2;
                }
                return 0;
        }

        /*
         * Examine each sibling block to see if we can coalesce with
         * at least 25% free space to spare. We need to figure out
         * whether to merge with the forward or the backward block.
         * We prefer coalescing with the lower numbered sibling so as
         * to shrink a directory over time.
         */
        count = state->args->geo->node_ents;
        count -= state->args->geo->node_ents >> 2;
        count -= nodehdr.count;

        /* start with smaller blk num */
        forward = nodehdr.forw < nodehdr.back;
        for (i = 0; i < 2; forward = !forward, i++) {
                struct xfs_da3_icnode_hdr thdr;
                if (forward)
                        blkno = nodehdr.forw;
                else
                        blkno = nodehdr.back;
                if (blkno == 0)
                        continue;
                error = xfs_da3_node_read(state->args->trans, dp,
                                          blkno, -1, &bp, state->args->whichfork);
                if (error)
                        return error;

                node = bp->b_addr;
                dp->d_ops->node_hdr_from_disk(&thdr, node);
                xfs_trans_brelse(state->args->trans, bp);

                if (count - thdr.count >= 0)
                        break; /* fits with at least 25% to spare */
        }
        if (i >= 2) {
                *action = 0;
                return 0;
        }

        /*
         * Make altpath point to the block we want to keep (the lower
         * numbered block) and path point to the block we want to drop.
         */
        memcpy(&state->altpath, &state->path, sizeof(state->path));
        if (blkno < blk->blkno) {
                error = xfs_da3_path_shift(state, &state->altpath, forward,
                                           0, &retval);
        } else {
                error = xfs_da3_path_shift(state, &state->path, forward,
                                           0, &retval);
        }
        if (error)
                return error;
        if (retval) {
                *action = 0;
                return 0;
        }
        *action = 1;
        return 0;
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
        struct xfs_inode *dp,
        struct xfs_buf *bp,
        int *count)
{
        struct xfs_da_intnode *node;
        struct xfs_da_node_entry *btree;
        struct xfs_da3_icnode_hdr nodehdr;

        node = bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        if (count)
                *count = nodehdr.count;
        if (!nodehdr.count)
                return 0;
        btree = dp->d_ops->node_tree_p(node);
        return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
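/*
 * Illustration with invented numbers: if a leaf's last hashval drops
 * from 0x5000 to 0x4c00 after a remove, the parent entry pointing at
 * that leaf is rewritten to 0x4c00, and so on up the path until a level
 * is found whose recorded hashval already matches, at which point the
 * loop below breaks out early.
 */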
void
xfs_da3_fixhashpath(
        struct xfs_da_state *state,
        struct xfs_da_state_path *path)
{
        struct xfs_da_state_blk *blk;
        struct xfs_da_intnode *node;
        struct xfs_da_node_entry *btree;
        xfs_dahash_t lasthash = 0;
        int level;
        int count;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_fixhashpath(state->args);

        level = path->active-1;
        blk = &path->blk[ level ];
        switch (blk->magic) {
        case XFS_ATTR_LEAF_MAGIC:
                lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
                if (count == 0)
                        return;
                break;
        case XFS_DIR2_LEAFN_MAGIC:
                lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
                if (count == 0)
                        return;
                break;
        case XFS_DA_NODE_MAGIC:
                lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
                if (count == 0)
                        return;
                break;
        }
        for (blk--, level--; level >= 0; blk--, level--) {
                struct xfs_da3_icnode_hdr nodehdr;

                node = blk->bp->b_addr;
                dp->d_ops->node_hdr_from_disk(&nodehdr, node);
                btree = dp->d_ops->node_tree_p(node);
                if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
                        break;
                blk->hashval = lasthash;
                btree[blk->index].hashval = cpu_to_be32(lasthash);
                xfs_trans_log_buf(state->args->trans, blk->bp,
                                  XFS_DA_LOGRANGE(node, &btree[blk->index],
                                                  sizeof(*btree)));

                lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
        }
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *drop_blk)
{
        struct xfs_da_intnode *node;
        struct xfs_da3_icnode_hdr nodehdr;
        struct xfs_da_node_entry *btree;
        int index;
        int tmp;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_remove(state->args);

        node = drop_blk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
        ASSERT(drop_blk->index < nodehdr.count);
        ASSERT(drop_blk->index >= 0);

        /*
         * Copy over the offending entry, or just zero it out.
         */
        index = drop_blk->index;
        btree = dp->d_ops->node_tree_p(node);
        if (index < nodehdr.count - 1) {
                tmp = nodehdr.count - index - 1;
                tmp *= (uint)sizeof(xfs_da_node_entry_t);
                memmove(&btree[index], &btree[index + 1], tmp);
                xfs_trans_log_buf(state->args->trans, drop_blk->bp,
                        XFS_DA_LOGRANGE(node, &btree[index], tmp));
                index = nodehdr.count - 1;
        }
        memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
        xfs_trans_log_buf(state->args->trans, drop_blk->bp,
                XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
        nodehdr.count -= 1;
        dp->d_ops->node_hdr_to_disk(node, &nodehdr);
        xfs_trans_log_buf(state->args->trans, drop_blk->bp,
                XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

        /*
         * Copy the last hash value from the block to propagate upwards.
         */
        drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *drop_blk,
        struct xfs_da_state_blk *save_blk)
{
        struct xfs_da_intnode *drop_node;
        struct xfs_da_intnode *save_node;
        struct xfs_da_node_entry *drop_btree;
        struct xfs_da_node_entry *save_btree;
        struct xfs_da3_icnode_hdr drop_hdr;
        struct xfs_da3_icnode_hdr save_hdr;
        struct xfs_trans *tp;
        int sindex;
        int tmp;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_node_unbalance(state->args);

        drop_node = drop_blk->bp->b_addr;
        save_node = save_blk->bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
        dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
        drop_btree = dp->d_ops->node_tree_p(drop_node);
        save_btree = dp->d_ops->node_tree_p(save_node);
        tp = state->args->trans;

        /*
         * If the dying block has lower hashvals, then move all the
         * elements in the remaining block up to make a hole.
         */
        if ((be32_to_cpu(drop_btree[0].hashval) <
             be32_to_cpu(save_btree[0].hashval)) ||
            (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
             be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
                /* XXX: check this - is memmove dst correct? */
                tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
                memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

                sindex = 0;
                xfs_trans_log_buf(tp, save_blk->bp,
                        XFS_DA_LOGRANGE(save_node, &save_btree[0],
                                (save_hdr.count + drop_hdr.count) *
                                        sizeof(xfs_da_node_entry_t)));
        } else {
                sindex = save_hdr.count;
                xfs_trans_log_buf(tp, save_blk->bp,
                        XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
                                drop_hdr.count * sizeof(xfs_da_node_entry_t)));
        }

        /*
         * Move all the B-tree elements from drop_blk to save_blk.
         */
        tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
        memcpy(&save_btree[sindex], &drop_btree[0], tmp);
        save_hdr.count += drop_hdr.count;

        dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
        xfs_trans_log_buf(tp, save_blk->bp,
                XFS_DA_LOGRANGE(save_node, &save_node->hdr,
                                dp->d_ops->node_hdr_size));

        /*
         * Save the last hashval in the remaining block for upward propagation.
         */
        save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend. This is a
 * pruned depth-first tree search.
 */
int /* error */
xfs_da3_node_lookup_int(
        struct xfs_da_state *state,
        int *result)
{
        struct xfs_da_state_blk *blk;
        struct xfs_da_blkinfo *curr;
        struct xfs_da_intnode *node;
        struct xfs_da_node_entry *btree;
        struct xfs_da3_icnode_hdr nodehdr;
        struct xfs_da_args *args;
        xfs_dablk_t blkno;
        xfs_dahash_t hashval;
        xfs_dahash_t btreehashval;
        int probe;
        int span;
        int max;
        int error;
        int retval;
        unsigned int expected_level = 0;
        uint16_t magic;
        struct xfs_inode *dp = state->args->dp;

        args = state->args;

        /*
         * Descend thru the B-tree searching each level for the right
         * node to use, until the right hashval is found.
         */
        blkno = args->geo->leafblk;
        for (blk = &state->path.blk[0], state->path.active = 1;
             state->path.active <= XFS_DA_NODE_MAXDEPTH;
             blk++, state->path.active++) {
                /*
                 * Read the next node down in the tree.
                 */
                blk->blkno = blkno;
                error = xfs_da3_node_read(args->trans, args->dp, blkno,
                                          -1, &blk->bp, args->whichfork);
                if (error) {
                        blk->blkno = 0;
                        state->path.active--;
                        return error;
                }
                curr = blk->bp->b_addr;
                magic = be16_to_cpu(curr->magic);

                if (magic == XFS_ATTR_LEAF_MAGIC ||
                    magic == XFS_ATTR3_LEAF_MAGIC) {
                        blk->magic = XFS_ATTR_LEAF_MAGIC;
                        blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
                        break;
                }

                if (magic == XFS_DIR2_LEAFN_MAGIC ||
                    magic == XFS_DIR3_LEAFN_MAGIC) {
                        blk->magic = XFS_DIR2_LEAFN_MAGIC;
                        blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
                                                              blk->bp, NULL);
                        break;
                }

                if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
                        return -EFSCORRUPTED;

                blk->magic = XFS_DA_NODE_MAGIC;

                /*
                 * Search an intermediate node for a match.
                 */
                node = blk->bp->b_addr;
                dp->d_ops->node_hdr_from_disk(&nodehdr, node);
                btree = dp->d_ops->node_tree_p(node);

                /* Tree taller than we can handle; bail out! */
                if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
                        return -EFSCORRUPTED;

                /* Check the level from the root. */
                if (blkno == args->geo->leafblk)
                        expected_level = nodehdr.level - 1;
                else if (expected_level != nodehdr.level)
                        return -EFSCORRUPTED;
                else
                        expected_level--;

                max = nodehdr.count;
                blk->hashval = be32_to_cpu(btree[max - 1].hashval);

                /*
                 * Binary search. (note: small blocks will skip loop)
                 */
                probe = span = max / 2;
                hashval = args->hashval;
                while (span > 4) {
                        span /= 2;
                        btreehashval = be32_to_cpu(btree[probe].hashval);
                        if (btreehashval < hashval)
                                probe += span;
                        else if (btreehashval > hashval)
                                probe -= span;
                        else
                                break;
                }
                ASSERT((probe >= 0) && (probe < max));
                ASSERT((span <= 4) ||
                       (be32_to_cpu(btree[probe].hashval) == hashval));

                /*
                 * Since we may have duplicate hashval's, find the first
                 * matching hashval in the node.
                 */
                while (probe > 0 &&
                       be32_to_cpu(btree[probe].hashval) >= hashval) {
                        probe--;
                }
                while (probe < max &&
                       be32_to_cpu(btree[probe].hashval) < hashval) {
                        probe++;
                }

                /*
                 * Pick the right block to descend on.
                 */
                if (probe == max) {
                        blk->index = max - 1;
                        blkno = be32_to_cpu(btree[max - 1].before);
                } else {
                        blk->index = probe;
                        blkno = be32_to_cpu(btree[probe].before);
                }

                /* We can't point back to the root. */
                if (blkno == args->geo->leafblk)
                        return -EFSCORRUPTED;
        }

        if (expected_level != 0)
                return -EFSCORRUPTED;

        /*
         * A leaf block that ends in the hashval that we are interested in
         * (final hashval == search hashval) means that the next block may
         * contain more entries with the same hashval, shift upward to the
         * next leaf and keep searching.
         */
        for (;;) {
                if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
                        retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
                                                           &blk->index, state);
                } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
                        retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
                        blk->index = args->index;
                        args->blkno = blk->blkno;
                } else {
                        ASSERT(0);
                        return -EFSCORRUPTED;
                }
                if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
                    (blk->hashval == args->hashval)) {
                        error = xfs_da3_path_shift(state, &state->path, 1, 1,
                                                   &retval);
                        if (error)
                                return error;
                        if (retval == 0) {
                                continue;
                        } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
                                /* path_shift() gives ENOENT */
                                retval = -ENOATTR;
                        }
                }
                break;
        }
        *result = retval;
        return 0;
}
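
/*
 * A hedged, self-contained sketch of the probe logic above (not kernel
 * code, and never compiled): binary-search to any entry with the wanted
 * hashval, then back up to the first such entry so that duplicate
 * hashvals are never skipped.
 */
#if 0 /* illustrative only */
static int first_matching_entry(const uint32_t *hashvals, int max,
                                uint32_t want)
{
        int probe = max / 2;
        int span = max / 2;

        while (span > 4) {              /* narrow to within a few slots */
                span /= 2;
                if (hashvals[probe] < want)
                        probe += span;
                else if (hashvals[probe] > want)
                        probe -= span;
                else
                        break;
        }
        while (probe > 0 && hashvals[probe] >= want)    /* back over dups */
                probe--;
        while (probe < max && hashvals[probe] < want)   /* first >= want */
                probe++;
        return probe;                   /* max if every hashval < want */
}
#endif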

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
        struct xfs_inode *dp,
        struct xfs_buf *node1_bp,
        struct xfs_buf *node2_bp)
{
        struct xfs_da_intnode *node1;
        struct xfs_da_intnode *node2;
        struct xfs_da_node_entry *btree1;
        struct xfs_da_node_entry *btree2;
        struct xfs_da3_icnode_hdr node1hdr;
        struct xfs_da3_icnode_hdr node2hdr;

        node1 = node1_bp->b_addr;
        node2 = node2_bp->b_addr;
        dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
        dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
        btree1 = dp->d_ops->node_tree_p(node1);
        btree2 = dp->d_ops->node_tree_p(node2);

        if (node1hdr.count > 0 && node2hdr.count > 0 &&
            ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
             (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
              be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
                return 1;
        }
        return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int /* error */
xfs_da3_blk_link(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *old_blk,
        struct xfs_da_state_blk *new_blk)
{
        struct xfs_da_blkinfo *old_info;
        struct xfs_da_blkinfo *new_info;
        struct xfs_da_blkinfo *tmp_info;
        struct xfs_da_args *args;
        struct xfs_buf *bp;
        int before = 0;
        int error;
        struct xfs_inode *dp = state->args->dp;

        /*
         * Set up environment.
         */
        args = state->args;
        ASSERT(args != NULL);
        old_info = old_blk->bp->b_addr;
        new_info = new_blk->bp->b_addr;
        ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
               old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
               old_blk->magic == XFS_ATTR_LEAF_MAGIC);

        switch (old_blk->magic) {
        case XFS_ATTR_LEAF_MAGIC:
                before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
                break;
        case XFS_DIR2_LEAFN_MAGIC:
                before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
                break;
        case XFS_DA_NODE_MAGIC:
                before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
                break;
        }

        /*
         * Link blocks in appropriate order.
         */
        if (before) {
                /*
                 * Link new block in before existing block.
                 */
                trace_xfs_da_link_before(args);
                new_info->forw = cpu_to_be32(old_blk->blkno);
                new_info->back = old_info->back;
                if (old_info->back) {
                        error = xfs_da3_node_read(args->trans, dp,
                                                  be32_to_cpu(old_info->back),
                                                  -1, &bp, args->whichfork);
                        if (error)
                                return error;
                        ASSERT(bp != NULL);
                        tmp_info = bp->b_addr;
                        ASSERT(tmp_info->magic == old_info->magic);
                        ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
                        tmp_info->forw = cpu_to_be32(new_blk->blkno);
                        xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
                }
                old_info->back = cpu_to_be32(new_blk->blkno);
        } else {
                /*
                 * Link new block in after existing block.
                 */
                trace_xfs_da_link_after(args);
                new_info->forw = old_info->forw;
                new_info->back = cpu_to_be32(old_blk->blkno);
                if (old_info->forw) {
                        error = xfs_da3_node_read(args->trans, dp,
                                                  be32_to_cpu(old_info->forw),
                                                  -1, &bp, args->whichfork);
                        if (error)
                                return error;
                        ASSERT(bp != NULL);
                        tmp_info = bp->b_addr;
                        ASSERT(tmp_info->magic == old_info->magic);
                        ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
                        tmp_info->back = cpu_to_be32(new_blk->blkno);
                        xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
                }
                old_info->forw = cpu_to_be32(new_blk->blkno);
        }

        xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
        xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
        return 0;
}

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int /* error */
xfs_da3_blk_unlink(
        struct xfs_da_state *state,
        struct xfs_da_state_blk *drop_blk,
        struct xfs_da_state_blk *save_blk)
{
        struct xfs_da_blkinfo *drop_info;
        struct xfs_da_blkinfo *save_info;
        struct xfs_da_blkinfo *tmp_info;
        struct xfs_da_args *args;
        struct xfs_buf *bp;
        int error;

        /*
         * Set up environment.
         */
        args = state->args;
        ASSERT(args != NULL);
        save_info = save_blk->bp->b_addr;
        drop_info = drop_blk->bp->b_addr;
        ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
               save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
               save_blk->magic == XFS_ATTR_LEAF_MAGIC);
        ASSERT(save_blk->magic == drop_blk->magic);
        ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
               (be32_to_cpu(save_info->back) == drop_blk->blkno));
        ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
               (be32_to_cpu(drop_info->back) == save_blk->blkno));

        /*
         * Unlink the leaf block from the doubly linked chain of leaves.
         */
        if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
                trace_xfs_da_unlink_back(args);
                save_info->back = drop_info->back;
                if (drop_info->back) {
                        error = xfs_da3_node_read(args->trans, args->dp,
                                                  be32_to_cpu(drop_info->back),
                                                  -1, &bp, args->whichfork);
                        if (error)
                                return error;
                        ASSERT(bp != NULL);
                        tmp_info = bp->b_addr;
                        ASSERT(tmp_info->magic == save_info->magic);
                        ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
                        tmp_info->forw = cpu_to_be32(save_blk->blkno);
                        xfs_trans_log_buf(args->trans, bp, 0,
                                          sizeof(*tmp_info) - 1);
                }
        } else {
                trace_xfs_da_unlink_forward(args);
                save_info->forw = drop_info->forw;
                if (drop_info->forw) {
                        error = xfs_da3_node_read(args->trans, args->dp,
                                                  be32_to_cpu(drop_info->forw),
                                                  -1, &bp, args->whichfork);
                        if (error)
                                return error;
                        ASSERT(bp != NULL);
                        tmp_info = bp->b_addr;
                        ASSERT(tmp_info->magic == save_info->magic);
                        ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
                        tmp_info->back = cpu_to_be32(save_blk->blkno);
                        xfs_trans_log_buf(args->trans, bp, 0,
                                          sizeof(*tmp_info) - 1);
                }
        }

        xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
        return 0;
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
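/*
 * Hedged example of the semantics: with forward == 1 and every block on
 * the path already indexed at its last entry, there is no next sibling,
 * so *result is set to -ENOENT and the caller is expected to have passed
 * XFS_DA_OP_OKNOENT. Otherwise the first level with room steps to the
 * adjacent entry and the walk descends back down that edge to the
 * original depth.
 */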
int /* error */
xfs_da3_path_shift(
        struct xfs_da_state *state,
        struct xfs_da_state_path *path,
        int forward,
        int release,
        int *result)
{
        struct xfs_da_state_blk *blk;
        struct xfs_da_blkinfo *info;
        struct xfs_da_intnode *node;
        struct xfs_da_args *args;
        struct xfs_da_node_entry *btree;
        struct xfs_da3_icnode_hdr nodehdr;
        struct xfs_buf *bp;
        xfs_dablk_t blkno = 0;
        int level;
        int error;
        struct xfs_inode *dp = state->args->dp;

        trace_xfs_da_path_shift(state->args);

        /*
         * Roll up the Btree looking for the first block where our
         * current index is not at the edge of the block. Note that
         * we skip the bottom layer because we want the sibling block.
         */
        args = state->args;
        ASSERT(args != NULL);
        ASSERT(path != NULL);
        ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
        level = (path->active-1) - 1; /* skip bottom layer in path */
        for (blk = &path->blk[level]; level >= 0; blk--, level--) {
                node = blk->bp->b_addr;
                dp->d_ops->node_hdr_from_disk(&nodehdr, node);
                btree = dp->d_ops->node_tree_p(node);

                if (forward && (blk->index < nodehdr.count - 1)) {
                        blk->index++;
                        blkno = be32_to_cpu(btree[blk->index].before);
                        break;
                } else if (!forward && (blk->index > 0)) {
                        blk->index--;
                        blkno = be32_to_cpu(btree[blk->index].before);
                        break;
                }
        }
        if (level < 0) {
                *result = -ENOENT; /* we're out of our tree */
                ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
                return 0;
        }

        /*
         * Roll down the edge of the subtree until we reach the
         * same depth we were at originally.
         */
        for (blk++, level++; level < path->active; blk++, level++) {
                /*
                 * Read the next child block into a local buffer.
                 */
                error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
                                          args->whichfork);
                if (error)
                        return error;

                /*
                 * Release the old block (if it's dirty, the trans doesn't
                 * actually let go) and swap the local buffer into the path
                 * structure. This ensures failure of the above read doesn't set
                 * a NULL buffer in an active slot in the path.
                 */
                if (release)
                        xfs_trans_brelse(args->trans, blk->bp);
                blk->blkno = blkno;
                blk->bp = bp;

                info = blk->bp->b_addr;
                ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
                       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));


                /*
                 * Note: we flatten the magic number to a single type so we
                 * don't have to compare against crc/non-crc types elsewhere.
                 */
                switch (be16_to_cpu(info->magic)) {
                case XFS_DA_NODE_MAGIC:
                case XFS_DA3_NODE_MAGIC:
                        blk->magic = XFS_DA_NODE_MAGIC;
                        node = (xfs_da_intnode_t *)info;
                        dp->d_ops->node_hdr_from_disk(&nodehdr, node);
                        btree = dp->d_ops->node_tree_p(node);
                        blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
                        if (forward)
                                blk->index = 0;
                        else
                                blk->index = nodehdr.count - 1;
                        blkno = be32_to_cpu(btree[blk->index].before);
                        break;
                case XFS_ATTR_LEAF_MAGIC:
                case XFS_ATTR3_LEAF_MAGIC:
                        blk->magic = XFS_ATTR_LEAF_MAGIC;
                        ASSERT(level == path->active-1);
                        blk->index = 0;
                        blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
                        break;
                case XFS_DIR2_LEAFN_MAGIC:
                case XFS_DIR3_LEAFN_MAGIC:
                        blk->magic = XFS_DIR2_LEAFN_MAGIC;
                        ASSERT(level == path->active-1);
1981 blk->index = 0;
1982 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1983 blk->bp, NULL);
1984 break;
1985 default:
1986 ASSERT(0);
1987 break;
1988 }
1989 }
1990 *result = 0;
1991 return 0;
1992 }
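
/*
 * For illustration only: the shift above is a two-phase walk. Phase one
 * climbs the path until it finds a level whose index can move one slot in
 * the requested direction; phase two descends again, pinning the index to
 * the near edge of each newly entered child. A userspace sketch over a
 * hypothetical in-memory path (toy_path and toy_level are not XFS types),
 * ignoring the buffer reads and magic-number checks the kernel must do:
 */
#include <stdbool.h>

#define TOY_MAXDEPTH    5

struct toy_level {
        int     count;          /* entries in this node */
        int     index;          /* entry the path descends through */
};

struct toy_path {
        int                     active; /* levels in use, root first */
        struct toy_level        blk[TOY_MAXDEPTH];
};

/* Returns true and adjusts the path if a sibling subtree exists. */
static bool
toy_path_shift(struct toy_path *path, bool forward)
{
        int     level;

        /* Climb, skipping the bottom level, until an index can move. */
        for (level = path->active - 2; level >= 0; level--) {
                struct toy_level        *lv = &path->blk[level];

                if (forward ? lv->index < lv->count - 1 : lv->index > 0) {
                        lv->index += forward ? 1 : -1;
                        break;
                }
        }
        if (level < 0)
                return false;   /* already at the edge of the tree */

        /* Descend, hugging the near edge of each newly entered node. */
        for (level++; level < path->active; level++) {
                struct toy_level        *lv = &path->blk[level];

                /* the kernel reads the child block at this point */
                lv->index = forward ? 0 : lv->count - 1;
        }
        return true;
}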
1993
1994
1995 /*========================================================================
1996 * Utility routines.
1997 *========================================================================*/
1998
1999 /*
2000 * Implement a simple hash on a character string.
2001 * Rotate the hash value by 7 bits, then XOR each character in.
2002 * This is implemented with some source-level loop unrolling.
2003 */
2004 xfs_dahash_t
2005 xfs_da_hashname(const uint8_t *name, int namelen)
2006 {
2007 xfs_dahash_t hash;
2008
2009 /*
2010 * Do four characters at a time as long as we can.
2011 */
2012 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2013 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2014 (name[3] << 0) ^ rol32(hash, 7 * 4);
2015
2016 /*
2017 * Now do the rest of the characters.
2018 */
2019 switch (namelen) {
2020 case 3:
2021 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2022 rol32(hash, 7 * 3);
2023 case 2:
2024 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2025 case 1:
2026 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2027 default: /* case 0: */
2028 return hash;
2029 }
2030 }
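
/*
 * For illustration only: the unrolling above is equivalent to rotating the
 * running hash by 7 bits and XORing in one byte at a time; because name
 * bytes are only 8 bits wide, the shifts by 7/14/21 never wrap around and
 * so equal the rotations they replace. A standalone userspace check
 * (check_dahash and toy_rol32 are hypothetical, not kernel symbols):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
toy_rol32(uint32_t word, unsigned int shift)
{
        return (word << shift) | (word >> ((32 - shift) & 31));
}

/* Byte-at-a-time form of the hash: rotate by 7, XOR the next byte in. */
static uint32_t
check_dahash(const uint8_t *name, int namelen)
{
        uint32_t        hash = 0;

        while (namelen--)
                hash = *name++ ^ toy_rol32(hash, 7);
        return hash;
}

int
main(void)
{
        const char      *name = "xfs_da_btree";

        printf("hash(%s) = 0x%08" PRIx32 "\n", name,
               check_dahash((const uint8_t *)name, (int)strlen(name)));
        return 0;
}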
2031
2032 enum xfs_dacmp
2033 xfs_da_compname(
2034 struct xfs_da_args *args,
2035 const unsigned char *name,
2036 int len)
2037 {
2038 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2039 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2040 }
2041
2042 static xfs_dahash_t
2043 xfs_default_hashname(
2044 struct xfs_name *name)
2045 {
2046 return xfs_da_hashname(name->name, name->len);
2047 }
2048
2049 const struct xfs_nameops xfs_default_nameops = {
2050 .hashname = xfs_default_hashname,
2051 .compname = xfs_da_compname
2052 };
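
/*
 * For illustration only: xfs_default_nameops is an "ops table": directory
 * code hashes and compares names through function pointers so that a
 * case-insensitive variant can be swapped in without touching callers.
 * The same pattern in standalone form, with hypothetical toy_* names and
 * a deliberately simple hash:
 */
#include <string.h>

struct toy_nameops {
        unsigned int    (*hashname)(const unsigned char *name, int len);
        int             (*compname)(const unsigned char *a,
                                    const unsigned char *b, int len);
};

static unsigned int
toy_hashname(const unsigned char *name, int len)
{
        unsigned int    hash = 0;

        while (len--)
                hash = hash * 33 + *name++;     /* simple multiplicative hash */
        return hash;
}

static int
toy_compname(const unsigned char *a, const unsigned char *b, int len)
{
        return memcmp(a, b, len);
}

static const struct toy_nameops toy_default_nameops = {
        .hashname       = toy_hashname,
        .compname       = toy_compname,
};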
2053
2054 int
2055 xfs_da_grow_inode_int(
2056 struct xfs_da_args *args,
2057 xfs_fileoff_t *bno,
2058 int count)
2059 {
2060 struct xfs_trans *tp = args->trans;
2061 struct xfs_inode *dp = args->dp;
2062 int w = args->whichfork;
2063 xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
2064 struct xfs_bmbt_irec map, *mapp;
2065 int nmap, error, got, i, mapi;
2066
2067 /*
2068 * Find a spot in the file space to put the new block.
2069 */
2070 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2071 if (error)
2072 return error;
2073
2074 /*
2075 * Try mapping it in one filesystem block.
2076 */
2077 nmap = 1;
2078 error = xfs_bmapi_write(tp, dp, *bno, count,
2079 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2080 args->total, &map, &nmap);
2081 if (error)
2082 return error;
2083
2084 ASSERT(nmap <= 1);
2085 if (nmap == 1) {
2086 mapp = &map;
2087 mapi = 1;
2088 } else if (nmap == 0 && count > 1) {
2089 xfs_fileoff_t b;
2090 int c;
2091
2092 /*
2093 * If we didn't get it and the block might work if fragmented,
2094 * try without the CONTIG flag. Loop until we get it all.
2095 */
2096 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2097 for (b = *bno, mapi = 0; b < *bno + count; ) {
2098 nmap = min(XFS_BMAP_MAX_NMAP, count);
2099 c = (int)(*bno + count - b);
2100 error = xfs_bmapi_write(tp, dp, b, c,
2101 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2102 args->total, &mapp[mapi], &nmap);
2103 if (error)
2104 goto out_free_map;
2105 if (nmap < 1)
2106 break;
2107 mapi += nmap;
2108 b = mapp[mapi - 1].br_startoff +
2109 mapp[mapi - 1].br_blockcount;
2110 }
2111 } else {
2112 mapi = 0;
2113 mapp = NULL;
2114 }
2115
2116 /*
2117 * Count the blocks we got, make sure it matches the total.
2118 */
2119 for (i = 0, got = 0; i < mapi; i++)
2120 got += mapp[i].br_blockcount;
2121 if (got != count || mapp[0].br_startoff != *bno ||
2122 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2123 *bno + count) {
2124 error = -ENOSPC;
2125 goto out_free_map;
2126 }
2127
2128 /* account for newly allocated blocks in reserved blocks total */
2129 args->total -= dp->i_d.di_nblocks - nblks;
2130
2131 out_free_map:
2132 if (mapp != &map)
2133 kmem_free(mapp);
2134 return error;
2135 }
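
/*
 * For illustration only: the fallback path above maps the logical range
 * piecewise, resuming each pass just past the last extent handed back,
 * until the whole range is covered or the allocator comes up empty. The
 * shape of that loop in userspace, with a hypothetical toy_alloc()
 * standing in for xfs_bmapi_write():
 */
struct toy_extent {
        long    startoff;       /* like br_startoff */
        int     blockcount;     /* like br_blockcount */
};

/* Trivial stand-in allocator: hands back at most two blocks per call. */
static int
toy_alloc(long b, int max, struct toy_extent *ext, int *nmap)
{
        ext[0].startoff = b;
        ext[0].blockcount = max > 2 ? 2 : max;
        *nmap = 1;
        return 0;
}

/* Map blocks bno through bno + count into ext[]; caller supplies count entries. */
static int
toy_grow(long bno, int count, struct toy_extent *ext, int *nextents)
{
        long    b = bno;
        int     n = 0;

        while (b < bno + count) {
                int     c = (int)(bno + count - b);
                int     nmap = c;

                if (toy_alloc(b, c, &ext[n], &nmap))
                        return -1;
                if (nmap < 1)
                        break;          /* allocator gave up: out of space */
                n += nmap;
                /* resume just past the last extent we were given */
                b = ext[n - 1].startoff + ext[n - 1].blockcount;
        }
        *nextents = n;
        return 0;
}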
2136
2137 /*
2138 * Add a block to the btree ahead of the file.
2139 * Return the new block number to the caller.
2140 */
2141 int
2142 xfs_da_grow_inode(
2143 struct xfs_da_args *args,
2144 xfs_dablk_t *new_blkno)
2145 {
2146 xfs_fileoff_t bno;
2147 int error;
2148
2149 trace_xfs_da_grow_inode(args);
2150
2151 bno = args->geo->leafblk;
2152 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2153 if (!error)
2154 *new_blkno = (xfs_dablk_t)bno;
2155 return error;
2156 }
2157
2158 /*
2159 * Ick. We need to always be able to remove a btree block, even
2160 * if there's no space reservation because the filesystem is full.
2161 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2162 * It swaps the target block with the last block in the file. The
2163 * last block in the file can always be removed, since removing it can
2164 * never cause a bmap btree split.
2165 */
2166 STATIC int
2167 xfs_da3_swap_lastblock(
2168 struct xfs_da_args *args,
2169 xfs_dablk_t *dead_blknop,
2170 struct xfs_buf **dead_bufp)
2171 {
2172 struct xfs_da_blkinfo *dead_info;
2173 struct xfs_da_blkinfo *sib_info;
2174 struct xfs_da_intnode *par_node;
2175 struct xfs_da_intnode *dead_node;
2176 struct xfs_dir2_leaf *dead_leaf2;
2177 struct xfs_da_node_entry *btree;
2178 struct xfs_da3_icnode_hdr par_hdr;
2179 struct xfs_inode *dp;
2180 struct xfs_trans *tp;
2181 struct xfs_mount *mp;
2182 struct xfs_buf *dead_buf;
2183 struct xfs_buf *last_buf;
2184 struct xfs_buf *sib_buf;
2185 struct xfs_buf *par_buf;
2186 xfs_dahash_t dead_hash;
2187 xfs_fileoff_t lastoff;
2188 xfs_dablk_t dead_blkno;
2189 xfs_dablk_t last_blkno;
2190 xfs_dablk_t sib_blkno;
2191 xfs_dablk_t par_blkno;
2192 int error;
2193 int w;
2194 int entno;
2195 int level;
2196 int dead_level;
2197
2198 trace_xfs_da_swap_lastblock(args);
2199
2200 dead_buf = *dead_bufp;
2201 dead_blkno = *dead_blknop;
2202 tp = args->trans;
2203 dp = args->dp;
2204 w = args->whichfork;
2205 ASSERT(w == XFS_DATA_FORK);
2206 mp = dp->i_mount;
2207 lastoff = args->geo->freeblk;
2208 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2209 if (error)
2210 return error;
2211 if (unlikely(lastoff == 0)) {
2212 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2213 mp);
2214 return -EFSCORRUPTED;
2215 }
2216 /*
2217 * Read the last block in the btree space.
2218 */
2219 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2220 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2221 if (error)
2222 return error;
2223 /*
2224 * Copy the last block into the dead buffer and log it.
2225 */
2226 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2227 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2228 dead_info = dead_buf->b_addr;
2229 /*
2230 * Get values from the moved block.
2231 */
2232 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2233 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2234 struct xfs_dir3_icleaf_hdr leafhdr;
2235 struct xfs_dir2_leaf_entry *ents;
2236
2237 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2238 dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2239 ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2240 dead_level = 0;
2241 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2242 } else {
2243 struct xfs_da3_icnode_hdr deadhdr;
2244
2245 dead_node = (xfs_da_intnode_t *)dead_info;
2246 dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2247 btree = dp->d_ops->node_tree_p(dead_node);
2248 dead_level = deadhdr.level;
2249 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2250 }
2251 sib_buf = par_buf = NULL;
2252 /*
2253 * If the moved block has a left sibling, fix up the pointers.
2254 */
2255 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2256 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2257 if (error)
2258 goto done;
2259 sib_info = sib_buf->b_addr;
2260 if (unlikely(
2261 be32_to_cpu(sib_info->forw) != last_blkno ||
2262 sib_info->magic != dead_info->magic)) {
2263 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2264 XFS_ERRLEVEL_LOW, mp);
2265 error = -EFSCORRUPTED;
2266 goto done;
2267 }
2268 sib_info->forw = cpu_to_be32(dead_blkno);
2269 xfs_trans_log_buf(tp, sib_buf,
2270 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2271 sizeof(sib_info->forw)));
2272 sib_buf = NULL;
2273 }
2274 /*
2275 * If the moved block has a right sibling, fix up the pointers.
2276 */
2277 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2278 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2279 if (error)
2280 goto done;
2281 sib_info = sib_buf->b_addr;
2282 if (unlikely(
2283 be32_to_cpu(sib_info->back) != last_blkno ||
2284 sib_info->magic != dead_info->magic)) {
2285 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2286 XFS_ERRLEVEL_LOW, mp);
2287 error = -EFSCORRUPTED;
2288 goto done;
2289 }
2290 sib_info->back = cpu_to_be32(dead_blkno);
2291 xfs_trans_log_buf(tp, sib_buf,
2292 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2293 sizeof(sib_info->back)));
2294 sib_buf = NULL;
2295 }
2296 par_blkno = args->geo->leafblk;
2297 level = -1;
2298 /*
2299 * Walk down the tree looking for the parent of the moved block.
2300 */
2301 for (;;) {
2302 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2303 if (error)
2304 goto done;
2305 par_node = par_buf->b_addr;
2306 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2307 if (level >= 0 && level != par_hdr.level + 1) {
2308 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2309 XFS_ERRLEVEL_LOW, mp);
2310 error = -EFSCORRUPTED;
2311 goto done;
2312 }
2313 level = par_hdr.level;
2314 btree = dp->d_ops->node_tree_p(par_node);
2315 for (entno = 0;
2316 entno < par_hdr.count &&
2317 be32_to_cpu(btree[entno].hashval) < dead_hash;
2318 entno++)
2319 continue;
2320 if (entno == par_hdr.count) {
2321 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2322 XFS_ERRLEVEL_LOW, mp);
2323 error = -EFSCORRUPTED;
2324 goto done;
2325 }
2326 par_blkno = be32_to_cpu(btree[entno].before);
2327 if (level == dead_level + 1)
2328 break;
2329 xfs_trans_brelse(tp, par_buf);
2330 par_buf = NULL;
2331 }
2332 /*
2333 * We're in the right parent block.
2334 * Look for the right entry.
2335 */
2336 for (;;) {
2337 for (;
2338 entno < par_hdr.count &&
2339 be32_to_cpu(btree[entno].before) != last_blkno;
2340 entno++)
2341 continue;
2342 if (entno < par_hdr.count)
2343 break;
2344 par_blkno = par_hdr.forw;
2345 xfs_trans_brelse(tp, par_buf);
2346 par_buf = NULL;
2347 if (unlikely(par_blkno == 0)) {
2348 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2349 XFS_ERRLEVEL_LOW, mp);
2350 error = -EFSCORRUPTED;
2351 goto done;
2352 }
2353 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2354 if (error)
2355 goto done;
2356 par_node = par_buf->b_addr;
2357 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2358 if (par_hdr.level != level) {
2359 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2360 XFS_ERRLEVEL_LOW, mp);
2361 error = -EFSCORRUPTED;
2362 goto done;
2363 }
2364 btree = dp->d_ops->node_tree_p(par_node);
2365 entno = 0;
2366 }
2367 /*
2368 * Update the parent entry pointing to the moved block.
2369 */
2370 btree[entno].before = cpu_to_be32(dead_blkno);
2371 xfs_trans_log_buf(tp, par_buf,
2372 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2373 sizeof(btree[entno].before)));
2374 *dead_blknop = last_blkno;
2375 *dead_bufp = last_buf;
2376 return 0;
2377 done:
2378 if (par_buf)
2379 xfs_trans_brelse(tp, par_buf);
2380 if (sib_buf)
2381 xfs_trans_brelse(tp, sib_buf);
2382 xfs_trans_brelse(tp, last_buf);
2383 return error;
2384 }
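
/*
 * For illustration only: the swap above is the on-disk btree analogue of
 * removing from an unsorted array by overwriting the victim with the last
 * element and shrinking by one; freeing the last block never splits the
 * bmap btree, so it needs no space reservation. The array form of the
 * trick, with hypothetical toy_* names:
 */
static int
toy_remove_at(int *arr, int *len, int idx)
{
        if (idx < 0 || idx >= *len)
                return -1;
        /* Move the last element into the hole left by the victim... */
        arr[idx] = arr[*len - 1];
        /* ...then drop the tail slot, which is always cheap to free. */
        (*len)--;
        return 0;
}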
2385
2386 /*
2387 * Remove a btree block from a directory or attribute.
2388 */
2389 int
2390 xfs_da_shrink_inode(
2391 struct xfs_da_args *args,
2392 xfs_dablk_t dead_blkno,
2393 struct xfs_buf *dead_buf)
2394 {
2395 struct xfs_inode *dp;
2396 int done, error, w, count;
2397 struct xfs_trans *tp;
2398
2399 trace_xfs_da_shrink_inode(args);
2400
2401 dp = args->dp;
2402 w = args->whichfork;
2403 tp = args->trans;
2404 count = args->geo->fsbcount;
2405 for (;;) {
2406 /*
2407 * Remove extents. If we get ENOSPC for a dir we have to move
2408 * the last block to the place we want to kill.
2409 */
2410 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2411 xfs_bmapi_aflag(w), 0, &done);
2412 if (error == -ENOSPC) {
2413 if (w != XFS_DATA_FORK)
2414 break;
2415 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2416 &dead_buf);
2417 if (error)
2418 break;
2419 } else {
2420 break;
2421 }
2422 }
2423 xfs_trans_binval(tp, dead_buf);
2424 return error;
2425 }
2426
2427 /*
2428 * See if the mapping(s) for this btree block are valid, i.e.
2429 * don't contain holes, are logically contiguous, and cover the whole range.
2430 */
2431 STATIC int
2432 xfs_da_map_covers_blocks(
2433 int nmap,
2434 xfs_bmbt_irec_t *mapp,
2435 xfs_dablk_t bno,
2436 int count)
2437 {
2438 int i;
2439 xfs_fileoff_t off;
2440
2441 for (i = 0, off = bno; i < nmap; i++) {
2442 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2443 mapp[i].br_startblock == DELAYSTARTBLOCK) {
2444 return 0;
2445 }
2446 if (off != mapp[i].br_startoff) {
2447 return 0;
2448 }
2449 off += mapp[i].br_blockcount;
2450 }
2451 return off == bno + count;
2452 }
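
/*
 * For illustration only: the check above accepts a mapping set only when
 * it starts exactly at bno, contains no holes or delayed allocations, and
 * the extents abut end to end out to bno + count. The same test over a
 * hypothetical flat toy_ext array:
 */
#include <stdbool.h>

struct toy_ext {
        long    startoff;
        int     blockcount;
        bool    is_hole;        /* like HOLESTARTBLOCK or DELAYSTARTBLOCK */
};

static bool
toy_covers(const struct toy_ext *ext, int next, long bno, int count)
{
        long    off = bno;
        int     i;

        for (i = 0; i < next; i++) {
                if (ext[i].is_hole)
                        return false;
                if (ext[i].startoff != off)
                        return false;   /* gap or overlap at off */
                off += ext[i].blockcount;
        }
        return off == bno + count;      /* whole range covered */
}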
2453
2454 /*
2455 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2456 *
2457 * For the single map case, it is assumed that the caller has provided a pointer
2458 * to a valid xfs_buf_map. For the multiple map case, this function will
2459 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2460 * map pointer with the allocated map.
2461 */
2462 static int
2463 xfs_buf_map_from_irec(
2464 struct xfs_mount *mp,
2465 struct xfs_buf_map **mapp,
2466 int *nmaps,
2467 struct xfs_bmbt_irec *irecs,
2468 int nirecs)
2469 {
2470 struct xfs_buf_map *map;
2471 int i;
2472
2473 ASSERT(*nmaps == 1);
2474 ASSERT(nirecs >= 1);
2475
2476 if (nirecs > 1) {
2477 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2478 KM_SLEEP | KM_NOFS);
2479 if (!map)
2480 return -ENOMEM;
2481 *mapp = map;
2482 }
2483
2484 *nmaps = nirecs;
2485 map = *mapp;
2486 for (i = 0; i < *nmaps; i++) {
2487 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2488 irecs[i].br_startblock != HOLESTARTBLOCK);
2489 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2490 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2491 }
2492 return 0;
2493 }
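
/*
 * For illustration only: the function above follows a common "caller
 * supplies one, callee allocates many" convention: *mapp initially points
 * at a single caller-owned map, and only the multi-extent case swaps in a
 * heap allocation, which the caller detects and frees by comparing the
 * pointer against its own map. A userspace sketch of that contract, with
 * hypothetical toy_* names:
 */
#include <stdlib.h>

struct toy_map {
        long    bn;     /* like bm_bn */
        int     len;    /* like bm_len */
};

/* Fill the map array and count; may swap in a heap-allocated array. */
static int
toy_fill_maps(struct toy_map **mapp, int *nmaps, int needed)
{
        if (needed > 1) {
                struct toy_map  *map = calloc(needed, sizeof(*map));

                if (!map)
                        return -1;
                *mapp = map;
        }
        *nmaps = needed;
        return 0;
}

/* Caller pattern: stack map first, free only if it was replaced. */
static int
toy_caller(int needed)
{
        struct toy_map  map, *mapp = &map;
        int             nmaps = 1;
        int             error;

        error = toy_fill_maps(&mapp, &nmaps, needed);
        if (!error) {
                /* ... use entries 0 through nmaps - 1 of mapp here ... */
        }
        if (mapp != &map)
                free(mapp);
        return error;
}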
2494
2495 /*
2496 * Map the block we are given, ready for reading. There are three possible
2497 * return values:
2498 * -1 - will be returned if we land in a hole and mappedbno == -2 so the
2499 * caller knows not to execute a subsequent read.
2500 * 0 - if we mapped the block successfully.
2501 * <0 - negative errno if there was an error.
2502 */
2503 static int
2504 xfs_dabuf_map(
2505 struct xfs_inode *dp,
2506 xfs_dablk_t bno,
2507 xfs_daddr_t mappedbno,
2508 int whichfork,
2509 struct xfs_buf_map **map,
2510 int *nmaps)
2511 {
2512 struct xfs_mount *mp = dp->i_mount;
2513 int nfsb;
2514 int error = 0;
2515 struct xfs_bmbt_irec irec;
2516 struct xfs_bmbt_irec *irecs = &irec;
2517 int nirecs;
2518
2519 ASSERT(map && *map);
2520 ASSERT(*nmaps == 1);
2521
2522 if (whichfork == XFS_DATA_FORK)
2523 nfsb = mp->m_dir_geo->fsbcount;
2524 else
2525 nfsb = mp->m_attr_geo->fsbcount;
2526
2527 /*
2528 * Caller doesn't have a mapping. -2 means don't complain
2529 * if we land in a hole.
2530 */
2531 if (mappedbno == -1 || mappedbno == -2) {
2532 /*
2533 * Optimize the one-block case.
2534 */
2535 if (nfsb != 1)
2536 irecs = kmem_zalloc(sizeof(irec) * nfsb,
2537 KM_SLEEP | KM_NOFS);
2538
2539 nirecs = nfsb;
2540 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2541 &nirecs, xfs_bmapi_aflag(whichfork));
2542 if (error)
2543 goto out;
2544 } else {
2545 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2546 irecs->br_startoff = (xfs_fileoff_t)bno;
2547 irecs->br_blockcount = nfsb;
2548 irecs->br_state = 0;
2549 nirecs = 1;
2550 }
2551
2552 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2553 error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
2554 if (unlikely(error == -EFSCORRUPTED)) {
2555 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2556 int i;
2557 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2558 __func__, (long long)bno,
2559 (long long)dp->i_ino);
2560 for (i = 0; i < nirecs; i++) {
2561 xfs_alert(mp,
2562 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2563 i,
2564 (long long)irecs[i].br_startoff,
2565 (long long)irecs[i].br_startblock,
2566 (long long)irecs[i].br_blockcount,
2567 irecs[i].br_state);
2568 }
2569 }
2570 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2571 XFS_ERRLEVEL_LOW, mp);
2572 }
2573 goto out;
2574 }
2575 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2576 out:
2577 if (irecs != &irec)
2578 kmem_free(irecs);
2579 return error;
2580 }
2581
2582 /*
2583 * Get a buffer for the dir/attr block.
2584 */
2585 int
2586 xfs_da_get_buf(
2587 struct xfs_trans *trans,
2588 struct xfs_inode *dp,
2589 xfs_dablk_t bno,
2590 xfs_daddr_t mappedbno,
2591 struct xfs_buf **bpp,
2592 int whichfork)
2593 {
2594 struct xfs_buf *bp;
2595 struct xfs_buf_map map;
2596 struct xfs_buf_map *mapp;
2597 int nmap;
2598 int error;
2599
2600 *bpp = NULL;
2601 mapp = &map;
2602 nmap = 1;
2603 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2604 &mapp, &nmap);
2605 if (error) {
2606 /* mapping a hole is not an error, but we don't continue */
2607 if (error == -1)
2608 error = 0;
2609 goto out_free;
2610 }
2611
2612 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2613 mapp, nmap, 0);
2614 error = bp ? bp->b_error : -EIO;
2615 if (error) {
2616 if (bp)
2617 xfs_trans_brelse(trans, bp);
2618 goto out_free;
2619 }
2620
2621 *bpp = bp;
2622
2623 out_free:
2624 if (mapp != &map)
2625 kmem_free(mapp);
2626
2627 return error;
2628 }
2629
2630 /*
2631 * Get a buffer for the dir/attr block, fill in the contents.
2632 */
2633 int
2634 xfs_da_read_buf(
2635 struct xfs_trans *trans,
2636 struct xfs_inode *dp,
2637 xfs_dablk_t bno,
2638 xfs_daddr_t mappedbno,
2639 struct xfs_buf **bpp,
2640 int whichfork,
2641 const struct xfs_buf_ops *ops)
2642 {
2643 struct xfs_buf *bp;
2644 struct xfs_buf_map map;
2645 struct xfs_buf_map *mapp;
2646 int nmap;
2647 int error;
2648
2649 *bpp = NULL;
2650 mapp = &map;
2651 nmap = 1;
2652 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2653 &mapp, &nmap);
2654 if (error) {
2655 /* mapping a hole is not an error, but we don't continue */
2656 if (error == -1)
2657 error = 0;
2658 goto out_free;
2659 }
2660
2661 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2662 dp->i_mount->m_ddev_targp,
2663 mapp, nmap, 0, &bp, ops);
2664 if (error)
2665 goto out_free;
2666
2667 if (whichfork == XFS_ATTR_FORK)
2668 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2669 else
2670 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2671 *bpp = bp;
2672 out_free:
2673 if (mapp != &map)
2674 kmem_free(mapp);
2675
2676 return error;
2677 }
2678
2679 /*
2680 * Readahead the dir/attr block.
2681 */
2682 int
2683 xfs_da_reada_buf(
2684 struct xfs_inode *dp,
2685 xfs_dablk_t bno,
2686 xfs_daddr_t mappedbno,
2687 int whichfork,
2688 const struct xfs_buf_ops *ops)
2689 {
2690 struct xfs_buf_map map;
2691 struct xfs_buf_map *mapp;
2692 int nmap;
2693 int error;
2694
2695 mapp = &map;
2696 nmap = 1;
2697 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2698 &mapp, &nmap);
2699 if (error) {
2700 /* mapping a hole is not an error, but we don't continue */
2701 if (error == -1)
2702 error = 0;
2703 goto out_free;
2704 }
2705
2706 mappedbno = mapp[0].bm_bn;
2707 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2708
2709 out_free:
2710 if (mapp != &map)
2711 kmem_free(mapp);
2712
2713 return error;
2714 }