/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_alloc.h"

/*
 * Cursor allocation zone.
 */
kmem_zone_t	*xfs_btree_cur_zone;

/*
 * Btree magic numbers.
 */
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
	{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
	  XFS_FIBT_MAGIC },
	{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
	  XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
#define xfs_btree_magic(cur) \
	xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]

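/*
 * Illustrative note (not part of the original source): the first index of
 * xfs_magics is the CRC flag, the second is the btree type.  For example, a
 * cursor for the inode btree on a v5 (CRC-enabled) filesystem resolves to
 * xfs_magics[1][XFS_BTNUM_INO] == XFS_IBT_CRC_MAGIC, while the same btree on
 * a v4 filesystem picks xfs_magics[0][XFS_BTNUM_INO] == XFS_IBT_MAGIC.
 */
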
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree long form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer for block, if any */
{
	int			lblock_ok = 1; /* block passes checks */
	struct xfs_mount	*mp;	/* file system mount point */

	mp = cur->bc_mp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		lblock_ok = lblock_ok &&
			uuid_equal(&block->bb_u.l.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.l.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	lblock_ok = lblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		block->bb_u.l.bb_leftsib &&
		(block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
		block->bb_u.l.bb_rightsib &&
		(block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) ||
		 XFS_FSB_SANITY_CHECK(mp,
			be64_to_cpu(block->bb_u.l.bb_rightsib)));

	if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_LBLOCK,
			XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* btree short form block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block */
{
	struct xfs_mount	*mp;	/* file system mount point */
	struct xfs_buf		*agbp;	/* buffer for ag. freespace struct */
	struct xfs_agf		*agf;	/* ag. freespace structure */
	xfs_agblock_t		agflen;	/* native ag. freespace length */
	int			sblock_ok = 1; /* block passes checks */

	mp = cur->bc_mp;
	agbp = cur->bc_private.a.agbp;
	agf = XFS_BUF_TO_AGF(agbp);
	agflen = be32_to_cpu(agf->agf_length);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		sblock_ok = sblock_ok &&
			uuid_equal(&block->bb_u.s.bb_uuid,
				   &mp->m_sb.sb_meta_uuid) &&
			block->bb_u.s.bb_blkno == cpu_to_be64(
				bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
	}

	sblock_ok = sblock_ok &&
		be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) &&
		be16_to_cpu(block->bb_level) == level &&
		be16_to_cpu(block->bb_numrecs) <=
			cur->bc_ops->get_maxrecs(cur, level) &&
		(block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
		block->bb_u.s.bb_leftsib &&
		(block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
		 be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
		block->bb_u.s.bb_rightsib;

	if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
			XFS_ERRTAG_BTREE_CHECK_SBLOCK,
			XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
		if (bp)
			trace_xfs_btree_corrupt(bp, _RET_IP_);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Debug routine: check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp)	/* buffer containing block, if any */
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_check_lblock(cur, block, level, bp);
	else
		return xfs_btree_check_sblock(cur, block, level, bp);
}

/*
 * Check that (long) pointer is ok.
 */
int					/* error (0 or EFSCORRUPTED) */
xfs_btree_check_lptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_fsblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLFSBLOCK &&
		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
	return 0;
}

#ifdef DEBUG
/*
 * Check that (short) pointer is ok.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_sptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* btree block disk address */
	int			level)	/* btree block level */
{
	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;

	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
		level > 0 &&
		bno != NULLAGBLOCK &&
		bno != 0 &&
		bno < agblocks);
	return 0;
}

/*
 * Check that block ptr is ok.
 */
STATIC int				/* error (0 or EFSCORRUPTED) */
xfs_btree_check_ptr(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_ptr	*ptr,	/* btree block disk address */
	int			index,	/* offset from ptr to check */
	int			level)	/* btree block level */
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		return xfs_btree_check_lptr(cur,
				be64_to_cpu((&ptr->l)[index]), level);
	} else {
		return xfs_btree_check_sptr(cur,
				be32_to_cpu((&ptr->s)[index]), level);
	}
}
#endif

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * long-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_lblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	if (bip)
		block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
}

bool
xfs_btree_lblock_verify_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) {
		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn)))
			return false;
		return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF);
	}

	return true;
}

/*
 * Calculate CRC on the whole btree block and stuff it into the
 * short-form btree header.
 *
 * Prior to calculating the CRC, pull the LSN out of the buffer log item and
 * put it into the buffer so recovery knows what the last modification was
 * that made it to disk.
 */
void
xfs_btree_sblock_calc_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
		return;
	if (bip)
		block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}

bool
xfs_btree_sblock_verify_crc(
	struct xfs_buf		*bp)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) {
		if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
			return false;
		return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
	}

	return true;
}

static int
xfs_btree_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	int			error;

	error = cur->bc_ops->free_block(cur, bp);
	if (!error) {
		xfs_trans_binval(cur->bc_tp, bp);
		XFS_BTREE_STATS_INC(cur, free);
	}
	return error;
}

/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		error)		/* del because of error */
{
	int		i;		/* btree level */

	/*
	 * Clear the buffer pointers, and release the buffers.
	 * If we're doing this in the face of an error, we
	 * need to make sure to inspect all of the entries
	 * in the bc_bufs array for buffers to be unlocked.
	 * This is because some of the btree code works from
	 * level n down to 0, and if we get an error along
	 * the way we won't have initialized all the entries
	 * down to 0.
	 */
	for (i = 0; i < cur->bc_nlevels; i++) {
		if (cur->bc_bufs[i])
			xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
		else if (!error)
			break;
	}
	/*
	 * Can't free a bmap cursor without having dealt with the
	 * allocated indirect blocks' accounting.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);
	/*
	 * Free the cursor.
	 */
	kmem_zone_free(xfs_btree_cur_zone, cur);
}

/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	xfs_btree_cur_t	*cur,		/* input cursor */
	xfs_btree_cur_t	**ncur)		/* output cursor */
{
	xfs_buf_t	*bp;		/* btree block's buffer pointer */
	int		error;		/* error return value */
	int		i;		/* level number of btree block */
	xfs_mount_t	*mp;		/* mount structure for filesystem */
	xfs_btree_cur_t	*new;		/* new cursor value */
	xfs_trans_t	*tp;		/* transaction pointer, can be NULL */

	tp = cur->bc_tp;
	mp = cur->bc_mp;

	/*
	 * Allocate a new cursor like the old one.
	 */
	new = cur->bc_ops->dup_cursor(cur);

	/*
	 * Copy the record currently in the cursor.
	 */
	new->bc_rec = cur->bc_rec;

	/*
	 * For each level current, re-get the buffer and copy the ptr value.
	 */
	for (i = 0; i < new->bc_nlevels; i++) {
		new->bc_ptrs[i] = cur->bc_ptrs[i];
		new->bc_ra[i] = cur->bc_ra[i];
		bp = cur->bc_bufs[i];
		if (bp) {
			error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
						   XFS_BUF_ADDR(bp), mp->m_bsize,
						   0, &bp,
						   cur->bc_ops->buf_ops);
			if (error) {
				xfs_btree_del_cursor(new, error);
				*ncur = NULL;
				return error;
			}
		}
		new->bc_bufs[i] = bp;
	}
	*ncur = new;
	return 0;
}

/*
 * XFS btree block layout and addressing:
 *
 * There are two types of blocks in the btree: leaf and non-leaf blocks.
 *
 * A leaf block starts with a header and is followed by records containing
 * the values.  A non-leaf block also starts with the same header, and
 * then first contains lookup keys followed by an equal number of pointers
 * to the btree blocks at the previous level.
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Leaf:	| header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 *		+--------+-------+-------+-------+-------+-------+-------+
 * Non-Leaf:	| header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N |
 *		+--------+-------+-------+-------+-------+-------+-------+
 *
 * The header is called struct xfs_btree_block for reasons better left unknown
 * and comes in different versions for short (32bit) and long (64bit) block
 * pointers.  The record and key structures are defined by the btree instances
 * and opaque to the btree core.  The block pointers are simple disk endian
 * integers, available in a short (32bit) and long (64bit) variant.
 *
 * The helpers below calculate the offset of a given record, key or pointer
 * into a btree block (xfs_btree_*_offset) or return a pointer to the given
 * record, key or pointer (xfs_btree_*_addr).  Note that all addressing
 * inside the btree block is done using indices starting at one, not zero!
 *
 * If XFS_BTREE_OVERLAPPING is set, then this btree supports keys containing
 * overlapping intervals.  In such a tree, records are still sorted lowest to
 * highest and indexed by the smallest key value that refers to the record.
 * However, nodes are different: each pointer has two associated keys -- one
 * indexing the lowest key available in the block(s) below (the same behavior
 * as the key in a regular btree) and another indexing the highest key
 * available in the block(s) below.  Because records are /not/ sorted by the
 * highest key, all leaf block updates require us to compute the highest key
 * that matches any record in the leaf and to recursively update the high keys
 * in the nodes going further up in the tree, if necessary.  Nodes look like
 * this:
 *
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 * Non-Leaf:	| header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
 *		+--------+-----+-----+-----+-----+-----+-------+-------+-----+
 *
 * To perform an interval query on an overlapped tree, perform the usual
 * depth-first search and use the low and high keys to decide if we can skip
 * that particular node.  If a leaf node is reached, return the records that
 * intersect the interval.  Note that an interval query may return numerous
 * entries.  For a non-overlapped tree, simply search for the record associated
 * with the lowest key and iterate forward until a non-matching record is
 * found.  Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by
 * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discusses this in
 * more detail.
 *
 * Why do we care about overlapping intervals?  Let's say you have a bunch of
 * reverse mapping records on a reflink filesystem:
 *
 * 1: +- file A startblock B offset C length D -----------+
 * 2:      +- file E startblock F offset G length H --------------+
 * 3:      +- file I startblock F offset J length K --+
 * 4:                                                        +- file L... --+
 *
 * Now say we want to map block (B+D) into file A at offset (C+D).  Ideally,
 * we'd simply increment the length of record 1.  But how do we find the record
 * that ends at (B+D-1) (i.e. record 1)?  A LE lookup of (B+D-1) would return
 * record 3 because the keys are ordered first by startblock.  An interval
 * query would return records 1 and 2 because they both overlap (B+D-1), and
 * from that we can pick out record 1 as the appropriate left neighbor.
 *
 * In the non-overlapped case you can do a LE lookup and decrement the cursor
 * because a record's interval must end before the next record.
 */

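/*
 * Illustrative sketch (not part of the original source): when walking an
 * overlapped tree for a query interval [lo, hi], a node pointer whose key
 * pair (lo_key, hi_key) satisfies
 *
 *	hi_key < lo  ||  lo_key > hi
 *
 * cannot contain any intersecting records and may be skipped; every other
 * pointer must be descended into, which is why an interval query can visit
 * several subtrees and return numerous records.
 */
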
/*
 * Return size of the btree block header for this btree instance.
 */
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
			return XFS_BTREE_LBLOCK_CRC_LEN;
		return XFS_BTREE_LBLOCK_LEN;
	}
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
		return XFS_BTREE_SBLOCK_CRC_LEN;
	return XFS_BTREE_SBLOCK_LEN;
}

/*
 * Return size of btree block pointers for this btree instance.
 */
static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
{
	return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
		sizeof(__be64) : sizeof(__be32);
}

/*
 * Calculate offset of the n-th record in a btree block.
 */
STATIC size_t
xfs_btree_rec_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->rec_len;
}

/*
 * Calculate offset of the n-th key in a btree block.
 */
STATIC size_t
xfs_btree_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len;
}

/*
 * Calculate offset of the n-th high key in a btree block.
 */
STATIC size_t
xfs_btree_high_key_offset(
	struct xfs_btree_cur	*cur,
	int			n)
{
	return xfs_btree_block_len(cur) +
		(n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
}

/*
 * Calculate offset of the n-th block pointer in a btree block.
 */
STATIC size_t
xfs_btree_ptr_offset(
	struct xfs_btree_cur	*cur,
	int			n,
	int			level)
{
	return xfs_btree_block_len(cur) +
		cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
		(n - 1) * xfs_btree_ptr_len(cur);
}

/*
 * Return a pointer to the n-th record in the btree block.
 */
STATIC union xfs_btree_rec *
xfs_btree_rec_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_rec *)
		((char *)block + xfs_btree_rec_offset(cur, n));
}

/*
 * Return a pointer to the n-th key in the btree block.
 */
STATIC union xfs_btree_key *
xfs_btree_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_key *)
		((char *)block + xfs_btree_key_offset(cur, n));
}

/*
 * Return a pointer to the n-th high key in the btree block.
 */
STATIC union xfs_btree_key *
xfs_btree_high_key_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	return (union xfs_btree_key *)
		((char *)block + xfs_btree_high_key_offset(cur, n));
}

/*
 * Return a pointer to the n-th block pointer in the btree block.
 */
STATIC union xfs_btree_ptr *
xfs_btree_ptr_addr(
	struct xfs_btree_cur	*cur,
	int			n,
	struct xfs_btree_block	*block)
{
	int			level = xfs_btree_get_level(block);

	ASSERT(block->bb_level != 0);

	return (union xfs_btree_ptr *)
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}

/*
 * Get the root block which is stored in the inode.
 *
 * For now this btree implementation assumes the btree root is always
 * stored in the if_broot field of an inode fork.
 */
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{
	struct xfs_ifork	*ifp;

	ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
	return (struct xfs_btree_block *)ifp->if_broot;
}

/*
 * Retrieve the block pointer from the cursor at the given level.
 * This may be an inode btree root or from a buffer.
 */
STATIC struct xfs_btree_block *		/* generic btree block pointer */
xfs_btree_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in btree */
	struct xfs_buf		**bpp)	/* buffer containing the block */
{
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*bpp = NULL;
		return xfs_btree_get_iroot(cur);
	}

	*bpp = cur->bc_bufs[level];
	return XFS_BUF_TO_BLOCK(*bpp);
}

/*
 * Get a buffer for the block, return it with no data read.
 * Long-form addressing.
 */
xfs_buf_t *				/* buffer for fsbno */
xfs_btree_get_bufl(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_fsblock_t	fsbno,		/* file system block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t		d;	/* real disk block address */

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}

/*
 * Get a buffer for the block, return it with no data read.
 * Short-form addressing.
 */
xfs_buf_t *				/* buffer for agno/agbno */
xfs_btree_get_bufs(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_agblock_t	agbno,		/* allocation group block number */
	uint		lock)		/* lock flags for get_buf */
{
	xfs_daddr_t		d;	/* real disk block address */

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock);
}

/*
 * Check for the cursor referring to the last block at the given level.
 */
int					/* 1=is last block, 0=not last block */
xfs_btree_islastblock(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to check */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
	else
		return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}

/*
 * Change the cursor to point to the first record at the given level.
 * Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_firstrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to 1, that's the first record/key.
	 */
	cur->bc_ptrs[level] = 1;
	return 1;
}

/*
 * Change the cursor to point to the last record in the current block
 * at the given level.  Other levels are unaffected.
 */
STATIC int				/* success=1, failure=0 */
xfs_btree_lastrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level)	/* level to change */
{
	struct xfs_btree_block	*block;	/* generic btree block pointer */
	xfs_buf_t		*bp;	/* buffer containing block */

	/*
	 * Get the block pointer for this level.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	xfs_btree_check_block(cur, block, level, bp);
	/*
	 * It's empty, there is no such record.
	 */
	if (!block->bb_numrecs)
		return 0;
	/*
	 * Set the ptr value to numrecs, that's the last record/key.
	 */
	cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs);
	return 1;
}

/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	__int64_t	fields,		/* bitmask of fields */
	const short	*offsets,	/* table of field offsets */
	int		nbits,		/* number of bits to inspect */
	int		*first,		/* output: first byte offset */
	int		*last)		/* output: last byte offset */
{
	int		i;		/* current bit number */
	__int64_t	imask;		/* mask for current bit number */

	ASSERT(fields != 0);
	/*
	 * Find the lowest bit, so the first byte offset.
	 */
	for (i = 0, imask = 1LL; ; i++, imask <<= 1) {
		if (imask & fields) {
			*first = offsets[i];
			break;
		}
	}
	/*
	 * Find the highest bit, so the last byte offset.
	 */
	for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) {
		if (imask & fields) {
			*last = offsets[i + 1] - 1;
			break;
		}
	}
}

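/*
 * Illustrative note (not part of the original source): because *last is taken
 * from offsets[i + 1] - 1, the offsets table must carry one extra sentinel
 * entry holding the total header length after the last field offset; the
 * soffsets/loffsets tables in xfs_btree_log_block() below end with
 * XFS_BTREE_SBLOCK_CRC_LEN / XFS_BTREE_LBLOCK_CRC_LEN for exactly this
 * reason.  E.g. fields == (XFS_BB_MAGIC | XFS_BB_LEVEL) yields the byte
 * range [offsetof(bb_magic), offsetof(bb_numrecs) - 1].
 */
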
/*
 * Get a buffer for the block, return it read in.
 * Long-form addressing.
 */
int
xfs_btree_read_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_fsblock_t		fsbno,		/* file system block number */
	uint			lock,		/* lock flags for read_buf */
	struct xfs_buf		**bpp,		/* buffer for fsbno */
	int			refval,		/* ref count value for buffer */
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;		/* return value */
	xfs_daddr_t		d;		/* real disk block address */
	int			error;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
				   mp->m_bsize, lock, &bp, ops);
	if (error)
		return error;
	if (bp)
		xfs_buf_set_ref(bp, refval);
	*bpp = bp;
	return 0;
}

/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Long-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_fsblock_t		fsbno,		/* file system block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}

/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Short-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufs(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_agblock_t		agbno,		/* allocation group block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}

STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);


	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}

STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));

		return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
	} else {
		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
		ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));

		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					be32_to_cpu(ptr->s));
	}
}

/*
 * Readahead @count btree blocks at the given @ptr location.
 *
 * We don't need to care about long or short form btrees here as we have a
 * method of converting the ptr directly to a daddr available to us.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{
	xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
			  xfs_btree_ptr_to_daddr(cur, ptr),
			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}

/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			lev,	/* level in btree */
	xfs_buf_t		*bp)	/* new buffer to set */
{
	struct xfs_btree_block	*b;	/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}

STATIC int
xfs_btree_ptr_is_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return ptr->l == cpu_to_be64(NULLFSBLOCK);
	else
		return ptr->s == cpu_to_be32(NULLAGBLOCK);
}

STATIC void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(NULLFSBLOCK);
	else
		ptr->s = cpu_to_be32(NULLAGBLOCK);
}

/*
 * Get/set/init sibling pointers
 */
STATIC void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->l = block->bb_u.l.bb_rightsib;
		else
			ptr->l = block->bb_u.l.bb_leftsib;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->s = block->bb_u.s.bb_rightsib;
		else
			ptr->s = block->bb_u.s.bb_leftsib;
	}
}

STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}

void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,
	__u32			magic,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* owner is a 32 bit value on short blocks */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}

void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	__u32		magic,
	__u16		level,
	__u16		numrecs,
	__u64		owner,
	unsigned int	flags)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 magic, level, numrecs, owner, flags);
}

STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64			owner;

	/*
	 * we can pull the owner from the cursor right now as the different
	 * owners align directly with the pointer size of the btree. This may
	 * change in future, but is safe for current users of the generic btree
	 * code.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 xfs_btree_magic(cur), level, numrecs,
				 owner, cur->bc_flags);
}

/*
 * Return true if ptr is the last record in the btree and
 * we need to track updates to this record.  The decision
 * will be further refined in the update_lastrec method.
 */
STATIC int
xfs_btree_is_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level)
{
	union xfs_btree_ptr	ptr;

	if (level > 0)
		return 0;
	if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
		return 0;

	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &ptr))
		return 0;
	return 1;
}

STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	else {
		ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	}
}

STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	default:
		ASSERT(0);
	}
}

STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Copy keys from one btree block to another.
 */
STATIC void
xfs_btree_copy_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*dst_key,
	union xfs_btree_key	*src_key,
	int			numkeys)
{
	ASSERT(numkeys >= 0);
	memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
}

/*
 * Copy records from one btree block to another.
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{
	ASSERT(numrecs >= 0);
	memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Copy block pointers from one btree block to another.
 */
STATIC void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	union xfs_btree_ptr	*src_ptr,
	int			numptrs)
{
	ASSERT(numptrs >= 0);
	memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
}

/*
 * Shift keys one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{
	char			*dst_key;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_key = (char *)key + (dir * cur->bc_ops->key_len);
	memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}

/*
 * Shift records one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,
	int			numrecs)
{
	char			*dst_rec;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
	memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Shift block pointers one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,
	int			numptrs)
{
	char			*dst_ptr;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
	memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
}

/*
 * Log key values from the btree block.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log record values from the btree block.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,
	int			last)
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log fields from a btree block header.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * We don't log the CRC when updating a btree
			 * block but instead recreate it during log
			 * recovery.  As the log buffers have checksums
			 * of their own this is safe and avoids logging a crc
			 * update in a lot of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

STATIC int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp) /* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level is for the disk address we are
	 * looking for, re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	xfs_btree_setbuf(cur, level, bp);
	return 0;
}

/*
 * Get current search key.  For level 0 we don't actually have a key
 * structure so we make one up from the record.  For all other levels
 * we just return the right key.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)
{
	if (level == 0) {
		cur->bc_ops->init_key_from_rec(kp,
				xfs_btree_rec_addr(cur, keyno, block));
		return kp;
	}

	return xfs_btree_key_addr(cur, keyno, block);
}

/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if we can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (cur->bc_nlevels == 0)
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
1921 if (dir == XFS_LOOKUP_GE &&
1922 keyno > xfs_btree_get_numrecs(block) &&
1923 !xfs_btree_ptr_is_null(cur, &ptr)) {
1924 int i;
1925
1926 cur->bc_ptrs[0] = keyno;
1927 error = xfs_btree_increment(cur, 0, &i);
1928 if (error)
1929 goto error0;
19ebedcf 1930 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
b194c7d8
BN
1931 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1932 *stat = 1;
1933 return 0;
1934 }
1935 } else if (dir == XFS_LOOKUP_LE && diff > 0)
1936 keyno--;
1937 cur->bc_ptrs[0] = keyno;
1938
1939 /* Return if we succeeded or not. */
1940 if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
1941 *stat = 0;
1942 else if (dir != XFS_LOOKUP_EQ || diff == 0)
1943 *stat = 1;
1944 else
1945 *stat = 0;
1946 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1947 return 0;
1948
1949error0:
1950 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1951 return error;
1952}
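/*
 * Illustrative sketch, not part of xfs_btree.c: the binary search above,
 * reduced to its bare convention.  Slots are 1-based, and the compare
 * callback follows the ->key_diff() sign convention used in the loop:
 * negative means the slot key is less than the search key (move right),
 * positive means it is greater (move left), zero means an exact match.
 * The int slots and the cmp callback are hypothetical stand-ins for the
 * real key/record machinery.
 */
static int
example_btree_binary_search(
	int	nrecs,			/* number of 1-based slots */
	int	(*cmp)(int slot),	/* same sign convention as key_diff */
	int	*slotp)			/* last slot probed */
{
	int	low = 1;
	int	high = nrecs;
	int	keyno = 0;
	int	diff = 1;

	while (low <= high) {
		keyno = (low + high) >> 1;
		diff = cmp(keyno);
		if (diff < 0)		/* slot key < search key */
			low = keyno + 1;
		else if (diff > 0)	/* slot key > search key */
			high = keyno - 1;
		else
			break;		/* exact match */
	}
	*slotp = keyno;
	return diff == 0;		/* 1 on exact match, 0 otherwise */
}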
1953
13e831e0
DW
1954/* Find the high key storage area from a regular key. */
1955STATIC union xfs_btree_key *
1956xfs_btree_high_key_from_key(
1957 struct xfs_btree_cur *cur,
1958 union xfs_btree_key *key)
1959{
1960 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
1961 return (union xfs_btree_key *)((char *)key +
1962 (cur->bc_ops->key_len / 2));
1963}
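/*
 * Schematic only, not part of xfs_btree.c: for an overlapping btree the
 * cur->bc_ops->key_len bytes of a key slot are treated as two halves,
 * the low key first and the high key second, which is all the pointer
 * arithmetic in xfs_btree_high_key_from_key() relies on.  The struct and
 * its 8-byte halves below are a hypothetical illustration; the real
 * per-btree key unions are defined by each btree type.
 */
struct example_overlapped_key {
	char	low[8];		/* first key_len/2 bytes: low key */
	char	high[8];	/* last key_len/2 bytes: high key */
};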
1964
64dbe047
DW
1965/* Determine the low (and high if overlapped) keys of a leaf block */
1966STATIC void
1967xfs_btree_get_leaf_keys(
13e831e0
DW
1968 struct xfs_btree_cur *cur,
1969 struct xfs_btree_block *block,
1970 union xfs_btree_key *key)
1971{
13e831e0
DW
1972 union xfs_btree_key max_hkey;
1973 union xfs_btree_key hkey;
64dbe047 1974 union xfs_btree_rec *rec;
13e831e0 1975 union xfs_btree_key *high;
64dbe047 1976 int n;
13e831e0 1977
13e831e0
DW
1978 rec = xfs_btree_rec_addr(cur, 1, block);
1979 cur->bc_ops->init_key_from_rec(key, rec);
1980
64dbe047
DW
1981 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
1982
1983 cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
1984 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
1985 rec = xfs_btree_rec_addr(cur, n, block);
1986 cur->bc_ops->init_high_key_from_rec(&hkey, rec);
1987 if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
1988 > 0)
1989 max_hkey = hkey;
1990 }
13e831e0 1991
64dbe047
DW
1992 high = xfs_btree_high_key_from_key(cur, key);
1993 memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
1994 }
13e831e0
DW
1995}
1996
64dbe047
DW
1997/* Determine the low (and high if overlapped) keys of a node block */
1998STATIC void
1999xfs_btree_get_node_keys(
13e831e0
DW
2000 struct xfs_btree_cur *cur,
2001 struct xfs_btree_block *block,
2002 union xfs_btree_key *key)
2003{
13e831e0
DW
2004 union xfs_btree_key *hkey;
2005 union xfs_btree_key *max_hkey;
2006 union xfs_btree_key *high;
64dbe047 2007 int n;
13e831e0 2008
64dbe047
DW
2009 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2010 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2011 cur->bc_ops->key_len / 2);
2012
2013 max_hkey = xfs_btree_high_key_addr(cur, 1, block);
2014 for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
2015 hkey = xfs_btree_high_key_addr(cur, n, block);
2016 if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
2017 max_hkey = hkey;
2018 }
13e831e0 2019
64dbe047
DW
2020 high = xfs_btree_high_key_from_key(cur, key);
2021 memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
2022 } else {
2023 memcpy(key, xfs_btree_key_addr(cur, 1, block),
2024 cur->bc_ops->key_len);
13e831e0 2025 }
13e831e0
DW
2026}
2027
a3c9cb10
DW
2028/* Derive the keys for any btree block. */
2029STATIC void
2030xfs_btree_get_keys(
2031 struct xfs_btree_cur *cur,
2032 struct xfs_btree_block *block,
2033 union xfs_btree_key *key)
2034{
2035 if (be16_to_cpu(block->bb_level) == 0)
64dbe047 2036 xfs_btree_get_leaf_keys(cur, block, key);
a3c9cb10 2037 else
64dbe047 2038 xfs_btree_get_node_keys(cur, block, key);
a3c9cb10
DW
2039}
2040
b194c7d8 2041/*
a3c9cb10
DW
2042 * Decide if we need to update the parent keys of a btree block. For
2043 * a standard btree this is only necessary if we're updating the first
13e831e0
DW
2044 * record/key. For an overlapping btree, we must always update the
2045 * keys because the highest key can be in any of the records or keys
2046 * in the block.
b194c7d8 2047 */
a3c9cb10
DW
2048static inline bool
2049xfs_btree_needs_key_update(
2050 struct xfs_btree_cur *cur,
2051 int ptr)
2052{
13e831e0
DW
2053 return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
2054}
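/*
 * Worked example (illustrative only): suppose a leaf in an overlapping
 * btree holds the intervals [1,5], [2,9] and [3,4].  The parent's low key
 * is 1, taken from the first record, but the parent's high key is 9,
 * taken from the *second* record.  Rewriting record 2 (ptr == 2) can
 * therefore change the parent keys even though ptr != 1, which is why
 * overlapping btrees always take the key-update path above.
 */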
2055
2056/*
2057 * Update the low and high parent keys of the given level, progressing
2058 * towards the root. If force_all is false, stop if the keys for a given
2059 * level do not need updating.
2060 */
2061STATIC int
2062__xfs_btree_updkeys(
2063 struct xfs_btree_cur *cur,
2064 int level,
2065 struct xfs_btree_block *block,
2066 struct xfs_buf *bp0,
2067 bool force_all)
2068{
45413937 2069 union xfs_btree_key key; /* keys from current level */
13e831e0
DW
2070 union xfs_btree_key *lkey; /* low key of the current level */
2071 union xfs_btree_key *hkey;
2072 union xfs_btree_key *nlkey; /* keys from the next level up */
2073 union xfs_btree_key *nhkey;
2074 struct xfs_buf *bp;
2075 int ptr;
2076
2077 ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
2078
2079 /* Exit if there aren't any parent levels to update. */
2080 if (level + 1 >= cur->bc_nlevels)
2081 return 0;
2082
2083 trace_xfs_btree_updkeys(cur, level, bp0);
2084
45413937 2085 lkey = &key;
13e831e0
DW
2086 hkey = xfs_btree_high_key_from_key(cur, lkey);
2087 xfs_btree_get_keys(cur, block, lkey);
2088 for (level++; level < cur->bc_nlevels; level++) {
2089#ifdef DEBUG
2090 int error;
2091#endif
2092 block = xfs_btree_get_block(cur, level, &bp);
2093 trace_xfs_btree_updkeys(cur, level, bp);
2094#ifdef DEBUG
2095 error = xfs_btree_check_block(cur, block, level, bp);
2096 if (error) {
2097 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2098 return error;
2099 }
2100#endif
2101 ptr = cur->bc_ptrs[level];
2102 nlkey = xfs_btree_key_addr(cur, ptr, block);
2103 nhkey = xfs_btree_high_key_addr(cur, ptr, block);
2104 if (!force_all &&
2105 !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
2106 cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
2107 break;
2108 xfs_btree_copy_keys(cur, nlkey, lkey, 1);
2109 xfs_btree_log_keys(cur, bp, ptr, ptr);
2110 if (level + 1 >= cur->bc_nlevels)
2111 break;
64dbe047 2112 xfs_btree_get_node_keys(cur, block, lkey);
13e831e0
DW
2113 }
2114
2115 return 0;
2116}
2117
13e831e0
DW
2118/* Update all the keys from some level in cursor back to the root. */
2119STATIC int
2120xfs_btree_updkeys_force(
2121 struct xfs_btree_cur *cur,
2122 int level)
2123{
2124 struct xfs_buf *bp;
2125 struct xfs_btree_block *block;
2126
2127 block = xfs_btree_get_block(cur, level, &bp);
2128 return __xfs_btree_updkeys(cur, level, block, bp, true);
a3c9cb10
DW
2129}
2130
2131/*
2132 * Update the parent keys of the given level, progressing towards the root.
2133 */
64dbe047 2134STATIC int
a3c9cb10 2135xfs_btree_update_keys(
b194c7d8 2136 struct xfs_btree_cur *cur,
b194c7d8
BN
2137 int level)
2138{
2139 struct xfs_btree_block *block;
2140 struct xfs_buf *bp;
2141 union xfs_btree_key *kp;
a3c9cb10 2142 union xfs_btree_key key;
b194c7d8
BN
2143 int ptr;
2144
64dbe047
DW
2145 ASSERT(level >= 0);
2146
2147 block = xfs_btree_get_block(cur, level, &bp);
2148 if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
2149 return __xfs_btree_updkeys(cur, level, block, bp, false);
13e831e0 2150
b194c7d8
BN
2151 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2152 XFS_BTREE_TRACE_ARGIK(cur, level, &key);
2153
b194c7d8
BN
2154 /*
2155 * Go up the tree from this level toward the root.
2156 * At each level, update the key value to the value input.
2157 * Stop when we reach a level where the cursor isn't pointing
2158 * at the first entry in the block.
2159 */
a3c9cb10
DW
2160 xfs_btree_get_keys(cur, block, &key);
2161 for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
b194c7d8
BN
2162#ifdef DEBUG
2163 int error;
2164#endif
2165 block = xfs_btree_get_block(cur, level, &bp);
2166#ifdef DEBUG
2167 error = xfs_btree_check_block(cur, block, level, bp);
2168 if (error) {
2169 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2170 return error;
2171 }
2172#endif
2173 ptr = cur->bc_ptrs[level];
2174 kp = xfs_btree_key_addr(cur, ptr, block);
a3c9cb10 2175 xfs_btree_copy_keys(cur, kp, &key, 1);
b194c7d8
BN
2176 xfs_btree_log_keys(cur, bp, ptr, ptr);
2177 }
2178
2179 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2180 return 0;
2181}
2182
2183/*
2184 * Update the record referred to by cur to the value in the
2185 * given record. This either works (return 0) or gets an
2186 * EFSCORRUPTED error.
2187 */
2188int
2189xfs_btree_update(
2190 struct xfs_btree_cur *cur,
2191 union xfs_btree_rec *rec)
2192{
2193 struct xfs_btree_block *block;
2194 struct xfs_buf *bp;
2195 int error;
2196 int ptr;
2197 union xfs_btree_rec *rp;
2198
2199 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2200 XFS_BTREE_TRACE_ARGR(cur, rec);
2201
2202 /* Pick up the current block. */
2203 block = xfs_btree_get_block(cur, 0, &bp);
2204
2205#ifdef DEBUG
2206 error = xfs_btree_check_block(cur, block, 0, bp);
2207 if (error)
2208 goto error0;
2209#endif
2210 /* Get the address of the rec to be updated. */
2211 ptr = cur->bc_ptrs[0];
2212 rp = xfs_btree_rec_addr(cur, ptr, block);
2213
2214 /* Fill in the new contents and log them. */
2215 xfs_btree_copy_recs(cur, rp, rec, 1);
2216 xfs_btree_log_recs(cur, bp, ptr, ptr);
2217
2218 /*
2219 * If we are tracking the last record in the tree and
2220 * we are at the far right edge of the tree, update it.
2221 */
2222 if (xfs_btree_is_lastrec(cur, block, 0)) {
2223 cur->bc_ops->update_lastrec(cur, block, rec,
2224 ptr, LASTREC_UPDATE);
2225 }
2226
13e831e0 2227 /* Pass new key value up to our parent. */
a3c9cb10 2228 if (xfs_btree_needs_key_update(cur, ptr)) {
64dbe047 2229 error = xfs_btree_update_keys(cur, 0);
b194c7d8
BN
2230 if (error)
2231 goto error0;
2232 }
2233
2234 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2235 return 0;
2236
2237error0:
2238 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2239 return error;
2240}
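/*
 * Illustrative caller sketch, not part of xfs_btree.c: a record update is
 * normally "position the cursor, then overwrite in place".  Loading the
 * search key into the cursor is btree-specific and done by per-btree
 * wrappers, so it is assumed to have happened before this is called.
 */
static int
example_update_record(
	struct xfs_btree_cur	*cur,		/* cursor already primed with
						   the search key */
	union xfs_btree_rec	*new_rec)	/* new record contents */
{
	int			stat;
	int			error;

	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (error)
		return error;
	if (!stat)
		return -ENOENT;		/* no exact match found */

	/* Overwrite the record the cursor now points at. */
	return xfs_btree_update(cur, new_rec);
}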
2241
2242/*
2243 * Move 1 record left from cur/level if possible.
2244 * Update cur to reflect the new path.
2245 */
2246STATIC int /* error */
2247xfs_btree_lshift(
2248 struct xfs_btree_cur *cur,
2249 int level,
2250 int *stat) /* success/failure */
2251{
b194c7d8
BN
2252 struct xfs_buf *lbp; /* left buffer pointer */
2253 struct xfs_btree_block *left; /* left btree block */
2254 int lrecs; /* left record count */
2255 struct xfs_buf *rbp; /* right buffer pointer */
2256 struct xfs_btree_block *right; /* right btree block */
13e831e0 2257 struct xfs_btree_cur *tcur; /* temporary btree cursor */
b194c7d8
BN
2258 int rrecs; /* right record count */
2259 union xfs_btree_ptr lptr; /* left btree pointer */
2260 union xfs_btree_key *rkp = NULL; /* right btree key */
2261 union xfs_btree_ptr *rpp = NULL; /* right address pointer */
2262 union xfs_btree_rec *rrp = NULL; /* right record pointer */
2263 int error; /* error return value */
13e831e0 2264 int i;
b194c7d8
BN
2265
2266 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2267 XFS_BTREE_TRACE_ARGI(cur, level);
2268
2269 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2270 level == cur->bc_nlevels - 1)
2271 goto out0;
2272
2273 /* Set up variables for this block as "right". */
2274 right = xfs_btree_get_block(cur, level, &rbp);
2275
2276#ifdef DEBUG
2277 error = xfs_btree_check_block(cur, right, level, rbp);
2278 if (error)
2279 goto error0;
2280#endif
2281
2282 /* If we've got no left sibling then we can't shift an entry left. */
2283 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2284 if (xfs_btree_ptr_is_null(cur, &lptr))
2285 goto out0;
2286
2287 /*
2288 * If the cursor entry is the one that would be moved, don't
2289 * do it... it's too complicated.
2290 */
2291 if (cur->bc_ptrs[level] <= 1)
2292 goto out0;
2293
2294 /* Set up the left neighbor as "left". */
ff105f75 2295 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
b194c7d8
BN
2296 if (error)
2297 goto error0;
2298
2299 /* If it's full, it can't take another entry. */
2300 lrecs = xfs_btree_get_numrecs(left);
2301 if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
2302 goto out0;
2303
2304 rrecs = xfs_btree_get_numrecs(right);
2305
2306 /*
2307 * We add one entry to the left side and remove one from the right side.
56b2de80 2308 * Account for it here; the changes will be updated on disk and logged
b194c7d8
BN
2309 * later.
2310 */
2311 lrecs++;
2312 rrecs--;
2313
2314 XFS_BTREE_STATS_INC(cur, lshift);
2315 XFS_BTREE_STATS_ADD(cur, moves, 1);
2316
2317 /*
2318 * If non-leaf, copy a key and a ptr to the left block.
2319 * Log the changes to the left block.
2320 */
2321 if (level > 0) {
2322 /* It's a non-leaf. Move keys and pointers. */
2323 union xfs_btree_key *lkp; /* left btree key */
2324 union xfs_btree_ptr *lpp; /* left address pointer */
2325
2326 lkp = xfs_btree_key_addr(cur, lrecs, left);
2327 rkp = xfs_btree_key_addr(cur, 1, right);
2328
2329 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2330 rpp = xfs_btree_ptr_addr(cur, 1, right);
2331#ifdef DEBUG
2332 error = xfs_btree_check_ptr(cur, rpp, 0, level);
2333 if (error)
2334 goto error0;
2335#endif
2336 xfs_btree_copy_keys(cur, lkp, rkp, 1);
2337 xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
2338
2339 xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
2340 xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
2341
2342 ASSERT(cur->bc_ops->keys_inorder(cur,
2343 xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
2344 } else {
2345 /* It's a leaf. Move records. */
2346 union xfs_btree_rec *lrp; /* left record pointer */
2347
2348 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2349 rrp = xfs_btree_rec_addr(cur, 1, right);
2350
2351 xfs_btree_copy_recs(cur, lrp, rrp, 1);
2352 xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
2353
2354 ASSERT(cur->bc_ops->recs_inorder(cur,
2355 xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
2356 }
2357
2358 xfs_btree_set_numrecs(left, lrecs);
2359 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2360
2361 xfs_btree_set_numrecs(right, rrecs);
2362 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2363
2364 /*
2365 * Slide the contents of right down one entry.
2366 */
2367 XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
2368 if (level > 0) {
2369 /* It's a nonleaf. operate on keys and ptrs */
2370#ifdef DEBUG
2371 int i; /* loop index */
2372
2373 for (i = 0; i < rrecs; i++) {
2374 error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
2375 if (error)
2376 goto error0;
2377 }
2378#endif
2379 xfs_btree_shift_keys(cur,
2380 xfs_btree_key_addr(cur, 2, right),
2381 -1, rrecs);
2382 xfs_btree_shift_ptrs(cur,
2383 xfs_btree_ptr_addr(cur, 2, right),
2384 -1, rrecs);
2385
2386 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2387 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2388 } else {
2389 /* It's a leaf. operate on records */
2390 xfs_btree_shift_recs(cur,
2391 xfs_btree_rec_addr(cur, 2, right),
2392 -1, rrecs);
2393 xfs_btree_log_recs(cur, rbp, 1, rrecs);
b194c7d8
BN
2394 }
2395
13e831e0
DW
2396 /*
2397 * Using a temporary cursor, update the parent key values of the
2398 * block on the left.
2399 */
e6358021
DW
2400 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
2401 error = xfs_btree_dup_cursor(cur, &tcur);
2402 if (error)
2403 goto error0;
2404 i = xfs_btree_firstrec(tcur, level);
2405 XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
13e831e0 2406
e6358021
DW
2407 error = xfs_btree_decrement(tcur, level, &i);
2408 if (error)
2409 goto error1;
13e831e0 2410
e6358021 2411 /* Update the parent high keys of the left block, if needed. */
64dbe047 2412 error = xfs_btree_update_keys(tcur, level);
13e831e0
DW
2413 if (error)
2414 goto error1;
e6358021
DW
2415
2416 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
13e831e0
DW
2417 }
2418
e6358021
DW
2419 /* Update the parent keys of the right block. */
2420 error = xfs_btree_update_keys(cur, level);
2421 if (error)
2422 goto error0;
b194c7d8
BN
2423
2424 /* Slide the cursor value left one. */
2425 cur->bc_ptrs[level]--;
2426
2427 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2428 *stat = 1;
2429 return 0;
2430
2431out0:
2432 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2433 *stat = 0;
2434 return 0;
2435
2436error0:
2437 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2438 return error;
13e831e0
DW
2439
2440error1:
2441 XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
2442 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2443 return error;
b194c7d8
BN
2444}
2445
2446/*
2447 * Move 1 record right from cur/level if possible.
2448 * Update cur to reflect the new path.
2449 */
2450STATIC int /* error */
2451xfs_btree_rshift(
2452 struct xfs_btree_cur *cur,
2453 int level,
2454 int *stat) /* success/failure */
2455{
b194c7d8
BN
2456 struct xfs_buf *lbp; /* left buffer pointer */
2457 struct xfs_btree_block *left; /* left btree block */
2458 struct xfs_buf *rbp; /* right buffer pointer */
2459 struct xfs_btree_block *right; /* right btree block */
2460 struct xfs_btree_cur *tcur; /* temporary btree cursor */
2461 union xfs_btree_ptr rptr; /* right block pointer */
2462 union xfs_btree_key *rkp; /* right btree key */
2463 int rrecs; /* right record count */
2464 int lrecs; /* left record count */
2465 int error; /* error return value */
2466 int i; /* loop counter */
2467
2468 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2469 XFS_BTREE_TRACE_ARGI(cur, level);
2470
2471 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2472 (level == cur->bc_nlevels - 1))
2473 goto out0;
2474
2475 /* Set up variables for this block as "left". */
2476 left = xfs_btree_get_block(cur, level, &lbp);
2477
2478#ifdef DEBUG
2479 error = xfs_btree_check_block(cur, left, level, lbp);
2480 if (error)
2481 goto error0;
2482#endif
2483
2484 /* If we've got no right sibling then we can't shift an entry right. */
2485 xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2486 if (xfs_btree_ptr_is_null(cur, &rptr))
2487 goto out0;
2488
2489 /*
2490 * If the cursor entry is the one that would be moved, don't
2491 * do it... it's too complicated.
2492 */
2493 lrecs = xfs_btree_get_numrecs(left);
2494 if (cur->bc_ptrs[level] >= lrecs)
2495 goto out0;
2496
2497 /* Set up the right neighbor as "right". */
ff105f75 2498 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
b194c7d8
BN
2499 if (error)
2500 goto error0;
2501
2502 /* If it's full, it can't take another entry. */
2503 rrecs = xfs_btree_get_numrecs(right);
2504 if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
2505 goto out0;
2506
2507 XFS_BTREE_STATS_INC(cur, rshift);
2508 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2509
2510 /*
2511 * Make a hole at the start of the right neighbor block, then
2512 * copy the last left block entry to the hole.
2513 */
2514 if (level > 0) {
2515 /* It's a nonleaf. make a hole in the keys and ptrs */
2516 union xfs_btree_key *lkp;
2517 union xfs_btree_ptr *lpp;
2518 union xfs_btree_ptr *rpp;
2519
2520 lkp = xfs_btree_key_addr(cur, lrecs, left);
2521 lpp = xfs_btree_ptr_addr(cur, lrecs, left);
2522 rkp = xfs_btree_key_addr(cur, 1, right);
2523 rpp = xfs_btree_ptr_addr(cur, 1, right);
2524
2525#ifdef DEBUG
2526 for (i = rrecs - 1; i >= 0; i--) {
2527 error = xfs_btree_check_ptr(cur, rpp, i, level);
2528 if (error)
2529 goto error0;
2530 }
2531#endif
2532
2533 xfs_btree_shift_keys(cur, rkp, 1, rrecs);
2534 xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
2535
2536#ifdef DEBUG
2537 error = xfs_btree_check_ptr(cur, lpp, 0, level);
2538 if (error)
2539 goto error0;
2540#endif
2541
2542 /* Now put the new data in, and log it. */
2543 xfs_btree_copy_keys(cur, rkp, lkp, 1);
2544 xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
2545
2546 xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
2547 xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
2548
2549 ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
2550 xfs_btree_key_addr(cur, 2, right)));
2551 } else {
2552 /* It's a leaf. make a hole in the records */
2553 union xfs_btree_rec *lrp;
2554 union xfs_btree_rec *rrp;
2555
2556 lrp = xfs_btree_rec_addr(cur, lrecs, left);
2557 rrp = xfs_btree_rec_addr(cur, 1, right);
2558
2559 xfs_btree_shift_recs(cur, rrp, 1, rrecs);
2560
2561 /* Now put the new data in, and log it. */
2562 xfs_btree_copy_recs(cur, rrp, lrp, 1);
2563 xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
b194c7d8
BN
2564 }
2565
2566 /*
2567 * Decrement and log left's numrecs, bump and log right's numrecs.
2568 */
2569 xfs_btree_set_numrecs(left, --lrecs);
2570 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
2571
2572 xfs_btree_set_numrecs(right, ++rrecs);
2573 xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
2574
2575 /*
2576 * Using a temporary cursor, update the parent key values of the
2577 * block on the right.
2578 */
2579 error = xfs_btree_dup_cursor(cur, &tcur);
2580 if (error)
2581 goto error0;
2582 i = xfs_btree_lastrec(tcur, level);
e6358021 2583 XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
b194c7d8
BN
2584
2585 error = xfs_btree_increment(tcur, level, &i);
2586 if (error)
2587 goto error1;
2588
13e831e0
DW
2589 /* Update the parent high keys of the left block, if needed. */
2590 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
64dbe047 2591 error = xfs_btree_update_keys(cur, level);
13e831e0
DW
2592 if (error)
2593 goto error1;
2594 }
2595
a3c9cb10 2596 /* Update the parent keys of the right block. */
64dbe047 2597 error = xfs_btree_update_keys(tcur, level);
b194c7d8
BN
2598 if (error)
2599 goto error1;
2600
2601 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
2602
2603 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2604 *stat = 1;
2605 return 0;
2606
2607out0:
2608 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2609 *stat = 0;
2610 return 0;
2611
2612error0:
2613 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2614 return error;
2615
2616error1:
2617 XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
2618 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2619 return error;
2620}
2621
2622/*
2623 * Split cur/level block in half.
2624 * Return new block number and the key to its first
2625 * record (to be inserted into parent).
2626 */
2627STATIC int /* error */
ff105f75 2628__xfs_btree_split(
b194c7d8
BN
2629 struct xfs_btree_cur *cur,
2630 int level,
2631 union xfs_btree_ptr *ptrp,
2632 union xfs_btree_key *key,
2633 struct xfs_btree_cur **curp,
2634 int *stat) /* success/failure */
2635{
2636 union xfs_btree_ptr lptr; /* left sibling block ptr */
2637 struct xfs_buf *lbp; /* left buffer pointer */
2638 struct xfs_btree_block *left; /* left btree block */
2639 union xfs_btree_ptr rptr; /* right sibling block ptr */
2640 struct xfs_buf *rbp; /* right buffer pointer */
2641 struct xfs_btree_block *right; /* right btree block */
2642 union xfs_btree_ptr rrptr; /* right-right sibling ptr */
2643 struct xfs_buf *rrbp; /* right-right buffer pointer */
2644 struct xfs_btree_block *rrblock; /* right-right btree block */
2645 int lrecs;
2646 int rrecs;
2647 int src_index;
2648 int error; /* error return value */
2649#ifdef DEBUG
2650 int i;
2651#endif
2652
2653 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2654 XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
2655
2656 XFS_BTREE_STATS_INC(cur, split);
2657
2658 /* Set up left block (current one). */
2659 left = xfs_btree_get_block(cur, level, &lbp);
2660
2661#ifdef DEBUG
2662 error = xfs_btree_check_block(cur, left, level, lbp);
2663 if (error)
2664 goto error0;
2665#endif
2666
2667 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
2668
2669 /* Allocate the new block. If we can't do it, we're toast. Give up. */
ff105f75 2670 error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
b194c7d8
BN
2671 if (error)
2672 goto error0;
2673 if (*stat == 0)
2674 goto out0;
2675 XFS_BTREE_STATS_INC(cur, alloc);
2676
2677 /* Set up the new block as "right". */
2678 error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
2679 if (error)
2680 goto error0;
2681
2682 /* Fill in the btree header for the new right block. */
5dfa5cd2 2683 xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
b194c7d8
BN
2684
2685 /*
2686 * Split the entries between the old and the new block evenly.
2687 * If there's an odd number of entries, the block that the cursor
2688 * (and hence the pending insertion) ends up in gets the smaller half.
2689 */
2690 lrecs = xfs_btree_get_numrecs(left);
2691 rrecs = lrecs / 2;
2692 if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
2693 rrecs++;
2694 src_index = (lrecs - rrecs + 1);
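	/*
	 * Worked example (illustrative): with lrecs == 9 and the cursor at
	 * slot 3, rrecs becomes 9/2 + 1 = 5 and src_index == 5, so the left
	 * block keeps 4 entries and the right block takes 5; the pending
	 * insertion then lands in the emptier left block.  With the cursor
	 * at slot 7 instead, rrecs stays 4, the left block keeps 5, and the
	 * cursor is later moved into the right block (slot 2).
	 */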
2695
2696 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2697
a3c9cb10
DW
2698 /* Adjust numrecs for the later get_*_keys() calls. */
2699 lrecs -= rrecs;
2700 xfs_btree_set_numrecs(left, lrecs);
2701 xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
2702
b194c7d8
BN
2703 /*
2704 * Copy btree block entries from the left block over to the
2705 * new block, the right. Update the right block and log the
2706 * changes.
2707 */
2708 if (level > 0) {
2709 /* It's a non-leaf. Move keys and pointers. */
2710 union xfs_btree_key *lkp; /* left btree key */
2711 union xfs_btree_ptr *lpp; /* left address pointer */
2712 union xfs_btree_key *rkp; /* right btree key */
2713 union xfs_btree_ptr *rpp; /* right address pointer */
2714
2715 lkp = xfs_btree_key_addr(cur, src_index, left);
2716 lpp = xfs_btree_ptr_addr(cur, src_index, left);
2717 rkp = xfs_btree_key_addr(cur, 1, right);
2718 rpp = xfs_btree_ptr_addr(cur, 1, right);
2719
2720#ifdef DEBUG
2721 for (i = src_index; i < rrecs; i++) {
2722 error = xfs_btree_check_ptr(cur, lpp, i, level);
2723 if (error)
2724 goto error0;
2725 }
2726#endif
2727
a3c9cb10 2728 /* Copy the keys & pointers to the new block. */
b194c7d8
BN
2729 xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
2730 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
2731
2732 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2733 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2734
a3c9cb10 2735 /* Stash the keys of the new block for later insertion. */
64dbe047 2736 xfs_btree_get_node_keys(cur, right, key);
b194c7d8
BN
2737 } else {
2738 /* It's a leaf. Move records. */
2739 union xfs_btree_rec *lrp; /* left record pointer */
2740 union xfs_btree_rec *rrp; /* right record pointer */
2741
2742 lrp = xfs_btree_rec_addr(cur, src_index, left);
2743 rrp = xfs_btree_rec_addr(cur, 1, right);
2744
a3c9cb10 2745 /* Copy records to the new block. */
b194c7d8
BN
2746 xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
2747 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2748
a3c9cb10 2749 /* Stash the keys of the new block for later insertion. */
64dbe047 2750 xfs_btree_get_leaf_keys(cur, right, key);
b194c7d8
BN
2751 }
2752
b194c7d8
BN
2753 /*
2754 * Find the left block number by looking in the buffer.
a3c9cb10 2755 * Adjust sibling pointers.
b194c7d8
BN
2756 */
2757 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
2758 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
2759 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2760 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2761
b194c7d8
BN
2762 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
2763 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
2764
2765 /*
2766 * If there's a block to the new block's right, make that block
2767 * point back to right instead of to left.
2768 */
2769 if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
ff105f75 2770 error = xfs_btree_read_buf_block(cur, &rrptr,
b194c7d8
BN
2771 0, &rrblock, &rrbp);
2772 if (error)
2773 goto error0;
2774 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
2775 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
2776 }
13e831e0
DW
2777
2778 /* Update the parent high keys of the left block, if needed. */
2779 if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
64dbe047 2780 error = xfs_btree_update_keys(cur, level);
13e831e0
DW
2781 if (error)
2782 goto error0;
2783 }
2784
b194c7d8
BN
2785 /*
2786 * If the cursor is really in the right block, move it there.
2787 * If it's just pointing past the last entry in left, then we'll
2788 * insert there, so don't change anything in that case.
2789 */
2790 if (cur->bc_ptrs[level] > lrecs + 1) {
2791 xfs_btree_setbuf(cur, level, rbp);
2792 cur->bc_ptrs[level] -= lrecs;
2793 }
2794 /*
2795 * If there are more levels, we'll need another cursor which refers
2796 * the right block, no matter where this cursor was.
2797 */
2798 if (level + 1 < cur->bc_nlevels) {
2799 error = xfs_btree_dup_cursor(cur, curp);
2800 if (error)
2801 goto error0;
2802 (*curp)->bc_ptrs[level + 1]++;
2803 }
2804 *ptrp = rptr;
2805 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2806 *stat = 1;
2807 return 0;
2808out0:
2809 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2810 *stat = 0;
2811 return 0;
2812
2813error0:
2814 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2815 return error;
2816}
2817
19ebedcf 2818#ifdef __KERNEL__
ff105f75
DC
2819struct xfs_btree_split_args {
2820 struct xfs_btree_cur *cur;
2821 int level;
2822 union xfs_btree_ptr *ptrp;
2823 union xfs_btree_key *key;
2824 struct xfs_btree_cur **curp;
2825 int *stat; /* success/failure */
2826 int result;
2827 bool kswapd; /* allocation in kswapd context */
2828 struct completion *done;
2829 struct work_struct work;
2830};
2831
2832/*
2833 * Stack switching interfaces for allocation
2834 */
2835static void
2836xfs_btree_split_worker(
2837 struct work_struct *work)
2838{
2839 struct xfs_btree_split_args *args = container_of(work,
2840 struct xfs_btree_split_args, work);
2841 unsigned long pflags;
2842 unsigned long new_pflags = PF_FSTRANS;
2843
2844 /*
2845 * we are in a transaction context here, but may also be doing work
2846 * in kswapd context, and hence we may need to inherit that state
2847 * temporarily to ensure that we don't block waiting for memory reclaim
2848 * in any way.
2849 */
2850 if (args->kswapd)
2851 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2852
2853 current_set_flags_nested(&pflags, new_pflags);
2854
2855 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2856 args->key, args->curp, args->stat);
2857 complete(args->done);
2858
2859 current_restore_flags_nested(&pflags, new_pflags);
2860}
ff105f75
DC
2861
2862/*
2863 * BMBT split requests often come in with little stack to work on. Push
2864 * them off to a worker thread so there is lots of stack to use. For the other
2865 * btree types, just call directly to avoid the context switch overhead here.
2866 */
2867STATIC int /* error */
2868xfs_btree_split(
2869 struct xfs_btree_cur *cur,
2870 int level,
2871 union xfs_btree_ptr *ptrp,
2872 union xfs_btree_key *key,
2873 struct xfs_btree_cur **curp,
2874 int *stat) /* success/failure */
2875{
ff105f75
DC
2876 struct xfs_btree_split_args args;
2877 DECLARE_COMPLETION_ONSTACK(done);
2878
2879 if (cur->bc_btnum != XFS_BTNUM_BMAP)
ff105f75
DC
2880 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2881
ff105f75
DC
2882 args.cur = cur;
2883 args.level = level;
2884 args.ptrp = ptrp;
2885 args.key = key;
2886 args.curp = curp;
2887 args.stat = stat;
2888 args.done = &done;
2889 args.kswapd = current_is_kswapd();
2890 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2891 queue_work(xfs_alloc_wq, &args.work);
2892 wait_for_completion(&done);
2893 destroy_work_on_stack(&args.work);
2894 return args.result;
ff105f75 2895}
19ebedcf
DC
2896#else /* !KERNEL */
2897#define xfs_btree_split __xfs_btree_split
2898#endif
ff105f75
DC
2899
2900
b194c7d8
BN
2901/*
2902 * Copy the old inode root contents into a real block and make the
2903 * broot point to it.
2904 */
2905int /* error */
2906xfs_btree_new_iroot(
2907 struct xfs_btree_cur *cur, /* btree cursor */
2908 int *logflags, /* logging flags for inode */
2909 int *stat) /* return status - 0 fail */
2910{
2911 struct xfs_buf *cbp; /* buffer for cblock */
2912 struct xfs_btree_block *block; /* btree block */
2913 struct xfs_btree_block *cblock; /* child btree block */
2914 union xfs_btree_key *ckp; /* child key pointer */
2915 union xfs_btree_ptr *cpp; /* child ptr pointer */
2916 union xfs_btree_key *kp; /* pointer to btree key */
2917 union xfs_btree_ptr *pp; /* pointer to block addr */
2918 union xfs_btree_ptr nptr; /* new block addr */
2919 int level; /* btree level */
2920 int error; /* error return code */
2921#ifdef DEBUG
2922 int i; /* loop counter */
2923#endif
2924
2925 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2926 XFS_BTREE_STATS_INC(cur, newroot);
2927
2928 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
2929
2930 level = cur->bc_nlevels - 1;
2931
2932 block = xfs_btree_get_iroot(cur);
2933 pp = xfs_btree_ptr_addr(cur, 1, block);
2934
2935 /* Allocate the new block. If we can't do it, we're toast. Give up. */
ff105f75 2936 error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
b194c7d8
BN
2937 if (error)
2938 goto error0;
2939 if (*stat == 0) {
2940 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2941 return 0;
2942 }
2943 XFS_BTREE_STATS_INC(cur, alloc);
2944
2945 /* Copy the root into a real block. */
2946 error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
2947 if (error)
2948 goto error0;
2949
77ec5ff4
DC
2950 /*
2951 * We can't just memcpy() the root in for CRC-enabled btree blocks.
2952 * In that case we also have to ensure the blkno remains correct.
2953 */
b194c7d8 2954 memcpy(cblock, block, xfs_btree_block_len(cur));
77ec5ff4
DC
2955 if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
2956 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
2957 cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
2958 else
2959 cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
2960 }
b194c7d8
BN
2961
2962 be16_add_cpu(&block->bb_level, 1);
2963 xfs_btree_set_numrecs(block, 1);
2964 cur->bc_nlevels++;
2965 cur->bc_ptrs[level + 1] = 1;
2966
2967 kp = xfs_btree_key_addr(cur, 1, block);
2968 ckp = xfs_btree_key_addr(cur, 1, cblock);
2969 xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));
2970
2971 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
2972#ifdef DEBUG
2973 for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
2974 error = xfs_btree_check_ptr(cur, pp, i, level);
2975 if (error)
2976 goto error0;
2977 }
2978#endif
2979 xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));
2980
2981#ifdef DEBUG
2982 error = xfs_btree_check_ptr(cur, &nptr, 0, level);
2983 if (error)
2984 goto error0;
2985#endif
2986 xfs_btree_copy_ptrs(cur, pp, &nptr, 1);
2987
2988 xfs_iroot_realloc(cur->bc_private.b.ip,
2989 1 - xfs_btree_get_numrecs(cblock),
2990 cur->bc_private.b.whichfork);
2991
2992 xfs_btree_setbuf(cur, level, cbp);
2993
2994 /*
2995 * Do all this logging at the end so that
2996 * the root is at the right level.
2997 */
2998 xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
2999 xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3000 xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
3001
3002 *logflags |=
56b2de80 3003 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
b194c7d8
BN
3004 *stat = 1;
3005 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3006 return 0;
3007error0:
3008 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3009 return error;
3010}
3011
3012/*
3013 * Allocate a new root block, fill it in.
3014 */
3015STATIC int /* error */
3016xfs_btree_new_root(
3017 struct xfs_btree_cur *cur, /* btree cursor */
3018 int *stat) /* success/failure */
3019{
3020 struct xfs_btree_block *block; /* one half of the old root block */
3021 struct xfs_buf *bp; /* buffer containing block */
3022 int error; /* error return value */
3023 struct xfs_buf *lbp; /* left buffer pointer */
3024 struct xfs_btree_block *left; /* left btree block */
3025 struct xfs_buf *nbp; /* new (root) buffer */
3026 struct xfs_btree_block *new; /* new (root) btree block */
3027 int nptr; /* new value for key index, 1 or 2 */
3028 struct xfs_buf *rbp; /* right buffer pointer */
3029 struct xfs_btree_block *right; /* right btree block */
3030 union xfs_btree_ptr rptr;
3031 union xfs_btree_ptr lptr;
3032
3033 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3034 XFS_BTREE_STATS_INC(cur, newroot);
3035
3036 /* initialise our start point from the cursor */
3037 cur->bc_ops->init_ptr_from_cur(cur, &rptr);
3038
3039 /* Allocate the new block. If we can't do it, we're toast. Give up. */
ff105f75 3040 error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
b194c7d8
BN
3041 if (error)
3042 goto error0;
3043 if (*stat == 0)
3044 goto out0;
3045 XFS_BTREE_STATS_INC(cur, alloc);
3046
3047 /* Set up the new block. */
3048 error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
3049 if (error)
3050 goto error0;
3051
3052 /* Set the root in the holding structure increasing the level by 1. */
3053 cur->bc_ops->set_root(cur, &lptr, 1);
3054
3055 /*
3056 * At the previous root level there are now two blocks: the old root,
3057 * and the new block generated when it was split. We don't know which
3058 * one the cursor is pointing at, so we set up variables "left" and
3059 * "right" for each case.
3060 */
3061 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
3062
3063#ifdef DEBUG
3064 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
3065 if (error)
3066 goto error0;
3067#endif
3068
3069 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3070 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3071 /* Our block is left, pick up the right block. */
3072 lbp = bp;
3073 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
3074 left = block;
ff105f75 3075 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
b194c7d8
BN
3076 if (error)
3077 goto error0;
3078 bp = rbp;
3079 nptr = 1;
3080 } else {
3081 /* Our block is right, pick up the left block. */
3082 rbp = bp;
3083 xfs_btree_buf_to_ptr(cur, rbp, &rptr);
3084 right = block;
3085 xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
ff105f75 3086 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
b194c7d8
BN
3087 if (error)
3088 goto error0;
3089 bp = lbp;
3090 nptr = 2;
3091 }
a3c9cb10 3092
b194c7d8 3093 /* Fill in the new block's btree header and log it. */
5dfa5cd2 3094 xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
b194c7d8
BN
3095 xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
3096 ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
3097 !xfs_btree_ptr_is_null(cur, &rptr));
3098
3099 /* Fill in the key data in the new root. */
3100 if (xfs_btree_get_level(left) > 0) {
a3c9cb10
DW
3101 /*
3102 * Get the keys for the left block's keys and put them directly
3103 * in the parent block. Do the same for the right block.
3104 */
64dbe047 3105 xfs_btree_get_node_keys(cur, left,
a3c9cb10 3106 xfs_btree_key_addr(cur, 1, new));
64dbe047 3107 xfs_btree_get_node_keys(cur, right,
a3c9cb10 3108 xfs_btree_key_addr(cur, 2, new));
b194c7d8 3109 } else {
a3c9cb10
DW
3110 /*
3111 * Get the keys for the left block's records and put them
3112 * directly in the parent block. Do the same for the right
3113 * block.
3114 */
64dbe047 3115 xfs_btree_get_leaf_keys(cur, left,
a3c9cb10 3116 xfs_btree_key_addr(cur, 1, new));
64dbe047 3117 xfs_btree_get_leaf_keys(cur, right,
a3c9cb10 3118 xfs_btree_key_addr(cur, 2, new));
b194c7d8
BN
3119 }
3120 xfs_btree_log_keys(cur, nbp, 1, 2);
3121
3122 /* Fill in the pointer data in the new root. */
3123 xfs_btree_copy_ptrs(cur,
3124 xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
3125 xfs_btree_copy_ptrs(cur,
3126 xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
3127 xfs_btree_log_ptrs(cur, nbp, 1, 2);
3128
3129 /* Fix up the cursor. */
3130 xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
3131 cur->bc_ptrs[cur->bc_nlevels] = nptr;
3132 cur->bc_nlevels++;
3133 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3134 *stat = 1;
3135 return 0;
3136error0:
3137 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3138 return error;
3139out0:
3140 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3141 *stat = 0;
3142 return 0;
3143}
3144
3145STATIC int
3146xfs_btree_make_block_unfull(
3147 struct xfs_btree_cur *cur, /* btree cursor */
3148 int level, /* btree level */
3149 int numrecs,/* # of recs in block */
3150 int *oindex,/* old tree index */
3151 int *index, /* new tree index */
3152 union xfs_btree_ptr *nptr, /* new btree ptr */
3153 struct xfs_btree_cur **ncur, /* new btree cursor */
a3c9cb10 3154 union xfs_btree_key *key, /* key of new block */
b194c7d8
BN
3155 int *stat)
3156{
b194c7d8
BN
3157 int error = 0;
3158
3159 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
3160 level == cur->bc_nlevels - 1) {
3161 struct xfs_inode *ip = cur->bc_private.b.ip;
3162
3163 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
3164 /* A root block that can be made bigger. */
b194c7d8 3165 xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
cff8bf94 3166 *stat = 1;
b194c7d8
BN
3167 } else {
3168 /* A root block that needs replacing */
3169 int logflags = 0;
3170
3171 error = xfs_btree_new_iroot(cur, &logflags, stat);
3172 if (error || *stat == 0)
3173 return error;
3174
3175 xfs_trans_log_inode(cur->bc_tp, ip, logflags);
3176 }
3177
3178 return 0;
3179 }
3180
3181 /* First, try shifting an entry to the right neighbor. */
3182 error = xfs_btree_rshift(cur, level, stat);
3183 if (error || *stat)
3184 return error;
3185
3186 /* Next, try shifting an entry to the left neighbor. */
3187 error = xfs_btree_lshift(cur, level, stat);
3188 if (error)
3189 return error;
3190
3191 if (*stat) {
3192 *oindex = *index = cur->bc_ptrs[level];
3193 return 0;
3194 }
3195
3196 /*
3197 * Next, try splitting the current block in half.
3198 *
3199 * If this works we have to re-set our variables because we
3200 * could be in a different block now.
3201 */
d3cd7a27 3202 error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
b194c7d8
BN
3203 if (error || *stat == 0)
3204 return error;
3205
3206
3207 *index = cur->bc_ptrs[level];
b194c7d8
BN
3208 return 0;
3209}
3210
3211/*
3212 * Insert one record/level. Return information to the caller
3213 * allowing the next level up to proceed if necessary.
3214 */
3215STATIC int
3216xfs_btree_insrec(
3217 struct xfs_btree_cur *cur, /* btree cursor */
3218 int level, /* level to insert record at */
3219 union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
d3cd7a27
DW
3220 union xfs_btree_rec *rec, /* record to insert */
3221 union xfs_btree_key *key, /* i/o: block key for ptrp */
b194c7d8
BN
3222 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
3223 int *stat) /* success/failure */
3224{
3225 struct xfs_btree_block *block; /* btree block */
3226 struct xfs_buf *bp; /* buffer for block */
b194c7d8
BN
3227 union xfs_btree_ptr nptr; /* new block ptr */
3228 struct xfs_btree_cur *ncur; /* new btree cursor */
45413937 3229 union xfs_btree_key nkey; /* new block key */
13e831e0 3230 union xfs_btree_key *lkey;
b194c7d8
BN
3231 int optr; /* old key/record index */
3232 int ptr; /* key/record index */
3233 int numrecs;/* number of records */
3234 int error; /* error return value */
3235#ifdef DEBUG
3236 int i;
3237#endif
13e831e0 3238 xfs_daddr_t old_bn;
b194c7d8
BN
3239
3240 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
d3cd7a27 3241 XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);
b194c7d8
BN
3242
3243 ncur = NULL;
45413937 3244 lkey = &nkey;
b194c7d8
BN
3245
3246 /*
3247 * If we have an external root pointer, and we've made it to the
3248 * root level, allocate a new root block and we're done.
3249 */
3250 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
3251 (level >= cur->bc_nlevels)) {
3252 error = xfs_btree_new_root(cur, stat);
3253 xfs_btree_set_ptr_null(cur, ptrp);
3254
3255 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3256 return error;
3257 }
3258
3259 /* If we're off the left edge, return failure. */
3260 ptr = cur->bc_ptrs[level];
3261 if (ptr == 0) {
3262 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3263 *stat = 0;
3264 return 0;
3265 }
3266
b194c7d8
BN
3267 optr = ptr;
3268
3269 XFS_BTREE_STATS_INC(cur, insrec);
3270
3271 /* Get pointers to the btree buffer and block. */
3272 block = xfs_btree_get_block(cur, level, &bp);
13e831e0 3273 old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
b194c7d8
BN
3274 numrecs = xfs_btree_get_numrecs(block);
3275
3276#ifdef DEBUG
3277 error = xfs_btree_check_block(cur, block, level, bp);
3278 if (error)
3279 goto error0;
3280
3281 /* Check that the new entry is being inserted in the right place. */
3282 if (ptr <= numrecs) {
3283 if (level == 0) {
d3cd7a27 3284 ASSERT(cur->bc_ops->recs_inorder(cur, rec,
b194c7d8
BN
3285 xfs_btree_rec_addr(cur, ptr, block)));
3286 } else {
d3cd7a27 3287 ASSERT(cur->bc_ops->keys_inorder(cur, key,
b194c7d8
BN
3288 xfs_btree_key_addr(cur, ptr, block)));
3289 }
3290 }
3291#endif
3292
3293 /*
3294 * If the block is full, we can't insert the new entry until we
3295 * make the block un-full.
3296 */
3297 xfs_btree_set_ptr_null(cur, &nptr);
3298 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
3299 error = xfs_btree_make_block_unfull(cur, level, numrecs,
13e831e0 3300 &optr, &ptr, &nptr, &ncur, lkey, stat);
b194c7d8
BN
3301 if (error || *stat == 0)
3302 goto error0;
3303 }
3304
3305 /*
3306 * The current block may have changed if the block was
3307 * previously full and we have just made space in it.
3308 */
3309 block = xfs_btree_get_block(cur, level, &bp);
3310 numrecs = xfs_btree_get_numrecs(block);
3311
3312#ifdef DEBUG
3313 error = xfs_btree_check_block(cur, block, level, bp);
3314 if (error)
3315 return error;
3316#endif
3317
3318 /*
3319 * At this point we know there's room for our new entry in the block
3320 * we're pointing at.
3321 */
3322 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
3323
3324 if (level > 0) {
3325 /* It's a nonleaf. make a hole in the keys and ptrs */
3326 union xfs_btree_key *kp;
3327 union xfs_btree_ptr *pp;
3328
3329 kp = xfs_btree_key_addr(cur, ptr, block);
3330 pp = xfs_btree_ptr_addr(cur, ptr, block);
3331
3332#ifdef DEBUG
3333 for (i = numrecs - ptr; i >= 0; i--) {
3334 error = xfs_btree_check_ptr(cur, pp, i, level);
3335 if (error)
3336 return error;
3337 }
3338#endif
3339
3340 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
3341 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
3342
3343#ifdef DEBUG
3344 error = xfs_btree_check_ptr(cur, ptrp, 0, level);
3345 if (error)
3346 goto error0;
3347#endif
3348
3349 /* Now put the new data in, bump numrecs and log it. */
d3cd7a27 3350 xfs_btree_copy_keys(cur, kp, key, 1);
b194c7d8
BN
3351 xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
3352 numrecs++;
3353 xfs_btree_set_numrecs(block, numrecs);
3354 xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
3355 xfs_btree_log_keys(cur, bp, ptr, numrecs);
3356#ifdef DEBUG
3357 if (ptr < numrecs) {
3358 ASSERT(cur->bc_ops->keys_inorder(cur, kp,
3359 xfs_btree_key_addr(cur, ptr + 1, block)));
3360 }
3361#endif
3362 } else {
3363 /* It's a leaf. make a hole in the records */
3364 union xfs_btree_rec *rp;
3365
3366 rp = xfs_btree_rec_addr(cur, ptr, block);
3367
3368 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
3369
3370 /* Now put the new data in, bump numrecs and log it. */
d3cd7a27 3371 xfs_btree_copy_recs(cur, rp, rec, 1);
b194c7d8
BN
3372 xfs_btree_set_numrecs(block, ++numrecs);
3373 xfs_btree_log_recs(cur, bp, ptr, numrecs);
3374#ifdef DEBUG
3375 if (ptr < numrecs) {
3376 ASSERT(cur->bc_ops->recs_inorder(cur, rp,
3377 xfs_btree_rec_addr(cur, ptr + 1, block)));
3378 }
3379#endif
3380 }
3381
3382 /* Log the new number of records in the btree header. */
3383 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3384
13e831e0
DW
3385 /*
3386 * If we just inserted into a new tree block, we have to
3387 * recalculate nkey here because nkey is out of date.
3388 *
3389 * Otherwise we're just updating an existing block (having shoved
3390 * some records into the new tree block), so use the regular key
3391 * update mechanism.
3392 */
3393 if (bp && bp->b_bn != old_bn) {
3394 xfs_btree_get_keys(cur, block, lkey);
3395 } else if (xfs_btree_needs_key_update(cur, optr)) {
64dbe047 3396 error = xfs_btree_update_keys(cur, level);
b194c7d8
BN
3397 if (error)
3398 goto error0;
3399 }
3400
3401 /*
3402 * If we are tracking the last record in the tree and
3403 * we are at the far right edge of the tree, update it.
3404 */
3405 if (xfs_btree_is_lastrec(cur, block, level)) {
d3cd7a27 3406 cur->bc_ops->update_lastrec(cur, block, rec,
b194c7d8
BN
3407 ptr, LASTREC_INSREC);
3408 }
3409
3410 /*
3411 * Return the new block number, if any.
3412 * If there is one, give back a record value and a cursor too.
3413 */
3414 *ptrp = nptr;
3415 if (!xfs_btree_ptr_is_null(cur, &nptr)) {
13e831e0 3416 xfs_btree_copy_keys(cur, key, lkey, 1);
b194c7d8
BN
3417 *curp = ncur;
3418 }
3419
3420 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3421 *stat = 1;
3422 return 0;
3423
3424error0:
3425 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3426 return error;
3427}
3428
3429/*
3430 * Insert the record at the point referenced by cur.
3431 *
3432 * A multi-level split of the tree on insert will invalidate the original
3433 * cursor. All callers of this function should assume that the cursor is
3434 * no longer valid and revalidate it.
3435 */
3436int
3437xfs_btree_insert(
3438 struct xfs_btree_cur *cur,
3439 int *stat)
3440{
3441 int error; /* error return value */
3442 int i; /* result value, 0 for failure */
3443 int level; /* current level number in btree */
3444 union xfs_btree_ptr nptr; /* new block number (split result) */
3445 struct xfs_btree_cur *ncur; /* new cursor (split result) */
3446 struct xfs_btree_cur *pcur; /* previous level's cursor */
45413937 3447 union xfs_btree_key bkey; /* key of block to insert */
13e831e0 3448 union xfs_btree_key *key;
b194c7d8
BN
3449 union xfs_btree_rec rec; /* record to insert */
3450
3451 level = 0;
3452 ncur = NULL;
3453 pcur = cur;
45413937 3454 key = &bkey;
b194c7d8
BN
3455
3456 xfs_btree_set_ptr_null(cur, &nptr);
d3cd7a27
DW
3457
3458 /* Make a key out of the record data to be inserted, and save it. */
b194c7d8 3459 cur->bc_ops->init_rec_from_cur(cur, &rec);
13e831e0 3460 cur->bc_ops->init_key_from_rec(key, &rec);
b194c7d8
BN
3461
3462 /*
3463 * Loop going up the tree, starting at the leaf level.
3464 * Stop when we don't get a split block, that must mean that
3465 * the insert is finished with this level.
3466 */
3467 do {
3468 /*
3469 * Insert nrec/nptr into this level of the tree.
3470 * Note if we fail, nptr will be null.
3471 */
13e831e0 3472 error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
d3cd7a27 3473 &ncur, &i);
b194c7d8
BN
3474 if (error) {
3475 if (pcur != cur)
3476 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
3477 goto error0;
3478 }
3479
19ebedcf 3480 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3481 level++;
3482
3483 /*
3484 * See if the cursor we just used should be thrown away.
3485 * We can't free the caller's cursor, but otherwise we should
3486 * do so if ncur is a new cursor or we're about to be done.
3487 */
3488 if (pcur != cur &&
3489 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
3490 /* Save the state from the cursor before we trash it */
3491 if (cur->bc_ops->update_cursor)
3492 cur->bc_ops->update_cursor(pcur, cur);
3493 cur->bc_nlevels = pcur->bc_nlevels;
3494 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
3495 }
3496 /* If we got a new cursor, switch to it. */
3497 if (ncur) {
3498 pcur = ncur;
3499 ncur = NULL;
3500 }
3501 } while (!xfs_btree_ptr_is_null(cur, &nptr));
3502
3503 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3504 *stat = i;
3505 return 0;
3506error0:
3507 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3508 return error;
3509}
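/*
 * Illustrative caller sketch, not part of xfs_btree.c: per the comment
 * above xfs_btree_insert(), a multi-level split can leave the original
 * cursor stale, so a typical caller revalidates it by looking the new
 * record up again before trusting the cursor position.  Priming the
 * cursor with the record to insert is btree-specific and assumed to have
 * been done already; the -ENOSPC return is a hypothetical choice for
 * this sketch.
 */
static int
example_insert_and_revalidate(
	struct xfs_btree_cur	*cur)	/* cursor primed with the new record */
{
	int			stat;
	int			error;

	error = xfs_btree_insert(cur, &stat);
	if (error)
		return error;
	if (!stat)
		return -ENOSPC;		/* hypothetical: insert did not happen */

	/* Revalidate the (possibly invalidated) cursor position. */
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &stat);
	if (error)
		return error;
	return stat ? 0 : -EFSCORRUPTED;
}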
3510
3511/*
3512 * Try to merge a non-leaf block back into the inode root.
3513 *
3514 * Note: the killroot name comes from the fact that we're effectively
3515 * killing the old root block. But because we can't just delete the
3516 * inode we have to copy the single block it was pointing to into the
3517 * inode.
3518 */
56b2de80 3519STATIC int
b194c7d8
BN
3520xfs_btree_kill_iroot(
3521 struct xfs_btree_cur *cur)
3522{
3523 int whichfork = cur->bc_private.b.whichfork;
3524 struct xfs_inode *ip = cur->bc_private.b.ip;
3525 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3526 struct xfs_btree_block *block;
3527 struct xfs_btree_block *cblock;
3528 union xfs_btree_key *kp;
3529 union xfs_btree_key *ckp;
3530 union xfs_btree_ptr *pp;
3531 union xfs_btree_ptr *cpp;
3532 struct xfs_buf *cbp;
3533 int level;
3534 int index;
3535 int numrecs;
410c3de5 3536 int error;
b194c7d8
BN
3537#ifdef DEBUG
3538 union xfs_btree_ptr ptr;
3539 int i;
3540#endif
3541
3542 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3543
3544 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
3545 ASSERT(cur->bc_nlevels > 1);
3546
3547 /*
3548 * Don't deal with the case where the root block needs to be a leaf.
3549 * We're just going to turn the thing back into extents anyway.
3550 */
3551 level = cur->bc_nlevels - 1;
3552 if (level == 1)
3553 goto out0;
3554
3555 /*
3556 * Give up if the root has multiple children.
3557 */
3558 block = xfs_btree_get_iroot(cur);
3559 if (xfs_btree_get_numrecs(block) != 1)
3560 goto out0;
3561
3562 cblock = xfs_btree_get_block(cur, level - 1, &cbp);
3563 numrecs = xfs_btree_get_numrecs(cblock);
3564
3565 /*
3566 * Only do this if the next level will fit.
3567 * Then the data must be copied up to the inode, and instead of
3568 * freeing the root block we free the next level down.
3569 */
3570 if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
3571 goto out0;
3572
3573 XFS_BTREE_STATS_INC(cur, killroot);
3574
3575#ifdef DEBUG
3576 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
3577 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3578 xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
3579 ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
3580#endif
3581
3582 index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
3583 if (index) {
3584 xfs_iroot_realloc(cur->bc_private.b.ip, index,
3585 cur->bc_private.b.whichfork);
b3563c19 3586 block = ifp->if_broot;
b194c7d8
BN
3587 }
3588
3589 be16_add_cpu(&block->bb_numrecs, index);
3590 ASSERT(block->bb_numrecs == cblock->bb_numrecs);
3591
3592 kp = xfs_btree_key_addr(cur, 1, block);
3593 ckp = xfs_btree_key_addr(cur, 1, cblock);
3594 xfs_btree_copy_keys(cur, kp, ckp, numrecs);
3595
3596 pp = xfs_btree_ptr_addr(cur, 1, block);
3597 cpp = xfs_btree_ptr_addr(cur, 1, cblock);
3598#ifdef DEBUG
3599 for (i = 0; i < numrecs; i++) {
b194c7d8
BN
3600 error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
3601 if (error) {
3602 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3603 return error;
3604 }
3605 }
3606#endif
3607 xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
3608
c261f8c0 3609 error = xfs_btree_free_block(cur, cbp);
410c3de5
CH
3610 if (error) {
3611 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3612 return error;
3613 }
b194c7d8
BN
3614
3615 cur->bc_bufs[level - 1] = NULL;
3616 be16_add_cpu(&block->bb_level, -1);
3617 xfs_trans_log_inode(cur->bc_tp, ip,
56b2de80 3618 XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
b194c7d8
BN
3619 cur->bc_nlevels--;
3620out0:
3621 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3622 return 0;
3623}
3624
56b2de80
DC
3625/*
 3626 * Kill the current root node, and replace it with its only child node.
3627 */
3628STATIC int
3629xfs_btree_kill_root(
3630 struct xfs_btree_cur *cur,
3631 struct xfs_buf *bp,
3632 int level,
3633 union xfs_btree_ptr *newroot)
3634{
3635 int error;
3636
3637 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3638 XFS_BTREE_STATS_INC(cur, killroot);
3639
3640 /*
3641 * Update the root pointer, decreasing the level by 1 and then
3642 * free the old root.
3643 */
3644 cur->bc_ops->set_root(cur, newroot, -1);
3645
c261f8c0 3646 error = xfs_btree_free_block(cur, bp);
56b2de80
DC
3647 if (error) {
3648 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3649 return error;
3650 }
3651
56b2de80
DC
3652 cur->bc_bufs[level] = NULL;
3653 cur->bc_ra[level] = 0;
3654 cur->bc_nlevels--;
3655
3656 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3657 return 0;
3658}
3659
b194c7d8
BN
3660STATIC int
3661xfs_btree_dec_cursor(
3662 struct xfs_btree_cur *cur,
3663 int level,
3664 int *stat)
3665{
3666 int error;
3667 int i;
3668
3669 if (level > 0) {
3670 error = xfs_btree_decrement(cur, level, &i);
3671 if (error)
3672 return error;
3673 }
3674
3675 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3676 *stat = 1;
3677 return 0;
3678}
3679
3680/*
3681 * Single level of the btree record deletion routine.
3682 * Delete record pointed to by cur/level.
3683 * Remove the record from its block then rebalance the tree.
 3684 * Sets *stat to 0 on failure, 1 when done, or 2 to go on to the next level.
3685 */
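/*
 * In outline, the rebalancing below works as follows: after the entry
 * has been removed, if the block still holds at least minrecs entries
 * we're done.  Otherwise try to left-shift an entry out of the right
 * sibling, then to right-shift one out of the left sibling.  If neither
 * shift is possible, join with whichever sibling the combined record
 * count still fits into and return *stat == 2 so the caller removes the
 * now-stale key/ptr at the next level up.
 */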
3686STATIC int /* error */
3687xfs_btree_delrec(
3688 struct xfs_btree_cur *cur, /* btree cursor */
3689 int level, /* level removing record from */
3690 int *stat) /* fail/done/go-on */
3691{
3692 struct xfs_btree_block *block; /* btree block */
3693 union xfs_btree_ptr cptr; /* current block ptr */
3694 struct xfs_buf *bp; /* buffer for block */
3695 int error; /* error return value */
3696 int i; /* loop counter */
b194c7d8
BN
3697 union xfs_btree_ptr lptr; /* left sibling block ptr */
3698 struct xfs_buf *lbp; /* left buffer pointer */
3699 struct xfs_btree_block *left; /* left btree block */
3700 int lrecs = 0; /* left record count */
3701 int ptr; /* key/record index */
3702 union xfs_btree_ptr rptr; /* right sibling block ptr */
3703 struct xfs_buf *rbp; /* right buffer pointer */
3704 struct xfs_btree_block *right; /* right btree block */
3705 struct xfs_btree_block *rrblock; /* right-right btree block */
3706 struct xfs_buf *rrbp; /* right-right buffer pointer */
3707 int rrecs = 0; /* right record count */
3708 struct xfs_btree_cur *tcur; /* temporary btree cursor */
3709 int numrecs; /* temporary numrec count */
3710
3711 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
3712 XFS_BTREE_TRACE_ARGI(cur, level);
3713
3714 tcur = NULL;
3715
3716 /* Get the index of the entry being deleted, check for nothing there. */
3717 ptr = cur->bc_ptrs[level];
3718 if (ptr == 0) {
3719 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3720 *stat = 0;
3721 return 0;
3722 }
3723
3724 /* Get the buffer & block containing the record or key/ptr. */
3725 block = xfs_btree_get_block(cur, level, &bp);
3726 numrecs = xfs_btree_get_numrecs(block);
3727
3728#ifdef DEBUG
3729 error = xfs_btree_check_block(cur, block, level, bp);
3730 if (error)
3731 goto error0;
3732#endif
3733
3734 /* Fail if we're off the end of the block. */
3735 if (ptr > numrecs) {
3736 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3737 *stat = 0;
3738 return 0;
3739 }
3740
3741 XFS_BTREE_STATS_INC(cur, delrec);
3742 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
3743
3744 /* Excise the entries being deleted. */
3745 if (level > 0) {
 3746		/* It's a non-leaf; operate on keys and ptrs. */
3747 union xfs_btree_key *lkp;
3748 union xfs_btree_ptr *lpp;
3749
3750 lkp = xfs_btree_key_addr(cur, ptr + 1, block);
3751 lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
3752
3753#ifdef DEBUG
3754 for (i = 0; i < numrecs - ptr; i++) {
3755 error = xfs_btree_check_ptr(cur, lpp, i, level);
3756 if (error)
3757 goto error0;
3758 }
3759#endif
3760
3761 if (ptr < numrecs) {
3762 xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
3763 xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
3764 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
3765 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
3766 }
b194c7d8
BN
3767 } else {
 3768		/* It's a leaf; operate on records. */
3769 if (ptr < numrecs) {
3770 xfs_btree_shift_recs(cur,
3771 xfs_btree_rec_addr(cur, ptr + 1, block),
3772 -1, numrecs - ptr);
3773 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
3774 }
b194c7d8
BN
3775 }
3776
3777 /*
3778 * Decrement and log the number of entries in the block.
3779 */
3780 xfs_btree_set_numrecs(block, --numrecs);
3781 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
3782
3783 /*
3784 * If we are tracking the last record in the tree and
3785 * we are at the far right edge of the tree, update it.
3786 */
3787 if (xfs_btree_is_lastrec(cur, block, level)) {
3788 cur->bc_ops->update_lastrec(cur, block, NULL,
3789 ptr, LASTREC_DELREC);
3790 }
3791
3792 /*
3793 * We're at the root level. First, shrink the root block in-memory.
3794 * Try to get rid of the next level down. If we can't then there's
3795 * nothing left to do.
3796 */
3797 if (level == cur->bc_nlevels - 1) {
3798 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
3799 xfs_iroot_realloc(cur->bc_private.b.ip, -1,
3800 cur->bc_private.b.whichfork);
3801
3802 error = xfs_btree_kill_iroot(cur);
3803 if (error)
3804 goto error0;
3805
3806 error = xfs_btree_dec_cursor(cur, level, stat);
3807 if (error)
3808 goto error0;
3809 *stat = 1;
3810 return 0;
3811 }
3812
3813 /*
3814 * If this is the root level, and there's only one entry left,
3815 * and it's NOT the leaf level, then we can get rid of this
3816 * level.
3817 */
3818 if (numrecs == 1 && level > 0) {
3819 union xfs_btree_ptr *pp;
3820 /*
3821 * pp is still set to the first pointer in the block.
3822 * Make it the new root of the btree.
3823 */
3824 pp = xfs_btree_ptr_addr(cur, 1, block);
56b2de80 3825 error = xfs_btree_kill_root(cur, bp, level, pp);
b194c7d8
BN
3826 if (error)
3827 goto error0;
3828 } else if (level > 0) {
3829 error = xfs_btree_dec_cursor(cur, level, stat);
3830 if (error)
3831 goto error0;
3832 }
3833 *stat = 1;
3834 return 0;
3835 }
3836
3837 /*
3838 * If we deleted the leftmost entry in the block, update the
3839 * key values above us in the tree.
3840 */
a3c9cb10 3841 if (xfs_btree_needs_key_update(cur, ptr)) {
64dbe047 3842 error = xfs_btree_update_keys(cur, level);
b194c7d8
BN
3843 if (error)
3844 goto error0;
3845 }
3846
3847 /*
3848 * If the number of records remaining in the block is at least
3849 * the minimum, we're done.
3850 */
3851 if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
3852 error = xfs_btree_dec_cursor(cur, level, stat);
3853 if (error)
3854 goto error0;
3855 return 0;
3856 }
3857
3858 /*
3859 * Otherwise, we have to move some records around to keep the
3860 * tree balanced. Look at the left and right sibling blocks to
3861 * see if we can re-balance by moving only one record.
3862 */
3863 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
3864 xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
3865
3866 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
3867 /*
 3868		 * The root has only one child; we need a chance to copy its
 3869		 * contents into the root and delete it.  Can't go up to the
 3870		 * next level - there's nothing to delete there.
3871 */
3872 if (xfs_btree_ptr_is_null(cur, &rptr) &&
3873 xfs_btree_ptr_is_null(cur, &lptr) &&
3874 level == cur->bc_nlevels - 2) {
3875 error = xfs_btree_kill_iroot(cur);
3876 if (!error)
3877 error = xfs_btree_dec_cursor(cur, level, stat);
3878 if (error)
3879 goto error0;
3880 return 0;
3881 }
3882 }
3883
3884 ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
3885 !xfs_btree_ptr_is_null(cur, &lptr));
3886
3887 /*
3888 * Duplicate the cursor so our btree manipulations here won't
3889 * disrupt the next level up.
3890 */
3891 error = xfs_btree_dup_cursor(cur, &tcur);
3892 if (error)
3893 goto error0;
3894
3895 /*
3896 * If there's a right sibling, see if it's ok to shift an entry
3897 * out of it.
3898 */
3899 if (!xfs_btree_ptr_is_null(cur, &rptr)) {
3900 /*
3901 * Move the temp cursor to the last entry in the next block.
3902 * Actually any entry but the first would suffice.
3903 */
3904 i = xfs_btree_lastrec(tcur, level);
19ebedcf 3905 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3906
3907 error = xfs_btree_increment(tcur, level, &i);
3908 if (error)
3909 goto error0;
19ebedcf 3910 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3911
3912 i = xfs_btree_lastrec(tcur, level);
19ebedcf 3913 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3914
3915 /* Grab a pointer to the block. */
3916 right = xfs_btree_get_block(tcur, level, &rbp);
3917#ifdef DEBUG
3918 error = xfs_btree_check_block(tcur, right, level, rbp);
3919 if (error)
3920 goto error0;
3921#endif
3922 /* Grab the current block number, for future use. */
3923 xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);
3924
3925 /*
3926 * If right block is full enough so that removing one entry
3927 * won't make it too empty, and left-shifting an entry out
3928 * of right to us works, we're done.
3929 */
3930 if (xfs_btree_get_numrecs(right) - 1 >=
3931 cur->bc_ops->get_minrecs(tcur, level)) {
3932 error = xfs_btree_lshift(tcur, level, &i);
3933 if (error)
3934 goto error0;
3935 if (i) {
3936 ASSERT(xfs_btree_get_numrecs(block) >=
3937 cur->bc_ops->get_minrecs(tcur, level));
3938
3939 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
3940 tcur = NULL;
3941
3942 error = xfs_btree_dec_cursor(cur, level, stat);
3943 if (error)
3944 goto error0;
3945 return 0;
3946 }
3947 }
3948
3949 /*
3950 * Otherwise, grab the number of records in right for
3951 * future reference, and fix up the temp cursor to point
3952 * to our block again (last record).
3953 */
3954 rrecs = xfs_btree_get_numrecs(right);
3955 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
3956 i = xfs_btree_firstrec(tcur, level);
19ebedcf 3957 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3958
3959 error = xfs_btree_decrement(tcur, level, &i);
3960 if (error)
3961 goto error0;
19ebedcf 3962 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3963 }
3964 }
3965
3966 /*
3967 * If there's a left sibling, see if it's ok to shift an entry
3968 * out of it.
3969 */
3970 if (!xfs_btree_ptr_is_null(cur, &lptr)) {
3971 /*
3972 * Move the temp cursor to the first entry in the
3973 * previous block.
3974 */
3975 i = xfs_btree_firstrec(tcur, level);
19ebedcf 3976 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3977
3978 error = xfs_btree_decrement(tcur, level, &i);
3979 if (error)
3980 goto error0;
3981 i = xfs_btree_firstrec(tcur, level);
19ebedcf 3982 XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
b194c7d8
BN
3983
3984 /* Grab a pointer to the block. */
3985 left = xfs_btree_get_block(tcur, level, &lbp);
3986#ifdef DEBUG
3987 error = xfs_btree_check_block(cur, left, level, lbp);
3988 if (error)
3989 goto error0;
3990#endif
3991 /* Grab the current block number, for future use. */
3992 xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);
3993
3994 /*
3995 * If left block is full enough so that removing one entry
3996 * won't make it too empty, and right-shifting an entry out
3997 * of left to us works, we're done.
3998 */
3999 if (xfs_btree_get_numrecs(left) - 1 >=
4000 cur->bc_ops->get_minrecs(tcur, level)) {
4001 error = xfs_btree_rshift(tcur, level, &i);
4002 if (error)
4003 goto error0;
4004 if (i) {
4005 ASSERT(xfs_btree_get_numrecs(block) >=
4006 cur->bc_ops->get_minrecs(tcur, level));
4007 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
4008 tcur = NULL;
4009 if (level == 0)
4010 cur->bc_ptrs[0]++;
4011 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
4012 *stat = 1;
4013 return 0;
4014 }
4015 }
4016
4017 /*
 4018	 * Otherwise, grab the number of records in left for
4019 * future reference.
4020 */
4021 lrecs = xfs_btree_get_numrecs(left);
4022 }
4023
4024 /* Delete the temp cursor, we're done with it. */
4025 xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
4026 tcur = NULL;
4027
4028 /* If here, we need to do a join to keep the tree balanced. */
4029 ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
4030
4031 if (!xfs_btree_ptr_is_null(cur, &lptr) &&
4032 lrecs + xfs_btree_get_numrecs(block) <=
4033 cur->bc_ops->get_maxrecs(cur, level)) {
4034 /*
4035 * Set "right" to be the starting block,
4036 * "left" to be the left neighbor.
4037 */
4038 rptr = cptr;
4039 right = block;
4040 rbp = bp;
ff105f75 4041 error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
b194c7d8
BN
4042 if (error)
4043 goto error0;
4044
4045 /*
4046 * If that won't work, see if we can join with the right neighbor block.
4047 */
4048 } else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
4049 rrecs + xfs_btree_get_numrecs(block) <=
4050 cur->bc_ops->get_maxrecs(cur, level)) {
4051 /*
4052 * Set "left" to be the starting block,
4053 * "right" to be the right neighbor.
4054 */
4055 lptr = cptr;
4056 left = block;
4057 lbp = bp;
ff105f75 4058 error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
b194c7d8
BN
4059 if (error)
4060 goto error0;
4061
4062 /*
4063 * Otherwise, we can't fix the imbalance.
4064 * Just return. This is probably a logic error, but it's not fatal.
4065 */
4066 } else {
4067 error = xfs_btree_dec_cursor(cur, level, stat);
4068 if (error)
4069 goto error0;
4070 return 0;
4071 }
4072
4073 rrecs = xfs_btree_get_numrecs(right);
4074 lrecs = xfs_btree_get_numrecs(left);
4075
4076 /*
4077 * We're now going to join "left" and "right" by moving all the stuff
4078 * in "right" to "left" and deleting "right".
4079 */
4080 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
4081 if (level > 0) {
4082 /* It's a non-leaf. Move keys and pointers. */
4083 union xfs_btree_key *lkp; /* left btree key */
4084 union xfs_btree_ptr *lpp; /* left address pointer */
4085 union xfs_btree_key *rkp; /* right btree key */
4086 union xfs_btree_ptr *rpp; /* right address pointer */
4087
4088 lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
4089 lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
4090 rkp = xfs_btree_key_addr(cur, 1, right);
4091 rpp = xfs_btree_ptr_addr(cur, 1, right);
4092#ifdef DEBUG
4093 for (i = 1; i < rrecs; i++) {
4094 error = xfs_btree_check_ptr(cur, rpp, i, level);
4095 if (error)
4096 goto error0;
4097 }
4098#endif
4099 xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
4100 xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
4101
4102 xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
4103 xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
4104 } else {
4105 /* It's a leaf. Move records. */
4106 union xfs_btree_rec *lrp; /* left record pointer */
4107 union xfs_btree_rec *rrp; /* right record pointer */
4108
4109 lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
4110 rrp = xfs_btree_rec_addr(cur, 1, right);
4111
4112 xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
4113 xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
4114 }
4115
4116 XFS_BTREE_STATS_INC(cur, join);
4117
4118 /*
56b2de80 4119 * Fix up the number of records and right block pointer in the
b194c7d8
BN
4120 * surviving block, and log it.
4121 */
4122 xfs_btree_set_numrecs(left, lrecs + rrecs);
 4123	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
4124 xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4125 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
4126
4127 /* If there is a right sibling, point it to the remaining block. */
4128 xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
4129 if (!xfs_btree_ptr_is_null(cur, &cptr)) {
ff105f75 4130 error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
b194c7d8
BN
4131 if (error)
4132 goto error0;
4133 xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
4134 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
4135 }
4136
4137 /* Free the deleted block. */
c261f8c0 4138 error = xfs_btree_free_block(cur, rbp);
b194c7d8
BN
4139 if (error)
4140 goto error0;
b194c7d8
BN
4141
4142 /*
4143 * If we joined with the left neighbor, set the buffer in the
4144 * cursor to the left block, and fix up the index.
4145 */
4146 if (bp != lbp) {
4147 cur->bc_bufs[level] = lbp;
4148 cur->bc_ptrs[level] += lrecs;
4149 cur->bc_ra[level] = 0;
4150 }
4151 /*
4152 * If we joined with the right neighbor and there's a level above
4153 * us, increment the cursor at that level.
4154 */
4155 else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
4156 (level + 1 < cur->bc_nlevels)) {
4157 error = xfs_btree_increment(cur, level + 1, &i);
4158 if (error)
4159 goto error0;
4160 }
4161
4162 /*
4163 * Readjust the ptr at this level if it's not a leaf, since it's
4164 * still pointing at the deletion point, which makes the cursor
4165 * inconsistent. If this makes the ptr 0, the caller fixes it up.
4166 * We can't use decrement because it would change the next level up.
4167 */
4168 if (level > 0)
4169 cur->bc_ptrs[level]--;
4170
13e831e0
DW
4171 /*
4172 * We combined blocks, so we have to update the parent keys if the
4173 * btree supports overlapped intervals. However, bc_ptrs[level + 1]
4174 * points to the old block so that the caller knows which record to
4175 * delete. Therefore, the caller must be savvy enough to call updkeys
4176 * for us if we return stat == 2. The other exit points from this
4177 * function don't require deletions further up the tree, so they can
4178 * call updkeys directly.
4179 */
4180
b194c7d8
BN
4181 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
4182 /* Return value means the next level up has something to do. */
4183 *stat = 2;
4184 return 0;
4185
4186error0:
4187 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
4188 if (tcur)
4189 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
4190 return error;
4191}
4192
4193/*
4194 * Delete the record pointed to by cur.
4195 * The cursor refers to the place where the record was (could be inserted)
4196 * when the operation returns.
4197 */
4198int /* error */
4199xfs_btree_delete(
4200 struct xfs_btree_cur *cur,
4201 int *stat) /* success/failure */
4202{
4203 int error; /* error return value */
4204 int level;
4205 int i;
13e831e0 4206 bool joined = false;
b194c7d8
BN
4207
4208 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
4209
4210 /*
4211 * Go up the tree, starting at leaf level.
4212 *
4213 * If 2 is returned then a join was done; go to the next level.
4214 * Otherwise we are done.
4215 */
4216 for (level = 0, i = 2; i == 2; level++) {
4217 error = xfs_btree_delrec(cur, level, &i);
4218 if (error)
4219 goto error0;
13e831e0
DW
4220 if (i == 2)
4221 joined = true;
4222 }
4223
4224 /*
4225 * If we combined blocks as part of deleting the record, delrec won't
4226 * have updated the parent high keys so we have to do that here.
4227 */
4228 if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
4229 error = xfs_btree_updkeys_force(cur, 0);
4230 if (error)
4231 goto error0;
b194c7d8
BN
4232 }
4233
4234 if (i == 0) {
4235 for (level = 1; level < cur->bc_nlevels; level++) {
4236 if (cur->bc_ptrs[level] == 0) {
4237 error = xfs_btree_decrement(cur, level, &i);
4238 if (error)
4239 goto error0;
4240 break;
4241 }
4242 }
4243 }
4244
4245 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
4246 *stat = i;
4247 return 0;
4248error0:
4249 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
4250 return error;
4251}
4252
4253/*
4254 * Get the data from the pointed-to record.
4255 */
4256int /* error */
4257xfs_btree_get_rec(
4258 struct xfs_btree_cur *cur, /* btree cursor */
4259 union xfs_btree_rec **recp, /* output: btree record */
4260 int *stat) /* output: success/failure */
4261{
4262 struct xfs_btree_block *block; /* btree block */
4263 struct xfs_buf *bp; /* buffer pointer */
4264 int ptr; /* record number */
4265#ifdef DEBUG
4266 int error; /* error return value */
4267#endif
4268
4269 ptr = cur->bc_ptrs[0];
4270 block = xfs_btree_get_block(cur, 0, &bp);
4271
4272#ifdef DEBUG
4273 error = xfs_btree_check_block(cur, block, 0, bp);
4274 if (error)
4275 return error;
4276#endif
4277
4278 /*
4279 * Off the right end or left end, return failure.
4280 */
4281 if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
4282 *stat = 0;
4283 return 0;
4284 }
4285
4286 /*
4287 * Point to the record and extract its data.
4288 */
4289 *recp = xfs_btree_rec_addr(cur, ptr, block);
4290 *stat = 1;
4291 return 0;
4292}
9c6ebc42 4293
f31736bf
DW
4294/* Visit a block in a btree. */
4295STATIC int
4296xfs_btree_visit_block(
4297 struct xfs_btree_cur *cur,
4298 int level,
4299 xfs_btree_visit_blocks_fn fn,
4300 void *data)
4301{
4302 struct xfs_btree_block *block;
4303 struct xfs_buf *bp;
4304 union xfs_btree_ptr rptr;
4305 int error;
4306
4307 /* do right sibling readahead */
4308 xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
4309 block = xfs_btree_get_block(cur, level, &bp);
4310
4311 /* process the block */
4312 error = fn(cur, level, data);
4313 if (error)
4314 return error;
4315
 4316	/* now read the right-hand sibling block for the next iteration */
4317 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
4318 if (xfs_btree_ptr_is_null(cur, &rptr))
4319 return -ENOENT;
4320
4321 return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
4322}
4323
4324
4325/* Visit every block in a btree. */
4326int
4327xfs_btree_visit_blocks(
4328 struct xfs_btree_cur *cur,
4329 xfs_btree_visit_blocks_fn fn,
4330 void *data)
4331{
4332 union xfs_btree_ptr lptr;
4333 int level;
4334 struct xfs_btree_block *block = NULL;
4335 int error = 0;
4336
4337 cur->bc_ops->init_ptr_from_cur(cur, &lptr);
4338
4339 /* for each level */
4340 for (level = cur->bc_nlevels - 1; level >= 0; level--) {
4341 /* grab the left hand block */
4342 error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
4343 if (error)
4344 return error;
4345
4346 /* readahead the left most block for the next level down */
4347 if (level > 0) {
4348 union xfs_btree_ptr *ptr;
4349
4350 ptr = xfs_btree_ptr_addr(cur, 1, block);
4351 xfs_btree_readahead_ptr(cur, ptr, 1);
4352
4353 /* save for the next iteration of the loop */
4354 lptr = *ptr;
4355 }
4356
4357 /* for each buffer in the level */
4358 do {
4359 error = xfs_btree_visit_block(cur, level, fn, data);
4360 } while (!error);
4361
4362 if (error != -ENOENT)
4363 return error;
4364 }
4365
4366 return 0;
4367}
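/*
 * Usage sketch (illustrative; the helper name is hypothetical): a
 * minimal xfs_btree_visit_blocks_fn callback that counts the blocks in
 * a btree, matching the fn(cur, level, data) call made by
 * xfs_btree_visit_block() above:
 *
 *	STATIC int
 *	xfs_btree_count_block_helper(
 *		struct xfs_btree_cur	*cur,
 *		int			level,
 *		void			*data)
 *	{
 *		(*(__uint64_t *)data)++;
 *		return 0;
 *	}
 *
 * called as xfs_btree_visit_blocks(cur, xfs_btree_count_block_helper,
 * &nblocks).
 */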
4368
9c6ebc42
DC
4369/*
4370 * Change the owner of a btree.
4371 *
4372 * The mechanism we use here is ordered buffer logging. Because we don't know
 4373 * how many buffers we are going to need to modify, we don't really want to
 4374 * have to make transaction reservations for the worst case of every buffer in a
 4375 * full size btree as that may be more space than we can fit in the log....
4376 *
 4377 * We do the btree walk in the most efficient manner possible - we have sibling
4378 * pointers so we can just walk all the blocks on each level from left to right
4379 * in a single pass, and then move to the next level and do the same. We can
4380 * also do readahead on the sibling pointers to get IO moving more quickly,
4381 * though for slow disks this is unlikely to make much difference to performance
4382 * as the amount of CPU work we have to do before moving to the next block is
4383 * relatively small.
4384 *
4385 * For each btree block that we load, modify the owner appropriately, set the
4386 * buffer as an ordered buffer and log it appropriately. We need to ensure that
4387 * we mark the region we change dirty so that if the buffer is relogged in
4388 * a subsequent transaction the changes we make here as an ordered buffer are
4389 * correctly relogged in that transaction. If we are in recovery context, then
4390 * just queue the modified buffer as delayed write buffer so the transaction
4391 * recovery completion writes the changes to disk.
4392 */
f31736bf
DW
4393struct xfs_btree_block_change_owner_info {
4394 __uint64_t new_owner;
4395 struct list_head *buffer_list;
4396};
4397
9c6ebc42
DC
4398static int
4399xfs_btree_block_change_owner(
4400 struct xfs_btree_cur *cur,
4401 int level,
f31736bf 4402 void *data)
9c6ebc42 4403{
f31736bf 4404 struct xfs_btree_block_change_owner_info *bbcoi = data;
9c6ebc42
DC
4405 struct xfs_btree_block *block;
4406 struct xfs_buf *bp;
9c6ebc42
DC
4407
4408 /* modify the owner */
4409 block = xfs_btree_get_block(cur, level, &bp);
4410 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
f31736bf 4411 block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
9c6ebc42 4412 else
f31736bf 4413 block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
9c6ebc42
DC
4414
4415 /*
4416 * If the block is a root block hosted in an inode, we might not have a
4417 * buffer pointer here and we shouldn't attempt to log the change as the
4418 * information is already held in the inode and discarded when the root
4419 * block is formatted into the on-disk inode fork. We still change it,
4420 * though, so everything is consistent in memory.
4421 */
4422 if (bp) {
4423 if (cur->bc_tp) {
4424 xfs_trans_ordered_buf(cur->bc_tp, bp);
4425 xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
4426 } else {
f31736bf 4427 xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
9c6ebc42
DC
4428 }
4429 } else {
4430 ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
4431 ASSERT(level == cur->bc_nlevels - 1);
4432 }
4433
f31736bf 4434 return 0;
9c6ebc42
DC
4435}
4436
4437int
4438xfs_btree_change_owner(
4439 struct xfs_btree_cur *cur,
4440 __uint64_t new_owner,
4441 struct list_head *buffer_list)
4442{
f31736bf 4443 struct xfs_btree_block_change_owner_info bbcoi;
9c6ebc42 4444
f31736bf
DW
4445 bbcoi.new_owner = new_owner;
4446 bbcoi.buffer_list = buffer_list;
9c6ebc42 4447
f31736bf
DW
4448 return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
4449 &bbcoi);
9c6ebc42 4450}
dbca0167
DW
4451
4452/**
4453 * xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
4454 * btree block
4455 *
4456 * @bp: buffer containing the btree block
4459 */
4460bool
4461xfs_btree_sblock_v5hdr_verify(
4462 struct xfs_buf *bp)
4463{
4464 struct xfs_mount *mp = bp->b_target->bt_mount;
4465 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4466 struct xfs_perag *pag = bp->b_pag;
4467
4468 if (!xfs_sb_version_hascrc(&mp->m_sb))
4469 return false;
4470 if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
4471 return false;
4472 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
4473 return false;
4474 if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
4475 return false;
4476 return true;
4477}
4478
4479/**
4480 * xfs_btree_sblock_verify() -- verify a short-format btree block
4481 *
4482 * @bp: buffer containing the btree block
4483 * @max_recs: maximum records allowed in this btree node
4484 */
4485bool
4486xfs_btree_sblock_verify(
4487 struct xfs_buf *bp,
4488 unsigned int max_recs)
4489{
4490 struct xfs_mount *mp = bp->b_target->bt_mount;
4491 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
4492
4493 /* numrecs verification */
4494 if (be16_to_cpu(block->bb_numrecs) > max_recs)
4495 return false;
4496
4497 /* sibling pointer verification */
4498 if (!block->bb_u.s.bb_leftsib ||
4499 (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
4500 block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
4501 return false;
4502 if (!block->bb_u.s.bb_rightsib ||
4503 (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
4504 block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
4505 return false;
4506
4507 return true;
4508}
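/*
 * Usage sketch (illustrative): a per-btree buffer verifier would
 * typically call xfs_btree_sblock_v5hdr_verify(bp) on CRC-enabled
 * filesystems and then xfs_btree_sblock_verify(bp, max_recs), passing
 * the maxrecs limit appropriate to that btree type and block level.
 */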
730e2a19
DW
4509
4510/*
4511 * Calculate the number of btree levels needed to store a given number of
4512 * records in a short-format btree.
4513 */
4514uint
4515xfs_btree_compute_maxlevels(
4516 struct xfs_mount *mp,
4517 uint *limits,
4518 unsigned long len)
4519{
4520 uint level;
4521 unsigned long maxblocks;
4522
4523 maxblocks = (len + limits[0] - 1) / limits[0];
4524 for (level = 1; maxblocks > 1; level++)
4525 maxblocks = (maxblocks + limits[1] - 1) / limits[1];
4526 return level;
4527}
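/*
 * Worked example (illustrative numbers): with limits[0] = 125 records
 * per leaf block and limits[1] = 250 keys/ptrs per node block, storing
 * len = 1000000 records takes ceil(1000000 / 125) = 8000 leaves, then
 * ceil(8000 / 250) = 32 nodes, then ceil(32 / 250) = 1 node, so the
 * loop above returns 3 levels.
 */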
5d3b7fe1
DW
4528
4529/*
4530 * Query a regular btree for all records overlapping a given interval.
4531 * Start with a LE lookup of the key of low_rec and return all records
4532 * until we find a record with a key greater than the key of high_rec.
4533 */
4534STATIC int
4535xfs_btree_simple_query_range(
4536 struct xfs_btree_cur *cur,
4537 union xfs_btree_key *low_key,
4538 union xfs_btree_key *high_key,
4539 xfs_btree_query_range_fn fn,
4540 void *priv)
4541{
4542 union xfs_btree_rec *recp;
4543 union xfs_btree_key rec_key;
4544 __int64_t diff;
4545 int stat;
4546 bool firstrec = true;
4547 int error;
4548
4549 ASSERT(cur->bc_ops->init_high_key_from_rec);
4550 ASSERT(cur->bc_ops->diff_two_keys);
4551
4552 /*
4553 * Find the leftmost record. The btree cursor must be set
4554 * to the low record used to generate low_key.
4555 */
4556 stat = 0;
4557 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
4558 if (error)
4559 goto out;
4560
a3654981
DW
4561 /* Nothing? See if there's anything to the right. */
4562 if (!stat) {
4563 error = xfs_btree_increment(cur, 0, &stat);
4564 if (error)
4565 goto out;
4566 }
4567
5d3b7fe1
DW
4568 while (stat) {
4569 /* Find the record. */
4570 error = xfs_btree_get_rec(cur, &recp, &stat);
4571 if (error || !stat)
4572 break;
5d3b7fe1
DW
4573
4574 /* Skip if high_key(rec) < low_key. */
4575 if (firstrec) {
94a22dec 4576 cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
5d3b7fe1
DW
4577 firstrec = false;
4578 diff = cur->bc_ops->diff_two_keys(cur, low_key,
4579 &rec_key);
4580 if (diff > 0)
4581 goto advloop;
4582 }
4583
4584 /* Stop if high_key < low_key(rec). */
94a22dec 4585 cur->bc_ops->init_key_from_rec(&rec_key, recp);
5d3b7fe1
DW
4586 diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
4587 if (diff > 0)
4588 break;
4589
4590 /* Callback */
4591 error = fn(cur, recp, priv);
4592 if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
4593 break;
4594
4595advloop:
4596 /* Move on to the next record. */
4597 error = xfs_btree_increment(cur, 0, &stat);
4598 if (error)
4599 break;
4600 }
4601
4602out:
4603 return error;
4604}
4605
4606/*
4607 * Query an overlapped interval btree for all records overlapping a given
4608 * interval. This function roughly follows the algorithm given in
4609 * "Interval Trees" of _Introduction to Algorithms_, which is section
4610 * 14.3 in the 2nd and 3rd editions.
4611 *
4612 * First, generate keys for the low and high records passed in.
4613 *
4614 * For any leaf node, generate the high and low keys for the record.
4615 * If the record keys overlap with the query low/high keys, pass the
4616 * record to the function iterator.
4617 *
4618 * For any internal node, compare the low and high keys of each
4619 * pointer against the query low/high keys. If there's an overlap,
4620 * follow the pointer.
4621 *
4622 * As an optimization, we stop scanning a block when we find a low key
4623 * that is greater than the query's high key.
4624 */
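/*
 * In other words, a record or subtree covering keys [lo, hi] overlaps
 * the query range [qlo, qhi] iff hi >= qlo and qhi >= lo.  For example,
 * [5, 10] overlaps a query of [8, 20] but not a query of [12, 20], and
 * a block scan can stop at the first entry whose low key exceeds qhi.
 */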
4625STATIC int
4626xfs_btree_overlapped_query_range(
4627 struct xfs_btree_cur *cur,
4628 union xfs_btree_key *low_key,
4629 union xfs_btree_key *high_key,
4630 xfs_btree_query_range_fn fn,
4631 void *priv)
4632{
4633 union xfs_btree_ptr ptr;
4634 union xfs_btree_ptr *pp;
4635 union xfs_btree_key rec_key;
4636 union xfs_btree_key rec_hkey;
4637 union xfs_btree_key *lkp;
4638 union xfs_btree_key *hkp;
4639 union xfs_btree_rec *recp;
4640 struct xfs_btree_block *block;
4641 __int64_t ldiff;
4642 __int64_t hdiff;
4643 int level;
4644 struct xfs_buf *bp;
4645 int i;
4646 int error;
4647
4648 /* Load the root of the btree. */
4649 level = cur->bc_nlevels - 1;
4650 cur->bc_ops->init_ptr_from_cur(cur, &ptr);
4651 error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
4652 if (error)
4653 return error;
4654 xfs_btree_get_block(cur, level, &bp);
4655 trace_xfs_btree_overlapped_query_range(cur, level, bp);
4656#ifdef DEBUG
4657 error = xfs_btree_check_block(cur, block, level, bp);
4658 if (error)
4659 goto out;
4660#endif
4661 cur->bc_ptrs[level] = 1;
4662
4663 while (level < cur->bc_nlevels) {
4664 block = xfs_btree_get_block(cur, level, &bp);
4665
4666 /* End of node, pop back towards the root. */
4667 if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
4668pop_up:
4669 if (level < cur->bc_nlevels - 1)
4670 cur->bc_ptrs[level + 1]++;
4671 level++;
4672 continue;
4673 }
4674
4675 if (level == 0) {
4676 /* Handle a leaf node. */
4677 recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
4678
4679 cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
4680 ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
4681 low_key);
4682
4683 cur->bc_ops->init_key_from_rec(&rec_key, recp);
4684 hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
4685 &rec_key);
4686
4687 /*
4688 * If (record's high key >= query's low key) and
4689 * (query's high key >= record's low key), then
4690 * this record overlaps the query range; callback.
4691 */
4692 if (ldiff >= 0 && hdiff >= 0) {
4693 error = fn(cur, recp, priv);
4694 if (error < 0 ||
4695 error == XFS_BTREE_QUERY_RANGE_ABORT)
4696 break;
4697 } else if (hdiff < 0) {
4698 /* Record is larger than high key; pop. */
4699 goto pop_up;
4700 }
4701 cur->bc_ptrs[level]++;
4702 continue;
4703 }
4704
4705 /* Handle an internal node. */
4706 lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
4707 hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
4708 pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
4709
4710 ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
4711 hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);
4712
4713 /*
4714 * If (pointer's high key >= query's low key) and
4715 * (query's high key >= pointer's low key), then
4716 * this record overlaps the query range; follow pointer.
4717 */
4718 if (ldiff >= 0 && hdiff >= 0) {
4719 level--;
4720 error = xfs_btree_lookup_get_block(cur, level, pp,
4721 &block);
4722 if (error)
4723 goto out;
4724 xfs_btree_get_block(cur, level, &bp);
4725 trace_xfs_btree_overlapped_query_range(cur, level, bp);
4726#ifdef DEBUG
4727 error = xfs_btree_check_block(cur, block, level, bp);
4728 if (error)
4729 goto out;
4730#endif
4731 cur->bc_ptrs[level] = 1;
4732 continue;
4733 } else if (hdiff < 0) {
4734 /* The low key is larger than the upper range; pop. */
4735 goto pop_up;
4736 }
4737 cur->bc_ptrs[level]++;
4738 }
4739
4740out:
4741 /*
4742 * If we don't end this function with the cursor pointing at a record
4743 * block, a subsequent non-error cursor deletion will not release
4744 * node-level buffers, causing a buffer leak. This is quite possible
4745 * with a zero-results range query, so release the buffers if we
4746 * failed to return any results.
4747 */
4748 if (cur->bc_bufs[0] == NULL) {
4749 for (i = 0; i < cur->bc_nlevels; i++) {
4750 if (cur->bc_bufs[i]) {
4751 xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
4752 cur->bc_bufs[i] = NULL;
4753 cur->bc_ptrs[i] = 0;
4754 cur->bc_ra[i] = 0;
4755 }
4756 }
4757 }
4758
4759 return error;
4760}
4761
4762/*
4763 * Query a btree for all records overlapping a given interval of keys. The
4764 * supplied function will be called with each record found; return one of the
4765 * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
4766 * code. This function returns XFS_BTREE_QUERY_RANGE_ABORT, zero, or a
4767 * negative error code.
4768 */
4769int
4770xfs_btree_query_range(
4771 struct xfs_btree_cur *cur,
4772 union xfs_btree_irec *low_rec,
4773 union xfs_btree_irec *high_rec,
4774 xfs_btree_query_range_fn fn,
4775 void *priv)
4776{
4777 union xfs_btree_rec rec;
4778 union xfs_btree_key low_key;
4779 union xfs_btree_key high_key;
4780
4781 /* Find the keys of both ends of the interval. */
4782 cur->bc_rec = *high_rec;
4783 cur->bc_ops->init_rec_from_cur(cur, &rec);
4784 cur->bc_ops->init_key_from_rec(&high_key, &rec);
4785
4786 cur->bc_rec = *low_rec;
4787 cur->bc_ops->init_rec_from_cur(cur, &rec);
4788 cur->bc_ops->init_key_from_rec(&low_key, &rec);
4789
4790 /* Enforce low key < high key. */
4791 if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
4792 return -EINVAL;
4793
4794 if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
4795 return xfs_btree_simple_query_range(cur, &low_key,
4796 &high_key, fn, priv);
4797 return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
4798 fn, priv);
4799}
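/*
 * Usage sketch (illustrative; the helper name is hypothetical): a
 * minimal xfs_btree_query_range_fn callback that counts the records
 * found in an interval, matching the fn(cur, recp, priv) calls above:
 *
 *	STATIC int
 *	xfs_btree_count_range_helper(
 *		struct xfs_btree_cur	*cur,
 *		union xfs_btree_rec	*rec,
 *		void			*priv)
 *	{
 *		(*(__uint64_t *)priv)++;
 *		return XFS_BTREE_QUERY_RANGE_CONTINUE;
 *	}
 *
 * called as xfs_btree_query_range(cur, &low_rec, &high_rec,
 * xfs_btree_count_range_helper, &count) with low_rec/high_rec describing
 * the interval of interest.
 */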