// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <libxfs.h>
#include "bulkload.h"

int bload_leaf_slack = -1;
int bload_node_slack = -1;

/* Initialize accounting resources for staging a new AG btree. */
void
bulkload_init_ag(
	struct bulkload			*bkl,
	struct repair_ctx		*sc,
	const struct xfs_owner_info	*oinfo)
{
	memset(bkl, 0, sizeof(struct bulkload));
	bkl->sc = sc;
	bkl->oinfo = *oinfo; /* structure copy */
	INIT_LIST_HEAD(&bkl->resv_list);
}

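/*
 * Example (sketch): a phase 5 caller might stage an AG btree rebuild
 * roughly like this.  The bt_rebuild structure and free_space value are
 * assumptions borrowed from caller context, not defined in this file;
 * XFS_RMAP_OINFO_INOBT is the libxfs owner-info constant for inode
 * btrees.
 *
 *	bulkload_init_ag(&btr->newbt, sc, &XFS_RMAP_OINFO_INOBT);
 *	bulkload_estimate_ag_slack(sc, &btr->bload, free_space);
 */
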
/* Designate specific blocks to be used to build our new btree. */
int
bulkload_add_blocks(
	struct bulkload		*bkl,
	xfs_fsblock_t		fsbno,
	xfs_extlen_t		len)
{
	struct bulkload_resv	*resv;

	resv = kmem_alloc(sizeof(struct bulkload_resv), KM_MAYFAIL);
	if (!resv)
		return ENOMEM;

	INIT_LIST_HEAD(&resv->list);
	resv->fsbno = fsbno;
	resv->len = len;
	resv->used = 0;
	list_add_tail(&resv->list, &bkl->resv_list);
	bkl->nr_reserved += len;

	return 0;
}

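/*
 * Example (sketch): a caller that has located a free extent might
 * reserve it for the new btree like this.  fsbno, len, and the
 * do_error() failure path are illustrative assumptions, not code from
 * this file.
 *
 *	error = bulkload_add_blocks(&btr->newbt, fsbno, len);
 *	if (error)
 *		do_error(_("could not reserve blocks for new btree\n"));
 */
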
/*
 * Free all the accounting info we set aside for a new btree.  Note that
 * the reserved disk space itself is not released here, and the error
 * code is currently unused.
 */
void
bulkload_destroy(
	struct bulkload		*bkl,
	int			error)
{
	struct bulkload_resv	*resv, *n;

	list_for_each_entry_safe(resv, n, &bkl->resv_list, list) {
		list_del(&resv->list);
		kmem_free(resv);
	}
}

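/*
 * Example (sketch, assuming the same bt_rebuild caller structure as
 * above): once the new btree has been committed, release the
 * bookkeeping.  The error code is passed through for symmetry even
 * though this version ignores it.
 *
 *	bulkload_destroy(&btr->newbt, error);
 */
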
/* Feed one of the reserved btree blocks to the bulk loader. */
int
bulkload_claim_block(
	struct xfs_btree_cur	*cur,
	struct bulkload		*bkl,
	union xfs_btree_ptr	*ptr)
{
	struct bulkload_resv	*resv;
	xfs_fsblock_t		fsb;

	/*
	 * The first item in the list should always have a free block unless
	 * we're completely out.
	 */
	resv = list_first_entry(&bkl->resv_list, struct bulkload_resv, list);
	if (resv->used == resv->len)
		return ENOSPC;

	/*
	 * Peel off a block from the start of the reservation.  We allocate
	 * blocks in this order so that they land on disk in increasing
	 * record or key order.  The block reservations tend to end up on
	 * the list in decreasing order, which hopefully results in leaf
	 * blocks ending up together.
	 */
	fsb = resv->fsbno + resv->used;
	resv->used++;

	/* If we used all the blocks in this reservation, move it to the end. */
	if (resv->used == resv->len)
		list_move_tail(&resv->list, &bkl->resv_list);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(fsb);
	else
		ptr->s = cpu_to_be32(XFS_FSB_TO_AGBNO(cur->bc_mp, fsb));
	return 0;
}
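
/*
 * Example (sketch): this function is designed to sit behind the
 * ->claim_block hook of struct xfs_btree_bload.  A caller-side wrapper
 * might look like the following; bt_rebuild is an assumed caller
 * structure.
 *
 *	static int
 *	claim_block(
 *		struct xfs_btree_cur	*cur,
 *		union xfs_btree_ptr	*ptr,
 *		void			*priv)
 *	{
 *		struct bt_rebuild	*btr = priv;
 *
 *		return bulkload_claim_block(cur, &btr->newbt, ptr);
 *	}
 */
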
/*
 * Estimate proper slack values for a btree that's being reloaded.
 *
 * Under most circumstances, we'll take whatever default loading value the
 * btree bulk loading code calculates for us.  However, there are some
 * exceptions to this rule:
 *
 * (1) If someone turned one of the debug knobs (bload_leaf_slack or
 *     bload_node_slack).
 * (2) The AG has less than ~9% space free.
 *
 * Note that we actually use 3/32 (9.375%) for the comparison to avoid
 * division.
 */
void
bulkload_estimate_ag_slack(
	struct repair_ctx	*sc,
	struct xfs_btree_bload	*bload,
	unsigned int		free)
{
	/*
	 * The global values are set to -1 (i.e. take the bload defaults)
	 * unless someone has set them otherwise, so we just pull the values
	 * here.
	 */
	bload->leaf_slack = bload_leaf_slack;
	bload->node_slack = bload_node_slack;

	/* No further changes if at least 3/32 of the AG's space is free. */
	if (free >= ((sc->mp->m_sb.sb_agblocks * 3) >> 5))
		return;

	/*
	 * We're low on space; load the btrees as tightly as possible.  Leave
	 * a couple of open slots in each btree block so that we don't end up
	 * splitting the btrees like crazy right after mount.
	 */
	if (bload->leaf_slack < 0)
		bload->leaf_slack = 2;
	if (bload->node_slack < 0)
		bload->node_slack = 2;
}
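
/*
 * Worked example (sketch): for an AG of 1048576 blocks, the cutoff above
 * is (1048576 * 3) >> 5 = 98304 blocks, i.e. 9.375% of the AG.  A caller
 * would typically apply the slack estimate just before computing the
 * bload geometry; the negated libxfs call below is an assumption about
 * caller convention, not code from this file.
 *
 *	bulkload_estimate_ag_slack(sc, &btr->bload, free_space);
 *	error = -libxfs_btree_bload_compute_geometry(cur, &btr->bload,
 *			nr_records);
 */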