// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "btree.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "err_protos.h"
#include "threads.h"
/*
 * The following manages the in-core bitmap of the entire filesystem
 * using extents in a btree.
 *
 * The btree items will point to one of the state values below,
 * rather than storing the value itself in the pointer.
 */
static int states[16] =
	{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
static struct btree_root	**ag_bmap;
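
/*
 * Worked example of the representation: each btree key is the first
 * block of an extent and its value is a pointer into states[]; an
 * extent runs from its own key up to the next key.  A 100-block AG
 * whose first 4 blocks are fs metadata needs only three items:
 *
 *	key   0 -> &states[XR_E_INUSE_FS]	(blocks 0..3)
 *	key   4 -> &states[XR_E_UNKNOWN]	(blocks 4..99)
 *	key 100 -> &states[XR_E_BAD_STATE]	(beyond the AG)
 *
 * Because equal states share a pointer, update_bmap() below can merge
 * and split extents by comparing pointers instead of dereferencing.
 */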
static void
update_bmap(
	struct btree_root	*bmap,
	unsigned long		offset,
	xfs_extlen_t		blen,
	void			*new_state)
{
	unsigned long		end = offset + blen;
	int			*cur_state;
	unsigned long		cur_key;
	int			*next_state;
	unsigned long		next_key;
	int			*prev_state;
	cur_state = btree_find(bmap, offset, &cur_key);
	if (!cur_state)
		return;

	if (offset == cur_key) {
		/* if the start is the same as the "item" extent */
		if (cur_state == new_state)
			return;
		/*
		 * Note: this may be NULL if we are updating the map for
		 * the first block of the filesystem.
		 */
		prev_state = btree_peek_prev(bmap, NULL);

		next_state = btree_peek_next(bmap, &next_key);
		if (next_key > end) {
			/* different end */
			if (new_state == prev_state) {
				/* #1: prev has same state, move offset up */
				btree_update_key(bmap, offset, end);
				return;
			}

			/* #4: insert new extent after, update current value */
			btree_update_value(bmap, offset, new_state);
			btree_insert(bmap, end, cur_state);
			return;
		}
		/* same end (and same start) */
		if (new_state == next_state) {
			/* next has same state */
			if (new_state == prev_state) {
				/* #3: merge prev & next */
				btree_delete(bmap, offset);
				btree_delete(bmap, end);
				return;
			}

			/* #8: merge next */
			btree_update_value(bmap, offset, new_state);
			btree_delete(bmap, end);
			return;
		}
		/* same start, same end, next has different state */
		if (new_state == prev_state) {
			/* #5: prev has same state */
			btree_delete(bmap, offset);
			return;
		}

		/* #6: update value only */
		btree_update_value(bmap, offset, new_state);
		return;
	}
	/* different start, offset is in the middle of "cur" */
	prev_state = btree_peek_prev(bmap, NULL);
	ASSERT(prev_state != NULL);
	if (prev_state == new_state)
		return;
	if (end == cur_key) {
		/* end is at the same point as the current extent */
		if (new_state == cur_state) {
			/* #7: move next extent down */
			btree_update_key(bmap, end, offset);
			return;
		}

		/* #9: different start, same end, add new extent */
		btree_insert(bmap, offset, new_state);
		return;
	}
	/* #2: insert an extent into the middle of another extent */
	btree_insert(bmap, offset, new_state);
	btree_insert(bmap, end, prev_state);
}
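
/*
 * Recap of the nine numbered cases above for an update over
 * [offset, end):
 *
 *	#1, #4: update starts exactly at an existing key and ends
 *		inside that extent - extend the previous extent (#1)
 *		or split off the front (#4)
 *	#3, #8, #5, #6: update exactly replaces an extent - merge with
 *		both neighbours, with next only, with prev only, or
 *		just rewrite the value in place
 *	#7, #9: update starts mid-extent and ends where the following
 *		extent begins - pull that extent's key down (#7) or
 *		insert a new item (#9)
 *	#2: update falls strictly inside one extent - split it in three
 */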
void
set_bmap_ext(
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		blen,
	int			state)
{
	update_bmap(ag_bmap[agno], agbno, blen, &states[state]);
}
int
get_bmap_ext(
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		maxbno,
	xfs_extlen_t		*blen)
{
	int			*statep;
	unsigned long		key;

	statep = btree_find(ag_bmap[agno], agbno, &key);
	if (!statep)
		return -1;

	if (key == agbno) {
		if (blen) {
			if (!btree_peek_next(ag_bmap[agno], &key))
				return -1;
			*blen = min(maxbno, key) - agbno;
		}
		return *statep;
	}

	statep = btree_peek_prev(ag_bmap[agno], NULL);
	if (!statep)
		return -1;
	if (blen)
		*blen = min(maxbno, key) - agbno;

	return *statep;
}
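
/*
 * Hypothetical caller loop, shown only to illustrate the interface:
 * walk an AG extent by extent, letting *blen report how far the
 * current state runs (capped at maxbno).
 *
 *	xfs_agblock_t	bno = 0;
 *	xfs_extlen_t	blen;
 *	int		state;
 *
 *	while (bno < ag_size) {
 *		state = get_bmap_ext(agno, bno, ag_size, &blen);
 *		(process blen blocks in "state")
 *		bno += blen;
 *	}
 */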
static uint64_t		*rt_bmap;
static size_t		rt_bmap_size;
/* block records fit into uint64_t's units */
#define XR_BB_UNIT	64			/* number of bits/unit */
#define XR_BB		4			/* bits per block record */
#define XR_BB_NUM	(XR_BB_UNIT/XR_BB)	/* number of records per unit */
#define XR_BB_MASK	0xF			/* block record mask */
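
/*
 * Worked example: one uint64_t holds XR_BB_NUM = 64/4 = 16 records.
 * For bno = 21, the record lives in word 21 / 16 = 1 at bit offset
 * (21 % 16) * 4 = 20, and XR_BB_MASK isolates its four bits.
 */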
/*
 * these work in real-time extents (e.g. fsbno == rt extent number)
 */
int
get_rtbmap(
	xfs_rtblock_t	bno)
{
	return (*(rt_bmap + bno / XR_BB_NUM) >>
		((bno % XR_BB_NUM) * XR_BB)) & XR_BB_MASK;
}
void
set_rtbmap(
	xfs_rtblock_t	bno,
	int		state)
{
	*(rt_bmap + bno / XR_BB_NUM) =
	 ((*(rt_bmap + bno / XR_BB_NUM) &
	  (~((uint64_t) XR_BB_MASK << ((bno % XR_BB_NUM) * XR_BB)))) |
	 (((uint64_t) state) << ((bno % XR_BB_NUM) * XR_BB)));
}
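
/*
 * Round-trip sketch (illustrative only): storing a state and reading
 * it back returns the same 4-bit value, e.g.
 *
 *	set_rtbmap(bno, XR_E_INUSE);
 *	ASSERT(get_rtbmap(bno) == XR_E_INUSE);
 */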
static void
reset_rt_bmap(void)
{
	/* 0x22 sets both 4-bit records in each byte to 0x2 (XR_E_FREE) */
	if (rt_bmap)
		memset(rt_bmap, 0x22, rt_bmap_size);	/* XR_E_FREE */
}
static void
init_rt_bmap(
	xfs_mount_t	*mp)
{
	if (mp->m_sb.sb_rextents == 0)
		return;

	rt_bmap_size = roundup(mp->m_sb.sb_rextents / (NBBY / XR_BB),
			       sizeof(uint64_t));

	rt_bmap = memalign(sizeof(uint64_t), rt_bmap_size);
	if (!rt_bmap) {
		do_error(
	_("couldn't allocate realtime block map, size = %" PRIu64 "\n"),
			mp->m_sb.sb_rextents);
		return;
	}
}
static void
free_rt_bmap(xfs_mount_t *mp)
{
	free(rt_bmap);
	rt_bmap = NULL;
}
void
reset_bmaps(xfs_mount_t *mp)
{
	xfs_agnumber_t	agno;
	xfs_agblock_t	ag_size;
	int		ag_hdr_block;

	ag_hdr_block = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
	ag_size = mp->m_sb.sb_agblocks;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		if (agno == mp->m_sb.sb_agcount - 1)
			ag_size = (xfs_extlen_t)(mp->m_sb.sb_dblocks -
				   (xfs_rfsblock_t)mp->m_sb.sb_agblocks * agno);
#ifdef BTREE_STATS
		if (btree_find(ag_bmap[agno], 0, NULL)) {
			printf("ag_bmap[%d] btree stats:\n", agno);
			btree_print_stats(ag_bmap[agno], stdout);
		}
#endif
		/*
		 * We always insert an item for the first block having a
		 * given state.  So the code below means:
		 *
		 *	block 0..ag_hdr_block-1:	XR_E_INUSE_FS
		 *	ag_hdr_block..ag_size:		XR_E_UNKNOWN
		 *	ag_size...			XR_E_BAD_STATE
		 */
		btree_clear(ag_bmap[agno]);
		btree_insert(ag_bmap[agno], 0, &states[XR_E_INUSE_FS]);
		btree_insert(ag_bmap[agno],
				ag_hdr_block, &states[XR_E_UNKNOWN]);
		btree_insert(ag_bmap[agno], ag_size, &states[XR_E_BAD_STATE]);
	}
	if (mp->m_sb.sb_logstart != 0) {
		set_bmap_ext(XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart),
			     XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
			     mp->m_sb.sb_logblocks, XR_E_INUSE_FS);
	}

	reset_rt_bmap();
}
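
/*
 * Lifecycle summary: init_bmaps() below allocates the per-AG btree
 * roots and locks and performs the first reset; reset_bmaps() may be
 * called again to return every map to its default state; free_bmaps()
 * tears it all down.
 */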
void
init_bmaps(xfs_mount_t *mp)
{
	xfs_agnumber_t	i;

	ag_bmap = calloc(mp->m_sb.sb_agcount, sizeof(struct btree_root *));
	if (!ag_bmap)
		do_error(_("couldn't allocate block map btree roots\n"));

	ag_locks = calloc(mp->m_sb.sb_agcount, sizeof(struct aglock));
	if (!ag_locks)
		do_error(_("couldn't allocate block map locks\n"));

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		btree_init(&ag_bmap[i]);
		pthread_mutex_init(&ag_locks[i].lock, NULL);
	}

	init_rt_bmap(mp);
	reset_bmaps(mp);
}
void
free_bmaps(xfs_mount_t *mp)
{
	xfs_agnumber_t	i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++)
		btree_destroy(ag_bmap[i]);

	free(ag_bmap);
	ag_bmap = NULL;

	free_rt_bmap(mp);
}