]>
Commit | Line | Data |
---|---|---|
2bd0ea18 | 1 | /* |
da23017d NS |
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | |
dfc130f3 | 4 | * |
da23017d NS |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | |
2bd0ea18 | 7 | * published by the Free Software Foundation. |
dfc130f3 | 8 | * |
da23017d NS |
9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
dfc130f3 | 13 | * |
da23017d NS |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
2bd0ea18 NS |
17 | */ |
18 | ||
19 | #include <libxfs.h> | |
2bd0ea18 NS |
20 | #include "avl.h" |
21 | #include "globals.h" | |
22 | #include "incore.h" | |
23 | #include "agheader.h" | |
24 | #include "protos.h" | |
3b6ac903 | 25 | #include "threads.h" |
2bd0ea18 NS |
26 | #include "err_protos.h" |
27 | ||
2bd0ea18 NS |
/*
 * array of inode tree ptrs, one per ag -- each AVL tree holds the
 * confirmed inode records (one record tracks 64 inodes) for one AG.
 */
avltree_desc_t **inode_tree_ptrs;

/*
 * ditto for uncertain inodes -- inodes we have seen references to but
 * whose existence on disk has not yet been confirmed.
 */
static avltree_desc_t **inode_uncertain_tree_ptrs;
37 | ||
0f012a4c BN |
38 | /* memory optimised nlink counting for all inodes */ |
39 | ||
40 | static void nlink_grow_8_to_16(ino_tree_node_t *irec); | |
41 | static void nlink_grow_16_to_32(ino_tree_node_t *irec); | |
42 | ||
43 | static void | |
44 | disk_nlink_32_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks) | |
45 | { | |
46 | ((__uint32_t*)irec->disk_nlinks)[ino_offset] = nlinks; | |
47 | } | |
48 | ||
49 | static __uint32_t | |
50 | disk_nlink_32_get(ino_tree_node_t *irec, int ino_offset) | |
51 | { | |
52 | return ((__uint32_t*)irec->disk_nlinks)[ino_offset]; | |
53 | } | |
54 | ||
55 | static __uint32_t | |
56 | counted_nlink_32_get(ino_tree_node_t *irec, int ino_offset) | |
57 | { | |
58 | return ((__uint32_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset]; | |
59 | } | |
60 | ||
61 | static __uint32_t | |
62 | counted_nlink_32_inc(ino_tree_node_t *irec, int ino_offset) | |
63 | { | |
64 | return ++(((__uint32_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset]); | |
65 | } | |
66 | ||
67 | static __uint32_t | |
68 | counted_nlink_32_dec(ino_tree_node_t *irec, int ino_offset) | |
69 | { | |
70 | __uint32_t *nlinks = (__uint32_t*)irec->ino_un.ex_data->counted_nlinks; | |
71 | ||
72 | ASSERT(nlinks[ino_offset] > 0); | |
73 | return --(nlinks[ino_offset]); | |
74 | } | |
75 | ||
76 | ||
77 | static void | |
78 | disk_nlink_16_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks) | |
79 | { | |
80 | if (nlinks >= 0x10000) { | |
81 | nlink_grow_16_to_32(irec); | |
82 | disk_nlink_32_set(irec, ino_offset, nlinks); | |
83 | } else | |
84 | ((__uint16_t*)irec->disk_nlinks)[ino_offset] = nlinks; | |
85 | } | |
86 | ||
87 | static __uint32_t | |
88 | disk_nlink_16_get(ino_tree_node_t *irec, int ino_offset) | |
89 | { | |
90 | return ((__uint16_t*)irec->disk_nlinks)[ino_offset]; | |
91 | } | |
92 | ||
93 | static __uint32_t | |
94 | counted_nlink_16_get(ino_tree_node_t *irec, int ino_offset) | |
95 | { | |
96 | return ((__uint16_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset]; | |
97 | } | |
98 | ||
99 | static __uint32_t | |
100 | counted_nlink_16_inc(ino_tree_node_t *irec, int ino_offset) | |
101 | { | |
102 | __uint16_t *nlinks = (__uint16_t*)irec->ino_un.ex_data->counted_nlinks; | |
103 | ||
104 | if (nlinks[ino_offset] == 0xffff) { | |
105 | nlink_grow_16_to_32(irec); | |
106 | return counted_nlink_32_inc(irec, ino_offset); | |
107 | } | |
108 | return ++(nlinks[ino_offset]); | |
109 | } | |
110 | ||
111 | static __uint32_t | |
112 | counted_nlink_16_dec(ino_tree_node_t *irec, int ino_offset) | |
113 | { | |
114 | __uint16_t *nlinks = (__uint16_t*)irec->ino_un.ex_data->counted_nlinks; | |
115 | ||
116 | ASSERT(nlinks[ino_offset] > 0); | |
117 | return --(nlinks[ino_offset]); | |
118 | } | |
119 | ||
120 | ||
121 | static void | |
122 | disk_nlink_8_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks) | |
123 | { | |
124 | if (nlinks >= 0x100) { | |
125 | nlink_grow_8_to_16(irec); | |
126 | disk_nlink_16_set(irec, ino_offset, nlinks); | |
127 | } else | |
128 | irec->disk_nlinks[ino_offset] = nlinks; | |
129 | } | |
130 | ||
131 | static __uint32_t | |
132 | disk_nlink_8_get(ino_tree_node_t *irec, int ino_offset) | |
133 | { | |
134 | return irec->disk_nlinks[ino_offset]; | |
135 | } | |
136 | ||
137 | static __uint32_t | |
138 | counted_nlink_8_get(ino_tree_node_t *irec, int ino_offset) | |
139 | { | |
140 | return irec->ino_un.ex_data->counted_nlinks[ino_offset]; | |
141 | } | |
142 | ||
143 | static __uint32_t | |
144 | counted_nlink_8_inc(ino_tree_node_t *irec, int ino_offset) | |
145 | { | |
146 | if (irec->ino_un.ex_data->counted_nlinks[ino_offset] == 0xff) { | |
147 | nlink_grow_8_to_16(irec); | |
148 | return counted_nlink_16_inc(irec, ino_offset); | |
149 | } | |
150 | return ++(irec->ino_un.ex_data->counted_nlinks[ino_offset]); | |
151 | } | |
152 | ||
153 | static __uint32_t | |
154 | counted_nlink_8_dec(ino_tree_node_t *irec, int ino_offset) | |
155 | { | |
156 | ASSERT(irec->ino_un.ex_data->counted_nlinks[ino_offset] > 0); | |
157 | return --(irec->ino_un.ex_data->counted_nlinks[ino_offset]); | |
158 | } | |
159 | ||
160 | ||
161 | static nlink_ops_t nlinkops[] = { | |
162 | {sizeof(__uint8_t) * XFS_INODES_PER_CHUNK, | |
163 | disk_nlink_8_set, disk_nlink_8_get, | |
164 | counted_nlink_8_get, counted_nlink_8_inc, counted_nlink_8_dec}, | |
165 | {sizeof(__uint16_t) * XFS_INODES_PER_CHUNK, | |
166 | disk_nlink_16_set, disk_nlink_16_get, | |
167 | counted_nlink_16_get, counted_nlink_16_inc, counted_nlink_16_dec}, | |
168 | {sizeof(__uint32_t) * XFS_INODES_PER_CHUNK, | |
169 | disk_nlink_32_set, disk_nlink_32_get, | |
170 | counted_nlink_32_get, counted_nlink_32_inc, counted_nlink_32_dec}, | |
171 | }; | |
172 | ||
173 | static void | |
174 | nlink_grow_8_to_16(ino_tree_node_t *irec) | |
175 | { | |
176 | __uint16_t *new_nlinks; | |
177 | int i; | |
178 | ||
179 | new_nlinks = malloc(sizeof(__uint16_t) * XFS_INODES_PER_CHUNK); | |
180 | if (new_nlinks == NULL) | |
181 | do_error(_("could not allocate expanded nlink array\n")); | |
182 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) | |
183 | new_nlinks[i] = irec->disk_nlinks[i]; | |
184 | free(irec->disk_nlinks); | |
185 | irec->disk_nlinks = (__uint8_t*)new_nlinks; | |
186 | ||
187 | if (full_ino_ex_data) { | |
188 | new_nlinks = malloc(sizeof(__uint16_t) * XFS_INODES_PER_CHUNK); | |
189 | if (new_nlinks == NULL) | |
190 | do_error(_("could not allocate expanded nlink array\n")); | |
191 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) | |
192 | new_nlinks[i] = irec->ino_un.ex_data->counted_nlinks[i]; | |
193 | free(irec->ino_un.ex_data->counted_nlinks); | |
194 | irec->ino_un.ex_data->counted_nlinks = (__uint8_t*)new_nlinks; | |
195 | } | |
196 | irec->nlinkops = &nlinkops[1]; | |
197 | } | |
198 | ||
199 | static void | |
200 | nlink_grow_16_to_32(ino_tree_node_t *irec) | |
201 | { | |
202 | __uint32_t *new_nlinks; | |
203 | int i; | |
204 | ||
205 | new_nlinks = malloc(sizeof(__uint32_t) * XFS_INODES_PER_CHUNK); | |
206 | if (new_nlinks == NULL) | |
207 | do_error(_("could not allocate expanded nlink array\n")); | |
208 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) | |
209 | new_nlinks[i] = ((__int16_t*)&irec->disk_nlinks)[i]; | |
210 | free(irec->disk_nlinks); | |
211 | irec->disk_nlinks = (__uint8_t*)new_nlinks; | |
212 | ||
213 | if (full_ino_ex_data) { | |
214 | new_nlinks = malloc(sizeof(__uint32_t) * XFS_INODES_PER_CHUNK); | |
215 | if (new_nlinks == NULL) | |
216 | do_error(_("could not allocate expanded nlink array\n")); | |
217 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) | |
218 | new_nlinks[i] = ((__int16_t*)&irec->ino_un.ex_data->counted_nlinks)[i]; | |
219 | free(irec->ino_un.ex_data->counted_nlinks); | |
220 | irec->ino_un.ex_data->counted_nlinks = (__uint8_t*)new_nlinks; | |
221 | } | |
222 | irec->nlinkops = &nlinkops[2]; | |
223 | } | |
224 | ||
/*
 * Next is the uncertain inode tree -- despite the historical "list"
 * terminology, it is an AVL tree per AG (inode_uncertain_tree_ptrs),
 * with inode records kept sorted in ascending order of starting
 * inode number.
 */
230 | ||
231 | /* | |
ffc82585 | 232 | * Common code for creating inode records for use by trees and lists. |
2bd0ea18 NS |
233 | * called only from add_inodes and add_inodes_uncertain |
234 | * | |
235 | * IMPORTANT: all inodes (inode records) start off as free and | |
236 | * unconfirmed. | |
237 | */ | |
ffc82585 CH |
238 | static struct ino_tree_node * |
239 | alloc_ino_node( | |
0f012a4c | 240 | xfs_agino_t starting_ino) |
2bd0ea18 | 241 | { |
ffc82585 | 242 | struct ino_tree_node *irec; |
2bd0ea18 | 243 | |
ffc82585 CH |
244 | irec = malloc(sizeof(*irec)); |
245 | if (!irec) | |
246 | do_error(_("inode map malloc failed\n")); | |
2bd0ea18 | 247 | |
ffc82585 CH |
248 | irec->avl_node.avl_nextino = NULL; |
249 | irec->avl_node.avl_forw = NULL; | |
250 | irec->avl_node.avl_back = NULL; | |
2bd0ea18 | 251 | |
ffc82585 CH |
252 | irec->ino_startnum = starting_ino; |
253 | irec->ino_confirmed = 0; | |
254 | irec->ino_isa_dir = 0; | |
255 | irec->ir_free = (xfs_inofree_t) - 1; | |
256 | irec->ino_un.ex_data = NULL; | |
257 | irec->nlinkops = &nlinkops[0]; | |
258 | irec->disk_nlinks = calloc(1, nlinkops[0].nlink_size); | |
259 | if (!irec->disk_nlinks) | |
0f012a4c | 260 | do_error(_("could not allocate nlink array\n")); |
ffc82585 | 261 | return irec; |
2bd0ea18 NS |
262 | } |
263 | ||
2bd0ea18 | 264 | static void |
ffc82585 CH |
265 | free_ino_tree_node( |
266 | struct ino_tree_node *irec) | |
2bd0ea18 | 267 | { |
ffc82585 CH |
268 | irec->avl_node.avl_nextino = NULL; |
269 | irec->avl_node.avl_forw = NULL; | |
270 | irec->avl_node.avl_back = NULL; | |
520cf040 | 271 | |
ffc82585 CH |
272 | free(irec->disk_nlinks); |
273 | if (irec->ino_un.ex_data != NULL) { | |
0f012a4c | 274 | if (full_ino_ex_data) { |
ffc82585 CH |
275 | free(irec->ino_un.ex_data->parents); |
276 | free(irec->ino_un.ex_data->counted_nlinks); | |
0f012a4c | 277 | } |
ffc82585 | 278 | free(irec->ino_un.ex_data); |
0f012a4c | 279 | |
2bd0ea18 | 280 | } |
ffc82585 CH |
281 | |
282 | free(irec); | |
2bd0ea18 NS |
283 | } |
284 | ||
/*
 * last referenced cache for uncertain inodes -- one entry per AG,
 * remembering the record touched by the previous
 * add_aginode_uncertain() call so repeated hits on the same 64-inode
 * chunk skip the tree lookup.
 */
static ino_tree_node_t **last_rec;
289 | ||
290 | /* | |
291 | * ok, the uncertain inodes are a set of trees just like the | |
292 | * good inodes but all starting inode records are (arbitrarily) | |
293 | * aligned on XFS_CHUNK_PER_INODE boundaries to prevent overlaps. | |
294 | * this means we may have partials records in the tree (e.g. records | |
295 | * without 64 confirmed uncertain inodes). Tough. | |
296 | * | |
297 | * free is set to 1 if the inode is thought to be free, 0 if used | |
298 | */ | |
299 | void | |
300 | add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free) | |
301 | { | |
302 | ino_tree_node_t *ino_rec; | |
303 | xfs_agino_t s_ino; | |
304 | int offset; | |
305 | ||
306 | ASSERT(agno < glob_agcount); | |
307 | ASSERT(last_rec != NULL); | |
308 | ||
309 | s_ino = rounddown(ino, XFS_INODES_PER_CHUNK); | |
310 | ||
311 | /* | |
312 | * check for a cache hit | |
313 | */ | |
314 | if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) { | |
315 | offset = ino - s_ino; | |
316 | if (free) | |
317 | set_inode_free(last_rec[agno], offset); | |
318 | else | |
319 | set_inode_used(last_rec[agno], offset); | |
320 | ||
321 | return; | |
322 | } | |
323 | ||
324 | /* | |
325 | * check to see if record containing inode is already in the tree. | |
326 | * if not, add it | |
327 | */ | |
ffc82585 CH |
328 | ino_rec = (ino_tree_node_t *) |
329 | avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino); | |
330 | if (!ino_rec) { | |
331 | ino_rec = alloc_ino_node(s_ino); | |
332 | ||
333 | if (!avl_insert(inode_uncertain_tree_ptrs[agno], | |
334 | &ino_rec->avl_node)) | |
335 | do_error( | |
336 | _("add_aginode_uncertain - duplicate inode range\n")); | |
2bd0ea18 NS |
337 | } |
338 | ||
339 | if (free) | |
340 | set_inode_free(ino_rec, ino - s_ino); | |
341 | else | |
342 | set_inode_used(ino_rec, ino - s_ino); | |
343 | ||
344 | /* | |
345 | * set cache entry | |
346 | */ | |
347 | last_rec[agno] = ino_rec; | |
2bd0ea18 NS |
348 | } |
349 | ||
/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion (fs-wide inode number ->
 * AG number + AG-relative inode number).
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
        add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
                                XFS_INO_TO_AGINO(mp, ino), free);
}
360 | ||
361 | /* | |
362 | * pull the indicated inode record out of the uncertain inode tree | |
363 | */ | |
364 | void | |
1ae311d5 LC |
365 | get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, |
366 | ino_tree_node_t *ino_rec) | |
2bd0ea18 NS |
367 | { |
368 | ASSERT(inode_tree_ptrs != NULL); | |
1ae311d5 | 369 | ASSERT(agno < mp->m_sb.sb_agcount); |
2bd0ea18 NS |
370 | ASSERT(inode_tree_ptrs[agno] != NULL); |
371 | ||
372 | avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node); | |
373 | ||
374 | ino_rec->avl_node.avl_nextino = NULL; | |
375 | ino_rec->avl_node.avl_forw = NULL; | |
376 | ino_rec->avl_node.avl_back = NULL; | |
377 | } | |
378 | ||
379 | ino_tree_node_t * | |
380 | findfirst_uncertain_inode_rec(xfs_agnumber_t agno) | |
381 | { | |
382 | return((ino_tree_node_t *) | |
383 | inode_uncertain_tree_ptrs[agno]->avl_firstino); | |
384 | } | |
385 | ||
2d9475a4 NS |
386 | ino_tree_node_t * |
387 | find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino) | |
388 | { | |
389 | return((ino_tree_node_t *) | |
390 | avl_findrange(inode_uncertain_tree_ptrs[agno], ino)); | |
391 | } | |
392 | ||
2bd0ea18 NS |
/*
 * Invalidate the last-referenced cache entry for this AG so the next
 * add_aginode_uncertain() call is forced through a fresh tree lookup
 * (needed after records are removed or freed).
 */
void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
        last_rec[agno] = NULL;
}
398 | ||
399 | ||
400 | /* | |
ffc82585 CH |
401 | * Next comes the inode trees. One per AG, AVL trees of inode records, each |
402 | * inode record tracking 64 inodes | |
2bd0ea18 | 403 | */ |
ffc82585 | 404 | |
2bd0ea18 | 405 | /* |
ffc82585 CH |
406 | * Set up an inode tree record for a group of inodes that will include the |
407 | * requested inode. | |
2bd0ea18 | 408 | * |
ffc82585 CH |
409 | * This does NOT do error-check for duplicate records. The caller is |
410 | * responsible for checking that. Ino must be the start of an | |
411 | * XFS_INODES_PER_CHUNK (64) inode chunk | |
2bd0ea18 | 412 | * |
ffc82585 CH |
413 | * Each inode resides in a 64-inode chunk which can be part one or more chunks |
414 | * (MAX(64, inodes-per-block). The fs allocates in chunks (as opposed to 1 | |
415 | * chunk) when a block can hold more than one chunk (inodes per block > 64). | |
416 | * Allocating in one chunk pieces causes us problems when it takes more than | |
417 | * one fs block to contain an inode chunk because the chunks can start on | |
418 | * *any* block boundary. So we assume that the caller has a clue because at | |
419 | * this level, we don't. | |
2bd0ea18 | 420 | */ |
ffc82585 CH |
421 | static struct ino_tree_node * |
422 | add_inode( | |
423 | struct xfs_mount *mp, | |
424 | xfs_agnumber_t agno, | |
425 | xfs_agino_t agino) | |
2bd0ea18 | 426 | { |
ffc82585 | 427 | struct ino_tree_node *irec; |
2bd0ea18 | 428 | |
ffc82585 CH |
429 | irec = alloc_ino_node(agino); |
430 | if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node)) | |
507f4e33 | 431 | do_warn(_("add_inode - duplicate inode range\n")); |
ffc82585 | 432 | return irec; |
2bd0ea18 NS |
433 | } |
434 | ||
435 | /* | |
436 | * pull the indicated inode record out of the inode tree | |
437 | */ | |
438 | void | |
1ae311d5 | 439 | get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec) |
2bd0ea18 NS |
440 | { |
441 | ASSERT(inode_tree_ptrs != NULL); | |
1ae311d5 | 442 | ASSERT(agno < mp->m_sb.sb_agcount); |
2bd0ea18 NS |
443 | ASSERT(inode_tree_ptrs[agno] != NULL); |
444 | ||
445 | avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node); | |
446 | ||
447 | ino_rec->avl_node.avl_nextino = NULL; | |
448 | ino_rec->avl_node.avl_forw = NULL; | |
449 | ino_rec->avl_node.avl_back = NULL; | |
450 | } | |
451 | ||
/*
 * free the designated inode record (return it to the free pool).
 * agno is unused; kept for interface symmetry with the get/find calls.
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        free_ino_tree_node(ino_rec);
}
461 | ||
2bd0ea18 | 462 | void |
1ae311d5 LC |
463 | find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno, |
464 | xfs_agino_t start_ino, xfs_agino_t end_ino, | |
465 | ino_tree_node_t **first, ino_tree_node_t **last) | |
2bd0ea18 NS |
466 | { |
467 | *first = *last = NULL; | |
468 | ||
1ae311d5 LC |
469 | /* |
470 | * Is the AG inside the file system ? | |
471 | */ | |
472 | if (agno < mp->m_sb.sb_agcount) | |
473 | avl_findranges(inode_tree_ptrs[agno], start_ino, | |
474 | end_ino, (avlnode_t **) first, (avlnode_t **) last); | |
2bd0ea18 NS |
475 | } |
476 | ||
477 | /* | |
478 | * if ino doesn't exist, it must be properly aligned -- on a | |
479 | * filesystem block boundary or XFS_INODES_PER_CHUNK boundary, | |
480 | * whichever alignment is larger. | |
481 | */ | |
482 | ino_tree_node_t * | |
1ae311d5 | 483 | set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino) |
2bd0ea18 NS |
484 | { |
485 | ino_tree_node_t *ino_rec; | |
486 | ||
487 | /* | |
488 | * check alignment -- the only way to detect this | |
489 | * is too see if the chunk overlaps another chunk | |
490 | * already in the tree | |
491 | */ | |
1ae311d5 | 492 | ino_rec = add_inode(mp, agno, ino); |
2bd0ea18 NS |
493 | |
494 | ASSERT(ino_rec != NULL); | |
495 | ASSERT(ino >= ino_rec->ino_startnum && | |
496 | ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK); | |
497 | ||
498 | set_inode_used(ino_rec, ino - ino_rec->ino_startnum); | |
499 | ||
500 | return(ino_rec); | |
501 | } | |
502 | ||
503 | ino_tree_node_t * | |
1ae311d5 | 504 | set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino) |
2bd0ea18 NS |
505 | { |
506 | ino_tree_node_t *ino_rec; | |
507 | ||
1ae311d5 | 508 | ino_rec = add_inode(mp, agno, ino); |
2bd0ea18 NS |
509 | |
510 | ASSERT(ino_rec != NULL); | |
511 | ASSERT(ino >= ino_rec->ino_startnum && | |
512 | ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK); | |
513 | ||
514 | set_inode_free(ino_rec, ino - ino_rec->ino_startnum); | |
515 | ||
516 | return(ino_rec); | |
517 | } | |
518 | ||
2bd0ea18 NS |
519 | void |
520 | print_inode_list_int(xfs_agnumber_t agno, int uncertain) | |
521 | { | |
522 | ino_tree_node_t *ino_rec; | |
523 | ||
524 | if (!uncertain) { | |
507f4e33 | 525 | fprintf(stderr, _("good inode list is --\n")); |
2bd0ea18 NS |
526 | ino_rec = findfirst_inode_rec(agno); |
527 | } else { | |
507f4e33 | 528 | fprintf(stderr, _("uncertain inode list is --\n")); |
2bd0ea18 NS |
529 | ino_rec = findfirst_uncertain_inode_rec(agno); |
530 | } | |
531 | ||
532 | if (ino_rec == NULL) { | |
507f4e33 | 533 | fprintf(stderr, _("agno %d -- no inodes\n"), agno); |
2bd0ea18 NS |
534 | return; |
535 | } | |
536 | ||
507f4e33 | 537 | printf(_("agno %d\n"), agno); |
2bd0ea18 NS |
538 | |
539 | while(ino_rec != NULL) { | |
540 | fprintf(stderr, | |
507f4e33 | 541 | _("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"), |
5b64e00a | 542 | (unsigned long)ino_rec, |
2bd0ea18 | 543 | ino_rec->ino_startnum, |
5b64e00a NS |
544 | (unsigned long long)ino_rec->ir_free, |
545 | (unsigned long long)ino_rec->ino_confirmed); | |
2bd0ea18 NS |
546 | if (ino_rec->ino_startnum == 0) |
547 | ino_rec = ino_rec; | |
548 | ino_rec = next_ino_rec(ino_rec); | |
549 | } | |
550 | } | |
551 | ||
/* Debug helper: dump the good-inode records of one AG to stderr. */
void
print_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 0);
}
557 | ||
/* Debug helper: dump the uncertain-inode records of one AG to stderr. */
void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 1);
}
563 | ||
/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode that
 * is the Nth bit set in the mask is stored in the Nth location in
 * the array where N starts at 0.
 */

void
set_inode_parent(
        ino_tree_node_t         *irec,
        int                     offset,
        xfs_ino_t               parent)
{
        parent_list_t           *ptbl;
        int                     i;
        int                     cnt;
        int                     target;
        __uint64_t              bitmask;
        parent_entry_t          *tmp;

        /* the parent table lives in a different union member once the
         * phase-7 extra data has been attached */
        if (full_ino_ex_data)
                ptbl = irec->ino_un.ex_data->parents;
        else
                ptbl = irec->ino_un.plist;

        /*
         * Case 1: no table yet.  Allocate one with a single packed
         * entry and set only this inode's bit.
         */
        if (ptbl == NULL) {
                ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
                if (!ptbl)
                        do_error(_("couldn't malloc parent list table\n"));

                if (full_ino_ex_data)
                        irec->ino_un.ex_data->parents = ptbl;
                else
                        irec->ino_un.plist = ptbl;

                ptbl->pmask = 1LL << offset;
                ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
                                                        sizeof(xfs_ino_t));
                if (!ptbl->pentries)
                        do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
                ptbl->cnt = 1;
#endif
                ptbl->pentries[0] = parent;

                return;
        }

        /*
         * Case 2: this inode already has an entry -- overwrite it in
         * place.  Its packed index is the number of set mask bits
         * below "offset".
         */
        if (ptbl->pmask & (1LL << offset)) {
                bitmask = 1LL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                ptbl->pentries[target] = parent;

                return;
        }

        /*
         * Case 3: new entry in an existing table.  Count the total
         * entries (cnt) and the insertion index (target = entries
         * whose bit is below "offset"), then grow the packed array by
         * one, shifting the tail up to make room.
         */
        bitmask = 1LL;
        cnt = target = 0;

        for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                if (ptbl->pmask & bitmask) {
                        cnt++;
                        if (i < offset)
                                target++;
                }

                bitmask <<= 1;
        }

#ifdef DEBUG
        ASSERT(cnt == ptbl->cnt);
#endif
        ASSERT(cnt >= target);

        tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
        if (!tmp)
                do_error(_("couldn't memalign pentries table\n"));

        /* entries below the insertion point keep their index... */
        memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

        /* ...entries above it shift up by one slot */
        if (cnt > target)
                memmove(tmp + target + 1, ptbl->pentries + target,
                                (cnt - target) * sizeof(parent_entry_t));

        free(ptbl->pentries);

        ptbl->pentries = tmp;

#ifdef DEBUG
        ptbl->cnt++;
#endif
        ptbl->pentries[target] = parent;
        ptbl->pmask |= (1LL << offset);
}
667 | ||
2bd0ea18 NS |
668 | xfs_ino_t |
669 | get_inode_parent(ino_tree_node_t *irec, int offset) | |
670 | { | |
671 | __uint64_t bitmask; | |
672 | parent_list_t *ptbl; | |
673 | int i; | |
674 | int target; | |
675 | ||
0f012a4c BN |
676 | if (full_ino_ex_data) |
677 | ptbl = irec->ino_un.ex_data->parents; | |
2bd0ea18 NS |
678 | else |
679 | ptbl = irec->ino_un.plist; | |
680 | ||
681 | if (ptbl->pmask & (1LL << offset)) { | |
682 | bitmask = 1LL; | |
683 | target = 0; | |
684 | ||
685 | for (i = 0; i < offset; i++) { | |
686 | if (ptbl->pmask & bitmask) | |
687 | target++; | |
688 | bitmask <<= 1; | |
689 | } | |
690 | #ifdef DEBUG | |
691 | ASSERT(target < ptbl->cnt); | |
692 | #endif | |
693 | return(ptbl->pentries[target]); | |
694 | } | |
695 | ||
696 | return(0LL); | |
697 | } | |
698 | ||
0f012a4c BN |
699 | static void |
700 | alloc_ex_data(ino_tree_node_t *irec) | |
2bd0ea18 | 701 | { |
0f012a4c | 702 | parent_list_t *ptbl; |
2bd0ea18 | 703 | |
0f012a4c BN |
704 | ptbl = irec->ino_un.plist; |
705 | irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t)); | |
706 | if (irec->ino_un.ex_data == NULL) | |
707 | do_error(_("could not malloc inode extra data\n")); | |
dfc130f3 | 708 | |
0f012a4c BN |
709 | irec->ino_un.ex_data->parents = ptbl; |
710 | irec->ino_un.ex_data->counted_nlinks = calloc(1, irec->nlinkops->nlink_size); | |
2bd0ea18 | 711 | |
0f012a4c BN |
712 | if (irec->ino_un.ex_data->counted_nlinks == NULL) |
713 | do_error(_("could not malloc inode extra data\n")); | |
2bd0ea18 NS |
714 | } |
715 | ||
716 | void | |
0f012a4c | 717 | add_ino_ex_data(xfs_mount_t *mp) |
2bd0ea18 | 718 | { |
0f012a4c BN |
719 | ino_tree_node_t *ino_rec; |
720 | xfs_agnumber_t i; | |
2bd0ea18 NS |
721 | |
722 | for (i = 0; i < mp->m_sb.sb_agcount; i++) { | |
723 | ino_rec = findfirst_inode_rec(i); | |
724 | ||
725 | while (ino_rec != NULL) { | |
0f012a4c | 726 | alloc_ex_data(ino_rec); |
2bd0ea18 NS |
727 | ino_rec = next_ino_rec(ino_rec); |
728 | } | |
729 | } | |
0f012a4c | 730 | full_ino_ex_data = 1; |
2bd0ea18 NS |
731 | } |
732 | ||
733 | static __psunsigned_t | |
734 | avl_ino_start(avlnode_t *node) | |
735 | { | |
736 | return((__psunsigned_t) ((ino_tree_node_t *) node)->ino_startnum); | |
737 | } | |
738 | ||
739 | static __psunsigned_t | |
740 | avl_ino_end(avlnode_t *node) | |
741 | { | |
742 | return((__psunsigned_t) ( | |
743 | ((ino_tree_node_t *) node)->ino_startnum + | |
744 | XFS_INODES_PER_CHUNK)); | |
745 | } | |
746 | ||
747 | avlops_t avl_ino_tree_ops = { | |
748 | avl_ino_start, | |
749 | avl_ino_end | |
750 | }; | |
751 | ||
752 | void | |
753 | incore_ino_init(xfs_mount_t *mp) | |
754 | { | |
755 | int i; | |
756 | int agcount = mp->m_sb.sb_agcount; | |
757 | ||
758 | if ((inode_tree_ptrs = malloc(agcount * | |
759 | sizeof(avltree_desc_t *))) == NULL) | |
507f4e33 | 760 | do_error(_("couldn't malloc inode tree descriptor table\n")); |
2bd0ea18 NS |
761 | if ((inode_uncertain_tree_ptrs = malloc(agcount * |
762 | sizeof(avltree_desc_t *))) == NULL) | |
507f4e33 NS |
763 | do_error( |
764 | _("couldn't malloc uncertain ino tree descriptor table\n")); | |
2bd0ea18 NS |
765 | |
766 | for (i = 0; i < agcount; i++) { | |
767 | if ((inode_tree_ptrs[i] = | |
768 | malloc(sizeof(avltree_desc_t))) == NULL) | |
507f4e33 | 769 | do_error(_("couldn't malloc inode tree descriptor\n")); |
2bd0ea18 NS |
770 | if ((inode_uncertain_tree_ptrs[i] = |
771 | malloc(sizeof(avltree_desc_t))) == NULL) | |
772 | do_error( | |
507f4e33 | 773 | _("couldn't malloc uncertain ino tree descriptor\n")); |
2bd0ea18 NS |
774 | } |
775 | for (i = 0; i < agcount; i++) { | |
776 | avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops); | |
777 | avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops); | |
778 | } | |
779 | ||
2bd0ea18 | 780 | if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL) |
507f4e33 | 781 | do_error(_("couldn't malloc uncertain inode cache area\n")); |
2bd0ea18 | 782 | |
dab9b8d6 | 783 | memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount); |
2bd0ea18 | 784 | |
0f012a4c | 785 | full_ino_ex_data = 0; |
2bd0ea18 | 786 | } |