/*
 * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include <xfs.h>

/*
 * Internal functions.
 */

/*
 * Log specified fields for the inode given by bp and off.
 */
STATIC void
xfs_ialloc_log_di(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* inode buffer */
	int		off,		/* index of inode in buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			ioffset;	/* off in bytes */
	int			last;		/* last byte number */
	xfs_mount_t		*mp;		/* mount point structure */
	static const short	offsets[] = {	/* field offsets */
						/* keep in sync with bits */
		offsetof(xfs_dinode_core_t, di_magic),
		offsetof(xfs_dinode_core_t, di_mode),
		offsetof(xfs_dinode_core_t, di_version),
		offsetof(xfs_dinode_core_t, di_format),
		offsetof(xfs_dinode_core_t, di_onlink),
		offsetof(xfs_dinode_core_t, di_uid),
		offsetof(xfs_dinode_core_t, di_gid),
		offsetof(xfs_dinode_core_t, di_nlink),
		offsetof(xfs_dinode_core_t, di_projid),
		offsetof(xfs_dinode_core_t, di_pad),
		offsetof(xfs_dinode_core_t, di_atime),
		offsetof(xfs_dinode_core_t, di_mtime),
		offsetof(xfs_dinode_core_t, di_ctime),
		offsetof(xfs_dinode_core_t, di_size),
		offsetof(xfs_dinode_core_t, di_nblocks),
		offsetof(xfs_dinode_core_t, di_extsize),
		offsetof(xfs_dinode_core_t, di_nextents),
		offsetof(xfs_dinode_core_t, di_anextents),
		offsetof(xfs_dinode_core_t, di_forkoff),
		offsetof(xfs_dinode_core_t, di_aformat),
		offsetof(xfs_dinode_core_t, di_dmevmask),
		offsetof(xfs_dinode_core_t, di_dmstate),
		offsetof(xfs_dinode_core_t, di_flags),
		offsetof(xfs_dinode_core_t, di_gen),
		offsetof(xfs_dinode_t, di_next_unlinked),
		offsetof(xfs_dinode_t, di_u),
		offsetof(xfs_dinode_t, di_a),
		sizeof(xfs_dinode_t)
	};


	ASSERT(offsetof(xfs_dinode_t, di_core) == 0);
	ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0);
	mp = tp->t_mountp;
	/*
	 * Get the inode-relative first and last bytes for these fields
	 */
	xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last);
	/*
	 * Convert to buffer offsets and log it.
	 */
	ioffset = off << mp->m_sb.sb_inodelog;
	first += ioffset;
	last += ioffset;
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	int		blks_per_cluster; /* fs blocks per inode cluster */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_daddr_t	d;		/* disk addr of buffer */
	int		error;
	xfs_buf_t	*fbuf;		/* new free inodes' buffer */
	xfs_dinode_t	*free;		/* new free inode structure */
	int		i;		/* inode counter */
	int		j;		/* block counter */
	int		nbufs;		/* num bufs of new inodes */
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	int		ninodes;	/* num inodes per buf */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		version;	/* inode version number to use */
	static xfs_timestamp_t ztime;	/* zero xfs timestamp */
	int		isaligned;	/* inode allocation at stripe unit */
					/* boundary */
	xfs_dinode_core_t dic;		/* a dinode_core to copy to new */
					/* inodes */

	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * Set the alignment for the allocation.
	 * If stripe alignment is turned on then align at stripe unit
	 * boundary.
	 * If the cluster size is smaller than a filesystem block
	 * then we're doing I/O for inodes in filesystem block size pieces,
	 * so don't need alignment anyway.
	 */
	isaligned = 0;
	if (args.mp->m_sinoalign) {
		ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
		args.alignment = args.mp->m_dalign;
		isaligned = 1;
	} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
		   args.mp->m_sb.sb_inoalignmt >=
		   XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
		args.alignment = args.mp->m_sb.sb_inoalignmt;
	else
		args.alignment = 1;
	agi = XFS_BUF_TO_AGI(agbp);
	/*
	 * Need to figure out where to allocate the inode blocks.
	 * Ideally they should be spaced out through the a.g.
	 * For now, just allocate blocks up front.
	 */
	args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
	args.fsbno = XFS_AGB_TO_FSB(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
			args.agbno);
	/*
	 * Allocate a fixed-size extent of inodes.
	 */
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.mod = args.total = args.wasdel = args.isfl = args.userdata =
		args.minalignslop = 0;
	args.prod = 1;
	/*
	 * Allow space for the inode btree to split.
	 */
	args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
	if ((error = xfs_alloc_vextent(&args)))
		return error;

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = INT_GET(agi->agi_root, ARCH_CONVERT);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				INT_GET(agi->agi_seqno, ARCH_CONVERT), args.agbno);
		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
		    args.mp->m_sb.sb_inoalignmt >=
		    XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
			args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the blocks size.
	 */
	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
		blks_per_cluster = 1;
		nbufs = (int)args.len;
		ninodes = args.mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
				   args.mp->m_sb.sb_blocksize;
		nbufs = (int)args.len / blks_per_cluster;
		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
	}
	/*
	 * Figure out what version number to use in the inodes we create.
	 * If the superblock version has caught up to the one that supports
	 * the new inode format, then use the new inode version.  Otherwise
	 * use the old version so that old kernels will continue to be
	 * able to use the file system.
	 */
	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))
		version = XFS_DINODE_VERSION_2;
	else
		version = XFS_DINODE_VERSION_1;
	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(args.mp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
				args.agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
					 args.mp->m_bsize * blks_per_cluster,
					 XFS_BUF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));
		/*
		 * Loop over the inodes in this buffer.
		 */
		INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
		INT_ZERO(dic.di_mode, ARCH_CONVERT);
		INT_SET(dic.di_version, ARCH_CONVERT, version);
		INT_ZERO(dic.di_format, ARCH_CONVERT);
		INT_ZERO(dic.di_onlink, ARCH_CONVERT);
		INT_ZERO(dic.di_uid, ARCH_CONVERT);
		INT_ZERO(dic.di_gid, ARCH_CONVERT);
		INT_ZERO(dic.di_nlink, ARCH_CONVERT);
		INT_ZERO(dic.di_projid, ARCH_CONVERT);
		bzero(&(dic.di_pad[0]), sizeof(dic.di_pad));
		INT_SET(dic.di_atime.t_sec, ARCH_CONVERT, ztime.t_sec);
		INT_SET(dic.di_atime.t_nsec, ARCH_CONVERT, ztime.t_nsec);

		INT_SET(dic.di_mtime.t_sec, ARCH_CONVERT, ztime.t_sec);
		INT_SET(dic.di_mtime.t_nsec, ARCH_CONVERT, ztime.t_nsec);

		INT_SET(dic.di_ctime.t_sec, ARCH_CONVERT, ztime.t_sec);
		INT_SET(dic.di_ctime.t_nsec, ARCH_CONVERT, ztime.t_nsec);

		INT_ZERO(dic.di_size, ARCH_CONVERT);
		INT_ZERO(dic.di_nblocks, ARCH_CONVERT);
		INT_ZERO(dic.di_extsize, ARCH_CONVERT);
		INT_ZERO(dic.di_nextents, ARCH_CONVERT);
		INT_ZERO(dic.di_anextents, ARCH_CONVERT);
		INT_ZERO(dic.di_forkoff, ARCH_CONVERT);
		INT_ZERO(dic.di_aformat, ARCH_CONVERT);
		INT_ZERO(dic.di_dmevmask, ARCH_CONVERT);
		INT_ZERO(dic.di_dmstate, ARCH_CONVERT);
		INT_ZERO(dic.di_flags, ARCH_CONVERT);
		INT_ZERO(dic.di_gen, ARCH_CONVERT);

		for (i = 0; i < ninodes; i++) {
			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
			bcopy(&dic, &(free->di_core), sizeof(xfs_dinode_core_t));
			INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
			xfs_ialloc_log_di(tp, fbuf, i,
					  XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
		}
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
	INT_MOD(agi->agi_count, ARCH_CONVERT, newlen);
	INT_MOD(agi->agi_freecount, ARCH_CONVERT, newlen);
	mraccess(&args.mp->m_peraglock);
	args.mp->m_perag[INT_GET(agi->agi_seqno, ARCH_CONVERT)].pagi_freecount += newlen;
	mraccunlock(&args.mp->m_peraglock);
	INT_SET(agi->agi_newino, ARCH_CONVERT, newino);
	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_btree_init_cursor(args.mp, tp, agbp,
			INT_GET(agi->agi_seqno, ARCH_CONVERT),
			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		if ((error = xfs_inobt_lookup_eq(cur, thisino,
				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		if ((error = xfs_inobt_insert(cur, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and then mode.  Return the allocation group buffer.
 */
STATIC xfs_buf_t *			/* allocation group buffer */
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	mode_t		mode,		/* bits set to indicate file type */
	int		okalloc)	/* ok to allocate more space */
{
	xfs_buf_t	*agbp;		/* allocation group header buffer */
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = atomicIncWithWrap((int *)&mp->m_agirotor, agcount);
	else
		pagno = XFS_INO_TO_AGNO(mp, parent);
	ASSERT(pagno < agcount);
	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes, exactly.
	 * Instead, we include whether there is a need to allocate inodes
	 * to mean that blocks must be allocated for them,
	 * if none are currently free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	for (;;) {
		mraccess(&mp->m_peraglock);
		pag = &mp->m_perag[agno];
		if (!pag->pagi_init) {
			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				mraccunlock(&mp->m_peraglock);
				goto nextag;
			}
		} else
			agbp = NULL;

		if (!pag->pagi_inodeok) {
			atomicIncWithWrap((int *)&mp->m_agirotor, agcount);
			goto unlock_nextag;
		}

		/*
		 * Is there enough free space for the file plus a block
		 * of inodes (if we need to allocate some)?
		 */
		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
		if (ineed && !pag->pagf_init) {
			if (agbp == NULL &&
			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				mraccunlock(&mp->m_peraglock);
				goto nextag;
			}
			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);
		}
		if (!ineed || pag->pagf_init) {
			if (ineed && !(longest = pag->pagf_longest))
				longest = pag->pagf_flcount > 0;
			if (!ineed ||
			    (pag->pagf_freeblks >= needspace + ineed &&
			     longest >= ineed &&
			     okalloc)) {
				if (agbp == NULL &&
				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
					agbp = NULL;
					mraccunlock(&mp->m_peraglock);
					goto nextag;
				}
				mraccunlock(&mp->m_peraglock);
				return agbp;
			}
		}
unlock_nextag:
		mraccunlock(&mp->m_peraglock);
		if (agbp)
			xfs_trans_brelse(tp, agbp);
nextag:
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp))
			return (xfs_buf_t *)0;
		agno++;
		if (agno == agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0)
				return (xfs_buf_t *)0;
			flags = 0;
		}
	}
}

/*
 * Visible inode allocation functions.
 */

/*
 * Allocate an inode on disk.
 * Mode is used to tell whether the new inode will need space, and whether
 * it is a directory.
 *
 * The arguments IO_agbp and alloc_done are defined to work within
 * the constraint of one allocation per transaction.
 * xfs_dialloc() is designed to be called twice if it has to do an
 * allocation to make more free inodes.  On the first call,
 * IO_agbp should be set to NULL.  If an inode is available,
 * i.e., xfs_dialloc() did not need to do an allocation, an inode
 * number is returned.  In this case, IO_agbp would be set to the
 * current ag_buf and alloc_done set to false.
 * If an allocation needed to be done, xfs_dialloc would return
 * the current ag_buf in IO_agbp and set alloc_done to true.
 * The caller should then commit the current transaction, allocate a new
 * transaction, and call xfs_dialloc() again, passing in the previous
 * value of IO_agbp.  IO_agbp should be held across the transactions.
 * Since the agbp is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the
 * on-disk data structures are updated.  The inode itself is not read
 * in, since doing so would break ordering constraints with xfs_reclaim.
 */
int
xfs_dialloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent inode (directory) */
	mode_t		mode,		/* mode bits for new inode */
	int		okalloc,	/* ok to allocate more space */
	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
	boolean_t	*alloc_done,	/* true if we needed to replenish
					   inode freelist */
	xfs_ino_t	*inop)		/* inode number allocated */
{
	xfs_agnumber_t	agcount;	/* number of allocation groups */
	xfs_buf_t	*agbp;		/* allocation group header's buffer */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_agi_t	*agi;		/* allocation group header structure */
	xfs_btree_cur_t	*cur;		/* inode allocation btree cursor */
	int		error;		/* error return value */
	int		i;		/* result code */
	int		ialloced;	/* inode allocation status */
	int		noroom = 0;	/* no space for inode blk allocation */
	xfs_ino_t	ino;		/* fs-relative inode to be returned */
	/* REFERENCED */
	int		j;		/* result code */
	xfs_mount_t	*mp;		/* file system mount structure */
	int		offset;		/* index of inode in chunk */
	xfs_agino_t	pagino;		/* parent's a.g. relative inode # */
	xfs_agnumber_t	pagno;		/* parent's allocation group number */
	xfs_inobt_rec_t	rec;		/* inode allocation record */
	xfs_agnumber_t	tagno;		/* testing allocation group number */
	xfs_btree_cur_t	*tcur;		/* temp cursor */
	xfs_inobt_rec_t	trec;		/* temp inode allocation record */


	if (*IO_agbp == NULL) {
		/*
		 * We do not have an agbp, so select an initial allocation
		 * group for inode allocation.
		 */
		agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
		/*
		 * Couldn't find an allocation group satisfying the
		 * criteria, give up.
		 */
		if (!agbp) {
			*inop = NULLFSINO;
			return 0;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
	} else {
		/*
		 * Continue where we left off before.  In this case, we
		 * know that the allocation group has free inodes.
		 */
		agbp = *IO_agbp;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
		ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
	}
	mp = tp->t_mountp;
	agcount = mp->m_sb.sb_agcount;
	agno = INT_GET(agi->agi_seqno, ARCH_CONVERT);
	tagno = agno;
	pagno = XFS_INO_TO_AGNO(mp, parent);
	pagino = XFS_INO_TO_AGINO(mp, parent);

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 */

	if (mp->m_maxicount &&
	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
		noroom = 1;
		okalloc = 0;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes.  Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	*alloc_done = B_FALSE;
	while (INT_GET(agi->agi_freecount, ARCH_CONVERT) == 0) {
		/*
		 * Don't do anything if we're not supposed to allocate
		 * any blocks, just go on to the next ag.
		 */
		if (okalloc) {
			/*
			 * Try to allocate some new inodes in the allocation
			 * group.
			 */
			if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
				xfs_trans_brelse(tp, agbp);
				if (error == ENOSPC) {
					*inop = NULLFSINO;
					return 0;
				} else
					return error;
			}
			if (ialloced) {
				/*
				 * We successfully allocated some inodes, return
				 * the current context to the caller so that it
				 * can commit the current transaction and call
				 * us again where we left off.
				 */
				ASSERT(INT_GET(agi->agi_freecount, ARCH_CONVERT) > 0);
				*alloc_done = B_TRUE;
				*IO_agbp = agbp;
				*inop = NULLFSINO;
				return 0;
			}
		}
		/*
		 * If it failed, give up on this ag.
		 */
		xfs_trans_brelse(tp, agbp);
		/*
		 * Go on to the next ag: get its ag header.
		 */
nextag:
		if (++tagno == agcount)
			tagno = 0;
		if (tagno == agno) {
			*inop = NULLFSINO;
			return noroom ? ENOSPC : 0;
		}
		mraccess(&mp->m_peraglock);
		if (mp->m_perag[tagno].pagi_inodeok == 0) {
			mraccunlock(&mp->m_peraglock);
			goto nextag;
		}
		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
		mraccunlock(&mp->m_peraglock);
		if (error)
			goto nextag;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
	}
	/*
	 * Here with an allocation group that has a free inode.
	 * Reset agno since we may have chosen a new ag in the
	 * loop above.
	 */
	agno = tagno;
	*IO_agbp = NULL;
	cur = xfs_btree_init_cursor(mp, tp, agbp, INT_GET(agi->agi_seqno, ARCH_CONVERT),
				    XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = INT_GET(agi->agi_newino, ARCH_CONVERT);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		do {
			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
					&rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);

		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	/*
	 * If in the same a.g. as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i)))
			goto error0;
		if (i != 0 &&
		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
			    &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as parent, done.
			 */
		}
		/*
		 * In the same a.g. as parent, but parent's chunk is full.
		 */
		else {
			int	doneleft;	/* done, to the left */
			int	doneright;	/* done, to the right */

			if (error)
				goto error0;
			ASSERT(i == 1);
			ASSERT(j == 1);
			/*
			 * Duplicate the cursor, search left & right
			 * simultaneously.
			 */
			if ((error = xfs_btree_dup_cursor(cur, &tcur)))
				goto error0;
			/*
			 * Search left with tcur, back up 1 record.
			 */
			if ((error = xfs_inobt_decrement(tcur, 0, &i)))
				goto error1;
			doneleft = !i;
			if (!doneleft) {
				if ((error = xfs_inobt_get_rec(tcur,
						&trec.ir_startino,
						&trec.ir_freecount,
						&trec.ir_free, &i, ARCH_NOCONVERT)))
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Search right with cur, go forward 1 record.
			 */
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error1;
			doneright = !i;
			if (!doneright) {
				if ((error = xfs_inobt_get_rec(cur,
						&rec.ir_startino,
						&rec.ir_freecount,
						&rec.ir_free, &i, ARCH_NOCONVERT)))
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Loop until we find the closest inode chunk
			 * with a free one.
			 */
			while (!doneleft || !doneright) {
				int	useleft;  /* using left inode
						     chunk this time */

				/*
				 * Figure out which block is closer,
				 * if both are valid.
				 */
				if (!doneleft && !doneright)
					useleft =
						pagino -
						(trec.ir_startino +
						 XFS_INODES_PER_CHUNK - 1) <
						rec.ir_startino - pagino;
				else
					useleft = !doneleft;
				/*
				 * If checking the left, does it have
				 * free inodes?
				 */
				if (useleft && trec.ir_freecount) {
					/*
					 * Yes, set it up as the chunk to use.
					 */
					rec = trec;
					xfs_btree_del_cursor(cur,
						XFS_BTREE_NOERROR);
					cur = tcur;
					break;
				}
				/*
				 * If checking the right, does it have
				 * free inodes?
				 */
				if (!useleft && rec.ir_freecount) {
					/*
					 * Yes, it's already set up.
					 */
					xfs_btree_del_cursor(tcur,
						XFS_BTREE_NOERROR);
					break;
				}
				/*
				 * If used the left, get another one
				 * further left.
				 */
				if (useleft) {
					if ((error = xfs_inobt_decrement(tcur, 0,
							&i)))
						goto error1;
					doneleft = !i;
					if (!doneleft) {
						if ((error = xfs_inobt_get_rec(
							    tcur,
							    &trec.ir_startino,
							    &trec.ir_freecount,
							    &trec.ir_free, &i, ARCH_NOCONVERT)))
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
				/*
				 * If used the right, get another one
				 * further right.
				 */
				else {
					if ((error = xfs_inobt_increment(cur, 0,
							&i)))
						goto error1;
					doneright = !i;
					if (!doneright) {
						if ((error = xfs_inobt_get_rec(
							    cur,
							    &rec.ir_startino,
							    &rec.ir_freecount,
							    &rec.ir_free, &i, ARCH_NOCONVERT)))
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
			}
			ASSERT(!doneleft || !doneright);
		}
	}
	/*
	 * In a different a.g. from the parent.
	 * See if the most recently allocated block has any free.
	 */
	else if (INT_GET(agi->agi_newino, ARCH_CONVERT) != NULLAGINO) {
		if ((error = xfs_inobt_lookup_eq(cur,
				INT_GET(agi->agi_newino, ARCH_CONVERT), 0, 0, &i)))
			goto error0;
		if (i == 1 &&
		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
			    &rec.ir_freecount, &rec.ir_free, &j, ARCH_NOCONVERT)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * The last chunk allocated in the group still has
			 * a free inode.
			 */
		}
		/*
		 * None left in the last group, search the whole a.g.
		 */
		else {
			if (error)
				goto error0;
			if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
				goto error0;
			ASSERT(i == 1);
			for (;;) {
				if ((error = xfs_inobt_get_rec(cur,
						&rec.ir_startino,
						&rec.ir_freecount, &rec.ir_free,
						&i, ARCH_NOCONVERT)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (rec.ir_freecount > 0)
					break;
				if ((error = xfs_inobt_increment(cur, 0, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			}
		}
	}
	offset = XFS_IALLOC_FIND_FREE(&rec.ir_free);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
		XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	XFS_INOBT_CLR_FREE(&rec, offset, ARCH_NOCONVERT);
	rec.ir_freecount--;
	if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
			rec.ir_free)))
		goto error0;
	INT_MOD(agi->agi_freecount, ARCH_CONVERT, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	mraccess(&mp->m_peraglock);
	mp->m_perag[tagno].pagi_freecount--;
	mraccunlock(&mp->m_peraglock);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
					&rec.ir_freecount, &rec.ir_free, &i, ARCH_NOCONVERT)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);
		ASSERT(freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
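
/*
 * Illustrative sketch only (not part of the library, kept out of the build
 * with #if 0): the two-call usage of xfs_dialloc() described in the comment
 * above it.  The commit/restart of the transaction in the middle is
 * caller-specific and only hinted at in comments; the helper name
 * example_alloc_inode() is hypothetical.
 */
#if 0
STATIC int				/* error code or 0 */
example_alloc_inode(
	xfs_trans_t	*tp,		/* active transaction */
	xfs_ino_t	parent,		/* parent directory inode number */
	mode_t		mode,		/* mode bits for the new inode */
	xfs_ino_t	*inop)		/* output: allocated inode number */
{
	xfs_buf_t	*ialloc_context = NULL;	/* IO_agbp, held across calls */
	boolean_t	alloc_done;	/* set if inode blocks were allocated */
	int		error;

	/*
	 * First call: IO_agbp is NULL.  Either an inode number comes back,
	 * or the locked AGI buffer is returned because new inode blocks had
	 * to be allocated within this transaction.
	 */
	if ((error = xfs_dialloc(tp, parent, mode, 1, &ialloc_context,
			&alloc_done, inop)))
		return error;
	if (*inop != NULLFSINO)
		return 0;		/* got an inode in one call */
	if (ialloc_context == NULL)
		return XFS_ERROR(ENOSPC);	/* no AG could satisfy us */
	/*
	 * Second call: the caller must commit tp and start a new transaction
	 * here, keeping ialloc_context (the locked AGI buffer) held across
	 * the two transactions.  That commit/restart is omitted from this
	 * sketch.  The second call is then guaranteed to find a free inode.
	 */
	return xfs_dialloc(tp, parent, mode, 1, &ialloc_context,
			&alloc_done, inop);
}
#endif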

/*
 * Return the location of the inode in bno/off, for mapping it into a buffer.
 */
/*ARGSUSED*/
int
xfs_dilocate(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	ino,		/* inode to locate */
	xfs_fsblock_t	*bno,		/* output: block containing inode */
	int		*len,		/* output: num blocks in inode cluster */
	int		*off,		/* output: index in block of inode */
	uint		flags)		/* flags concerning inode lookup */
{
	xfs_agblock_t	agbno;		/* block number of inode in the alloc group */
	xfs_buf_t	*agbp;		/* agi buffer */
	xfs_agino_t	agino;		/* inode number within alloc group */
	xfs_agnumber_t	agno;		/* allocation group number */
	int		blks_per_cluster; /* num blocks per inode cluster */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agino_t	chunk_agino;	/* first agino in inode chunk */
	__int32_t	chunk_cnt;	/* count of free inodes in chunk */
	xfs_inofree_t	chunk_free;	/* mask of free inodes in chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	int		error;		/* error code */
	int		i;		/* temp state */
	int		offset;		/* index of inode in its buffer */
	int		offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);
	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino))
		return XFS_ERROR(EINVAL);
	if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) ||
	    !(flags & XFS_IMAP_LOOKUP)) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);
		*bno = XFS_AGB_TO_FSB(mp, agno, agbno);
		*off = offset;
		*len = 1;
		return 0;
	}
	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
	if (*bno != NULLFSBLOCK) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);
		cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno);
		*off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
			offset;
		*len = blks_per_cluster;
		return 0;
	}
	if (mp->m_inoalign_mask) {
		offset_agbno = agbno & mp->m_inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		mraccess(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
		mraccunlock(&mp->m_peraglock);
		if (error)
			return error;
		cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
					    (xfs_inode_t *)0, 0);
		if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i)))
			goto error0;
		if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt,
				&chunk_free, &i, ARCH_NOCONVERT)))
			goto error0;
		if (i == 0)
			error = XFS_ERROR(EINVAL);
		xfs_trans_brelse(tp, agbp);
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		if (error)
			return error;
		chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino);
		offset_agbno = agbno - chunk_agbno;
	}
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / blks_per_cluster) * blks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);
	*bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno);
	*off = offset;
	*len = blks_per_cluster;
	return 0;
error0:
	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
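
/*
 * Illustrative sketch only (kept out of the build with #if 0): a typical
 * lookup through xfs_dilocate().  Passing *bno == NULLFSBLOCK together with
 * XFS_IMAP_LOOKUP requests a full cluster lookup; on return bno/len/off
 * describe the inode cluster and the inode's index within its buffer.  The
 * helper name example_locate_inode() is hypothetical.
 */
#if 0
STATIC int				/* error code or 0 */
example_locate_inode(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction, used to read the AGI */
	xfs_ino_t	ino)		/* inode to locate */
{
	xfs_fsblock_t	bno = NULLFSBLOCK;	/* no cluster hint yet */
	int		len;		/* blocks in the inode cluster */
	int		off;		/* index of inode in its buffer */
	int		error;

	if ((error = xfs_dilocate(mp, tp, ino, &bno, &len, &off,
			XFS_IMAP_LOOKUP)))
		return error;
	/*
	 * The caller would now read len filesystem blocks starting at bno
	 * and pick out the on-disk inode at index off within that buffer.
	 */
	return 0;
}
#endif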

/*
 * Compute and fill in value of m_in_maxlevels.
 */
void
xfs_ialloc_compute_maxlevels(
	xfs_mount_t	*mp)		/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
		XFS_INODES_PER_CHUNK_LOG;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_in_maxlevels = level;
}
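
/*
 * Illustrative sketch only (kept out of the build with #if 0): the same
 * level computation as xfs_ialloc_compute_maxlevels(), run on stand-alone
 * inputs.  The numbers below are hypothetical, not taken from any real
 * filesystem geometry.
 */
#if 0
STATIC int				/* computed number of btree levels */
example_inobt_maxlevels(void)
{
	uint	maxleafents = 1024 * 1024;	/* hypothetical record count */
	int	minleafrecs = 16;		/* hypothetical min recs per leaf */
	int	minnoderecs = 16;		/* hypothetical min recs per node */
	uint	maxblocks;
	int	level;

	/* leaf level: blocks needed to hold every possible record */
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	/* add node levels until everything fits under a single root block */
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	/* for the numbers above: 65536 -> 4096 -> 256 -> 16 -> 1, level == 5 */
	return level;
}
#endif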

/*
 * Log specified fields for the ag hdr (inode section)
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
						/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	xfs_agi_t		*agi;	/* allocation group header */

	agi = XFS_BUF_TO_AGI(bp);
	ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
#endif
	/*
	 * Compute byte offsets for the first and last fields.
	 */
	xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last);
	/*
	 * Log the allocation group inode header buffer.
	 */
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Read in the allocation group header (inode allocation section)
 */
int
xfs_ialloc_read_agi(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* allocation group hdr buf */
{
	xfs_agi_t	*agi;		/* allocation group header */
	int		agi_ok;		/* agi is consistent */
	xfs_buf_t	*bp;		/* allocation group hdr buf */
	xfs_daddr_t	d;		/* disk block address */
	int		error;
#ifdef DEBUG
	int		i;
#endif
	xfs_perag_t	*pag;		/* per allocation group data */


	ASSERT(agno != NULLAGNUMBER);
	d = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR);
	if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, 1, 0, &bp)))
		return error;
	ASSERT(bp && !XFS_BUF_GETERROR(bp));
	/*
	 * Validate the magic number of the agi block.
	 */
	agi = XFS_BUF_TO_AGI(bp);
	agi_ok =
		INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(INT_GET(agi->agi_versionnum, ARCH_CONVERT));
	if (XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
			XFS_RANDOM_IALLOC_READ_AGI)) {
		xfs_trans_brelse(tp, bp);
#ifdef __KERNEL__	/* additional, temporary, debugging code */
		cmn_err(CE_NOTE,
			"EFSCORRUPTED returned from file %s line %d",
			__FILE__, __LINE__);
#endif
		return XFS_ERROR(EFSCORRUPTED);
	}
	pag = &mp->m_perag[agno];
	if (!pag->pagi_init) {
		pag->pagi_freecount = INT_GET(agi->agi_freecount, ARCH_CONVERT);
		pag->pagi_init = 1;
	} else {
		/*
		 * It's possible for these to be out of sync if
		 * we are in the middle of a forced shutdown.
		 */
		ASSERT(pag->pagi_freecount == INT_GET(agi->agi_freecount, ARCH_CONVERT)
			|| XFS_FORCED_SHUTDOWN(mp));
	}
#ifdef DEBUG
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
		ASSERT(INT_GET(agi->agi_unlinked[i], ARCH_CONVERT) != 0);
#endif
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF);
	*bpp = bp;
	return 0;
}