/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"

/*
 * Allocation group level functions.
 */
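/*
 * Return the alignment (in filesystem blocks) required for newly created
 * inode chunks.  If the filesystem was made with inode alignment enabled
 * and the alignment covers at least one full inode cluster, chunks must
 * start on an sb_inoalignmt boundary; otherwise no extra alignment (1)
 * is needed.
 */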
static inline int
xfs_ialloc_cluster_alignment(
	xfs_alloc_arg_t	*args)
{
	if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
	    args->mp->m_sb.sb_inoalignmt >=
	     XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
		return args->mp->m_sb.sb_inoalignmt;
	return 1;
}

/*
 * Lookup the record equal to ino in the btree given by cur.
 */
STATIC int				/* error */
xfs_inobt_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	__int32_t		fcnt,	/* free inode count */
	xfs_inofree_t		free,	/* free inode mask */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_freecount = fcnt;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to ino
 * in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	__int32_t		fcnt,	/* free inode count */
	xfs_inofree_t		free,	/* free inode mask */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_freecount = fcnt;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to ino
 * in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	__int32_t		fcnt,	/* free inode count */
	xfs_inofree_t		free,	/* free inode mask */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_freecount = fcnt;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
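
/*
 * Note: the inode btree is keyed only by ir_startino, so the fcnt and
 * free arguments to the lookup helpers above merely pre-fill the search
 * record; callers that only care about the inode number pass 0 for both.
 */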

/*
 * Update the record referred to by cur to the value given
 * by [ino, fcnt, free].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_inobt_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	__int32_t		fcnt,	/* free inode count */
	xfs_inofree_t		free)	/* free inode mask */
{
	union xfs_btree_rec	rec;

	rec.inobt.ir_startino = cpu_to_be32(ino);
	rec.inobt.ir_freecount = cpu_to_be32(fcnt);
	rec.inobt.ir_free = cpu_to_be64(free);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_inobt_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_inobt_rec_incore_t	*irec,	/* btree record */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
		irec->ir_freecount = be32_to_cpu(rec->inobt.ir_freecount);
		irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
	}
	return error;
}

/*
 * Initialise a new set of inodes.
 */
STATIC void
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			blks_per_cluster, nbufs, ninodes;
	int			version;
	int			i, j;
	xfs_daddr_t		d;

	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the block size.
	 */
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		nbufs = length;
		ninodes = mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
				   mp->m_sb.sb_blocksize;
		nbufs = length / blks_per_cluster;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
	}
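	/*
	 * Illustrative example (assumed, not from a specific system): with
	 * 4k filesystem blocks, 256 byte inodes (sb_inopblock == 16) and an
	 * 8k inode cluster size, each buffer covers blks_per_cluster == 2
	 * blocks and ninodes == 32 inodes, so a 4-block inode chunk is
	 * initialised as nbufs == 2 buffers.
	 */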

	/*
	 * Figure out what version number to use in the inodes we create.
	 * If the superblock version has caught up to the one that supports
	 * the new inode format, then use the new inode version.  Otherwise
	 * use the old version so that old kernels will continue to be
	 * able to use the file system.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb))
		version = 2;
	else
		version = 1;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					 mp->m_bsize * blks_per_cluster,
					 XFS_BUF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));

		/*
		 * Initialize all inodes in this buffer and then log them.
		 *
		 * XXX: It would be much better if we had just one transaction
		 * to log a whole cluster of inodes instead of all the
		 * individual transactions causing a lot of log traffic.
		 */
		xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
		for (i = 0; i < ninodes; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;
			uint	isize = sizeof(struct xfs_dinode);

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);
			xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
		}
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
}

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_agnumber_t	agno;
	int		error;
	int		i;
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */

	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	agno = be32_to_cpu(agi->agi_seqno);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			XFS_IALLOC_BLOCKS(args.mp);
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block.  If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist.  Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	} else
		args.fsbno = NULLFSBLOCK;

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = xfs_ialloc_cluster_alignment(&args);
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = args.mp->m_in_maxlevels - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = xfs_ialloc_cluster_alignment(&args);
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);

	/*
	 * Stamp and write the inode buffers.
	 *
	 * Seed the new inode cluster with a random generation number. This
	 * prevents short-term reuse of generation numbers if a chunk is
	 * freed and then immediately reallocated. We use random numbers
	 * rather than a linear progression to prevent the next generation
	 * number from being easily guessable.
	 */
	xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len,
			      random32());

	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
	down_read(&args.mp->m_peraglock);
	args.mp->m_perag[agno].pagi_freecount += newlen;
	up_read(&args.mp->m_peraglock);
	agi->agi_newino = cpu_to_be32(newino);

	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		if ((error = xfs_inobt_lookup_eq(cur, thisino,
				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		if ((error = xfs_btree_insert(cur, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}

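/*
 * Return the next allocation group from the rotating per-mount counter,
 * wrapping back to AG 0 at m_maxagi.  Used below to spread new directories
 * (and hence their inode allocations) evenly across all allocation groups.
 */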
STATIC_INLINE xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor == mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and then mode.  Return the allocation group buffer.
 */
STATIC xfs_buf_t *			/* allocation group buffer */
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	mode_t		mode,		/* bits set to indicate file type */
	int		okalloc)	/* ok to allocate more space */
{
	xfs_buf_t	*agbp;		/* allocation group header buffer */
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}
	ASSERT(pagno < agcount);
	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes, exactly.
	 * Instead, we include whether there is a need to allocate inodes
	 * to mean that blocks must be allocated for them,
	 * if none are currently free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	down_read(&mp->m_peraglock);
	for (;;) {
		pag = &mp->m_perag[agno];
		if (!pag->pagi_init) {
			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
		} else
			agbp = NULL;

		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto unlock_nextag;
		}

		/*
		 * Is there enough free space for the file plus a block
		 * of inodes (if we need to allocate some)?
		 */
		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
		if (ineed && !pag->pagf_init) {
			if (agbp == NULL &&
			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);
		}
		if (!ineed || pag->pagf_init) {
			if (ineed && !(longest = pag->pagf_longest))
				longest = pag->pagf_flcount > 0;
			if (!ineed ||
			    (pag->pagf_freeblks >= needspace + ineed &&
			     longest >= ineed &&
			     okalloc)) {
				if (agbp == NULL &&
				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
					agbp = NULL;
					goto nextag;
				}
				up_read(&mp->m_peraglock);
				return agbp;
			}
		}
unlock_nextag:
		if (agbp)
			xfs_trans_brelse(tp, agbp);
nextag:
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			up_read(&mp->m_peraglock);
			return NULL;
		}
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0) {
				up_read(&mp->m_peraglock);
				return NULL;
			}
			flags = 0;
		}
	}
}

/*
 * Visible inode allocation functions.
 */

/*
 * Allocate an inode on disk.
 * Mode is used to tell whether the new inode will need space, and whether
 * it is a directory.
 *
 * The arguments IO_agbp and alloc_done are defined to work within
 * the constraint of one allocation per transaction.
 * xfs_dialloc() is designed to be called twice if it has to do an
 * allocation to make more free inodes.  On the first call,
 * IO_agbp should be set to NULL. If an inode is available,
 * i.e., xfs_dialloc() did not need to do an allocation, an inode
 * number is returned.  In this case, IO_agbp would be set to the
 * current ag_buf and alloc_done set to false.
 * If an allocation needed to be done, xfs_dialloc would return
 * the current ag_buf in IO_agbp and set alloc_done to true.
 * The caller should then commit the current transaction, allocate a new
 * transaction, and call xfs_dialloc() again, passing in the previous
 * value of IO_agbp.  IO_agbp should be held across the transactions.
 * Since the agbp is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the
 * on-disk data structures are updated.  The inode itself is not read
 * in, since doing so would break ordering constraints with xfs_reclaim.
 */
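/*
 * Illustrative calling pattern only (caller-side pseudocode; the commit
 * and re-allocate-transaction steps are sketched, not real helpers in
 * this file):
 *
 *	agbp = NULL;
 *	error = xfs_dialloc(tp, parent, mode, okalloc, &agbp,
 *			    &alloc_done, &ino);
 *	if (!error && alloc_done) {
 *		commit tp, start a new transaction, then call
 *		xfs_dialloc() again with the same agbp; the second
 *		call is guaranteed to find a free inode.
 *	}
 */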
int
xfs_dialloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent inode (directory) */
	mode_t		mode,		/* mode bits for new inode */
	int		okalloc,	/* ok to allocate more space */
	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
	boolean_t	*alloc_done,	/* true if we needed to replenish
					   inode freelist */
	xfs_ino_t	*inop)		/* inode number allocated */
{
	xfs_agnumber_t	agcount;	/* number of allocation groups */
	xfs_buf_t	*agbp;		/* allocation group header's buffer */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_agi_t	*agi;		/* allocation group header structure */
	xfs_btree_cur_t	*cur;		/* inode allocation btree cursor */
	int		error;		/* error return value */
	int		i;		/* result code */
	int		ialloced;	/* inode allocation status */
	int		noroom = 0;	/* no space for inode blk allocation */
	xfs_ino_t	ino;		/* fs-relative inode to be returned */
	/* REFERENCED */
	int		j;		/* result code */
	xfs_mount_t	*mp;		/* file system mount structure */
	int		offset;		/* index of inode in chunk */
	xfs_agino_t	pagino;		/* parent's a.g. relative inode # */
	xfs_agnumber_t	pagno;		/* parent's allocation group number */
	xfs_inobt_rec_incore_t rec;	/* inode allocation record */
	xfs_agnumber_t	tagno;		/* testing allocation group number */
	xfs_btree_cur_t	*tcur;		/* temp cursor */
	xfs_inobt_rec_incore_t trec;	/* temp inode allocation record */


	if (*IO_agbp == NULL) {
		/*
		 * We do not have an agbp, so select an initial allocation
		 * group for inode allocation.
		 */
		agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
		/*
		 * Couldn't find an allocation group satisfying the
		 * criteria, give up.
		 */
		if (!agbp) {
			*inop = NULLFSINO;
			return 0;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	} else {
		/*
		 * Continue where we left off before.  In this case, we
		 * know that the allocation group has free inodes.
		 */
		agbp = *IO_agbp;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
		ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
	}
	mp = tp->t_mountp;
	agcount = mp->m_sb.sb_agcount;
	agno = be32_to_cpu(agi->agi_seqno);
	tagno = agno;
	pagno = XFS_INO_TO_AGNO(mp, parent);
	pagino = XFS_INO_TO_AGINO(mp, parent);

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 */

	if (mp->m_maxicount &&
	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
		noroom = 1;
		okalloc = 0;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes.  Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	*alloc_done = B_FALSE;
	while (!agi->agi_freecount) {
		/*
		 * Don't do anything if we're not supposed to allocate
		 * any blocks, just go on to the next ag.
		 */
		if (okalloc) {
			/*
			 * Try to allocate some new inodes in the allocation
			 * group.
			 */
			if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
				xfs_trans_brelse(tp, agbp);
				if (error == ENOSPC) {
					*inop = NULLFSINO;
					return 0;
				} else
					return error;
			}
			if (ialloced) {
				/*
				 * We successfully allocated some inodes, return
				 * the current context to the caller so that it
				 * can commit the current transaction and call
				 * us again where we left off.
				 */
				ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
				*alloc_done = B_TRUE;
				*IO_agbp = agbp;
				*inop = NULLFSINO;
				return 0;
			}
		}
		/*
		 * If it failed, give up on this ag.
		 */
		xfs_trans_brelse(tp, agbp);
		/*
		 * Go on to the next ag: get its ag header.
		 */
nextag:
		if (++tagno == agcount)
			tagno = 0;
		if (tagno == agno) {
			*inop = NULLFSINO;
			return noroom ? ENOSPC : 0;
		}
		down_read(&mp->m_peraglock);
		if (mp->m_perag[tagno].pagi_inodeok == 0) {
			up_read(&mp->m_peraglock);
			goto nextag;
		}
		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
		up_read(&mp->m_peraglock);
		if (error)
			goto nextag;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	}
	/*
	 * Here with an allocation group that has a free inode.
	 * Reset agno since we may have chosen a new ag in the
	 * loop above.
	 */
	agno = tagno;
	*IO_agbp = NULL;
	cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int	freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);

		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	/*
	 * If in the same a.g. as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i)))
			goto error0;
		if (i != 0 &&
		    (error = xfs_inobt_get_rec(cur, &rec, &j)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as parent, done.
			 */
		}
		/*
		 * In the same a.g. as parent, but parent's chunk is full.
		 */
		else {
			int	doneleft;	/* done, to the left */
			int	doneright;	/* done, to the right */

			if (error)
				goto error0;
			ASSERT(i == 1);
			ASSERT(j == 1);
			/*
			 * Duplicate the cursor, search left & right
			 * simultaneously.
			 */
			if ((error = xfs_btree_dup_cursor(cur, &tcur)))
				goto error0;
			/*
			 * Search left with tcur, back up 1 record.
			 */
			if ((error = xfs_btree_decrement(tcur, 0, &i)))
				goto error1;
			doneleft = !i;
			if (!doneleft) {
				error = xfs_inobt_get_rec(tcur, &trec, &i);
				if (error)
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Search right with cur, go forward 1 record.
			 */
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto error1;
			doneright = !i;
			if (!doneright) {
				error = xfs_inobt_get_rec(cur, &rec, &i);
				if (error)
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Loop until we find the closest inode chunk
			 * with a free one.
			 */
			while (!doneleft || !doneright) {
				int	useleft;  /* using left inode
						     chunk this time */

				/*
				 * Figure out which block is closer,
				 * if both are valid.
				 */
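				/*
				 * For example (illustrative numbers,
				 * XFS_INODES_PER_CHUNK == 64): parent at
				 * agino 200, left chunk starting at 128
				 * (last inode 191, distance 9), right
				 * chunk starting at 256 (distance 56):
				 * the left chunk is closer, so useleft
				 * ends up true.
				 */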
				if (!doneleft && !doneright)
					useleft =
						pagino -
						(trec.ir_startino +
						 XFS_INODES_PER_CHUNK - 1) <
						 rec.ir_startino - pagino;
				else
					useleft = !doneleft;
				/*
				 * If checking the left, does it have
				 * free inodes?
				 */
				if (useleft && trec.ir_freecount) {
					/*
					 * Yes, set it up as the chunk to use.
					 */
					rec = trec;
					xfs_btree_del_cursor(cur,
						XFS_BTREE_NOERROR);
					cur = tcur;
					break;
				}
				/*
				 * If checking the right, does it have
				 * free inodes?
				 */
				if (!useleft && rec.ir_freecount) {
					/*
					 * Yes, it's already set up.
					 */
					xfs_btree_del_cursor(tcur,
						XFS_BTREE_NOERROR);
					break;
				}
				/*
				 * If used the left, get another one
				 * further left.
				 */
				if (useleft) {
					if ((error = xfs_btree_decrement(tcur, 0,
							&i)))
						goto error1;
					doneleft = !i;
					if (!doneleft) {
						error = xfs_inobt_get_rec(
							tcur, &trec, &i);
						if (error)
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
				/*
				 * If used the right, get another one
				 * further right.
				 */
				else {
					if ((error = xfs_btree_increment(cur, 0,
							&i)))
						goto error1;
					doneright = !i;
					if (!doneright) {
						error = xfs_inobt_get_rec(
							cur, &rec, &i);
						if (error)
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
			}
			ASSERT(!doneleft || !doneright);
		}
	}
	/*
	 * In a different a.g. from the parent.
	 * See if the most recently allocated block has any free.
	 */
	else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
		if ((error = xfs_inobt_lookup_eq(cur,
				be32_to_cpu(agi->agi_newino), 0, 0, &i)))
			goto error0;
		if (i == 1 &&
		    (error = xfs_inobt_get_rec(cur, &rec, &j)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * The last chunk allocated in the group still has
			 * a free inode.
			 */
		}
		/*
		 * None left in the last group, search the whole a.g.
		 */
		else {
			if (error)
				goto error0;
			if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
				goto error0;
			ASSERT(i == 1);
			for (;;) {
				error = xfs_inobt_get_rec(cur, &rec, &i);
				if (error)
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (rec.ir_freecount > 0)
					break;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			}
		}
	}
	offset = xfs_ialloc_find_free(&rec.ir_free);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
			rec.ir_free)))
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	down_read(&mp->m_peraglock);
	mp->m_perag[tagno].pagi_freecount--;
	up_read(&mp->m_peraglock);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int	freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Free disk inode.  Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	inode,		/* inode to be freed */
	xfs_bmap_free_t	*flist,		/* extents to free */
	int		*delete,	/* set if inode cluster was deleted */
	xfs_ino_t	*first_ino)	/* first inode in deleted cluster */
{
	/* REFERENCED */
	xfs_agblock_t	agbno;	/* block number containing inode */
	xfs_buf_t	*agbp;	/* buffer containing allocation group header */
	xfs_agino_t	agino;	/* inode number relative to allocation group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agi_t	*agi;	/* allocation group header */
	xfs_btree_cur_t	*cur;	/* inode btree cursor */
	int		error;	/* error return value */
	int		i;	/* result code */
	int		ilen;	/* inodes in an inode cluster */
	xfs_mount_t	*mp;	/* mount structure for filesystem */
	int		off;	/* offset of inode in inode chunk */
	xfs_inobt_rec_incore_t rec;	/* btree record */

	mp = tp->t_mountp;

	/*
	 * Break up inode number into its components.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		cmn_err(CE_WARN,
			"xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s.  Returning EINVAL.",
			agno, mp->m_sb.sb_agcount, mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
		cmn_err(CE_WARN,
			"xfs_difree: inode != XFS_AGINO_TO_INO() "
			"(%llu != %llu) on %s.  Returning EINVAL.",
			(unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino),
			mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks) {
		cmn_err(CE_WARN,
			"xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s.  Returning EINVAL.",
			agbno, mp->m_sb.sb_agblocks, mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * Get the allocation group header.
	 */
	down_read(&mp->m_peraglock);
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	up_read(&mp->m_peraglock);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		return error;
	}
	agi = XFS_BUF_TO_AGI(agbp);
	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	ASSERT(agbno < be32_to_cpu(agi->agi_length));
	/*
	 * Initialize the cursor.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				goto error0;
			if (i) {
				freecount += rec.ir_freecount;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto error0;
			}
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	/*
	 * Look for the entry describing this inode.
	 */
	if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_inobt_lookup_le() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_inobt_get_rec() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
	/*
	 * Mark the inode free & increment the count.
	 */
	rec.ir_free |= XFS_INOBT_MASK(off);
	rec.ir_freecount++;

	/*
	 * When an inode cluster is free, it becomes eligible for removal
	 */
	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {

		*delete = 1;
		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 */
		ilen = XFS_IALLOC_INODES(mp);
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		down_read(&mp->m_peraglock);
		mp->m_perag[agno].pagi_freecount -= ilen - 1;
		up_read(&mp->m_peraglock);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		if ((error = xfs_btree_delete(cur, &i))) {
			cmn_err(CE_WARN, "xfs_difree: xfs_btree_delete returned an error %d on %s.\n",
				error, mp->m_fsname);
			goto error0;
		}

		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
				agno, XFS_INO_TO_AGBNO(mp, rec.ir_startino)),
				XFS_IALLOC_BLOCKS(mp), flist, mp);
	} else {
		*delete = 0;

		if ((error = xfs_inobt_update(cur, rec.ir_startino,
				rec.ir_freecount, rec.ir_free))) {
			cmn_err(CE_WARN,
				"xfs_difree: xfs_inobt_update() returned an error %d on %s.  Returning error.",
				error, mp->m_fsname);
			goto error0;
		}
		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		down_read(&mp->m_peraglock);
		mp->m_perag[agno].pagi_freecount++;
		up_read(&mp->m_peraglock);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				goto error0;
			if (i) {
				freecount += rec.ir_freecount;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto error0;
			}
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 */
int
xfs_imap(
	xfs_mount_t	 *mp,	/* file system mount structure */
	xfs_trans_t	 *tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	struct xfs_imap	*imap,	/* location map structure */
	uint		flags)	/* flags for inode btree lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	int		blks_per_cluster; /* num blocks per inode cluster */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	int		error;	/* error code */
	int		offset;	/* index of inode in its buffer */
	int		offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);

	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/* no diagnostics for bulkstat, ino comes from userspace */
		if (flags & XFS_IGET_BULKSTAT)
			return XFS_ERROR(EINVAL);
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_imap: agno (%d) >= "
					"mp->m_sb.sb_agcount (%d)",
					agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_imap: agbno (0x%llx) >= "
					"mp->m_sb.sb_agblocks (0x%lx)",
					(unsigned long long) agbno,
					(unsigned long) mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_imap: ino (0x%llx) != "
					"XFS_AGINO_TO_INO(mp, agno, agino) "
					"(0x%llx)",
					ino, XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the inode cluster size is the same as the blocksize or
	 * smaller we get to the buffer by simple arithmetic.
	 */
	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
		imap->im_len = XFS_FSB_TO_BB(mp, 1);
		imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
		return 0;
	}

	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;

	/*
	 * If we get a block number passed from bulkstat we can use it to
	 * find the buffer easily.
	 */
	if (imap->im_blkno) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
		offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;

		imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
		imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
		return 0;
	}

	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location.  Otherwise we have to do a btree
	 * lookup to find the location.
	 */
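	/*
	 * For example (illustrative numbers): with chunks aligned to 4
	 * blocks (m_inoalign_mask == 3), an inode living in agbno 22 is
	 * offset_agbno == 2 blocks into the chunk that starts at
	 * chunk_agbno == 20.
	 */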
	if (mp->m_inoalign_mask) {
		offset_agbno = agbno & mp->m_inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		xfs_btree_cur_t	*cur;	/* inode btree cursor */
		xfs_inobt_rec_incore_t chunk_rec;
		xfs_buf_t	*agbp;	/* agi buffer */
		int		i;	/* temp state */

		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
		up_read(&mp->m_peraglock);
		if (error) {
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
					"xfs_ialloc_read_agi() returned "
					"error %d, agno %d",
					error, agno);
			return error;
		}

		cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
		error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i);
		if (error) {
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
					"xfs_inobt_lookup_le() failed");
			goto error0;
		}

		error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
		if (error) {
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
					"xfs_inobt_get_rec() failed");
			goto error0;
		}
		if (i == 0) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
					"xfs_inobt_get_rec() failed");
#endif /* DEBUG */
			error = XFS_ERROR(EINVAL);
		}
 error0:
		xfs_trans_brelse(tp, agbp);
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		if (error)
			return error;
		chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
		offset_agbno = agbno - chunk_agbno;
	}

	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / blks_per_cluster) * blks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
	imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return an error rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
			"(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
			" XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
			(unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return XFS_ERROR(EINVAL);
	}

	return 0;
}

/*
 * Compute and fill in value of m_in_maxlevels.
 */
void
xfs_ialloc_compute_maxlevels(
	xfs_mount_t	*mp)		/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
		XFS_INODES_PER_CHUNK_LOG;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_in_maxlevels = level;
}

/*
 * Log specified fields for the ag hdr (inode section)
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
					/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	xfs_agi_t		*agi;	/* allocation group header */

	agi = XFS_BUF_TO_AGI(bp);
	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
#endif
	/*
	 * Compute byte offsets for the first and last fields.
	 */
	xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last);
	/*
	 * Log the allocation group inode header buffer.
	 */
	xfs_trans_log_buf(tp, bp, first, last);
}

#ifdef DEBUG
STATIC void
xfs_check_agi_unlinked(
	struct xfs_agi	*agi)
{
	int		i;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
		ASSERT(agi->agi_unlinked[i]);
}
#else
#define xfs_check_agi_unlinked(agi)
#endif

/*
 * Read in the allocation group header (inode allocation section)
 */
int
xfs_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	int			agi_ok;	/* agi is consistent */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, bpp);
	if (error)
		return error;

	ASSERT(*bpp && !XFS_BUF_GETERROR(*bpp));
	agi = XFS_BUF_TO_AGI(*bpp);

	/*
	 * Validate the magic number of the agi block.
	 */
	agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) &&
		be32_to_cpu(agi->agi_seqno) == agno;
	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
			XFS_RANDOM_IALLOC_READ_AGI))) {
		XFS_CORRUPTION_ERROR("xfs_read_agi", XFS_ERRLEVEL_LOW,
				     mp, agi);
		xfs_trans_brelse(tp, *bpp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGI, XFS_AGI_REF);

	xfs_check_agi_unlinked(agi);
	return 0;
}

int
xfs_ialloc_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	error = xfs_read_agi(mp, tp, agno, bpp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(*bpp);
	pag = &mp->m_perag[agno];

	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	}

	/*
	 * It's possible for these to be out of sync if
	 * we are in the middle of a forced shutdown.
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
	return 0;
}

/*
 * Read in the agi to initialise the per-ag data in the mount structure
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno)		/* allocation group number */
{
	xfs_buf_t	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}