/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}
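/*
 * Illustrative usage sketch (editorial, not part of the original source):
 * callers typically pair a lookup with a record fetch, checking the stat
 * result before trusting the output, e.g.:
 *
 *	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
 *	if (!error && i)
 *		error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
 *
 * A zero stat means no matching record was found, not an error.
 */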
/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC void
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen)	/* result length */
{
	xfs_agblock_t	bno;
	xfs_extlen_t	len;

	/* Trim busy sections out of found extent */
	xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
		xfs_extlen_t	diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}
}
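/*
 * Worked example (illustrative numbers only): with args->alignment = 8 and
 * a trimmed free extent starting at block 37 with length 20 (>= minlen),
 * aligned_bno rounds up to 40, diff = 3, so *resbno = 40 and *reslen = 17.
 * If the alignment skip consumed the whole extent (diff >= len), *reslen
 * would be reported as 0 and the caller would reject the extent.
 */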
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	char		userdata,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
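/*
 * Worked example (illustrative numbers only, userdata = 0): wantbno = 100,
 * wantlen = 10, alignment = 1, freebno = 90, freelen = 30.  Since freebno
 * is below wantbno and freeend (120) >= wantend (110), the unaligned branch
 * picks newbno1 = wantbno = 100 and the returned diff is 0 -- the request
 * fits exactly where the caller wanted it.  If instead freebno were 110,
 * the free extent starts past the target, newbno1 = 110 and the diff is 10.
 */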
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}
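/*
 * Worked example (illustrative numbers only): with prod = 4, mod = 0,
 * minlen = 4, maxlen = 64 and an incoming args->len of 22, k = 22 % 4 = 2,
 * which is greater than mod, so the length is trimmed to 22 - 2 - 0 = 20,
 * a whole multiple of prod.  A trim that would drop the length below
 * minlen leaves args->len unchanged instead.
 */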
/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_agf_t	*agf;		/* a.g. freelist header */
	int		diff;		/* free space difference */

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;		/* shrink the allocated space */
	if (args->len >= args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	return 0;
}
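/*
 * Worked example (illustrative numbers only): if the AGF shows 100 free
 * blocks, args->len is 60 and args->minleft is 50, then diff is
 * 100 - 60 - 50 = -10 and the allocation is shrunk to 50 blocks.  The
 * shrunken length still has to satisfy args->minlen; otherwise the
 * allocation is abandoned with args->agbno set to NULLAGBLOCK.
 */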
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
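/*
 * Worked example of the "middle" case above (illustrative numbers only):
 * allocating rbno = 110, rlen = 10 out of a free extent fbno = 100,
 * flen = 50 leaves two new free extents, [100, 10] and [120, 30].  Both
 * are inserted into the by-size tree, the original by-block record is
 * updated to describe [100, 10], and a second by-block record is added
 * for [120, 30].
 */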
static bool
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	int		i;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_uuid))
		return false;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return false;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return false;

	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return false;
	}
	return true;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		agfl_ok = 1;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	agfl_ok = xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF);

	agfl_ok = agfl_ok && xfs_agfl_verify(bp);

	if (!agfl_ok) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item *bip = bp->b_fspriv;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_agfl_verify(bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
};
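/*
 * Note (editorial, not from the original source): on CRC-enabled (v5)
 * filesystems the read verifier checks the checksum and structure, while
 * the write verifier stamps the buffer's LSN and then recomputes the CRC.
 * Callers get these callbacks by passing &xfs_agfl_buf_ops when the AGFL
 * buffer is read, as xfs_alloc_read_agfl() does below.
 */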
/*
 * Read in the allocation group free block array.
 */
STATIC int				/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	ASSERT(!xfs_buf_geterror(bp));
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
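/*
 * Sign convention (illustrative note, not from the original source):
 * freeing an extent passes a positive length, e.g.
 * xfs_alloc_update_counters(tp, pag, agbp, 8) adds 8 blocks to both
 * pag->pagf_freeblks and the on-disk agf_freeblks, while allocation paths
 * pass -(long)args->len.  A free-block count that exceeds the AG length
 * is reported as EFSCORRUPTED.
 */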
/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	if (!args->isfl) {
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));
	}

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}
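/*
 * Illustrative note (not from the original source): callers choose the
 * strategy through args->type.  A sketch of a "near" request might set
 *
 *	args.type   = XFS_ALLOCTYPE_NEAR_BNO;
 *	args.agbno  = target block within the AG;
 *	args.minlen = 1; args.maxlen = len;
 *	args.prod   = 1; args.mod = 0; args.alignment = 1;
 *
 * before calling in.  XFS_ALLOCTYPE_THIS_BNO demands the exact block and
 * XFS_ALLOCTYPE_THIS_AG just wants space anywhere in the AG; these map to
 * the _exact and _size routines respectively.
 */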
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of trimmed extent */
	xfs_extlen_t	tlen;	/* length of trimmed extent */
	xfs_agblock_t	tend;	/* end block of trimmed extent */
	int		i;	/* success/failure of operation */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,	/* extent length */
	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
	xfs_extlen_t		*slena,	/* aligned extent length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->userdata, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}
1da177e4
LT
849/*
850 * Allocate a variable extent near bno in the allocation group agno.
851 * Extent's length (returned in len) will be between minlen and maxlen,
852 * and of the form k * prod + mod unless there's nothing that large.
853 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
854 */
855STATIC int /* error */
856xfs_alloc_ag_vextent_near(
857 xfs_alloc_arg_t *args) /* allocation argument structure */
858{
859 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
860 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
861 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
1da177e4
LT
862 xfs_agblock_t gtbno; /* start bno of right side entry */
863 xfs_agblock_t gtbnoa; /* aligned ... */
864 xfs_extlen_t gtdiff; /* difference to right side entry */
865 xfs_extlen_t gtlen; /* length of right side entry */
e26f0501 866 xfs_extlen_t gtlena; /* aligned ... */
1da177e4
LT
867 xfs_agblock_t gtnew; /* useful start bno of right side */
868 int error; /* error code */
869 int i; /* result code, temporary */
870 int j; /* result code, temporary */
871 xfs_agblock_t ltbno; /* start bno of left side entry */
872 xfs_agblock_t ltbnoa; /* aligned ... */
873 xfs_extlen_t ltdiff; /* difference to left side entry */
1da177e4 874 xfs_extlen_t ltlen; /* length of left side entry */
e26f0501 875 xfs_extlen_t ltlena; /* aligned ... */
1da177e4
LT
876 xfs_agblock_t ltnew; /* useful start bno of left side */
877 xfs_extlen_t rlen; /* length of returned extent */
e26f0501 878 int forced = 0;
63d20d6e 879#ifdef DEBUG
1da177e4
LT
880 /*
881 * Randomly don't execute the first algorithm.
882 */
883 int dofirst; /* set to do first algorithm */
884
ecb3403d 885 dofirst = prandom_u32() & 1;
1da177e4 886#endif
e26f0501
CH
887
888restart:
889 bno_cur_lt = NULL;
890 bno_cur_gt = NULL;
891 ltlen = 0;
892 gtlena = 0;
893 ltlena = 0;
894
1da177e4
LT
895 /*
896 * Get a cursor for the by-size btree.
897 */
561f7d17
CH
898 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
899 args->agno, XFS_BTNUM_CNT);
e26f0501 900
1da177e4
LT
901 /*
902 * See if there are any free extents as big as maxlen.
903 */
904 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
905 goto error0;
906 /*
907 * If none, then pick up the last entry in the tree unless the
908 * tree is empty.
909 */
910 if (!i) {
911 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
912 &ltlen, &i)))
913 goto error0;
914 if (i == 0 || ltlen == 0) {
915 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
e26f0501 916 trace_xfs_alloc_near_noentry(args);
1da177e4
LT
917 return 0;
918 }
919 ASSERT(i == 1);
920 }
921 args->wasfromfl = 0;
e26f0501 922
1da177e4
LT
923 /*
924 * First algorithm.
925 * If the requested extent is large wrt the freespaces available
926 * in this a.g., then the cursor will be pointing to a btree entry
927 * near the right edge of the tree. If it's in the last btree leaf
928 * block, then we just examine all the entries in that block
929 * that are big enough, and pick the best one.
930 * This is written as a while loop so we can break out of it,
931 * but we never loop back to the top.
932 */
933 while (xfs_btree_islastblock(cnt_cur, 0)) {
934 xfs_extlen_t bdiff;
935 int besti=0;
936 xfs_extlen_t blen=0;
937 xfs_agblock_t bnew=0;
938
63d20d6e
DC
939#ifdef DEBUG
940 if (dofirst)
1da177e4
LT
941 break;
942#endif
943 /*
944 * Start from the entry that lookup found, sequence through
945 * all larger free blocks. If we're actually pointing at a
946 * record smaller than maxlen, go to the start of this block,
947 * and skip all those smaller than minlen.
948 */
949 if (ltlen || args->alignment > 1) {
950 cnt_cur->bc_ptrs[0] = 1;
951 do {
952 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
953 &ltlen, &i)))
954 goto error0;
955 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
956 if (ltlen >= args->minlen)
957 break;
637aa50f 958 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1da177e4
LT
959 goto error0;
960 } while (i);
961 ASSERT(ltlen >= args->minlen);
962 if (!i)
963 break;
964 }
965 i = cnt_cur->bc_ptrs[0];
966 for (j = 1, blen = 0, bdiff = 0;
967 !error && j && (blen < args->maxlen || bdiff > 0);
637aa50f 968 error = xfs_btree_increment(cnt_cur, 0, &j)) {
1da177e4
LT
969 /*
970 * For each entry, decide if it's better than
971 * the previous best entry.
972 */
973 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
974 goto error0;
975 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
86fa8af6
CH
976 xfs_alloc_compute_aligned(args, ltbno, ltlen,
977 &ltbnoa, &ltlena);
e6430037 978 if (ltlena < args->minlen)
1da177e4
LT
979 continue;
980 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
981 xfs_alloc_fix_len(args);
982 ASSERT(args->len >= args->minlen);
983 if (args->len < blen)
984 continue;
985 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
211d022c
JK
986 args->alignment, args->userdata, ltbnoa,
987 ltlena, &ltnew);
1da177e4
LT
988 if (ltnew != NULLAGBLOCK &&
989 (args->len > blen || ltdiff < bdiff)) {
990 bdiff = ltdiff;
991 bnew = ltnew;
992 blen = args->len;
993 besti = cnt_cur->bc_ptrs[0];
994 }
995 }
996 /*
997 * It didn't work. We COULD be in a case where
998 * there's a good record somewhere, so try again.
999 */
1000 if (blen == 0)
1001 break;
1002 /*
1003 * Point at the best entry, and retrieve it again.
1004 */
1005 cnt_cur->bc_ptrs[0] = besti;
1006 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1007 goto error0;
1008 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
73523a2e 1009 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1da177e4
LT
1010 args->len = blen;
1011 if (!xfs_alloc_fix_minleft(args)) {
1012 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
0b1b213f 1013 trace_xfs_alloc_near_nominleft(args);
1da177e4
LT
1014 return 0;
1015 }
1016 blen = args->len;
1017 /*
1018 * We are allocating starting at bnew for blen blocks.
1019 */
1020 args->agbno = bnew;
1021 ASSERT(bnew >= ltbno);
73523a2e 1022 ASSERT(bnew + blen <= ltbno + ltlen);
1da177e4
LT
1023 /*
1024 * Set up a cursor for the by-bno tree.
1025 */
561f7d17
CH
1026 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1027 args->agbp, args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1028 /*
1029 * Fix up the btree entries.
1030 */
1031 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1032 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1033 goto error0;
1034 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1035 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
0b1b213f
CH
1036
1037 trace_xfs_alloc_near_first(args);
1da177e4
LT
1038 return 0;
1039 }
1040 /*
1041 * Second algorithm.
1042 * Search in the by-bno tree to the left and to the right
1043 * simultaneously, until in each case we find a space big enough,
1044 * or run into the edge of the tree. When we run into the edge,
1045 * we deallocate that cursor.
1046 * If both searches succeed, we compare the two spaces and pick
1047 * the better one.
1048 * With alignment, it's possible for both to fail; the upper
1049 * level algorithm that picks allocation groups for allocations
1050 * is not supposed to do this.
1051 */
1052 /*
1053 * Allocate and initialize the cursor for the leftward search.
1054 */
561f7d17
CH
1055 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1056 args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1057 /*
1058 * Lookup <= bno to find the leftward search's starting point.
1059 */
1060 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1061 goto error0;
1062 if (!i) {
1063 /*
1064 * Didn't find anything; use this cursor for the rightward
1065 * search.
1066 */
1067 bno_cur_gt = bno_cur_lt;
1068 bno_cur_lt = NULL;
1069 }
1070 /*
1071 * Found something. Duplicate the cursor for the rightward search.
1072 */
1073 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1074 goto error0;
1075 /*
1076 * Increment the cursor, so we will point at the entry just right
1077 * of the leftward entry if any, or to the leftmost entry.
1078 */
637aa50f 1079 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1da177e4
LT
1080 goto error0;
1081 if (!i) {
1082 /*
1083 * It failed, there are no rightward entries.
1084 */
1085 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1086 bno_cur_gt = NULL;
1087 }
1088 /*
1089 * Loop going left with the leftward cursor, right with the
1090 * rightward cursor, until either both directions give up or
1091 * we find an entry at least as big as minlen.
1092 */
1093 do {
1094 if (bno_cur_lt) {
1095 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1096 goto error0;
1097 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
86fa8af6
CH
1098 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1099 &ltbnoa, &ltlena);
12375c82 1100 if (ltlena >= args->minlen)
1da177e4 1101 break;
8df4da4a 1102 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1da177e4
LT
1103 goto error0;
1104 if (!i) {
1105 xfs_btree_del_cursor(bno_cur_lt,
1106 XFS_BTREE_NOERROR);
1107 bno_cur_lt = NULL;
1108 }
1109 }
1110 if (bno_cur_gt) {
1111 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1112 goto error0;
1113 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
86fa8af6
CH
1114 xfs_alloc_compute_aligned(args, gtbno, gtlen,
1115 &gtbnoa, &gtlena);
12375c82 1116 if (gtlena >= args->minlen)
1da177e4 1117 break;
637aa50f 1118 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1da177e4
LT
1119 goto error0;
1120 if (!i) {
1121 xfs_btree_del_cursor(bno_cur_gt,
1122 XFS_BTREE_NOERROR);
1123 bno_cur_gt = NULL;
1124 }
1125 }
1126 } while (bno_cur_lt || bno_cur_gt);
489a150f 1127
1da177e4
LT
1128 /*
1129 * Got both cursors still active, need to find better entry.
1130 */
1131 if (bno_cur_lt && bno_cur_gt) {
1da177e4
LT
1132 if (ltlena >= args->minlen) {
1133 /*
489a150f 1134 * Left side is good, look for a right side entry.
1da177e4
LT
1135 */
1136 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1137 xfs_alloc_fix_len(args);
489a150f 1138 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
211d022c
JK
1139 args->alignment, args->userdata, ltbnoa,
1140 ltlena, &ltnew);
489a150f
CH
1141
1142 error = xfs_alloc_find_best_extent(args,
1143 &bno_cur_lt, &bno_cur_gt,
e26f0501
CH
1144 ltdiff, &gtbno, &gtlen,
1145 &gtbnoa, &gtlena,
489a150f
CH
1146 0 /* search right */);
1147 } else {
1148 ASSERT(gtlena >= args->minlen);
1149
1da177e4 1150 /*
489a150f 1151 * Right side is good, look for a left side entry.
1da177e4
LT
1152 */
1153 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1154 xfs_alloc_fix_len(args);
489a150f 1155 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
211d022c
JK
1156 args->alignment, args->userdata, gtbnoa,
1157 gtlena, &gtnew);
489a150f
CH
1158
1159 error = xfs_alloc_find_best_extent(args,
1160 &bno_cur_gt, &bno_cur_lt,
e26f0501
CH
1161 gtdiff, &ltbno, &ltlen,
1162 &ltbnoa, &ltlena,
489a150f 1163 1 /* search left */);
1da177e4 1164 }
489a150f
CH
1165
1166 if (error)
1167 goto error0;
1da177e4 1168 }
489a150f 1169
1da177e4
LT
1170 /*
1171 * If we couldn't get anything, give up.
1172 */
1173 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
e3a746f5
DC
1174 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1175
e26f0501
CH
1176 if (!forced++) {
1177 trace_xfs_alloc_near_busy(args);
1178 xfs_log_force(args->mp, XFS_LOG_SYNC);
1179 goto restart;
1180 }
0b1b213f 1181 trace_xfs_alloc_size_neither(args);
1da177e4
LT
1182 args->agbno = NULLAGBLOCK;
1183 return 0;
1184 }
489a150f 1185
1da177e4
LT
1186 /*
1187 * At this point we have selected a freespace entry, either to the
1188 * left or to the right. If it's on the right, copy all the
1189 * useful variables to the "left" set so we only have one
1190 * copy of this code.
1191 */
1192 if (bno_cur_gt) {
1193 bno_cur_lt = bno_cur_gt;
1194 bno_cur_gt = NULL;
1195 ltbno = gtbno;
1196 ltbnoa = gtbnoa;
1197 ltlen = gtlen;
1198 ltlena = gtlena;
1199 j = 1;
1200 } else
1201 j = 0;
489a150f 1202
1da177e4
LT
1203 /*
1204 * Fix up the length and compute the useful address.
1205 */
1da177e4
LT
1206 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1207 xfs_alloc_fix_len(args);
1208 if (!xfs_alloc_fix_minleft(args)) {
0b1b213f 1209 trace_xfs_alloc_near_nominleft(args);
1da177e4
LT
1210 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1211 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1212 return 0;
1213 }
1214 rlen = args->len;
e26f0501 1215 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
211d022c 1216 args->userdata, ltbnoa, ltlena, &ltnew);
1da177e4 1217 ASSERT(ltnew >= ltbno);
e26f0501 1218 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
16259e7d 1219 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1da177e4 1220 args->agbno = ltnew;
e26f0501 1221
1da177e4
LT
1222 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1223 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1224 goto error0;
0b1b213f
CH
1225
1226 if (j)
1227 trace_xfs_alloc_near_greater(args);
1228 else
1229 trace_xfs_alloc_near_lesser(args);
1230
1da177e4
LT
1231 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1232 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1233 return 0;
1234
1235 error0:
0b1b213f 1236 trace_xfs_alloc_near_error(args);
1da177e4
LT
1237 if (cnt_cur != NULL)
1238 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1239 if (bno_cur_lt != NULL)
1240 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1241 if (bno_cur_gt != NULL)
1242 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1243 return error;
1244}
1245
1246/*
1247 * Allocate a variable extent anywhere in the allocation group agno.
1248 * Extent's length (returned in len) will be between minlen and maxlen,
1249 * and of the form k * prod + mod unless there's nothing that large.
1250 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1251 */
1252STATIC int /* error */
1253xfs_alloc_ag_vextent_size(
1254 xfs_alloc_arg_t *args) /* allocation argument structure */
1255{
1256 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1257 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1258 int error; /* error result */
1259 xfs_agblock_t fbno; /* start of found freespace */
1260 xfs_extlen_t flen; /* length of found freespace */
1da177e4
LT
1261 int i; /* temp status variable */
1262 xfs_agblock_t rbno; /* returned block number */
1263 xfs_extlen_t rlen; /* length of returned extent */
e26f0501 1264 int forced = 0;
1da177e4 1265
e26f0501 1266restart:
1da177e4
LT
1267 /*
1268 * Allocate and initialize a cursor for the by-size btree.
1269 */
561f7d17
CH
1270 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1271 args->agno, XFS_BTNUM_CNT);
1da177e4 1272 bno_cur = NULL;
e26f0501 1273
1da177e4
LT
1274 /*
1275 * Look for an entry >= maxlen+alignment-1 blocks.
1276 */
1277 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1278 args->maxlen + args->alignment - 1, &i)))
1279 goto error0;
e26f0501 1280
1da177e4 1281 /*
e26f0501
CH
1282 * If none or we have busy extents that we cannot allocate from, then
1283 * we have to settle for a smaller extent. In the case that there are
1284 * no large extents, this will return the last entry in the tree unless
1285 * the tree is empty. In the case that there are only busy large
1286 * extents, this will return the largest small extent unless there
1287 * are no smaller extents available.
1da177e4 1288 */
e26f0501
CH
1289 if (!i || forced > 1) {
1290 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1291 &fbno, &flen, &i);
1292 if (error)
1da177e4
LT
1293 goto error0;
1294 if (i == 0 || flen == 0) {
1295 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
0b1b213f 1296 trace_xfs_alloc_size_noentry(args);
1da177e4
LT
1297 return 0;
1298 }
1299 ASSERT(i == 1);
e26f0501
CH
1300 xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
1301 } else {
1302 /*
1303 * Search for a non-busy extent that is large enough.
1304 * If we are at low space, don't check, or if we fall of
1305 * the end of the btree, turn off the busy check and
1306 * restart.
1307 */
1308 for (;;) {
1309 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1310 if (error)
1311 goto error0;
1312 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1313
1314 xfs_alloc_compute_aligned(args, fbno, flen,
1315 &rbno, &rlen);
1316
1317 if (rlen >= args->maxlen)
1318 break;
1319
1320 error = xfs_btree_increment(cnt_cur, 0, &i);
1321 if (error)
1322 goto error0;
1323 if (i == 0) {
1324 /*
1325 * Our only valid extents must have been busy.
1326 * Make it unbusy by forcing the log out and
1327 * retrying. If we've been here before, forcing
1328 * the log isn't making the extents available,
1329 * which means they have probably been freed in
1330 * this transaction. In that case, we have to
1331 * give up on them and we'll attempt a minlen
1332 * allocation the next time around.
1333 */
1334 xfs_btree_del_cursor(cnt_cur,
1335 XFS_BTREE_NOERROR);
1336 trace_xfs_alloc_size_busy(args);
1337 if (!forced++)
1338 xfs_log_force(args->mp, XFS_LOG_SYNC);
1339 goto restart;
1340 }
1341 }
1da177e4 1342 }
e26f0501 1343
1da177e4
LT
1344 /*
1345 * In the first case above, we got the last entry in the
1346 * by-size btree. Now we check to see if the space hits maxlen
1347 * once aligned; if not, we search left for something better.
1348 * This can't happen in the second case above.
1349 */
1da177e4
LT
1350 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1351 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1352 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1353 if (rlen < args->maxlen) {
1354 xfs_agblock_t bestfbno;
1355 xfs_extlen_t bestflen;
1356 xfs_agblock_t bestrbno;
1357 xfs_extlen_t bestrlen;
1358
1359 bestrlen = rlen;
1360 bestrbno = rbno;
1361 bestflen = flen;
1362 bestfbno = fbno;
1363 for (;;) {
8df4da4a 1364 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1da177e4
LT
1365 goto error0;
1366 if (i == 0)
1367 break;
1368 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1369 &i)))
1370 goto error0;
1371 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1372 if (flen < bestrlen)
1373 break;
86fa8af6
CH
1374 xfs_alloc_compute_aligned(args, fbno, flen,
1375 &rbno, &rlen);
1da177e4
LT
1376 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1377 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1378 (rlen <= flen && rbno + rlen <= fbno + flen),
1379 error0);
1380 if (rlen > bestrlen) {
1381 bestrlen = rlen;
1382 bestrbno = rbno;
1383 bestflen = flen;
1384 bestfbno = fbno;
1385 if (rlen == args->maxlen)
1386 break;
1387 }
1388 }
1389 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1390 &i)))
1391 goto error0;
1392 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1393 rlen = bestrlen;
1394 rbno = bestrbno;
1395 flen = bestflen;
1396 fbno = bestfbno;
1397 }
1398 args->wasfromfl = 0;
1399 /*
1400 * Fix up the length.
1401 */
1402 args->len = rlen;
e26f0501
CH
1403 if (rlen < args->minlen) {
1404 if (!forced++) {
1405 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1406 trace_xfs_alloc_size_busy(args);
1407 xfs_log_force(args->mp, XFS_LOG_SYNC);
1408 goto restart;
1409 }
1410 goto out_nominleft;
1da177e4 1411 }
e26f0501
CH
1412 xfs_alloc_fix_len(args);
1413
1414 if (!xfs_alloc_fix_minleft(args))
1415 goto out_nominleft;
1da177e4
LT
1416 rlen = args->len;
1417 XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
1418 /*
1419 * Allocate and initialize a cursor for the by-block tree.
1420 */
561f7d17
CH
1421 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1422 args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1423 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1424 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1425 goto error0;
1426 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1427 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1428 cnt_cur = bno_cur = NULL;
1429 args->len = rlen;
1430 args->agbno = rbno;
1431 XFS_WANT_CORRUPTED_GOTO(
1432 args->agbno + args->len <=
16259e7d 1433 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1da177e4 1434 error0);
0b1b213f 1435 trace_xfs_alloc_size_done(args);
1da177e4
LT
1436 return 0;
1437
1438error0:
0b1b213f 1439 trace_xfs_alloc_size_error(args);
1da177e4
LT
1440 if (cnt_cur)
1441 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1442 if (bno_cur)
1443 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1444 return error;
e26f0501
CH
1445
1446out_nominleft:
1447 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1448 trace_xfs_alloc_size_nominleft(args);
1449 args->agbno = NULLAGBLOCK;
1450 return 0;
1da177e4
LT
1451}
1452
/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
					      args->userdata);

			if (args->userdata) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}
1543/*
1544 * Free the extent starting at agno/bno for length.
1545 */
1546STATIC int /* error */
1547xfs_free_ag_extent(
1548 xfs_trans_t *tp, /* transaction pointer */
1549 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
1550 xfs_agnumber_t agno, /* allocation group number */
1551 xfs_agblock_t bno, /* starting block number */
1552 xfs_extlen_t len, /* length of extent */
1553 int isfl) /* set if is freelist blocks - no sb acctg */
1554{
1555 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1556 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1557 int error; /* error return value */
1da177e4
LT
1558 xfs_agblock_t gtbno; /* start of right neighbor block */
1559 xfs_extlen_t gtlen; /* length of right neighbor block */
1560 int haveleft; /* have a left neighbor block */
1561 int haveright; /* have a right neighbor block */
1562 int i; /* temp, result code */
1563 xfs_agblock_t ltbno; /* start of left neighbor block */
1564 xfs_extlen_t ltlen; /* length of left neighbor block */
1565 xfs_mount_t *mp; /* mount point struct for filesystem */
1566 xfs_agblock_t nbno; /* new starting block of freespace */
1567 xfs_extlen_t nlen; /* new length of freespace */
ecb6928f 1568 xfs_perag_t *pag; /* per allocation group data */
1da177e4
LT
1569
1570 mp = tp->t_mountp;
1571 /*
1572 * Allocate and initialize a cursor for the by-block btree.
1573 */
561f7d17 1574 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1da177e4
LT
1575 cnt_cur = NULL;
1576 /*
1577 * Look for a neighboring block on the left (lower block numbers)
1578 * that is contiguous with this space.
1579 */
1580 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1581 goto error0;
1582 if (haveleft) {
1583 /*
1584 * There is a block to our left.
1585 */
1586 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1587 goto error0;
1588 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1589 /*
1590 * It's not contiguous, though.
1591 */
1592 if (ltbno + ltlen < bno)
1593 haveleft = 0;
1594 else {
1595 /*
1596 * If this failure happens the request to free this
1597 * space was invalid, it's (partly) already free.
1598 * Very bad.
1599 */
1600 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1601 }
1602 }
1603 /*
1604 * Look for a neighboring block on the right (higher block numbers)
1605 * that is contiguous with this space.
1606 */
637aa50f 1607 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1da177e4
LT
1608 goto error0;
1609 if (haveright) {
1610 /*
1611 * There is a block to our right.
1612 */
1613 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1614 goto error0;
1615 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1616 /*
1617 * It's not contiguous, though.
1618 */
1619 if (bno + len < gtbno)
1620 haveright = 0;
1621 else {
1622 /*
1623 * If this failure happens the request to free this
1624 * space was invalid, it's (partly) already free.
1625 * Very bad.
1626 */
1627 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1628 }
1629 }
1630 /*
1631 * Now allocate and initialize a cursor for the by-size tree.
1632 */
561f7d17 1633 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1da177e4
LT
1634 /*
1635 * Have both left and right contiguous neighbors.
1636 * Merge all three into a single free block.
1637 */
1638 if (haveleft && haveright) {
1639 /*
1640 * Delete the old by-size entry on the left.
1641 */
1642 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1643 goto error0;
1644 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1645 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1646 goto error0;
1647 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1648 /*
1649 * Delete the old by-size entry on the right.
1650 */
1651 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1652 goto error0;
1653 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1654 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1655 goto error0;
1656 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1657 /*
1658 * Delete the old by-block entry for the right block.
1659 */
91cca5df 1660 if ((error = xfs_btree_delete(bno_cur, &i)))
1da177e4
LT
1661 goto error0;
1662 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1663 /*
1664 * Move the by-block cursor back to the left neighbor.
1665 */
8df4da4a 1666 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4
LT
1667 goto error0;
1668 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1669#ifdef DEBUG
1670 /*
1671 * Check that this is the right record: delete didn't
1672 * mangle the cursor.
1673 */
1674 {
1675 xfs_agblock_t xxbno;
1676 xfs_extlen_t xxlen;
1677
1678 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1679 &i)))
1680 goto error0;
1681 XFS_WANT_CORRUPTED_GOTO(
1682 i == 1 && xxbno == ltbno && xxlen == ltlen,
1683 error0);
1684 }
1685#endif
1686 /*
1687 * Update remaining by-block entry to the new, joined block.
1688 */
1689 nbno = ltbno;
1690 nlen = len + ltlen + gtlen;
1691 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1692 goto error0;
1693 }
1694 /*
1695 * Have only a left contiguous neighbor.
1696 * Merge it together with the new freespace.
1697 */
1698 else if (haveleft) {
1699 /*
1700 * Delete the old by-size entry on the left.
1701 */
1702 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1703 goto error0;
1704 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1705 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1706 goto error0;
1707 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1708 /*
1709 * Back up the by-block cursor to the left neighbor, and
1710 * update its length.
1711 */
8df4da4a 1712 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4
LT
1713 goto error0;
1714 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1715 nbno = ltbno;
1716 nlen = len + ltlen;
1717 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1718 goto error0;
1719 }
1720 /*
1721 * Have only a right contiguous neighbor.
1722 * Merge it together with the new freespace.
1723 */
1724 else if (haveright) {
1725 /*
1726 * Delete the old by-size entry on the right.
1727 */
1728 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1729 goto error0;
1730 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
91cca5df 1731 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4
LT
1732 goto error0;
1733 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1734 /*
1735 * Update the starting block and length of the right
1736 * neighbor in the by-block tree.
1737 */
1738 nbno = bno;
1739 nlen = len + gtlen;
1740 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1741 goto error0;
1742 }
1743 /*
1744 * No contiguous neighbors.
1745 * Insert the new freespace into the by-block tree.
1746 */
1747 else {
1748 nbno = bno;
1749 nlen = len;
4b22a571 1750 if ((error = xfs_btree_insert(bno_cur, &i)))
1da177e4
LT
1751 goto error0;
1752 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1753 }
1754 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1755 bno_cur = NULL;
1756 /*
1757 * In all cases we need to insert the new freespace in the by-size tree.
1758 */
1759 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1760 goto error0;
1761 XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
4b22a571 1762 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4
LT
1763 goto error0;
1764 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1765 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1766 cnt_cur = NULL;
ecb6928f 1767
1da177e4
LT
1768 /*
1769 * Update the freespace totals in the ag and superblock.
1770 */
ecb6928f
CH
1771 pag = xfs_perag_get(mp, agno);
1772 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1773 xfs_perag_put(pag);
1774 if (error)
1775 goto error0;
1776
1777 if (!isfl)
1778 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1779 XFS_STATS_INC(xs_freex);
1780 XFS_STATS_ADD(xs_freeb, len);
0b1b213f
CH
1781
1782 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1da177e4 1783
1da177e4
LT
1784 return 0;
1785
1786 error0:
0b1b213f 1787 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1da177e4
LT
1788 if (bno_cur)
1789 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1790 if (cnt_cur)
1791 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1792 return error;
1793}
1794
1795/*
1796 * Visible (exported) allocation/free functions.
1797 * Some of these are used just by xfs_alloc_btree.c and this file.
1798 */
1799
1800/*
1801 * Compute and fill in value of m_ag_maxlevels.
1802 */
1803void
1804xfs_alloc_compute_maxlevels(
1805 xfs_mount_t *mp) /* file system mount structure */
1806{
1807 int level;
1808 uint maxblocks;
1809 uint maxleafents;
1810 int minleafrecs;
1811 int minnoderecs;
1812
1813 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1814 minleafrecs = mp->m_alloc_mnr[0];
1815 minnoderecs = mp->m_alloc_mnr[1];
1816 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1817 for (level = 1; maxblocks > 1; level++)
1818 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1819 mp->m_ag_maxlevels = level;
1820}
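/*
 * Worked example (hypothetical geometry, for illustration only): with
 * sb_agblocks = 199999 and minimum record counts m_alloc_mnr[0] =
 * m_alloc_mnr[1] = 100, maxleafents = 100000, so a worst-case leaf level
 * needs ceil(100000 / 100) = 1000 blocks.  The loop then climbs the tree,
 * 1000 -> 10 -> 1 blocks per level, giving m_ag_maxlevels = 3.
 */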
1821
6cc87645
DC
1822/*
1823 * Find the length of the longest extent in an AG.
1824 */
1825xfs_extlen_t
1826xfs_alloc_longest_free_extent(
1827 struct xfs_mount *mp,
1828 struct xfs_perag *pag)
1829{
1830 xfs_extlen_t need, delta = 0;
1831
1832 need = XFS_MIN_FREELIST_PAG(pag, mp);
1833 if (need > pag->pagf_flcount)
1834 delta = need - pag->pagf_flcount;
1835
1836 if (pag->pagf_longest > delta)
1837 return pag->pagf_longest - delta;
1838 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1839}
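/*
 * Note: delta above is the worst-case number of blocks that might be
 * pulled out of free space to refill a short freelist, and it is assumed
 * to come out of the longest extent.  The final expression is deliberately
 * boolean-valued: it reports a longest extent of 1 if any free space
 * remains at all (freelist blocks or a nonzero longest) and 0 otherwise.
 */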
1840
1da177e4
LT
1841/*
1842 * Decide whether to use this allocation group for this allocation.
1843 * If so, fix up the btree freelist's size.
1844 */
1845STATIC int /* error */
1846xfs_alloc_fix_freelist(
1847 xfs_alloc_arg_t *args, /* allocation argument structure */
1848 int flags) /* XFS_ALLOC_FLAG_... */
1849{
1850 xfs_buf_t *agbp; /* agf buffer pointer */
1851 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1852 xfs_buf_t *agflbp;/* agfl buffer pointer */
1853 xfs_agblock_t bno; /* freelist block */
1854 xfs_extlen_t delta; /* new blocks needed in freelist */
1855 int error; /* error result code */
1856 xfs_extlen_t longest;/* longest extent in allocation group */
1857 xfs_mount_t *mp; /* file system mount point structure */
1858 xfs_extlen_t need; /* total blocks needed in freelist */
1859 xfs_perag_t *pag; /* per-ag information structure */
1860 xfs_alloc_arg_t targs; /* local allocation arguments */
1861 xfs_trans_t *tp; /* transaction pointer */
1862
1863 mp = args->mp;
1864
1865 pag = args->pag;
1866 tp = args->tp;
1867 if (!pag->pagf_init) {
1868 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1869 &agbp)))
1870 return error;
1871 if (!pag->pagf_init) {
0e1edbd9
NS
1872 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1873 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1874 args->agbp = NULL;
1875 return 0;
1876 }
1877 } else
1878 agbp = NULL;
1879
0e1edbd9
NS
1880 /*
1881	 * If this is a metadata-preferred pag and we are allocating user data,
1da177e4
LT
1882	 * then try somewhere else, unless we are being asked to
1883	 * try harder at this point.
1884 */
0e1edbd9
NS
1885 if (pag->pagf_metadata && args->userdata &&
1886 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1887 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1888 args->agbp = NULL;
1889 return 0;
1890 }
1891
0e1edbd9 1892 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
0e1edbd9
NS
1893 /*
1894 * If it looks like there isn't a long enough extent, or enough
1895 * total blocks, reject it.
1896 */
6cc87645
DC
1897 need = XFS_MIN_FREELIST_PAG(pag, mp);
1898 longest = xfs_alloc_longest_free_extent(mp, pag);
0e1edbd9
NS
1899 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1900 longest ||
1901 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1902 need - args->total) < (int)args->minleft)) {
1903 if (agbp)
1904 xfs_trans_brelse(tp, agbp);
1905 args->agbp = NULL;
1906 return 0;
1907 }
1da177e4 1908 }
0e1edbd9 1909
1da177e4
LT
1910 /*
1911 * Get the a.g. freespace buffer.
1912	 * This can fail if we're not blocking on locks and the lock is already held.
1913 */
1914 if (agbp == NULL) {
1915 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1916 &agbp)))
1917 return error;
1918 if (agbp == NULL) {
0e1edbd9
NS
1919 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1920 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1da177e4
LT
1921 args->agbp = NULL;
1922 return 0;
1923 }
1924 }
1925 /*
1926 * Figure out how many blocks we should have in the freelist.
1927 */
1928 agf = XFS_BUF_TO_AGF(agbp);
1929 need = XFS_MIN_FREELIST(agf, mp);
1da177e4
LT
1930 /*
1931	 * If there isn't enough total free space, or a long enough single extent, reject it.
1932 */
0e1edbd9
NS
1933 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1934 delta = need > be32_to_cpu(agf->agf_flcount) ?
1935 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1936 longest = be32_to_cpu(agf->agf_longest);
1937 longest = (longest > delta) ? (longest - delta) :
1938 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1939 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1940 longest ||
1941 ((int)(be32_to_cpu(agf->agf_freeblks) +
1942 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1943 (int)args->minleft)) {
1944 xfs_trans_brelse(tp, agbp);
1945 args->agbp = NULL;
1946 return 0;
1947 }
1da177e4
LT
1948 }
1949 /*
1950 * Make the freelist shorter if it's too long.
1951 */
16259e7d 1952 while (be32_to_cpu(agf->agf_flcount) > need) {
1da177e4
LT
1953 xfs_buf_t *bp;
1954
92821e2b
DC
1955 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1956 if (error)
1da177e4
LT
1957 return error;
1958 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1959 return error;
1960 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1961 xfs_trans_binval(tp, bp);
1962 }
1963 /*
1964 * Initialize the args structure.
1965 */
a0041684 1966 memset(&targs, 0, sizeof(targs));
1da177e4
LT
1967 targs.tp = tp;
1968 targs.mp = mp;
1969 targs.agbp = agbp;
1970 targs.agno = args->agno;
1da177e4
LT
1971 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1972 targs.type = XFS_ALLOCTYPE_THIS_AG;
1973 targs.pag = pag;
1974 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1975 return error;
1976 /*
1977 * Make the freelist longer if it's too short.
1978 */
16259e7d 1979 while (be32_to_cpu(agf->agf_flcount) < need) {
1da177e4 1980 targs.agbno = 0;
16259e7d 1981 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1da177e4
LT
1982 /*
1983 * Allocate as many blocks as possible at once.
1984 */
e63a3690
NS
1985 if ((error = xfs_alloc_ag_vextent(&targs))) {
1986 xfs_trans_brelse(tp, agflbp);
1da177e4 1987 return error;
e63a3690 1988 }
1da177e4
LT
1989 /*
1990 * Stop if we run out. Won't happen if callers are obeying
1991 * the restrictions correctly. Can happen for free calls
1992 * on a completely full ag.
1993 */
d210a28c 1994 if (targs.agbno == NULLAGBLOCK) {
0e1edbd9
NS
1995 if (flags & XFS_ALLOC_FLAG_FREEING)
1996 break;
1997 xfs_trans_brelse(tp, agflbp);
1998 args->agbp = NULL;
1999 return 0;
d210a28c 2000 }
1da177e4
LT
2001 /*
2002 * Put each allocated block on the list.
2003 */
2004 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
92821e2b
DC
2005 error = xfs_alloc_put_freelist(tp, agbp,
2006 agflbp, bno, 0);
2007 if (error)
1da177e4
LT
2008 return error;
2009 }
2010 }
e63a3690 2011 xfs_trans_brelse(tp, agflbp);
1da177e4
LT
2012 args->agbp = agbp;
2013 return 0;
2014}
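/*
 * Typical calling pattern (hypothetical caller sketch; see
 * xfs_alloc_vextent() below for the real thing):
 *
 *	args->pag = xfs_perag_get(mp, args->agno);
 *	error = xfs_alloc_fix_freelist(args, XFS_ALLOC_FLAG_TRYLOCK);
 *	if (!error && args->agbp)
 *		error = xfs_alloc_ag_vextent(args);
 *
 * A NULL args->agbp with a zero return value means this AG was rejected
 * (AGF locked, not enough space, or metadata-preferred) and the caller
 * should move on to another AG.
 */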
2015
2016/*
2017 * Get a block from the freelist.
2018 * Returns with the buffer for the block gotten.
2019 */
2020int /* error */
2021xfs_alloc_get_freelist(
2022 xfs_trans_t *tp, /* transaction pointer */
2023 xfs_buf_t *agbp, /* buffer containing the agf structure */
92821e2b
DC
2024 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2025 int btreeblk) /* destination is a AGF btree */
1da177e4
LT
2026{
2027 xfs_agf_t *agf; /* a.g. freespace structure */
1da177e4
LT
2028 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2029 xfs_agblock_t bno; /* block number returned */
77c95bba 2030 __be32 *agfl_bno;
1da177e4 2031 int error;
92821e2b 2032 int logflags;
77c95bba 2033 xfs_mount_t *mp = tp->t_mountp;
1da177e4
LT
2034 xfs_perag_t *pag; /* per allocation group data */
2035
1da177e4
LT
2036 /*
2037 * Freelist is empty, give up.
2038 */
77c95bba 2039 agf = XFS_BUF_TO_AGF(agbp);
1da177e4
LT
2040 if (!agf->agf_flcount) {
2041 *bnop = NULLAGBLOCK;
2042 return 0;
2043 }
2044 /*
2045 * Read the array of free blocks.
2046 */
77c95bba
CH
2047 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2048 &agflbp);
2049 if (error)
1da177e4 2050 return error;
77c95bba
CH
2051
2052
1da177e4
LT
2053 /*
2054 * Get the block number and update the data structures.
2055 */
77c95bba
CH
2056 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2057 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
413d57c9 2058 be32_add_cpu(&agf->agf_flfirst, 1);
1da177e4 2059 xfs_trans_brelse(tp, agflbp);
16259e7d 2060 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1da177e4 2061 agf->agf_flfirst = 0;
a862e0fd
DC
2062
2063 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2064 be32_add_cpu(&agf->agf_flcount, -1);
1da177e4
LT
2065 xfs_trans_agflist_delta(tp, -1);
2066 pag->pagf_flcount--;
a862e0fd 2067 xfs_perag_put(pag);
92821e2b
DC
2068
2069 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2070 if (btreeblk) {
413d57c9 2071 be32_add_cpu(&agf->agf_btreeblks, 1);
92821e2b
DC
2072 pag->pagf_btreeblks++;
2073 logflags |= XFS_AGF_BTREEBLKS;
2074 }
2075
92821e2b 2076 xfs_alloc_log_agf(tp, agbp, logflags);
1da177e4
LT
2077 *bnop = bno;
2078
1da177e4
LT
2079 return 0;
2080}
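/*
 * The AGFL is treated as a small circular array of block numbers:
 * agf_flfirst indexes the head entry, agf_fllast the tail, and both wrap
 * back to zero when they reach XFS_AGFL_SIZE(mp).  For example, if the
 * array held 128 entries (illustrative only; the real size depends on the
 * filesystem geometry), flfirst would advance 126, 127, 0, 1, ... as
 * entries are consumed above.
 */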
2081
2082/*
2083 * Log the given fields from the agf structure.
2084 */
2085void
2086xfs_alloc_log_agf(
2087 xfs_trans_t *tp, /* transaction pointer */
2088 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2089 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2090{
2091 int first; /* first byte offset */
2092 int last; /* last byte offset */
2093 static const short offsets[] = {
2094 offsetof(xfs_agf_t, agf_magicnum),
2095 offsetof(xfs_agf_t, agf_versionnum),
2096 offsetof(xfs_agf_t, agf_seqno),
2097 offsetof(xfs_agf_t, agf_length),
2098 offsetof(xfs_agf_t, agf_roots[0]),
2099 offsetof(xfs_agf_t, agf_levels[0]),
2100 offsetof(xfs_agf_t, agf_flfirst),
2101 offsetof(xfs_agf_t, agf_fllast),
2102 offsetof(xfs_agf_t, agf_flcount),
2103 offsetof(xfs_agf_t, agf_freeblks),
2104 offsetof(xfs_agf_t, agf_longest),
92821e2b 2105 offsetof(xfs_agf_t, agf_btreeblks),
4e0e6040 2106 offsetof(xfs_agf_t, agf_uuid),
1da177e4
LT
2107 sizeof(xfs_agf_t)
2108 };
2109
0b1b213f
CH
2110 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2111
61fe135c 2112 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
4e0e6040 2113
1da177e4
LT
2114 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2115 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2116}
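/*
 * Note: xfs_btree_offsets() converts the caller's field bitmask into the
 * first and last byte offsets covered by those fields, using the offsets[]
 * table above, so only the touched portion of the AGF buffer is logged.
 */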
2117
2118/*
2119 * Interface for inode allocation to force the pag data to be initialized.
2120 */
2121int /* error */
2122xfs_alloc_pagf_init(
2123 xfs_mount_t *mp, /* file system mount structure */
2124 xfs_trans_t *tp, /* transaction pointer */
2125 xfs_agnumber_t agno, /* allocation group number */
2126 int flags) /* XFS_ALLOC_FLAGS_... */
2127{
2128 xfs_buf_t *bp;
2129 int error;
2130
2131 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2132 return error;
2133 if (bp)
2134 xfs_trans_brelse(tp, bp);
2135 return 0;
2136}
2137
2138/*
2139 * Put the block on the freelist for the allocation group.
2140 */
2141int /* error */
2142xfs_alloc_put_freelist(
2143 xfs_trans_t *tp, /* transaction pointer */
2144 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2145 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
92821e2b
DC
2146 xfs_agblock_t bno, /* block being freed */
2147 int btreeblk) /* block came from a AGF btree */
1da177e4
LT
2148{
2149 xfs_agf_t *agf; /* a.g. freespace structure */
e2101005 2150 __be32 *blockp;/* pointer to array entry */
1da177e4 2151 int error;
92821e2b 2152 int logflags;
1da177e4
LT
2153 xfs_mount_t *mp; /* mount structure */
2154 xfs_perag_t *pag; /* per allocation group data */
77c95bba
CH
2155 __be32 *agfl_bno;
2156 int startoff;
1da177e4
LT
2157
2158 agf = XFS_BUF_TO_AGF(agbp);
2159 mp = tp->t_mountp;
2160
2161 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
16259e7d 2162 be32_to_cpu(agf->agf_seqno), &agflbp)))
1da177e4 2163 return error;
413d57c9 2164 be32_add_cpu(&agf->agf_fllast, 1);
16259e7d 2165 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1da177e4 2166 agf->agf_fllast = 0;
a862e0fd
DC
2167
2168 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2169 be32_add_cpu(&agf->agf_flcount, 1);
1da177e4
LT
2170 xfs_trans_agflist_delta(tp, 1);
2171 pag->pagf_flcount++;
92821e2b
DC
2172
2173 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2174 if (btreeblk) {
413d57c9 2175 be32_add_cpu(&agf->agf_btreeblks, -1);
92821e2b
DC
2176 pag->pagf_btreeblks--;
2177 logflags |= XFS_AGF_BTREEBLKS;
2178 }
a862e0fd 2179 xfs_perag_put(pag);
92821e2b 2180
92821e2b
DC
2181 xfs_alloc_log_agf(tp, agbp, logflags);
2182
16259e7d 2183 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
77c95bba
CH
2184
2185 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2186 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
e2101005 2187 *blockp = cpu_to_be32(bno);
77c95bba
CH
2188 startoff = (char *)blockp - (char *)agflbp->b_addr;
2189
92821e2b 2190 xfs_alloc_log_agf(tp, agbp, logflags);
77c95bba 2191
61fe135c 2192 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
77c95bba
CH
2193 xfs_trans_log_buf(tp, agflbp, startoff,
2194 startoff + sizeof(xfs_agblock_t) - 1);
1da177e4
LT
2195 return 0;
2196}
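/*
 * Note that only the single AGFL slot written above is logged: startoff is
 * the byte offset of that slot within the AGFL buffer, and the logged
 * range is [startoff, startoff + sizeof(xfs_agblock_t) - 1].
 */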
2197
4e0e6040 2198static bool
612cfbfe 2199xfs_agf_verify(
4e0e6040 2200 struct xfs_mount *mp,
5d5f527d
DC
2201 struct xfs_buf *bp)
2202 {
4e0e6040 2203 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
5d5f527d 2204
4e0e6040
DC
2205 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2206 !uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_uuid))
2207 return false;
5d5f527d 2208
4e0e6040
DC
2209 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2210 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2211 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2212 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2213 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2214 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
2215 return false;
5d5f527d
DC
2216
2217 /*
2218	 * During growfs operations, the perag is not fully initialised,
2219	 * so we can't use it for any useful checking. growfs ensures we can't
2220	 * use it by using uncached buffers that don't have the perag attached,
2221	 * so we can detect and avoid this problem here.
2222 */
4e0e6040
DC
2223 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2224 return false;
5d5f527d 2225
4e0e6040
DC
2226 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2227 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2228 return false;
2229
2230	 return true;
5d5f527d 2231
612cfbfe
DC
2232}
2233
1813dd64
DC
2234static void
2235xfs_agf_read_verify(
612cfbfe
DC
2236 struct xfs_buf *bp)
2237{
4e0e6040
DC
2238 struct xfs_mount *mp = bp->b_target->bt_mount;
2239 int agf_ok = 1;
2240
2241 if (xfs_sb_version_hascrc(&mp->m_sb))
51582170 2242 agf_ok = xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF);
4e0e6040
DC
2243
2244 agf_ok = agf_ok && xfs_agf_verify(mp, bp);
2245
2246 if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
2247 XFS_RANDOM_ALLOC_READ_AGF))) {
2248 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
2249 xfs_buf_ioerror(bp, EFSCORRUPTED);
2250 }
612cfbfe 2251}
5d5f527d 2252
b0f539de 2253static void
1813dd64 2254xfs_agf_write_verify(
612cfbfe
DC
2255 struct xfs_buf *bp)
2256{
4e0e6040
DC
2257 struct xfs_mount *mp = bp->b_target->bt_mount;
2258 struct xfs_buf_log_item *bip = bp->b_fspriv;
2259
2260 if (!xfs_agf_verify(mp, bp)) {
2261 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
2262 xfs_buf_ioerror(bp, EFSCORRUPTED);
2263 return;
2264 }
2265
2266 if (!xfs_sb_version_hascrc(&mp->m_sb))
2267 return;
2268
2269 if (bip)
2270 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2271
f1dbcd7e 2272 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
5d5f527d
DC
2273}
2274
1813dd64
DC
2275const struct xfs_buf_ops xfs_agf_buf_ops = {
2276 .verify_read = xfs_agf_read_verify,
2277 .verify_write = xfs_agf_write_verify,
2278};
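/*
 * These buffer ops hook the AGF into the generic buffer I/O path: the read
 * verifier runs when a read completes (checking the CRC first on CRC-enabled
 * filesystems), and the write verifier runs before the buffer goes to disk,
 * stamping the LSN from the buf log item and recomputing the checksum at
 * XFS_AGF_CRC_OFF when CRCs are enabled.
 */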
2279
1da177e4
LT
2280/*
2281 * Read in the allocation group header (free/alloc section).
2282 */
2283int /* error */
4805621a
CH
2284xfs_read_agf(
2285 struct xfs_mount *mp, /* mount point structure */
2286 struct xfs_trans *tp, /* transaction pointer */
2287 xfs_agnumber_t agno, /* allocation group number */
2288 int flags, /* XFS_BUF_ */
2289 struct xfs_buf **bpp) /* buffer for the ag freelist header */
1da177e4 2290{
1da177e4
LT
2291 int error;
2292
d123031a
DC
2293 trace_xfs_read_agf(mp, agno);
2294
1da177e4
LT
2295 ASSERT(agno != NULLAGNUMBER);
2296 error = xfs_trans_read_buf(
2297 mp, tp, mp->m_ddev_targp,
2298 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1813dd64 2299 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
1da177e4
LT
2300 if (error)
2301 return error;
4805621a 2302 if (!*bpp)
1da177e4 2303 return 0;
4805621a 2304
5a52c2a5 2305 ASSERT(!(*bpp)->b_error);
38f23232 2306 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
4805621a
CH
2307 return 0;
2308}
2309
2310/*
2311 * Read in the allocation group header (free/alloc section).
2312 */
2313int /* error */
2314xfs_alloc_read_agf(
2315 struct xfs_mount *mp, /* mount point structure */
2316 struct xfs_trans *tp, /* transaction pointer */
2317 xfs_agnumber_t agno, /* allocation group number */
2318 int flags, /* XFS_ALLOC_FLAG_... */
2319 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2320{
2321 struct xfs_agf *agf; /* ag freelist header */
2322 struct xfs_perag *pag; /* per allocation group data */
2323 int error;
2324
d123031a 2325 trace_xfs_alloc_read_agf(mp, agno);
4805621a 2326
d123031a 2327 ASSERT(agno != NULLAGNUMBER);
4805621a 2328 error = xfs_read_agf(mp, tp, agno,
0cadda1c 2329 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
4805621a
CH
2330 bpp);
2331 if (error)
2332 return error;
2333 if (!*bpp)
2334 return 0;
5a52c2a5 2335 ASSERT(!(*bpp)->b_error);
4805621a
CH
2336
2337 agf = XFS_BUF_TO_AGF(*bpp);
a862e0fd 2338 pag = xfs_perag_get(mp, agno);
1da177e4 2339 if (!pag->pagf_init) {
16259e7d 2340 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
92821e2b 2341 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
16259e7d
CH
2342 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2343 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
1da177e4 2344 pag->pagf_levels[XFS_BTNUM_BNOi] =
16259e7d 2345 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
1da177e4 2346 pag->pagf_levels[XFS_BTNUM_CNTi] =
16259e7d 2347 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
007c61c6 2348 spin_lock_init(&pag->pagb_lock);
e57336ff 2349 pag->pagb_count = 0;
ed3b4d6c 2350 pag->pagb_tree = RB_ROOT;
1da177e4
LT
2351 pag->pagf_init = 1;
2352 }
2353#ifdef DEBUG
2354 else if (!XFS_FORCED_SHUTDOWN(mp)) {
16259e7d 2355 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
89b28393 2356 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
16259e7d
CH
2357 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2358 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
1da177e4 2359 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
16259e7d 2360 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
1da177e4 2361 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
16259e7d 2362 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
1da177e4
LT
2363 }
2364#endif
a862e0fd 2365 xfs_perag_put(pag);
1da177e4
LT
2366 return 0;
2367}
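/*
 * The first successful read of an AG's AGF seeds the in-core per-AG
 * counters (pagf_freeblks, pagf_flcount, pagf_longest, btree levels) from
 * the on-disk structure; subsequent reads leave them alone and, on DEBUG
 * kernels that have not shut down, merely assert that the cached values
 * still match the buffer.
 */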
2368
2369/*
2370 * Allocate an extent (variable-size).
2371 * Depending on the allocation type, we either look in a single allocation
2372 * group or loop over the allocation groups to find the result.
2373 */
2374int /* error */
e04426b9 2375xfs_alloc_vextent(
1da177e4
LT
2376 xfs_alloc_arg_t *args) /* allocation argument structure */
2377{
2378 xfs_agblock_t agsize; /* allocation group size */
2379 int error;
2380 int flags; /* XFS_ALLOC_FLAG_... locking flags */
1da177e4
LT
2381 xfs_extlen_t minleft;/* minimum left value, temp copy */
2382 xfs_mount_t *mp; /* mount structure pointer */
2383 xfs_agnumber_t sagno; /* starting allocation group number */
2384 xfs_alloctype_t type; /* input allocation type */
2385 int bump_rotor = 0;
2386 int no_min = 0;
2387 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2388
2389 mp = args->mp;
2390 type = args->otype = args->type;
2391 args->agbno = NULLAGBLOCK;
2392 /*
2393 * Just fix this up, for the case where the last a.g. is shorter
2394 * (or there's only one a.g.) and the caller couldn't easily figure
2395 * that out (xfs_bmap_alloc).
2396 */
2397 agsize = mp->m_sb.sb_agblocks;
2398 if (args->maxlen > agsize)
2399 args->maxlen = agsize;
2400 if (args->alignment == 0)
2401 args->alignment = 1;
2402 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2403 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2404 ASSERT(args->minlen <= args->maxlen);
2405 ASSERT(args->minlen <= agsize);
2406 ASSERT(args->mod < args->prod);
2407 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2408 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2409 args->minlen > args->maxlen || args->minlen > agsize ||
2410 args->mod >= args->prod) {
2411 args->fsbno = NULLFSBLOCK;
0b1b213f 2412 trace_xfs_alloc_vextent_badargs(args);
1da177e4
LT
2413 return 0;
2414 }
2415 minleft = args->minleft;
2416
2417 switch (type) {
2418 case XFS_ALLOCTYPE_THIS_AG:
2419 case XFS_ALLOCTYPE_NEAR_BNO:
2420 case XFS_ALLOCTYPE_THIS_BNO:
2421 /*
2422 * These three force us into a single a.g.
2423 */
2424 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
a862e0fd 2425 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2426 args->minleft = 0;
2427 error = xfs_alloc_fix_freelist(args, 0);
2428 args->minleft = minleft;
2429 if (error) {
0b1b213f 2430 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2431 goto error0;
2432 }
2433 if (!args->agbp) {
0b1b213f 2434 trace_xfs_alloc_vextent_noagbp(args);
1da177e4
LT
2435 break;
2436 }
2437 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2438 if ((error = xfs_alloc_ag_vextent(args)))
2439 goto error0;
1da177e4
LT
2440 break;
2441 case XFS_ALLOCTYPE_START_BNO:
2442 /*
2443 * Try near allocation first, then anywhere-in-ag after
2444 * the first a.g. fails.
2445 */
2446 if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
2447 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2448 args->fsbno = XFS_AGB_TO_FSB(mp,
2449 ((mp->m_agfrotor / rotorstep) %
2450 mp->m_sb.sb_agcount), 0);
2451 bump_rotor = 1;
2452 }
2453 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2454 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2455 /* FALLTHROUGH */
2456 case XFS_ALLOCTYPE_ANY_AG:
2457 case XFS_ALLOCTYPE_START_AG:
2458 case XFS_ALLOCTYPE_FIRST_AG:
2459 /*
2460 * Rotate through the allocation groups looking for a winner.
2461 */
2462 if (type == XFS_ALLOCTYPE_ANY_AG) {
2463 /*
2464 * Start with the last place we left off.
2465 */
2466 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2467 mp->m_sb.sb_agcount;
2468 args->type = XFS_ALLOCTYPE_THIS_AG;
2469 flags = XFS_ALLOC_FLAG_TRYLOCK;
2470 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2471 /*
2472 * Start with allocation group given by bno.
2473 */
2474 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2475 args->type = XFS_ALLOCTYPE_THIS_AG;
2476 sagno = 0;
2477 flags = 0;
2478 } else {
2479 if (type == XFS_ALLOCTYPE_START_AG)
2480 args->type = XFS_ALLOCTYPE_THIS_AG;
2481 /*
2482 * Start with the given allocation group.
2483 */
2484 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2485 flags = XFS_ALLOC_FLAG_TRYLOCK;
2486 }
2487 /*
2488 * Loop over allocation groups twice; first time with
2489 * trylock set, second time without.
2490 */
1da177e4 2491 for (;;) {
a862e0fd 2492 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2493 if (no_min) args->minleft = 0;
2494 error = xfs_alloc_fix_freelist(args, flags);
2495 args->minleft = minleft;
2496 if (error) {
0b1b213f 2497 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2498 goto error0;
2499 }
2500 /*
2501 * If we get a buffer back then the allocation will fly.
2502 */
2503 if (args->agbp) {
2504 if ((error = xfs_alloc_ag_vextent(args)))
2505 goto error0;
2506 break;
2507 }
0b1b213f
CH
2508
2509 trace_xfs_alloc_vextent_loopfailed(args);
2510
1da177e4
LT
2511 /*
2512 * Didn't work, figure out the next iteration.
2513 */
2514 if (args->agno == sagno &&
2515 type == XFS_ALLOCTYPE_START_BNO)
2516 args->type = XFS_ALLOCTYPE_THIS_AG;
d210a28c
YL
2517 /*
2518 * For the first allocation, we can try any AG to get
2519 * space. However, if we already have allocated a
2520 * block, we don't want to try AGs whose number is below
2521 * sagno. Otherwise, we may end up with out-of-order
2522 * locking of AGF, which might cause deadlock.
2523 */
2524 if (++(args->agno) == mp->m_sb.sb_agcount) {
2525 if (args->firstblock != NULLFSBLOCK)
2526 args->agno = sagno;
2527 else
2528 args->agno = 0;
2529 }
1da177e4
LT
2530 /*
2531 * Reached the starting a.g., must either be done
2532 * or switch to non-trylock mode.
2533 */
2534 if (args->agno == sagno) {
2535 if (no_min == 1) {
2536 args->agbno = NULLAGBLOCK;
0b1b213f 2537 trace_xfs_alloc_vextent_allfailed(args);
1da177e4
LT
2538 break;
2539 }
2540 if (flags == 0) {
2541 no_min = 1;
2542 } else {
2543 flags = 0;
2544 if (type == XFS_ALLOCTYPE_START_BNO) {
2545 args->agbno = XFS_FSB_TO_AGBNO(mp,
2546 args->fsbno);
2547 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2548 }
2549 }
2550 }
a862e0fd 2551 xfs_perag_put(args->pag);
1da177e4 2552 }
1da177e4
LT
2553 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2554 if (args->agno == sagno)
2555 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2556 (mp->m_sb.sb_agcount * rotorstep);
2557 else
2558 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2559 (mp->m_sb.sb_agcount * rotorstep);
2560 }
2561 break;
2562 default:
2563 ASSERT(0);
2564 /* NOTREACHED */
2565 }
2566 if (args->agbno == NULLAGBLOCK)
2567 args->fsbno = NULLFSBLOCK;
2568 else {
2569 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2570#ifdef DEBUG
2571 ASSERT(args->len >= args->minlen);
2572 ASSERT(args->len <= args->maxlen);
2573 ASSERT(args->agbno % args->alignment == 0);
2574 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2575 args->len);
2576#endif
2577 }
a862e0fd 2578 xfs_perag_put(args->pag);
1da177e4
LT
2579 return 0;
2580error0:
a862e0fd 2581 xfs_perag_put(args->pag);
1da177e4
LT
2582 return error;
2583}
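/*
 * Retry ladder used above, in outline: the single-AG allocation types
 * (THIS_AG, NEAR_BNO, THIS_BNO) try exactly one AG and give up; the
 * rotoring types walk every AG, usually starting with
 * XFS_ALLOC_FLAG_TRYLOCK set (XFS_ALLOCTYPE_FIRST_AG starts out blocking),
 * then retrying without trylock, and finally with minleft forced to zero
 * (no_min) before giving up and returning NULLFSBLOCK to the caller.
 */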
2584
2585/*
2586 * Free an extent.
2587 * Just break up the extent address and hand off to xfs_free_ag_extent
2588 * after fixing up the freelist.
2589 */
2590int /* error */
2591xfs_free_extent(
2592 xfs_trans_t *tp, /* transaction pointer */
2593 xfs_fsblock_t bno, /* starting block number of extent */
2594 xfs_extlen_t len) /* length of extent */
2595{
0e1edbd9 2596 xfs_alloc_arg_t args;
1da177e4
LT
2597 int error;
2598
2599 ASSERT(len != 0);
0e1edbd9 2600 memset(&args, 0, sizeof(xfs_alloc_arg_t));
1da177e4
LT
2601 args.tp = tp;
2602 args.mp = tp->t_mountp;
be65b18a
DC
2603
2604 /*
2605	 * validate that the block number is legal - this enables us to detect
2606 * and handle a silent filesystem corruption rather than crashing.
2607 */
1da177e4 2608 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
be65b18a
DC
2609 if (args.agno >= args.mp->m_sb.sb_agcount)
2610 return EFSCORRUPTED;
2611
1da177e4 2612 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
be65b18a
DC
2613 if (args.agbno >= args.mp->m_sb.sb_agblocks)
2614 return EFSCORRUPTED;
2615
a862e0fd 2616 args.pag = xfs_perag_get(args.mp, args.agno);
be65b18a
DC
2617 ASSERT(args.pag);
2618
2619 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2620 if (error)
1da177e4 2621 goto error0;
be65b18a
DC
2622
2623 /* validate the extent size is legal now we have the agf locked */
2624 if (args.agbno + len >
2625 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2626 error = EFSCORRUPTED;
2627 goto error0;
2628 }
2629
0e1edbd9 2630 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
a870acd9 2631 if (!error)
4ecbfe63 2632 xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
1da177e4 2633error0:
a862e0fd 2634 xfs_perag_put(args.pag);
1da177e4
LT
2635 return error;
2636}
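/*
 * Note that on success the freed extent is also added to the per-AG busy
 * extent tree (xfs_extent_busy_insert above), so the allocator can avoid
 * reusing it before the freeing transaction is committed.
 */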