/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

unsigned int
xfs_refc_block(
	struct xfs_mount *mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount *mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * allocated for a delayed extent; for example, a data block in a certain AG is
 * allocated but there is no additional block for the additional bmap btree
 * block needed by a split of the file's bmap btree. The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount *mp)
{
	unsigned int blocks;

	blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
	return blocks;
}
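
/*
 * For example (illustrative geometry, not taken from this file): on a
 * filesystem with sb_agcount = 4 and the per-AG freelist reserve of 4
 * blocks described above, the set-aside works out to 4 + (4 * 4) = 20
 * filesystem blocks that delayed allocation will never reserve.
 */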

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount *mp)
{
	unsigned int blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4));	/* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;		/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;	/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;	/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;	/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
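
/*
 * For example (assumed geometry): with 512 byte sectors and 4 KiB blocks,
 * the four sector-sized AG headers round up to a single block, so with
 * the finobt, rmapbt and reflink features all enabled this subtracts
 * 1 + 4 (AGFL reserve) + 3 + 1 + 1 + 1 = 11 blocks from sb_agblocks.
 */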

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int			/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur *cur,	/* btree cursor */
	xfs_agblock_t bno,	/* starting block of extent */
	xfs_extlen_t len,	/* length of extent */
	int *stat)		/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur *cur,	/* btree cursor */
	xfs_agblock_t bno,	/* starting block of extent */
	xfs_extlen_t len,	/* length of extent */
	int *stat)		/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
static int			/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur *cur,	/* btree cursor */
	xfs_agblock_t bno,	/* starting block of extent */
	xfs_extlen_t len,	/* length of extent */
	int *stat)		/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int			/* error */
xfs_alloc_update(
	struct xfs_btree_cur *cur,	/* btree cursor */
	xfs_agblock_t bno,	/* starting block of extent */
	xfs_extlen_t len)	/* length of extent */
{
	union xfs_btree_rec rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int				/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur *cur,	/* btree cursor */
	xfs_agblock_t *bno,	/* output: starting block of extent */
	xfs_extlen_t *len,	/* output: length of extent */
	int *stat)		/* output: success/failure */
{
	union xfs_btree_rec *rec;
	int error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC void
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t *args,	/* allocation argument structure */
	xfs_agblock_t foundbno,	/* starting block in found extent */
	xfs_extlen_t foundlen,	/* length in found extent */
	xfs_agblock_t *resbno,	/* result block number */
	xfs_extlen_t *reslen)	/* result length */
{
	xfs_agblock_t bno;
	xfs_extlen_t len;
	xfs_extlen_t diff;

	/* Trim busy sections out of found extent */
	xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}
}
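
/*
 * For example (illustrative values): a found extent of [bno = 7, len = 10]
 * with args->alignment = 4 and no busy overlap rounds up to
 * aligned_bno = 8, giving [*resbno = 8, *reslen = 9]; if the alignment
 * rounding skipped past the whole extent, *reslen would become 0.
 */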

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t		/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t wantbno,	/* target starting block */
	xfs_extlen_t wantlen,	/* target length */
	xfs_extlen_t alignment,	/* target alignment */
	int datatype,		/* are we allocating data? */
	xfs_agblock_t freebno,	/* freespace's starting block */
	xfs_extlen_t freelen,	/* freespace's length */
	xfs_agblock_t *newbnop)	/* result: best start block from free */
{
	xfs_agblock_t freeend;	/* end of freespace extent */
	xfs_agblock_t newbno1;	/* return block number */
	xfs_agblock_t newbno2;	/* other new block number */
	xfs_extlen_t newlen1=0;	/* length with newbno1 */
	xfs_extlen_t newlen2=0;	/* length with newbno2 */
	xfs_agblock_t wantend;	/* end of target extent */
	bool userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
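
/*
 * For example (illustrative values, alignment = 1): asking for
 * [wantbno = 100, wantlen = 8] against a free extent [freebno = 90,
 * freelen = 40] falls into the "freeend >= wantend" case, so the best
 * start is wantbno itself and the returned diff is 0. With
 * [freebno = 110, freelen = 40] instead, the free extent starts past the
 * target, the candidate becomes freebno, and the diff is 10.
 */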

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t *args)	/* allocation argument structure */
{
	xfs_extlen_t k;
	xfs_extlen_t rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	args->len = rlen;
}
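
/*
 * For example (illustrative values): with args->prod = 4, args->mod = 1
 * and args->len = 11, k = 11 % 4 = 3 > mod, so the length is trimmed to
 * 11 - (3 - 1) = 9, which satisfies 9 % 4 == 1. If trimming would drop
 * the length below minlen, the length is left unchanged instead.
 */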

/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t *args)	/* allocation argument structure */
{
	xfs_agf_t *agf;		/* a.g. freelist header */
	int diff;		/* free space difference */

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;	/* shrink the allocated space */
	/* casts to (int) catch length underflows */
	if ((int)args->len >= (int)args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	return 0;
}

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks. The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int			/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t *cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t fbno,	/* starting block of free extent */
	xfs_extlen_t flen,	/* length of free extent */
	xfs_agblock_t rbno,	/* starting block of returned extent */
	xfs_extlen_t rlen,	/* length of returned extent */
	int flags)		/* flags, XFSA_FIXUP_... */
{
	int error;		/* error code */
	int i;			/* operation results */
	xfs_agblock_t nfbno1;	/* first new free startblock */
	xfs_agblock_t nfbno2;	/* second new free startblock */
	xfs_extlen_t nflen1=0;	/* first new free length */
	xfs_extlen_t nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block *bnoblock;
		struct xfs_btree_block *cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	return 0;
}
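
/*
 * For example (illustrative values): carving the returned extent
 * [rbno = 14, rlen = 4] out of the free extent [fbno = 10, flen = 20]
 * hits the last case above, leaving two new free records
 * [nfbno1 = 10, nflen1 = 4] and [nfbno2 = 18, nflen2 = 12] to be
 * re-inserted into both the by-size and by-block btrees.
 */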

static bool
xfs_agfl_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
	int i;

	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return false;
	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
		return false;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return false;

	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return false;
	}

	return xfs_log_check_lsn(mp,
				 be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn));
}

static void
xfs_agfl_read_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_agfl_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error)
		xfs_verifier_error(bp);
}

static void
xfs_agfl_write_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item *bip = bp->b_fspriv;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_agfl_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
};

/*
 * Read in the allocation group free block array.
 */
STATIC int			/* error */
xfs_alloc_read_agfl(
	xfs_mount_t *mp,	/* mount point structure */
	xfs_trans_t *tp,	/* transaction pointer */
	xfs_agnumber_t agno,	/* allocation group number */
	xfs_buf_t **bpp)	/* buffer for the ag free block array */
{
	xfs_buf_t *bp;		/* return value */
	int error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans *tp,
	struct xfs_perag *pag,
	struct xfs_buf *agbp,
	long len)
{
	struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return -EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t *args)	/* argument structure for allocation */
{
	int error=0;
	xfs_extlen_t reservation;
	xfs_extlen_t oldmax;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Clamp maxlen to the amount of free space minus any reservations
	 * that have been made.
	 */
	oldmax = args->maxlen;
	reservation = xfs_ag_resv_needed(args->pag, args->resv);
	if (args->maxlen > args->pag->pagf_freeblks - reservation)
		args->maxlen = args->pag->pagf_freeblks - reservation;
	if (args->maxlen == 0) {
		args->agbno = NULLAGBLOCK;
		args->maxlen = oldmax;
		return 0;
	}

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	args->maxlen = oldmax;

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
					       args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}
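
/*
 * Note on the post-allocation bookkeeping above: for a successful
 * allocation that did not come from the freelist, the AGF and per-AG
 * free block counts are decremented by args->len, the owner is recorded
 * in the rmap btree when one is in use, and any per-AG reservation is
 * adjusted before the per-mount allocation statistics are bumped.
 */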

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t *args)	/* allocation argument structure */
{
	xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
	int error;
	xfs_agblock_t fbno;	/* start block of found extent */
	xfs_extlen_t flen;	/* length of found extent */
	xfs_agblock_t tbno;	/* start block of trimmed extent */
	xfs_extlen_t tlen;	/* length of trimmed extent */
	xfs_agblock_t tend;	/* end block of trimmed extent */
	int i;			/* success/failure of operation */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
	       be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
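
/*
 * For example (illustrative values): with args->agbno = 50, minlen = 4
 * and maxlen = 16, a by-block lookup that returns the free extent
 * [fbno = 48, flen = 20] with no busy overlap gives tbno = 48 and
 * tend = 68; the length then becomes min(68, 50 + 16) - 50 = 16 blocks
 * starting exactly at the requested block 50.
 */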

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg *args,	/* allocation argument structure */
	struct xfs_btree_cur **gcur,	/* good cursor */
	struct xfs_btree_cur **scur,	/* searching cursor */
	xfs_agblock_t gdiff,	/* difference for search comparison */
	xfs_agblock_t *sbno,	/* extent found by search */
	xfs_extlen_t *slen,	/* extent length */
	xfs_agblock_t *sbnoa,	/* aligned extent found by search */
	xfs_extlen_t *slena,	/* aligned extent length */
	int dir)		/* 0 = search right, 1 = search left */
{
	xfs_agblock_t new;
	xfs_agblock_t sdiff;
	int error;
	int i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (*sbnoa > args->max_agbno)
				goto out_use_good;
			if (*sbnoa >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (*sbnoa < args->min_agbno)
				goto out_use_good;
			if (*sbnoa <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment,
						       args->datatype, *sbnoa,
						       *slena, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t *args)	/* allocation argument structure */
{
	xfs_btree_cur_t *bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t *bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t *cnt_cur;	/* cursor for count btree */
	xfs_agblock_t gtbno;	/* start bno of right side entry */
	xfs_agblock_t gtbnoa;	/* aligned ... */
	xfs_extlen_t gtdiff;	/* difference to right side entry */
	xfs_extlen_t gtlen;	/* length of right side entry */
	xfs_extlen_t gtlena;	/* aligned ... */
	xfs_agblock_t gtnew;	/* useful start bno of right side */
	int error;		/* error code */
	int i;			/* result code, temporary */
	int j;			/* result code, temporary */
	xfs_agblock_t ltbno;	/* start bno of left side entry */
	xfs_agblock_t ltbnoa;	/* aligned ... */
	xfs_extlen_t ltdiff;	/* difference to left side entry */
	xfs_extlen_t ltlen;	/* length of left side entry */
	xfs_extlen_t ltlena;	/* aligned ... */
	xfs_agblock_t ltnew;	/* useful start bno of left side */
	xfs_extlen_t rlen;	/* length of returned extent */
	int forced = 0;
#ifdef DEBUG
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int dofirst;		/* set to do first algorithm */

	dofirst = prandom_u32() & 1;
#endif

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range if it's outside */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	bno_cur_lt = NULL;
	bno_cur_gt = NULL;
	ltlen = 0;
	gtlena = 0;
	ltlena = 0;

	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_CNT);

	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;

	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree. If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t bdiff;
		int besti=0;
		xfs_extlen_t blen=0;
		xfs_agblock_t bnew=0;

#ifdef DEBUG
		if (dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks. If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena < args->minlen)
				continue;
			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work. We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_nominleft(args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree. When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					     args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i || ltbnoa < args->min_agbno) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			xfs_alloc_compute_aligned(args, gtbno, gtlen,
						  &gtbnoa, &gtlena);
			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i || gtbnoa > args->max_agbno) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, ltbnoa,
				ltlena, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen,
						&gtbnoa, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, args->datatype, gtbnoa,
				gtlena, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen,
						&ltbnoa, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

		if (!forced++) {
			trace_xfs_alloc_near_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		trace_xfs_alloc_near_nominleft(args);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
				     args->datatype, ltbnoa, ltlena, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

 error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}
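
/*
 * For example (illustrative values, alignment = 1): a near allocation
 * asking for 8 blocks at agbno 100 that finds a left free extent of
 * [90, 6] and a right one of [120, 16] drops the left cursor once it
 * runs out of usable records, keeps the right candidate, and ends up
 * allocating from block 120 with a distance ("diff") of 20 blocks from
 * the requested start.
 */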

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t *args)	/* allocation argument structure */
{
	xfs_btree_cur_t *bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t *cnt_cur;	/* cursor for cnt btree */
	int error;		/* error result */
	xfs_agblock_t fbno;	/* start of found freespace */
	xfs_extlen_t flen;	/* length of found freespace */
	int i;			/* temp status variable */
	xfs_agblock_t rbno;	/* returned block number */
	xfs_extlen_t rlen;	/* length of returned extent */
	int forced = 0;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none or we have busy extents that we cannot allocate from, then
	 * we have to settle for a smaller extent. In the case that there are
	 * no large extents, this will return the last entry in the tree unless
	 * the tree is empty. In the case that there are only busy large
	 * extents, this will return the largest small extent unless there
	 * are no smaller extents available.
	 */
	if (!i || forced > 1) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 * If we are at low space, don't check, or if we fall off
		 * the end of the btree, turn off the busy check and
		 * restart.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);

			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Make it unbusy by forcing the log out and
				 * retrying. If we've been here before, forcing
				 * the log isn't making the extents available,
				 * which means they have probably been freed in
				 * this transaction. In that case, we have to
				 * give up on them and we'll attempt a minlen
				 * allocation the next time around.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				if (!forced++)
					xfs_log_force(args->mp, XFS_LOG_SYNC);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t bestfbno;
		xfs_extlen_t bestflen;
		xfs_agblock_t bestrbno;
		xfs_extlen_t bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (!forced++) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_log_force(args->mp, XFS_LOG_SYNC);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	if (!xfs_alloc_fix_minleft(args))
		goto out_nominleft;
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(args->mp,
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
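
/*
 * For example (illustrative values): a THIS_AG request with maxlen = 32
 * and alignment = 1 first looks up a by-size record of at least 32
 * blocks; if every extent that large is still busy, the loop above walks
 * right, runs off the end of the btree, forces the log once (forced == 1)
 * and restarts, and only after a second failure does it fall back to the
 * small/minlen path.
 */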
1594
1595/*
1596 * Deal with the case where only small freespaces remain.
1597 * Either return the contents of the last freespace record,
1598 * or allocate space from the freelist if there is nothing in the tree.
1599 */
1600STATIC int /* error */
1601xfs_alloc_ag_vextent_small(
1602 xfs_alloc_arg_t *args, /* allocation argument structure */
1603 xfs_btree_cur_t *ccur, /* by-size cursor */
1604 xfs_agblock_t *fbnop, /* result block number */
1605 xfs_extlen_t *flenp, /* result length */
1606 int *stat) /* status: 0-freelist, 1-normal/none */
1607{
a03f1a66 1608 struct xfs_owner_info oinfo;
3fd129b6 1609 struct xfs_perag *pag;
1da177e4
LT
1610 int error;
1611 xfs_agblock_t fbno;
1612 xfs_extlen_t flen;
1da177e4
LT
1613 int i;
1614
8df4da4a 1615 if ((error = xfs_btree_decrement(ccur, 0, &i)))
1da177e4
LT
1616 goto error0;
1617 if (i) {
1618 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1619 goto error0;
c29aad41 1620 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4
LT
1621 }
1622 /*
1623 * Nothing in the btree, try the freelist. Make sure
1624 * to respect minleft even when pulling from the
1625 * freelist.
1626 */
3fd129b6
DW
1627 else if (args->minlen == 1 && args->alignment == 1 &&
1628 args->resv != XFS_AG_RESV_AGFL &&
16259e7d
CH
1629 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1630 > args->minleft)) {
92821e2b
DC
1631 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1632 if (error)
1da177e4
LT
1633 goto error0;
1634 if (fbno != NULLAGBLOCK) {
4ecbfe63 1635 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
292378ed 1636 xfs_alloc_allow_busy_reuse(args->datatype));
97d3ac75 1637
292378ed 1638 if (xfs_alloc_is_userdata(args->datatype)) {
1da177e4
LT
1639 xfs_buf_t *bp;
1640
1641 bp = xfs_btree_get_bufs(args->mp, args->tp,
1642 args->agno, fbno, 0);
1643 xfs_trans_binval(args->tp, bp);
1644 }
1645 args->len = 1;
1646 args->agbno = fbno;
c29aad41 1647 XFS_WANT_CORRUPTED_GOTO(args->mp,
1da177e4 1648 args->agbno + args->len <=
16259e7d 1649 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1da177e4
LT
1650 error0);
1651 args->wasfromfl = 1;
0b1b213f 1652 trace_xfs_alloc_small_freelist(args);
a03f1a66
DW
1653
1654 /*
1655 * If we're feeding an AGFL block to something that
1656 * doesn't live in the free space, we need to clear
3fd129b6
DW
1657 * out the OWN_AG rmap and add the block back to
1658 * the AGFL per-AG reservation.
a03f1a66
DW
1659 */
1660 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
1661 error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1662 fbno, 1, &oinfo);
1663 if (error)
1664 goto error0;
3fd129b6
DW
1665 pag = xfs_perag_get(args->mp, args->agno);
1666 xfs_ag_resv_free_extent(pag, XFS_AG_RESV_AGFL,
1667 args->tp, 1);
1668 xfs_perag_put(pag);
a03f1a66 1669
1da177e4
LT
1670 *stat = 0;
1671 return 0;
1672 }
1673 /*
1674 * Nothing in the freelist.
1675 */
1676 else
1677 flen = 0;
1678 }
1679 /*
1680 * Can't allocate from the freelist for some reason.
1681 */
d432c80e
NS
1682 else {
1683 fbno = NULLAGBLOCK;
1da177e4 1684 flen = 0;
d432c80e 1685 }
1da177e4
LT
1686 /*
1687 * Can't do the allocation, give up.
1688 */
1689 if (flen < args->minlen) {
1690 args->agbno = NULLAGBLOCK;
0b1b213f 1691 trace_xfs_alloc_small_notenough(args);
1da177e4
LT
1692 flen = 0;
1693 }
1694 *fbnop = fbno;
1695 *flenp = flen;
1696 *stat = 1;
0b1b213f 1697 trace_xfs_alloc_small_done(args);
1da177e4
LT
1698 return 0;
1699
1700error0:
0b1b213f 1701 trace_xfs_alloc_small_error(args);
1da177e4
LT
1702 return error;
1703}
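/*
 * Editor's sketch, not part of xfs_alloc.c: the decision made by
 * xfs_alloc_ag_vextent_small() above, reduced to its core.  Names and types
 * are simplified stand-ins; the real function also checks that the last
 * by-size record satisfies minlen and fixes up rmap and AGFL reservations.
 */
#include <stdbool.h>

enum small_source { SMALL_NONE, SMALL_LAST_RECORD, SMALL_FREELIST };

static enum small_source
small_alloc_source(bool have_last_record, unsigned int minlen,
                   unsigned int alignment, bool resv_is_agfl,
                   unsigned int flcount, unsigned int minleft)
{
        if (have_last_record)           /* hand back the last by-size record */
                return SMALL_LAST_RECORD;
        if (minlen == 1 && alignment == 1 && !resv_is_agfl &&
            flcount > minleft)          /* take a single block off the AGFL */
                return SMALL_FREELIST;
        return SMALL_NONE;              /* caller gives up */
}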
1704
1705/*
1706 * Free the extent starting at agno/bno for length.
1707 */
340785cc 1708STATIC int
1da177e4 1709xfs_free_ag_extent(
340785cc
DW
1710 xfs_trans_t *tp,
1711 xfs_buf_t *agbp,
1712 xfs_agnumber_t agno,
1713 xfs_agblock_t bno,
1714 xfs_extlen_t len,
1715 struct xfs_owner_info *oinfo,
3fd129b6 1716 enum xfs_ag_resv_type type)
1da177e4
LT
1717{
1718 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1719 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1720 int error; /* error return value */
1da177e4
LT
1721 xfs_agblock_t gtbno; /* start of right neighbor block */
1722 xfs_extlen_t gtlen; /* length of right neighbor block */
1723 int haveleft; /* have a left neighbor block */
1724 int haveright; /* have a right neighbor block */
1725 int i; /* temp, result code */
1726 xfs_agblock_t ltbno; /* start of left neighbor block */
1727 xfs_extlen_t ltlen; /* length of left neighbor block */
1728 xfs_mount_t *mp; /* mount point struct for filesystem */
1729 xfs_agblock_t nbno; /* new starting block of freespace */
1730 xfs_extlen_t nlen; /* new length of freespace */
ecb6928f 1731 xfs_perag_t *pag; /* per allocation group data */
1da177e4 1732
673930c3 1733 bno_cur = cnt_cur = NULL;
1da177e4 1734 mp = tp->t_mountp;
673930c3
DW
1735
1736 if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
1737 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1738 if (error)
1739 goto error0;
1740 }
1741
1da177e4
LT
1742 /*
1743 * Allocate and initialize a cursor for the by-block btree.
1744 */
561f7d17 1745 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1da177e4
LT
1746 /*
1747 * Look for a neighboring block on the left (lower block numbers)
1748 * that is contiguous with this space.
1749 */
1750 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1751 goto error0;
1752 if (haveleft) {
1753 /*
1754 * There is a block to our left.
1755 */
1756 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1757 goto error0;
c29aad41 1758 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1759 /*
1760 * It's not contiguous, though.
1761 */
1762 if (ltbno + ltlen < bno)
1763 haveleft = 0;
1764 else {
1765 /*
1766 * If this failure happens, the request to free this
1767 * space was invalid: it's (partly) already free.
1768 * Very bad.
1769 */
c29aad41
ES
1770 XFS_WANT_CORRUPTED_GOTO(mp,
1771 ltbno + ltlen <= bno, error0);
1da177e4
LT
1772 }
1773 }
1774 /*
1775 * Look for a neighboring block on the right (higher block numbers)
1776 * that is contiguous with this space.
1777 */
637aa50f 1778 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1da177e4
LT
1779 goto error0;
1780 if (haveright) {
1781 /*
1782 * There is a block to our right.
1783 */
1784 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1785 goto error0;
c29aad41 1786 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1787 /*
1788 * It's not contiguous, though.
1789 */
1790 if (bno + len < gtbno)
1791 haveright = 0;
1792 else {
1793 /*
1794 * If this failure happens, the request to free this
1795 * space was invalid: it's (partly) already free.
1796 * Very bad.
1797 */
c29aad41 1798 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1da177e4
LT
1799 }
1800 }
1801 /*
1802 * Now allocate and initialize a cursor for the by-size tree.
1803 */
561f7d17 1804 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1da177e4
LT
1805 /*
1806 * Have both left and right contiguous neighbors.
1807 * Merge all three into a single free block.
1808 */
1809 if (haveleft && haveright) {
1810 /*
1811 * Delete the old by-size entry on the left.
1812 */
1813 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1814 goto error0;
c29aad41 1815 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1816 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1817 goto error0;
c29aad41 1818 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1819 /*
1820 * Delete the old by-size entry on the right.
1821 */
1822 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1823 goto error0;
c29aad41 1824 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1825 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1826 goto error0;
c29aad41 1827 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1828 /*
1829 * Delete the old by-block entry for the right block.
1830 */
91cca5df 1831 if ((error = xfs_btree_delete(bno_cur, &i)))
1da177e4 1832 goto error0;
c29aad41 1833 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1834 /*
1835 * Move the by-block cursor back to the left neighbor.
1836 */
8df4da4a 1837 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4 1838 goto error0;
c29aad41 1839 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1840#ifdef DEBUG
1841 /*
1842 * Check that this is the right record: delete didn't
1843 * mangle the cursor.
1844 */
1845 {
1846 xfs_agblock_t xxbno;
1847 xfs_extlen_t xxlen;
1848
1849 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1850 &i)))
1851 goto error0;
c29aad41 1852 XFS_WANT_CORRUPTED_GOTO(mp,
1da177e4
LT
1853 i == 1 && xxbno == ltbno && xxlen == ltlen,
1854 error0);
1855 }
1856#endif
1857 /*
1858 * Update remaining by-block entry to the new, joined block.
1859 */
1860 nbno = ltbno;
1861 nlen = len + ltlen + gtlen;
1862 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1863 goto error0;
1864 }
1865 /*
1866 * Have only a left contiguous neighbor.
1867 * Merge it together with the new freespace.
1868 */
1869 else if (haveleft) {
1870 /*
1871 * Delete the old by-size entry on the left.
1872 */
1873 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1874 goto error0;
c29aad41 1875 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1876 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1877 goto error0;
c29aad41 1878 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1879 /*
1880 * Back up the by-block cursor to the left neighbor, and
1881 * update its length.
1882 */
8df4da4a 1883 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4 1884 goto error0;
c29aad41 1885 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1886 nbno = ltbno;
1887 nlen = len + ltlen;
1888 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1889 goto error0;
1890 }
1891 /*
1892 * Have only a right contiguous neighbor.
1893 * Merge it together with the new freespace.
1894 */
1895 else if (haveright) {
1896 /*
1897 * Delete the old by-size entry on the right.
1898 */
1899 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1900 goto error0;
c29aad41 1901 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1902 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1903 goto error0;
c29aad41 1904 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1905 /*
1906 * Update the starting block and length of the right
1907 * neighbor in the by-block tree.
1908 */
1909 nbno = bno;
1910 nlen = len + gtlen;
1911 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1912 goto error0;
1913 }
1914 /*
1915 * No contiguous neighbors.
1916 * Insert the new freespace into the by-block tree.
1917 */
1918 else {
1919 nbno = bno;
1920 nlen = len;
4b22a571 1921 if ((error = xfs_btree_insert(bno_cur, &i)))
1da177e4 1922 goto error0;
c29aad41 1923 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1924 }
1925 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1926 bno_cur = NULL;
1927 /*
1928 * In all cases we need to insert the new freespace in the by-size tree.
1929 */
1930 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1931 goto error0;
c29aad41 1932 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
4b22a571 1933 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4 1934 goto error0;
c29aad41 1935 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1936 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1937 cnt_cur = NULL;
ecb6928f 1938
1da177e4
LT
1939 /*
1940 * Update the freespace totals in the ag and superblock.
1941 */
ecb6928f
CH
1942 pag = xfs_perag_get(mp, agno);
1943 error = xfs_alloc_update_counters(tp, pag, agbp, len);
3fd129b6 1944 xfs_ag_resv_free_extent(pag, type, tp, len);
ecb6928f
CH
1945 xfs_perag_put(pag);
1946 if (error)
1947 goto error0;
1948
ff6d6af2
BD
1949 XFS_STATS_INC(mp, xs_freex);
1950 XFS_STATS_ADD(mp, xs_freeb, len);
0b1b213f 1951
3fd129b6
DW
1952 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1953 haveleft, haveright);
1da177e4 1954
1da177e4
LT
1955 return 0;
1956
1957 error0:
3fd129b6
DW
1958 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1959 -1, -1);
1da177e4
LT
1960 if (bno_cur)
1961 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1962 if (cnt_cur)
1963 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1964 return error;
1965}
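/*
 * Editor's sketch, not part of xfs_alloc.c: the four coalescing cases that
 * xfs_free_ag_extent() handles when returning [bno, bno + len) to the free
 * space btrees.  Plain integers stand in for btree records; the real code
 * also deletes/updates the matching by-size entries and the rmap.
 */
struct free_rec { unsigned int bno; unsigned int len; };

static struct free_rec
coalesce_freed_extent(unsigned int bno, unsigned int len,
                      int haveleft, struct free_rec lt,    /* left neighbor */
                      int haveright, struct free_rec gt)   /* right neighbor */
{
        struct free_rec n = { bno, len };

        if (haveleft && lt.bno + lt.len == bno) {   /* merge with the left */
                n.bno = lt.bno;
                n.len += lt.len;
        }
        if (haveright && bno + len == gt.bno)       /* merge with the right */
                n.len += gt.len;
        return n;   /* record written back to the by-bno and by-cnt trees */
}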
1966
1967/*
1968 * Visible (exported) allocation/free functions.
1969 * Some of these are used just by xfs_alloc_btree.c and this file.
1970 */
1971
1972/*
1973 * Compute and fill in value of m_ag_maxlevels.
1974 */
1975void
1976xfs_alloc_compute_maxlevels(
1977 xfs_mount_t *mp) /* file system mount structure */
1978{
19b54ee6
DW
1979 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
1980 (mp->m_sb.sb_agblocks + 1) / 2);
1da177e4
LT
1981}
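/*
 * Editor's sketch, not part of xfs_alloc.c: the idea behind the maxlevels
 * computation.  The worst case is about one free-space record per two AG
 * blocks (alternating used/free), hence the (sb_agblocks + 1) / 2 above;
 * dividing repeatedly by the minimum records per block gives the height.
 * This illustrates the concept, it is not the kernel's
 * xfs_btree_compute_maxlevels() itself.
 */
static unsigned int
btree_maxlevels(unsigned int min_leaf_recs, unsigned int min_node_recs,
                unsigned long max_records)
{
        unsigned long blocks = (max_records + min_leaf_recs - 1) / min_leaf_recs;
        unsigned int levels;

        for (levels = 1; blocks > 1; levels++)
                blocks = (blocks + min_node_recs - 1) / min_node_recs;
        return levels;
}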
1982
6cc87645 1983/*
3fd129b6
DW
1984 * Find the length of the longest extent in an AG. The 'need' parameter
1985 * specifies how much space we're going to need for the AGFL and the
1986 * 'reserved' parameter tells us how many blocks in this AG are reserved for
1987 * other callers.
6cc87645
DC
1988 */
1989xfs_extlen_t
1990xfs_alloc_longest_free_extent(
1991 struct xfs_mount *mp,
50adbcb4 1992 struct xfs_perag *pag,
3fd129b6
DW
1993 xfs_extlen_t need,
1994 xfs_extlen_t reserved)
6cc87645 1995{
50adbcb4 1996 xfs_extlen_t delta = 0;
6cc87645 1997
3fd129b6
DW
1998 /*
1999 * If the AGFL needs a recharge, we'll have to subtract that from the
2000 * longest extent.
2001 */
6cc87645
DC
2002 if (need > pag->pagf_flcount)
2003 delta = need - pag->pagf_flcount;
2004
3fd129b6
DW
2005 /*
2006 * If we cannot maintain others' reservations with space from the
2007 * not-longest freesp extents, we'll have to subtract /that/ from
2008 * the longest extent too.
2009 */
2010 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2011 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2012
2013 /*
2014 * If the longest extent is long enough to satisfy all the
2015 * reservations and AGFL rules in place, we can return this extent.
2016 */
6cc87645
DC
2017 if (pag->pagf_longest > delta)
2018 return pag->pagf_longest - delta;
3fd129b6
DW
2019
2020 /* Otherwise, let the caller try for 1 block if there's space. */
6cc87645
DC
2021 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2022}
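/*
 * Editor's sketch, not part of xfs_alloc.c: xfs_alloc_longest_free_extent()
 * as plain arithmetic.  'longest', 'freeblks' and 'flcount' stand in for the
 * per-AG counters; the return value mirrors the function above, including
 * the "let the caller try for 1 block" fallback.
 */
static unsigned int
longest_after_reservations(unsigned int longest, unsigned int freeblks,
                           unsigned int flcount, unsigned int need,
                           unsigned int reserved)
{
        unsigned int delta = 0;

        if (need > flcount)                     /* AGFL refill comes first */
                delta = need - flcount;
        if (freeblks - longest < reserved)      /* shortfall elsewhere */
                delta += reserved - (freeblks - longest);

        if (longest > delta)
                return longest - delta;
        return flcount > 0 || longest > 0;      /* maybe 1 block is possible */
}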
2023
496817b4
DC
2024unsigned int
2025xfs_alloc_min_freelist(
2026 struct xfs_mount *mp,
2027 struct xfs_perag *pag)
2028{
2029 unsigned int min_free;
2030
2031 /* space needed by-bno freespace btree */
2032 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2033 mp->m_ag_maxlevels);
2034 /* space needed by-size freespace btree */
2035 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2036 mp->m_ag_maxlevels);
52548852
DW
2037 /* space needed reverse mapping used space btree */
2038 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2039 min_free += min_t(unsigned int,
2040 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2041 mp->m_rmap_maxlevels);
496817b4
DC
2042
2043 return min_free;
2044}
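/*
 * Editor's sketch, not part of xfs_alloc.c: the minimum AGFL length computed
 * above is one full-height split's worth of blocks for each btree that is
 * fed from the freelist - the by-bno and by-cnt free space btrees, plus the
 * rmap btree when it is enabled.  The ternaries below play the role of
 * min_t() in the kernel code.
 */
static unsigned int
min_freelist_blocks(unsigned int bno_levels, unsigned int cnt_levels,
                    unsigned int ag_maxlevels, int has_rmapbt,
                    unsigned int rmap_levels, unsigned int rmap_maxlevels)
{
        unsigned int min_free = 0;

        min_free += bno_levels + 1 < ag_maxlevels ? bno_levels + 1 : ag_maxlevels;
        min_free += cnt_levels + 1 < ag_maxlevels ? cnt_levels + 1 : ag_maxlevels;
        if (has_rmapbt)
                min_free += rmap_levels + 1 < rmap_maxlevels ?
                                rmap_levels + 1 : rmap_maxlevels;
        return min_free;
}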
2045
72d55285
DC
2046/*
2047 * Check if the operation we are fixing up the freelist for should go ahead or
2048 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2049 * is dependent on whether the size and shape of free space available will
2050 * permit the requested allocation to take place.
2051 */
2052static bool
2053xfs_alloc_space_available(
2054 struct xfs_alloc_arg *args,
2055 xfs_extlen_t min_free,
2056 int flags)
2057{
2058 struct xfs_perag *pag = args->pag;
2059 xfs_extlen_t longest;
3fd129b6 2060 xfs_extlen_t reservation; /* blocks that are still reserved */
72d55285
DC
2061 int available;
2062
2063 if (flags & XFS_ALLOC_FLAG_FREEING)
2064 return true;
2065
3fd129b6
DW
2066 reservation = xfs_ag_resv_needed(pag, args->resv);
2067
72d55285 2068 /* do we have enough contiguous free space for the allocation? */
3fd129b6
DW
2069 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
2070 reservation);
72d55285
DC
2071 if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
2072 return false;
2073
3fd129b6 2074 /* do we have enough free space remaining for the allocation? */
72d55285 2075 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
3fd129b6
DW
2076 reservation - min_free - args->total);
2077 if (available < (int)args->minleft || available <= 0)
72d55285
DC
2078 return false;
2079
2080 return true;
2081}
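/*
 * Editor's sketch, not part of xfs_alloc.c: the two gates applied by
 * xfs_alloc_space_available() above - a contiguity check against the longest
 * usable extent, then an overall free space check after subtracting the AGFL
 * refill, the AG reservation and the caller's total/minleft requirements.
 */
#include <stdbool.h>

static bool
space_available(bool freeing, unsigned int longest_usable, unsigned int minlen,
                unsigned int alignment, unsigned int minalignslop,
                long freeblks, long flcount, long reservation, long min_free,
                long total, long minleft)
{
        long available;

        if (freeing)                            /* frees always go ahead */
                return true;
        if (minlen + alignment + minalignslop - 1 > longest_usable)
                return false;                   /* no extent is big enough */

        available = freeblks + flcount - reservation - min_free - total;
        if (available < minleft || available <= 0)
                return false;                   /* not enough space left over */
        return true;
}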
2082
1da177e4
LT
2083/*
2084 * Decide whether to use this allocation group for this allocation.
2085 * If so, fix up the btree freelist's size.
2086 */
2e9101da 2087int /* error */
1da177e4 2088xfs_alloc_fix_freelist(
396503fc
DC
2089 struct xfs_alloc_arg *args, /* allocation argument structure */
2090 int flags) /* XFS_ALLOC_FLAG_... */
1da177e4 2091{
396503fc
DC
2092 struct xfs_mount *mp = args->mp;
2093 struct xfs_perag *pag = args->pag;
2094 struct xfs_trans *tp = args->tp;
2095 struct xfs_buf *agbp = NULL;
2096 struct xfs_buf *agflbp = NULL;
2097 struct xfs_alloc_arg targs; /* local allocation arguments */
2098 xfs_agblock_t bno; /* freelist block */
2099 xfs_extlen_t need; /* total blocks needed in freelist */
c184f855 2100 int error = 0;
396503fc 2101
1da177e4 2102 if (!pag->pagf_init) {
396503fc
DC
2103 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2104 if (error)
2105 goto out_no_agbp;
1da177e4 2106 if (!pag->pagf_init) {
0e1edbd9
NS
2107 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2108 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2109 goto out_agbp_relse;
1da177e4 2110 }
396503fc 2111 }
1da177e4 2112
0e1edbd9 2113 /*
396503fc
DC
2114 * If this is a metadata-preferred pag and we are allocating user data,
2115 * then try somewhere else, unless we are being asked to try harder at
2116 * this point.
1da177e4 2117 */
292378ed 2118 if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
0e1edbd9
NS
2119 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2120 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2121 goto out_agbp_relse;
1da177e4
LT
2122 }
2123
496817b4 2124 need = xfs_alloc_min_freelist(mp, pag);
396503fc
DC
2125 if (!xfs_alloc_space_available(args, need, flags))
2126 goto out_agbp_relse;
0e1edbd9 2127
1da177e4
LT
2128 /*
2129 * Get the a.g. freespace buffer.
2130 * Can fail if we're not blocking on locks, and it's held.
2131 */
396503fc
DC
2132 if (!agbp) {
2133 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2134 if (error)
2135 goto out_no_agbp;
2136 if (!agbp) {
0e1edbd9
NS
2137 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2138 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2139 goto out_no_agbp;
0e1edbd9 2140 }
1da177e4 2141 }
50adbcb4 2142
50adbcb4 2143 /* If there isn't enough total space or a large enough single extent, reject it. */
496817b4 2144 need = xfs_alloc_min_freelist(mp, pag);
396503fc
DC
2145 if (!xfs_alloc_space_available(args, need, flags))
2146 goto out_agbp_relse;
72d55285 2147
1da177e4
LT
2148 /*
2149 * Make the freelist shorter if it's too long.
50adbcb4 2150 *
396503fc
DC
2151 * Note that from this point onwards, we will always release the agf and
2152 * agfl buffers on error. This handles the case where we error out and
2153 * the buffers are clean or may not have been joined to the transaction
2154 * and hence need to be released manually. If they have been joined to
2155 * the transaction, then xfs_trans_brelse() will handle them
2156 * appropriately based on the recursion count and dirty state of the
2157 * buffer.
2158 *
50adbcb4
DC
2159 * XXX (dgc): When we have lots of free space, does this buy us
2160 * anything other than extra overhead when we need to put more blocks
2161 * back on the free list? Maybe we should only do this when space is
2162 * getting low or the AGFL is more than half full?
04f13060
DW
2163 *
2164 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2165 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2166 * updating the rmapbt. Both flags are used in xfs_repair while we're
2167 * rebuilding the rmapbt, and neither are used by the kernel. They're
2168 * both required to ensure that rmaps are correctly recorded for the
2169 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2170 * repair/rmap.c in xfsprogs for details.
1da177e4 2171 */
04f13060
DW
2172 memset(&targs, 0, sizeof(targs));
2173 if (flags & XFS_ALLOC_FLAG_NORMAP)
2174 xfs_rmap_skip_owner_update(&targs.oinfo);
2175 else
2176 xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2177 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
50adbcb4 2178 struct xfs_buf *bp;
1da177e4 2179
92821e2b
DC
2180 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2181 if (error)
396503fc 2182 goto out_agbp_relse;
340785cc 2183 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
3fd129b6 2184 &targs.oinfo, XFS_AG_RESV_AGFL);
50adbcb4 2185 if (error)
396503fc 2186 goto out_agbp_relse;
1da177e4
LT
2187 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2188 xfs_trans_binval(tp, bp);
2189 }
50adbcb4 2190
1da177e4
LT
2191 targs.tp = tp;
2192 targs.mp = mp;
2193 targs.agbp = agbp;
2194 targs.agno = args->agno;
3fd129b6 2195 targs.alignment = targs.minlen = targs.prod = 1;
1da177e4
LT
2196 targs.type = XFS_ALLOCTYPE_THIS_AG;
2197 targs.pag = pag;
50adbcb4
DC
2198 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2199 if (error)
396503fc 2200 goto out_agbp_relse;
50adbcb4
DC
2201
2202 /* Make the freelist longer if it's too short. */
2203 while (pag->pagf_flcount < need) {
1da177e4 2204 targs.agbno = 0;
50adbcb4 2205 targs.maxlen = need - pag->pagf_flcount;
3fd129b6 2206 targs.resv = XFS_AG_RESV_AGFL;
50adbcb4
DC
2207
2208 /* Allocate as many blocks as possible at once. */
2209 error = xfs_alloc_ag_vextent(&targs);
396503fc
DC
2210 if (error)
2211 goto out_agflbp_relse;
2212
1da177e4
LT
2213 /*
2214 * Stop if we run out. Won't happen if callers are obeying
2215 * the restrictions correctly. Can happen for free calls
2216 * on a completely full ag.
2217 */
d210a28c 2218 if (targs.agbno == NULLAGBLOCK) {
0e1edbd9
NS
2219 if (flags & XFS_ALLOC_FLAG_FREEING)
2220 break;
396503fc 2221 goto out_agflbp_relse;
d210a28c 2222 }
1da177e4
LT
2223 /*
2224 * Put each allocated block on the list.
2225 */
2226 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
92821e2b
DC
2227 error = xfs_alloc_put_freelist(tp, agbp,
2228 agflbp, bno, 0);
2229 if (error)
396503fc 2230 goto out_agflbp_relse;
1da177e4
LT
2231 }
2232 }
e63a3690 2233 xfs_trans_brelse(tp, agflbp);
1da177e4
LT
2234 args->agbp = agbp;
2235 return 0;
396503fc
DC
2236
2237out_agflbp_relse:
2238 xfs_trans_brelse(tp, agflbp);
2239out_agbp_relse:
2240 if (agbp)
2241 xfs_trans_brelse(tp, agbp);
2242out_no_agbp:
2243 args->agbp = NULL;
2244 return error;
1da177e4
LT
2245}
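/*
 * Editor's sketch, not part of xfs_alloc.c: the overall shape of the AGFL
 * fixup loops in xfs_alloc_fix_freelist() above.  An over-long freelist is
 * trimmed by freeing blocks back into the AG; a short one is topped up with
 * fresh allocations.  The two function pointers are placeholders for the
 * real xfs_alloc_get_freelist/xfs_free_ag_extent and xfs_alloc_ag_vextent/
 * xfs_alloc_put_freelist sequences.
 */
static int
fix_freelist_shape(unsigned int *flcount, unsigned int need,
                   int (*shrink_by_one)(void),
                   int (*grow_by_upto)(unsigned int wanted, unsigned int *got))
{
        int error;

        while (*flcount > need) {               /* freelist too long: shrink */
                error = shrink_by_one();
                if (error)
                        return error;
                (*flcount)--;
        }
        while (*flcount < need) {               /* freelist too short: grow */
                unsigned int got = 0;

                error = grow_by_upto(need - *flcount, &got);
                if (error || got == 0)
                        return error;
                *flcount += got;
        }
        return 0;
}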
2246
2247/*
2248 * Get a block from the freelist.
2249 * Returns with the buffer for the block gotten.
2250 */
2251int /* error */
2252xfs_alloc_get_freelist(
2253 xfs_trans_t *tp, /* transaction pointer */
2254 xfs_buf_t *agbp, /* buffer containing the agf structure */
92821e2b
DC
2255 xfs_agblock_t *bnop, /* block address retrieved from freelist */
 2256 int btreeblk) /* destination is an AGF btree */
1da177e4
LT
2257{
2258 xfs_agf_t *agf; /* a.g. freespace structure */
1da177e4
LT
2259 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2260 xfs_agblock_t bno; /* block number returned */
77c95bba 2261 __be32 *agfl_bno;
1da177e4 2262 int error;
92821e2b 2263 int logflags;
77c95bba 2264 xfs_mount_t *mp = tp->t_mountp;
1da177e4
LT
2265 xfs_perag_t *pag; /* per allocation group data */
2266
1da177e4
LT
2267 /*
2268 * Freelist is empty, give up.
2269 */
77c95bba 2270 agf = XFS_BUF_TO_AGF(agbp);
1da177e4
LT
2271 if (!agf->agf_flcount) {
2272 *bnop = NULLAGBLOCK;
2273 return 0;
2274 }
2275 /*
2276 * Read the array of free blocks.
2277 */
77c95bba
CH
2278 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2279 &agflbp);
2280 if (error)
1da177e4 2281 return error;
77c95bba
CH
2282
2283
1da177e4
LT
2284 /*
2285 * Get the block number and update the data structures.
2286 */
77c95bba
CH
2287 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2288 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
413d57c9 2289 be32_add_cpu(&agf->agf_flfirst, 1);
1da177e4 2290 xfs_trans_brelse(tp, agflbp);
16259e7d 2291 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1da177e4 2292 agf->agf_flfirst = 0;
a862e0fd
DC
2293
2294 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2295 be32_add_cpu(&agf->agf_flcount, -1);
1da177e4
LT
2296 xfs_trans_agflist_delta(tp, -1);
2297 pag->pagf_flcount--;
a862e0fd 2298 xfs_perag_put(pag);
92821e2b
DC
2299
2300 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2301 if (btreeblk) {
413d57c9 2302 be32_add_cpu(&agf->agf_btreeblks, 1);
92821e2b
DC
2303 pag->pagf_btreeblks++;
2304 logflags |= XFS_AGF_BTREEBLKS;
2305 }
2306
92821e2b 2307 xfs_alloc_log_agf(tp, agbp, logflags);
1da177e4
LT
2308 *bnop = bno;
2309
1da177e4
LT
2310 return 0;
2311}
2312
2313/*
2314 * Log the given fields from the agf structure.
2315 */
2316void
2317xfs_alloc_log_agf(
2318 xfs_trans_t *tp, /* transaction pointer */
2319 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2320 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2321{
2322 int first; /* first byte offset */
2323 int last; /* last byte offset */
2324 static const short offsets[] = {
2325 offsetof(xfs_agf_t, agf_magicnum),
2326 offsetof(xfs_agf_t, agf_versionnum),
2327 offsetof(xfs_agf_t, agf_seqno),
2328 offsetof(xfs_agf_t, agf_length),
2329 offsetof(xfs_agf_t, agf_roots[0]),
2330 offsetof(xfs_agf_t, agf_levels[0]),
2331 offsetof(xfs_agf_t, agf_flfirst),
2332 offsetof(xfs_agf_t, agf_fllast),
2333 offsetof(xfs_agf_t, agf_flcount),
2334 offsetof(xfs_agf_t, agf_freeblks),
2335 offsetof(xfs_agf_t, agf_longest),
92821e2b 2336 offsetof(xfs_agf_t, agf_btreeblks),
4e0e6040 2337 offsetof(xfs_agf_t, agf_uuid),
f32866fd 2338 offsetof(xfs_agf_t, agf_rmap_blocks),
bdf28630
DW
2339 offsetof(xfs_agf_t, agf_refcount_blocks),
2340 offsetof(xfs_agf_t, agf_refcount_root),
2341 offsetof(xfs_agf_t, agf_refcount_level),
da1f039d
DW
2342 /* needed so that we don't log the whole rest of the structure: */
2343 offsetof(xfs_agf_t, agf_spare64),
1da177e4
LT
2344 sizeof(xfs_agf_t)
2345 };
2346
0b1b213f
CH
2347 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2348
61fe135c 2349 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
4e0e6040 2350
1da177e4
LT
2351 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2352 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2353}
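/*
 * Editor's sketch, not part of xfs_alloc.c: how a field bitmask plus the
 * offsets[] table above becomes a single [first, last] byte range to log.
 * The lowest and highest set bits select table entries; a field ends where
 * the next entry starts, which is why offsets[] carries one terminating
 * entry (sizeof(xfs_agf_t)).  This illustrates the idea behind
 * xfs_btree_offsets(), it is not the kernel implementation.
 */
static void
fields_to_byte_range(unsigned int fields, const short *offsets, int nbits,
                     int *first, int *last)
{
        int i, lowbit = -1, highbit = -1;

        for (i = 0; i < nbits; i++) {
                if (fields & (1u << i)) {
                        if (lowbit < 0)
                                lowbit = i;
                        highbit = i;
                }
        }
        if (lowbit < 0) {               /* empty mask: nothing to log */
                *first = *last = 0;
                return;
        }
        *first = offsets[lowbit];
        *last = offsets[highbit + 1] - 1;   /* inclusive end of last field */
}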
2354
2355/*
2356 * Interface for inode allocation to force the pag data to be initialized.
2357 */
2358int /* error */
2359xfs_alloc_pagf_init(
2360 xfs_mount_t *mp, /* file system mount structure */
2361 xfs_trans_t *tp, /* transaction pointer */
2362 xfs_agnumber_t agno, /* allocation group number */
2363 int flags) /* XFS_ALLOC_FLAGS_... */
2364{
2365 xfs_buf_t *bp;
2366 int error;
2367
2368 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2369 return error;
2370 if (bp)
2371 xfs_trans_brelse(tp, bp);
2372 return 0;
2373}
2374
2375/*
2376 * Put the block on the freelist for the allocation group.
2377 */
2378int /* error */
2379xfs_alloc_put_freelist(
2380 xfs_trans_t *tp, /* transaction pointer */
2381 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2382 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
92821e2b
DC
2383 xfs_agblock_t bno, /* block being freed */
 2384 int btreeblk) /* block came from an AGF btree */
1da177e4
LT
2385{
2386 xfs_agf_t *agf; /* a.g. freespace structure */
e2101005 2387 __be32 *blockp;/* pointer to array entry */
1da177e4 2388 int error;
92821e2b 2389 int logflags;
1da177e4
LT
2390 xfs_mount_t *mp; /* mount structure */
2391 xfs_perag_t *pag; /* per allocation group data */
77c95bba
CH
2392 __be32 *agfl_bno;
2393 int startoff;
1da177e4
LT
2394
2395 agf = XFS_BUF_TO_AGF(agbp);
2396 mp = tp->t_mountp;
2397
2398 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
16259e7d 2399 be32_to_cpu(agf->agf_seqno), &agflbp)))
1da177e4 2400 return error;
413d57c9 2401 be32_add_cpu(&agf->agf_fllast, 1);
16259e7d 2402 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1da177e4 2403 agf->agf_fllast = 0;
a862e0fd
DC
2404
2405 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2406 be32_add_cpu(&agf->agf_flcount, 1);
1da177e4
LT
2407 xfs_trans_agflist_delta(tp, 1);
2408 pag->pagf_flcount++;
92821e2b
DC
2409
2410 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2411 if (btreeblk) {
413d57c9 2412 be32_add_cpu(&agf->agf_btreeblks, -1);
92821e2b
DC
2413 pag->pagf_btreeblks--;
2414 logflags |= XFS_AGF_BTREEBLKS;
2415 }
a862e0fd 2416 xfs_perag_put(pag);
92821e2b 2417
92821e2b
DC
2418 xfs_alloc_log_agf(tp, agbp, logflags);
2419
16259e7d 2420 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
77c95bba
CH
2421
2422 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2423 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
e2101005 2424 *blockp = cpu_to_be32(bno);
77c95bba
CH
2425 startoff = (char *)blockp - (char *)agflbp->b_addr;
2426
92821e2b 2427 xfs_alloc_log_agf(tp, agbp, logflags);
77c95bba 2428
61fe135c 2429 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
77c95bba
CH
2430 xfs_trans_log_buf(tp, agflbp, startoff,
2431 startoff + sizeof(xfs_agblock_t) - 1);
1da177e4
LT
2432 return 0;
2433}
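/*
 * Editor's sketch, not part of xfs_alloc.c: the AGFL manipulated by the two
 * functions above behaves as a fixed-size circular array of block numbers,
 * indexed by flfirst (next block to hand out) and fllast (most recently
 * added), with flcount tracking occupancy.  AGFL_CAPACITY stands in for
 * XFS_AGFL_SIZE(); the on-disk fields are really big-endian and updated via
 * the be32 helpers seen above.
 */
#define AGFL_CAPACITY   128             /* illustrative capacity only */

struct agfl_sketch {
        unsigned int    bno[AGFL_CAPACITY];
        unsigned int    flfirst, fllast, flcount;
};

static int
agfl_get(struct agfl_sketch *fl, unsigned int *bnop)
{
        if (fl->flcount == 0)
                return -1;                      /* freelist is empty */
        *bnop = fl->bno[fl->flfirst];
        if (++fl->flfirst == AGFL_CAPACITY)     /* wrap around */
                fl->flfirst = 0;
        fl->flcount--;
        return 0;
}

static void
agfl_put(struct agfl_sketch *fl, unsigned int bno)
{
        if (++fl->fllast == AGFL_CAPACITY)      /* wrap around */
                fl->fllast = 0;
        fl->bno[fl->fllast] = bno;
        fl->flcount++;
}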
2434
4e0e6040 2435static bool
612cfbfe 2436xfs_agf_verify(
4e0e6040 2437 struct xfs_mount *mp,
5d5f527d
DC
2438 struct xfs_buf *bp)
2439 {
4e0e6040 2440 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
5d5f527d 2441
a45086e2
BF
2442 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2443 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
4e0e6040 2444 return false;
a45086e2
BF
2445 if (!xfs_log_check_lsn(mp,
2446 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2447 return false;
2448 }
5d5f527d 2449
4e0e6040
DC
2450 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2451 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2452 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2453 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2454 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2455 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
2456 return false;
5d5f527d 2457
e1b05723
ES
2458 if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2459 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2460 return false;
2461
b8704944
DW
2462 if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2463 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)
2464 return false;
2465
5d5f527d
DC
2466 /*
2467 * during growfs operations, the perag is not fully initialised,
2468 * so we can't use it for any useful checking. growfs ensures we can't
2469 * use it by using uncached buffers that don't have the perag attached
2470 * so we can detect and avoid this problem.
2471 */
4e0e6040
DC
2472 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2473 return false;
5d5f527d 2474
4e0e6040
DC
2475 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2476 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2477 return false;
2478
46eeb521
DW
2479 if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2480 be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)
2481 return false;
2482
4e0e6040 2483 return true;
5d5f527d 2484
612cfbfe
DC
2485}
2486
1813dd64
DC
2487static void
2488xfs_agf_read_verify(
612cfbfe
DC
2489 struct xfs_buf *bp)
2490{
4e0e6040 2491 struct xfs_mount *mp = bp->b_target->bt_mount;
4e0e6040 2492
ce5028cf
ES
2493 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2494 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2451337d 2495 xfs_buf_ioerror(bp, -EFSBADCRC);
ce5028cf
ES
2496 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
2497 XFS_ERRTAG_ALLOC_READ_AGF,
2498 XFS_RANDOM_ALLOC_READ_AGF))
2451337d 2499 xfs_buf_ioerror(bp, -EFSCORRUPTED);
ce5028cf
ES
2500
2501 if (bp->b_error)
2502 xfs_verifier_error(bp);
612cfbfe 2503}
5d5f527d 2504
b0f539de 2505static void
1813dd64 2506xfs_agf_write_verify(
612cfbfe
DC
2507 struct xfs_buf *bp)
2508{
4e0e6040
DC
2509 struct xfs_mount *mp = bp->b_target->bt_mount;
2510 struct xfs_buf_log_item *bip = bp->b_fspriv;
2511
2512 if (!xfs_agf_verify(mp, bp)) {
2451337d 2513 xfs_buf_ioerror(bp, -EFSCORRUPTED);
ce5028cf 2514 xfs_verifier_error(bp);
4e0e6040
DC
2515 return;
2516 }
2517
2518 if (!xfs_sb_version_hascrc(&mp->m_sb))
2519 return;
2520
2521 if (bip)
2522 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2523
f1dbcd7e 2524 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
5d5f527d
DC
2525}
2526
1813dd64 2527const struct xfs_buf_ops xfs_agf_buf_ops = {
233135b7 2528 .name = "xfs_agf",
1813dd64
DC
2529 .verify_read = xfs_agf_read_verify,
2530 .verify_write = xfs_agf_write_verify,
2531};
2532
1da177e4
LT
2533/*
2534 * Read in the allocation group header (free/alloc section).
2535 */
2536int /* error */
4805621a
CH
2537xfs_read_agf(
2538 struct xfs_mount *mp, /* mount point structure */
2539 struct xfs_trans *tp, /* transaction pointer */
2540 xfs_agnumber_t agno, /* allocation group number */
2541 int flags, /* XFS_BUF_ */
2542 struct xfs_buf **bpp) /* buffer for the ag freelist header */
1da177e4 2543{
1da177e4
LT
2544 int error;
2545
d123031a
DC
2546 trace_xfs_read_agf(mp, agno);
2547
1da177e4
LT
2548 ASSERT(agno != NULLAGNUMBER);
2549 error = xfs_trans_read_buf(
2550 mp, tp, mp->m_ddev_targp,
2551 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1813dd64 2552 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
1da177e4
LT
2553 if (error)
2554 return error;
4805621a 2555 if (!*bpp)
1da177e4 2556 return 0;
4805621a 2557
5a52c2a5 2558 ASSERT(!(*bpp)->b_error);
38f23232 2559 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
4805621a
CH
2560 return 0;
2561}
2562
2563/*
2564 * Read in the allocation group header (free/alloc section).
2565 */
2566int /* error */
2567xfs_alloc_read_agf(
2568 struct xfs_mount *mp, /* mount point structure */
2569 struct xfs_trans *tp, /* transaction pointer */
2570 xfs_agnumber_t agno, /* allocation group number */
2571 int flags, /* XFS_ALLOC_FLAG_... */
2572 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2573{
2574 struct xfs_agf *agf; /* ag freelist header */
2575 struct xfs_perag *pag; /* per allocation group data */
2576 int error;
2577
d123031a 2578 trace_xfs_alloc_read_agf(mp, agno);
4805621a 2579
d123031a 2580 ASSERT(agno != NULLAGNUMBER);
4805621a 2581 error = xfs_read_agf(mp, tp, agno,
0cadda1c 2582 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
4805621a
CH
2583 bpp);
2584 if (error)
2585 return error;
2586 if (!*bpp)
2587 return 0;
5a52c2a5 2588 ASSERT(!(*bpp)->b_error);
4805621a
CH
2589
2590 agf = XFS_BUF_TO_AGF(*bpp);
a862e0fd 2591 pag = xfs_perag_get(mp, agno);
1da177e4 2592 if (!pag->pagf_init) {
16259e7d 2593 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
92821e2b 2594 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
16259e7d
CH
2595 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2596 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
1da177e4 2597 pag->pagf_levels[XFS_BTNUM_BNOi] =
16259e7d 2598 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
1da177e4 2599 pag->pagf_levels[XFS_BTNUM_CNTi] =
16259e7d 2600 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
b8704944
DW
2601 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2602 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
46eeb521 2603 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
007c61c6 2604 spin_lock_init(&pag->pagb_lock);
e57336ff 2605 pag->pagb_count = 0;
ed3b4d6c 2606 pag->pagb_tree = RB_ROOT;
1da177e4
LT
2607 pag->pagf_init = 1;
2608 }
2609#ifdef DEBUG
2610 else if (!XFS_FORCED_SHUTDOWN(mp)) {
16259e7d 2611 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
89b28393 2612 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
16259e7d
CH
2613 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2614 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
1da177e4 2615 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
16259e7d 2616 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
1da177e4 2617 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
16259e7d 2618 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
1da177e4
LT
2619 }
2620#endif
a862e0fd 2621 xfs_perag_put(pag);
1da177e4
LT
2622 return 0;
2623}
2624
2625/*
2626 * Allocate an extent (variable-size).
2627 * Depending on the allocation type, we either look in a single allocation
2628 * group or loop over the allocation groups to find the result.
2629 */
2630int /* error */
e04426b9 2631xfs_alloc_vextent(
1da177e4
LT
2632 xfs_alloc_arg_t *args) /* allocation argument structure */
2633{
2634 xfs_agblock_t agsize; /* allocation group size */
2635 int error;
2636 int flags; /* XFS_ALLOC_FLAG_... locking flags */
1da177e4
LT
2637 xfs_extlen_t minleft;/* minimum left value, temp copy */
2638 xfs_mount_t *mp; /* mount structure pointer */
2639 xfs_agnumber_t sagno; /* starting allocation group number */
2640 xfs_alloctype_t type; /* input allocation type */
2641 int bump_rotor = 0;
2642 int no_min = 0;
2643 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2644
2645 mp = args->mp;
2646 type = args->otype = args->type;
2647 args->agbno = NULLAGBLOCK;
2648 /*
2649 * Just fix this up, for the case where the last a.g. is shorter
2650 * (or there's only one a.g.) and the caller couldn't easily figure
2651 * that out (xfs_bmap_alloc).
2652 */
2653 agsize = mp->m_sb.sb_agblocks;
2654 if (args->maxlen > agsize)
2655 args->maxlen = agsize;
2656 if (args->alignment == 0)
2657 args->alignment = 1;
2658 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2659 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2660 ASSERT(args->minlen <= args->maxlen);
2661 ASSERT(args->minlen <= agsize);
2662 ASSERT(args->mod < args->prod);
2663 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2664 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2665 args->minlen > args->maxlen || args->minlen > agsize ||
2666 args->mod >= args->prod) {
2667 args->fsbno = NULLFSBLOCK;
0b1b213f 2668 trace_xfs_alloc_vextent_badargs(args);
1da177e4
LT
2669 return 0;
2670 }
2671 minleft = args->minleft;
2672
2673 switch (type) {
2674 case XFS_ALLOCTYPE_THIS_AG:
2675 case XFS_ALLOCTYPE_NEAR_BNO:
2676 case XFS_ALLOCTYPE_THIS_BNO:
2677 /*
2678 * These three force us into a single a.g.
2679 */
2680 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
a862e0fd 2681 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2682 args->minleft = 0;
2683 error = xfs_alloc_fix_freelist(args, 0);
2684 args->minleft = minleft;
2685 if (error) {
0b1b213f 2686 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2687 goto error0;
2688 }
2689 if (!args->agbp) {
0b1b213f 2690 trace_xfs_alloc_vextent_noagbp(args);
1da177e4
LT
2691 break;
2692 }
2693 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2694 if ((error = xfs_alloc_ag_vextent(args)))
2695 goto error0;
1da177e4
LT
2696 break;
2697 case XFS_ALLOCTYPE_START_BNO:
2698 /*
2699 * Try near allocation first, then anywhere-in-ag after
2700 * the first a.g. fails.
2701 */
292378ed 2702 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
1da177e4
LT
2703 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2704 args->fsbno = XFS_AGB_TO_FSB(mp,
2705 ((mp->m_agfrotor / rotorstep) %
2706 mp->m_sb.sb_agcount), 0);
2707 bump_rotor = 1;
2708 }
2709 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2710 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2711 /* FALLTHROUGH */
2712 case XFS_ALLOCTYPE_ANY_AG:
2713 case XFS_ALLOCTYPE_START_AG:
2714 case XFS_ALLOCTYPE_FIRST_AG:
2715 /*
2716 * Rotate through the allocation groups looking for a winner.
2717 */
2718 if (type == XFS_ALLOCTYPE_ANY_AG) {
2719 /*
2720 * Start with the last place we left off.
2721 */
2722 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2723 mp->m_sb.sb_agcount;
2724 args->type = XFS_ALLOCTYPE_THIS_AG;
2725 flags = XFS_ALLOC_FLAG_TRYLOCK;
2726 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2727 /*
2728 * Start with allocation group given by bno.
2729 */
2730 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2731 args->type = XFS_ALLOCTYPE_THIS_AG;
2732 sagno = 0;
2733 flags = 0;
2734 } else {
2735 if (type == XFS_ALLOCTYPE_START_AG)
2736 args->type = XFS_ALLOCTYPE_THIS_AG;
2737 /*
2738 * Start with the given allocation group.
2739 */
2740 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2741 flags = XFS_ALLOC_FLAG_TRYLOCK;
2742 }
2743 /*
2744 * Loop over allocation groups twice; first time with
2745 * trylock set, second time without.
2746 */
1da177e4 2747 for (;;) {
a862e0fd 2748 args->pag = xfs_perag_get(mp, args->agno);
1da177e4
LT
2749 if (no_min) args->minleft = 0;
2750 error = xfs_alloc_fix_freelist(args, flags);
2751 args->minleft = minleft;
2752 if (error) {
0b1b213f 2753 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2754 goto error0;
2755 }
2756 /*
2757 * If we get a buffer back then the allocation will fly.
2758 */
2759 if (args->agbp) {
2760 if ((error = xfs_alloc_ag_vextent(args)))
2761 goto error0;
2762 break;
2763 }
0b1b213f
CH
2764
2765 trace_xfs_alloc_vextent_loopfailed(args);
2766
1da177e4
LT
2767 /*
2768 * Didn't work, figure out the next iteration.
2769 */
2770 if (args->agno == sagno &&
2771 type == XFS_ALLOCTYPE_START_BNO)
2772 args->type = XFS_ALLOCTYPE_THIS_AG;
d210a28c
YL
2773 /*
2774 * For the first allocation, we can try any AG to get
2775 * space. However, if we already have allocated a
2776 * block, we don't want to try AGs whose number is below
2777 * sagno. Otherwise, we may end up with out-of-order
2778 * locking of AGF, which might cause deadlock.
2779 */
2780 if (++(args->agno) == mp->m_sb.sb_agcount) {
2781 if (args->firstblock != NULLFSBLOCK)
2782 args->agno = sagno;
2783 else
2784 args->agno = 0;
2785 }
1da177e4
LT
2786 /*
2787 * Reached the starting a.g., must either be done
2788 * or switch to non-trylock mode.
2789 */
2790 if (args->agno == sagno) {
2791 if (no_min == 1) {
2792 args->agbno = NULLAGBLOCK;
0b1b213f 2793 trace_xfs_alloc_vextent_allfailed(args);
1da177e4
LT
2794 break;
2795 }
2796 if (flags == 0) {
2797 no_min = 1;
2798 } else {
2799 flags = 0;
2800 if (type == XFS_ALLOCTYPE_START_BNO) {
2801 args->agbno = XFS_FSB_TO_AGBNO(mp,
2802 args->fsbno);
2803 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2804 }
2805 }
2806 }
a862e0fd 2807 xfs_perag_put(args->pag);
1da177e4 2808 }
1da177e4
LT
2809 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2810 if (args->agno == sagno)
2811 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2812 (mp->m_sb.sb_agcount * rotorstep);
2813 else
2814 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2815 (mp->m_sb.sb_agcount * rotorstep);
2816 }
2817 break;
2818 default:
2819 ASSERT(0);
2820 /* NOTREACHED */
2821 }
2822 if (args->agbno == NULLAGBLOCK)
2823 args->fsbno = NULLFSBLOCK;
2824 else {
2825 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2826#ifdef DEBUG
2827 ASSERT(args->len >= args->minlen);
2828 ASSERT(args->len <= args->maxlen);
2829 ASSERT(args->agbno % args->alignment == 0);
2830 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2831 args->len);
2832#endif
3fbbbea3
DC
2833
2834 /* Zero the extent if we were asked to do so */
292378ed 2835 if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
3fbbbea3
DC
2836 error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2837 if (error)
2838 goto error0;
2839 }
2840
1da177e4 2841 }
a862e0fd 2842 xfs_perag_put(args->pag);
1da177e4
LT
2843 return 0;
2844error0:
a862e0fd 2845 xfs_perag_put(args->pag);
1da177e4
LT
2846 return error;
2847}
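/*
 * Editor's sketch, not part of xfs_alloc.c: the AG iteration pattern used by
 * the "rotate through the allocation groups" cases above.  A first pass runs
 * with trylock semantics and a second without; once a block has already been
 * allocated in the transaction, the scan never wraps below the starting AG,
 * which keeps AGF locking ordered.  The extra minleft-relaxation pass of the
 * real code is omitted, and try_ag() is a placeholder for the per-AG attempt.
 */
#include <stdbool.h>

static int
rotate_ags(unsigned int sagno, unsigned int agcount, bool already_allocated,
           int (*try_ag)(unsigned int agno, bool trylock, bool *done))
{
        bool trylock = true;
        unsigned int agno = sagno;

        for (;;) {
                bool done = false;
                int error = try_ag(agno, trylock, &done);

                if (error || done)
                        return error;
                if (++agno == agcount)          /* wrap, respecting sagno */
                        agno = already_allocated ? sagno : 0;
                if (agno == sagno) {
                        if (!trylock)           /* both passes failed */
                                return 0;
                        trylock = false;        /* second pass: blocking */
                }
        }
}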
2848
4d89e20b
DC
2849/* Ensure that the freelist is at full capacity. */
2850int
2851xfs_free_extent_fix_freelist(
2852 struct xfs_trans *tp,
2853 xfs_agnumber_t agno,
2854 struct xfs_buf **agbp)
1da177e4 2855{
4d89e20b
DC
2856 struct xfs_alloc_arg args;
2857 int error;
1da177e4 2858
4d89e20b 2859 memset(&args, 0, sizeof(struct xfs_alloc_arg));
1da177e4
LT
2860 args.tp = tp;
2861 args.mp = tp->t_mountp;
4d89e20b 2862 args.agno = agno;
be65b18a
DC
2863
2864 /*
2865 * validate that the block number is legal - this enables us to detect
2866 * and handle a silent filesystem corruption rather than crashing.
2867 */
be65b18a 2868 if (args.agno >= args.mp->m_sb.sb_agcount)
2451337d 2869 return -EFSCORRUPTED;
be65b18a 2870
a862e0fd 2871 args.pag = xfs_perag_get(args.mp, args.agno);
be65b18a
DC
2872 ASSERT(args.pag);
2873
2874 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2875 if (error)
4d89e20b
DC
2876 goto out;
2877
2878 *agbp = args.agbp;
2879out:
2880 xfs_perag_put(args.pag);
2881 return error;
2882}
2883
2884/*
2885 * Free an extent.
2886 * Just break up the extent address and hand off to xfs_free_ag_extent
2887 * after fixing up the freelist.
2888 */
2889int /* error */
2890xfs_free_extent(
2891 struct xfs_trans *tp, /* transaction pointer */
2892 xfs_fsblock_t bno, /* starting block number of extent */
340785cc 2893 xfs_extlen_t len, /* length of extent */
3fd129b6
DW
2894 struct xfs_owner_info *oinfo, /* extent owner */
2895 enum xfs_ag_resv_type type) /* block reservation type */
4d89e20b
DC
2896{
2897 struct xfs_mount *mp = tp->t_mountp;
2898 struct xfs_buf *agbp;
2899 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
2900 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
2901 int error;
2902
2903 ASSERT(len != 0);
3fd129b6 2904 ASSERT(type != XFS_AG_RESV_AGFL);
4d89e20b 2905
ba9e7802
DW
2906 if (XFS_TEST_ERROR(false, mp,
2907 XFS_ERRTAG_FREE_EXTENT,
2908 XFS_RANDOM_FREE_EXTENT))
2909 return -EIO;
2910
4d89e20b
DC
2911 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
2912 if (error)
2913 return error;
2914
2915 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
be65b18a
DC
2916
2917 /* validate that the extent size is legal now that we have the agf locked */
4d89e20b
DC
2918 XFS_WANT_CORRUPTED_GOTO(mp,
2919 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
2920 err);
be65b18a 2921
3fd129b6 2922 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
4d89e20b
DC
2923 if (error)
2924 goto err;
2925
2926 xfs_extent_busy_insert(tp, agno, agbno, len, 0);
2927 return 0;
2928
2929err:
2930 xfs_trans_brelse(tp, agbp);
1da177e4
LT
2931 return error;
2932}