// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_ag.h"
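
/*
 * Insert the extent [bno, bno + len) into the per-AG busy extent rbtree,
 * keyed by starting block, and add it to the transaction's list of busy
 * extents. Busy extents never overlap, which is what the asserts in the
 * insertion loop below rely on.
 */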
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = pag->pag_agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);

	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate. The busy extent tree lock (pagb_lock) is taken and released
 * internally, so callers must not hold it around this call. This function
 * returns 0 for no overlapping busy extent, -1 for an overlapping but not
 * exact busy extent, and 1 for an exact match. A non-zero return therefore
 * indicates an overlap that will require a synchronous transaction, while
 * still letting the caller distinguish a partial from an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	/* find closest start bno overlap */
	spin_lock(&pag->pagb_lock);
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	return match;
}
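
/*
 * Illustrative sketch, not a caller in this file: per the comment above, a
 * non-zero return means the overlapping blocks were freed in a transaction
 * that may not yet be on stable storage, so a caller reusing them would have
 * to make its own transaction synchronous. Given a hypothetical caller with
 * transaction tp allocating [bno, bno + len):
 *
 *	if (xfs_extent_busy_search(mp, pag, bno, len))
 *		xfs_trans_set_sync(tp);
 */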

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent. If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation. We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded. Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 *
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *              bbno           bend
		 *              +BBBBBBBBBBBBBBBBB+
		 *    +---------+
		 *    fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 * Trim the busy extent to start at fend; the portion that
		 * overlapped the free extent is now unbusy.
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 *
		 * Shorten the busy extent to end at fbno.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely by
 * trimming or removing any busy extents that overlap it, forcing the log
 * and restarting the search where an overlap cannot be resolved in place.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						   userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
}
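
/*
 * Note that any busy overlap against a userdata allocation takes the
 * force-log path in xfs_extent_busy_update_extent() above; only metadata
 * allocations may trim or remove busy extents in place.
 */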

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy. If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * waiting for this generation to change; the state of a specific extent must
 * always be confirmed with another call to xfs_extent_busy_trim() before it
 * can be used.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, so we always want to choose
			 * the option that produces forward allocation
			 * patterns.
			 *
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 *
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation taking
			 * the segment at "fbno", which would be a backward
			 * allocation. We only use the segment at "bend" if it
			 * is much larger than the current requested size,
			 * because in that case there's a good chance
			 * subsequent allocations will be contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfect */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:
	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					   fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as the failure indication. All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}
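
/*
 * Illustrative sketch, not a caller in this file: the busy generation
 * returned through *busy_gen pairs with xfs_extent_busy_flush() below. A
 * caller whose trimmed extent came back too small might wait and retry:
 *
 *	if (xfs_extent_busy_trim(args, &bno, &len, &busy_gen) &&
 *	    len < args->minlen)
 *		xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
 *		// ...then restart the search with the original extent
 *
 * As the comment above notes, the extent must be re-checked after the wait,
 * because the busy list is not guaranteed to be empty.
 */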
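
/*
 * Drop a single busy extent: remove it from the rbtree if it is still
 * tracked there (a zero length means xfs_extent_busy_update_extent() already
 * erased it), then take it off the transaction/CIL busy list and free it.
 */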
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
				busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}
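
/*
 * Drop the busy extent tree lock and the perag reference, bumping the busy
 * generation and waking any waiters if at least one extent was cleared.
 */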
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded and mark
 * them as undergoing a discard operation instead; they stay in the tree
 * (flagged XFS_EXTENT_BUSY_DISCARDED) until the discard completes.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG: force the log so the busy extents
 * can be cleared, then wait for the AG's busy generation to change from
 * busy_gen.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT		(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

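/*
 * Wait for the busy extent trees of all AGs to drain. Unlike
 * xfs_extent_busy_flush() this does not force the log, so the busy extents
 * must already be on their way to being cleared.
 */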
void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	DEFINE_WAIT		(wait);
	xfs_agnumber_t		agno;

	for_each_perag(mp, agno, pag) {
		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in,
 * and by start block within each AG.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	const struct list_head	*l1,
	const struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}
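
/*
 * Illustrative sketch, not code from this file: a caller holding a list of
 * busy extents spanning several AGs (here the hypothetical extent_list)
 * would sort it with list_sort(), e.g.:
 *
 *	list_sort(NULL, &extent_list, xfs_extent_busy_ag_cmp);
 *
 * Sorting by AG (and then by block number) lets the list be processed with
 * one AG's lock held at a time, as xfs_extent_busy_clear() above does.
 */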