// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>
/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};
static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)
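/*
 * Example (sketch, not part of the build): callers may only pass bits from
 * XFS_ICWALK_FLAGS_VALID; internal code ORs the high private bits on top,
 * as xfs_blockgc_free_dquots() does further down in this file:
 *
 *	struct xfs_icwalk	icw = {0};
 *
 *	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
 *
 * The BUILD_BUG_ON() at the bottom of xfs_icwalk() verifies that the two
 * bit ranges never overlap.
 */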
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = NULLAGINO;

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
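/*
 * Worked example (assuming the default xfs_syncd_centisecs of 3000, i.e. a
 * 30 second sync period): the requeue interval above is 3000 / 6 * 10 =
 * 5000 msecs, so background reclaim reschedules itself at one sixth of the
 * sync period.
 */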
/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}
/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}
/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}
/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}
/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
	}
}
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf	*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
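/*
 * Example (sketch, not part of the build): a typical metadata-path caller
 * looks up an inode with the ILOCK held and drops its reference with
 * xfs_irele() when done:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */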
/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}
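/*
 * Example (sketch, not part of the build): with the AGI buffer locked to
 * fence off allocation and freeing, a scrub-style caller might probe the
 * cache like this and fall back to the on-disk inode btree on -ENOENT or
 * -EAGAIN:
 *
 *	bool	inuse;
 *	int	error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (!error && inuse)
 *		... the cached inode is allocated ...
 */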
/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}
/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}
/* Reclaim sick inodes if we're unmounting or the fs went down. */
static bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}
/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}
/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}
/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}
/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}
static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}
static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}
/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}
void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}
void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}
/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}
/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}
/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}
/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}
/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	xfs_inodegc_flush(mp);
	return 0;
}
/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now.  If it
	 * wasn't queued, it will not be requeued.  Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}
/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}
/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}
/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}
/*
 * Process an inode.  Each processing function must handle any state changes
 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}
/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **) batch, first_index,
				XFS_LOOKUP_BATCH, goal);
		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that lead
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, goal) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_put(pag);
				break;
			}
		}
	}
	return last_error;
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}
#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif
/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static void
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	trace_xfs_inode_inactivating(ip);
	xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
}
void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		xfs_iflags_set(ip, XFS_INACTIVATING);
		xfs_inodegc_inactivate(ip);
	}
}
/*
 * Expedite all pending inodegc work to run immediately. This does not wait for
 * completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
void
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	flush_workqueue(mp->m_inodegc_wq);
}
/*
 * Flush all the pending work and then disable the inode inactivation background
 * workers and wait for them to stop.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	if (!xfs_clear_inodegc_enabled(mp))
		return;

	xfs_inodegc_queue_all(mp);
	drain_workqueue(mp->m_inodegc_wq);

	trace_xfs_inodegc_stop(mp, __return_address);
}
/*
 * Enable the inode inactivation background workers and schedule deferred inode
 * inactivation work if there is any.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	if (__percpu_counter_compare(&mp->m_frextents,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	return false;
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */
/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}
/*
 * Upper bound on the number of inodes in each AG that can be queued for
 * inactivation at any given time, to avoid monopolizing the workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If the current thread is running a transaction, we don't ever want to
 * wait for other transactions because that could introduce a deadlock.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->journal_info)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}
/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu_ptr(gc);
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
	put_cpu_ptr(gc);

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}
/*
 * Fold the dead CPU inodegc queue into the current CPUs queue.
 */
void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*dead_gc, *gc;
	struct llist_node	*first, *last;
	unsigned int		count = 0;

	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_delayed_work_sync(&dead_gc->work);

	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
		count++;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;

	/* Add pending work to current CPU */
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);

	if (xfs_is_inodegc_enabled(mp)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
	}
	put_cpu_ptr(gc);
}
/*
 * We set the inode flag atomically with the radix tree tag. Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean, it
 * still may be under IO and hence we have to wait for IO completion to occur
 * before we can reclaim the inode. The background reclaim path handles this
 * more efficiently than we can here, so simply let background reclaim tear down
 * all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}
/*
 * Register a phony shrinker so that we can run background inodegc sooner when
 * there's memory pressure.  Inactivation does not itself free any memory but
 * it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions won't
 * make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
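/*
 * Worked example (assuming DEF_PRIORITY == 12 and the seeks == 0 behaviour
 * of do_shrink_slab(), where delta = freeable / 2): COUNT is 1 << 12 = 4096,
 * so each shrinker round contributes 4096 / 2 = 2048 toward the scan target,
 * while BATCH is 4096 / 2 + 1 = 2049.  The first round's 2048 falls one
 * short of the batch and is deferred; the second round's accumulated 4096
 * crosses it, and only then does the scan function run.
 */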
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}
static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}
/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
}
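/*
 * Example (sketch, not part of the build): the mount path is expected to
 * register this shrinker once the per-cpu inodegc state exists, and the
 * teardown path drops it with the stock unregister_shrinker() helper:
 *
 *	error = xfs_inodegc_register_shrinker(mp);
 *	if (error)
 *		goto out_fail;
 *	...
 *	unregister_shrinker(&mp->m_inodegc_shrinker);
 */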