1 // SPDX-License-Identifier: GPL-2.0-only
3 * (C) 1997 Linus Torvalds
4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
6 #include <linux/export.h>
8 #include <linux/filelock.h>
10 #include <linux/backing-dev.h>
11 #include <linux/hash.h>
12 #include <linux/swap.h>
13 #include <linux/security.h>
14 #include <linux/cdev.h>
15 #include <linux/memblock.h>
16 #include <linux/fsnotify.h>
17 #include <linux/mount.h>
18 #include <linux/posix_acl.h>
19 #include <linux/buffer_head.h> /* for inode_has_buffers */
20 #include <linux/ratelimit.h>
21 #include <linux/list_lru.h>
22 #include <linux/iversion.h>
23 #include <trace/events/writeback.h>
27 * Inode locking rules:
29 * inode->i_lock protects:
30 * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
31 * Inode LRU list locks protect:
32 * inode->i_sb->s_inode_lru, inode->i_lru
33 * inode->i_sb->s_inode_list_lock protects:
34 * inode->i_sb->s_inodes, inode->i_sb_list
35 * bdi->wb.list_lock protects:
36 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
37 * inode_hash_lock protects:
38 * inode_hashtable, inode->i_hash
42 * inode->i_sb->s_inode_list_lock
44 * Inode LRU list locks
50 * inode->i_sb->s_inode_list_lock
57 static unsigned int i_hash_mask __read_mostly
;
58 static unsigned int i_hash_shift __read_mostly
;
59 static struct hlist_head
*inode_hashtable __read_mostly
;
60 static __cacheline_aligned_in_smp
DEFINE_SPINLOCK(inode_hash_lock
);
63 * Empty aops. Can be used for the cases where the user does not
64 * define any of the address_space operations.
66 const struct address_space_operations empty_aops
= {
68 EXPORT_SYMBOL(empty_aops
);
70 static DEFINE_PER_CPU(unsigned long, nr_inodes
);
71 static DEFINE_PER_CPU(unsigned long, nr_unused
);
73 static struct kmem_cache
*inode_cachep __read_mostly
;
75 static long get_nr_inodes(void)
79 for_each_possible_cpu(i
)
80 sum
+= per_cpu(nr_inodes
, i
);
81 return sum
< 0 ? 0 : sum
;
84 static inline long get_nr_inodes_unused(void)
88 for_each_possible_cpu(i
)
89 sum
+= per_cpu(nr_unused
, i
);
90 return sum
< 0 ? 0 : sum
;
93 long get_nr_dirty_inodes(void)
95 /* not actually dirty inodes, but a wild approximation */
96 long nr_dirty
= get_nr_inodes() - get_nr_inodes_unused();
97 return nr_dirty
> 0 ? nr_dirty
: 0;
101 * Handle nr_inode sysctl
105 * Statistics gathering..
107 static struct inodes_stat_t inodes_stat
;
109 static int proc_nr_inodes(struct ctl_table
*table
, int write
, void *buffer
,
110 size_t *lenp
, loff_t
*ppos
)
112 inodes_stat
.nr_inodes
= get_nr_inodes();
113 inodes_stat
.nr_unused
= get_nr_inodes_unused();
114 return proc_doulongvec_minmax(table
, write
, buffer
, lenp
, ppos
);
117 static struct ctl_table inodes_sysctls
[] = {
119 .procname
= "inode-nr",
120 .data
= &inodes_stat
,
121 .maxlen
= 2*sizeof(long),
123 .proc_handler
= proc_nr_inodes
,
126 .procname
= "inode-state",
127 .data
= &inodes_stat
,
128 .maxlen
= 7*sizeof(long),
130 .proc_handler
= proc_nr_inodes
,
135 static int __init
init_fs_inode_sysctls(void)
137 register_sysctl_init("fs", inodes_sysctls
);
140 early_initcall(init_fs_inode_sysctls
);
143 static int no_open(struct inode
*inode
, struct file
*file
)
149 * inode_init_always - perform inode structure initialisation
150 * @sb: superblock inode belongs to
151 * @inode: inode to initialise
153 * These are initializations that need to be done on every inode
154 * allocation as the fields are not initialised by slab allocation.
156 int inode_init_always(struct super_block
*sb
, struct inode
*inode
)
158 static const struct inode_operations empty_iops
;
159 static const struct file_operations no_open_fops
= {.open
= no_open
};
160 struct address_space
*const mapping
= &inode
->i_data
;
163 inode
->i_blkbits
= sb
->s_blocksize_bits
;
165 atomic64_set(&inode
->i_sequence
, 0);
166 atomic_set(&inode
->i_count
, 1);
167 inode
->i_op
= &empty_iops
;
168 inode
->i_fop
= &no_open_fops
;
170 inode
->__i_nlink
= 1;
171 inode
->i_opflags
= 0;
173 inode
->i_opflags
|= IOP_XATTR
;
174 i_uid_write(inode
, 0);
175 i_gid_write(inode
, 0);
176 atomic_set(&inode
->i_writecount
, 0);
178 inode
->i_write_hint
= WRITE_LIFE_NOT_SET
;
181 inode
->i_generation
= 0;
182 inode
->i_pipe
= NULL
;
183 inode
->i_cdev
= NULL
;
184 inode
->i_link
= NULL
;
185 inode
->i_dir_seq
= 0;
187 inode
->dirtied_when
= 0;
189 #ifdef CONFIG_CGROUP_WRITEBACK
190 inode
->i_wb_frn_winner
= 0;
191 inode
->i_wb_frn_avg_time
= 0;
192 inode
->i_wb_frn_history
= 0;
195 spin_lock_init(&inode
->i_lock
);
196 lockdep_set_class(&inode
->i_lock
, &sb
->s_type
->i_lock_key
);
198 init_rwsem(&inode
->i_rwsem
);
199 lockdep_set_class(&inode
->i_rwsem
, &sb
->s_type
->i_mutex_key
);
201 atomic_set(&inode
->i_dio_count
, 0);
203 mapping
->a_ops
= &empty_aops
;
204 mapping
->host
= inode
;
207 atomic_set(&mapping
->i_mmap_writable
, 0);
208 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
209 atomic_set(&mapping
->nr_thps
, 0);
211 mapping_set_gfp_mask(mapping
, GFP_HIGHUSER_MOVABLE
);
212 mapping
->private_data
= NULL
;
213 mapping
->writeback_index
= 0;
214 init_rwsem(&mapping
->invalidate_lock
);
215 lockdep_set_class_and_name(&mapping
->invalidate_lock
,
216 &sb
->s_type
->invalidate_lock_key
,
217 "mapping.invalidate_lock");
218 inode
->i_private
= NULL
;
219 inode
->i_mapping
= mapping
;
220 INIT_HLIST_HEAD(&inode
->i_dentry
); /* buggered by rcu freeing */
221 #ifdef CONFIG_FS_POSIX_ACL
222 inode
->i_acl
= inode
->i_default_acl
= ACL_NOT_CACHED
;
225 #ifdef CONFIG_FSNOTIFY
226 inode
->i_fsnotify_mask
= 0;
228 inode
->i_flctx
= NULL
;
230 if (unlikely(security_inode_alloc(inode
)))
232 this_cpu_inc(nr_inodes
);
236 EXPORT_SYMBOL(inode_init_always
);
238 void free_inode_nonrcu(struct inode
*inode
)
240 kmem_cache_free(inode_cachep
, inode
);
242 EXPORT_SYMBOL(free_inode_nonrcu
);
244 static void i_callback(struct rcu_head
*head
)
246 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
247 if (inode
->free_inode
)
248 inode
->free_inode(inode
);
250 free_inode_nonrcu(inode
);
253 static struct inode
*alloc_inode(struct super_block
*sb
)
255 const struct super_operations
*ops
= sb
->s_op
;
258 if (ops
->alloc_inode
)
259 inode
= ops
->alloc_inode(sb
);
261 inode
= alloc_inode_sb(sb
, inode_cachep
, GFP_KERNEL
);
266 if (unlikely(inode_init_always(sb
, inode
))) {
267 if (ops
->destroy_inode
) {
268 ops
->destroy_inode(inode
);
269 if (!ops
->free_inode
)
272 inode
->free_inode
= ops
->free_inode
;
273 i_callback(&inode
->i_rcu
);
280 void __destroy_inode(struct inode
*inode
)
282 BUG_ON(inode_has_buffers(inode
));
283 inode_detach_wb(inode
);
284 security_inode_free(inode
);
285 fsnotify_inode_delete(inode
);
286 locks_free_lock_context(inode
);
287 if (!inode
->i_nlink
) {
288 WARN_ON(atomic_long_read(&inode
->i_sb
->s_remove_count
) == 0);
289 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
292 #ifdef CONFIG_FS_POSIX_ACL
293 if (inode
->i_acl
&& !is_uncached_acl(inode
->i_acl
))
294 posix_acl_release(inode
->i_acl
);
295 if (inode
->i_default_acl
&& !is_uncached_acl(inode
->i_default_acl
))
296 posix_acl_release(inode
->i_default_acl
);
298 this_cpu_dec(nr_inodes
);
300 EXPORT_SYMBOL(__destroy_inode
);
302 static void destroy_inode(struct inode
*inode
)
304 const struct super_operations
*ops
= inode
->i_sb
->s_op
;
306 BUG_ON(!list_empty(&inode
->i_lru
));
307 __destroy_inode(inode
);
308 if (ops
->destroy_inode
) {
309 ops
->destroy_inode(inode
);
310 if (!ops
->free_inode
)
313 inode
->free_inode
= ops
->free_inode
;
314 call_rcu(&inode
->i_rcu
, i_callback
);
318 * drop_nlink - directly drop an inode's link count
321 * This is a low-level filesystem helper to replace any
322 * direct filesystem manipulation of i_nlink. In cases
323 * where we are attempting to track writes to the
324 * filesystem, a decrement to zero means an imminent
325 * write when the file is truncated and actually unlinked
328 void drop_nlink(struct inode
*inode
)
330 WARN_ON(inode
->i_nlink
== 0);
333 atomic_long_inc(&inode
->i_sb
->s_remove_count
);
335 EXPORT_SYMBOL(drop_nlink
);
338 * clear_nlink - directly zero an inode's link count
341 * This is a low-level filesystem helper to replace any
342 * direct filesystem manipulation of i_nlink. See
343 * drop_nlink() for why we care about i_nlink hitting zero.
345 void clear_nlink(struct inode
*inode
)
347 if (inode
->i_nlink
) {
348 inode
->__i_nlink
= 0;
349 atomic_long_inc(&inode
->i_sb
->s_remove_count
);
352 EXPORT_SYMBOL(clear_nlink
);
355 * set_nlink - directly set an inode's link count
357 * @nlink: new nlink (should be non-zero)
359 * This is a low-level filesystem helper to replace any
360 * direct filesystem manipulation of i_nlink.
362 void set_nlink(struct inode
*inode
, unsigned int nlink
)
367 /* Yes, some filesystems do change nlink from zero to one */
368 if (inode
->i_nlink
== 0)
369 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
371 inode
->__i_nlink
= nlink
;
374 EXPORT_SYMBOL(set_nlink
);
377 * inc_nlink - directly increment an inode's link count
380 * This is a low-level filesystem helper to replace any
381 * direct filesystem manipulation of i_nlink. Currently,
382 * it is only here for parity with dec_nlink().
384 void inc_nlink(struct inode
*inode
)
386 if (unlikely(inode
->i_nlink
== 0)) {
387 WARN_ON(!(inode
->i_state
& I_LINKABLE
));
388 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
393 EXPORT_SYMBOL(inc_nlink
);
395 static void __address_space_init_once(struct address_space
*mapping
)
397 xa_init_flags(&mapping
->i_pages
, XA_FLAGS_LOCK_IRQ
| XA_FLAGS_ACCOUNT
);
398 init_rwsem(&mapping
->i_mmap_rwsem
);
399 INIT_LIST_HEAD(&mapping
->private_list
);
400 spin_lock_init(&mapping
->private_lock
);
401 mapping
->i_mmap
= RB_ROOT_CACHED
;
404 void address_space_init_once(struct address_space
*mapping
)
406 memset(mapping
, 0, sizeof(*mapping
));
407 __address_space_init_once(mapping
);
409 EXPORT_SYMBOL(address_space_init_once
);
412 * These are initializations that only need to be done
413 * once, because the fields are idempotent across use
414 * of the inode, so let the slab aware of that.
416 void inode_init_once(struct inode
*inode
)
418 memset(inode
, 0, sizeof(*inode
));
419 INIT_HLIST_NODE(&inode
->i_hash
);
420 INIT_LIST_HEAD(&inode
->i_devices
);
421 INIT_LIST_HEAD(&inode
->i_io_list
);
422 INIT_LIST_HEAD(&inode
->i_wb_list
);
423 INIT_LIST_HEAD(&inode
->i_lru
);
424 INIT_LIST_HEAD(&inode
->i_sb_list
);
425 __address_space_init_once(&inode
->i_data
);
426 i_size_ordered_init(inode
);
428 EXPORT_SYMBOL(inode_init_once
);
430 static void init_once(void *foo
)
432 struct inode
*inode
= (struct inode
*) foo
;
434 inode_init_once(inode
);
438 * inode->i_lock must be held
440 void __iget(struct inode
*inode
)
442 atomic_inc(&inode
->i_count
);
446 * get additional reference to inode; caller must already hold one.
448 void ihold(struct inode
*inode
)
450 WARN_ON(atomic_inc_return(&inode
->i_count
) < 2);
452 EXPORT_SYMBOL(ihold
);
454 static void __inode_add_lru(struct inode
*inode
, bool rotate
)
456 if (inode
->i_state
& (I_DIRTY_ALL
| I_SYNC
| I_FREEING
| I_WILL_FREE
))
458 if (atomic_read(&inode
->i_count
))
460 if (!(inode
->i_sb
->s_flags
& SB_ACTIVE
))
462 if (!mapping_shrinkable(&inode
->i_data
))
465 if (list_lru_add(&inode
->i_sb
->s_inode_lru
, &inode
->i_lru
))
466 this_cpu_inc(nr_unused
);
468 inode
->i_state
|= I_REFERENCED
;
472 * Add inode to LRU if needed (inode is unused and clean).
474 * Needs inode->i_lock held.
476 void inode_add_lru(struct inode
*inode
)
478 __inode_add_lru(inode
, false);
481 static void inode_lru_list_del(struct inode
*inode
)
483 if (list_lru_del(&inode
->i_sb
->s_inode_lru
, &inode
->i_lru
))
484 this_cpu_dec(nr_unused
);
488 * inode_sb_list_add - add inode to the superblock list of inodes
489 * @inode: inode to add
491 void inode_sb_list_add(struct inode
*inode
)
493 spin_lock(&inode
->i_sb
->s_inode_list_lock
);
494 list_add(&inode
->i_sb_list
, &inode
->i_sb
->s_inodes
);
495 spin_unlock(&inode
->i_sb
->s_inode_list_lock
);
497 EXPORT_SYMBOL_GPL(inode_sb_list_add
);
499 static inline void inode_sb_list_del(struct inode
*inode
)
501 if (!list_empty(&inode
->i_sb_list
)) {
502 spin_lock(&inode
->i_sb
->s_inode_list_lock
);
503 list_del_init(&inode
->i_sb_list
);
504 spin_unlock(&inode
->i_sb
->s_inode_list_lock
);
508 static unsigned long hash(struct super_block
*sb
, unsigned long hashval
)
512 tmp
= (hashval
* (unsigned long)sb
) ^ (GOLDEN_RATIO_PRIME
+ hashval
) /
514 tmp
= tmp
^ ((tmp
^ GOLDEN_RATIO_PRIME
) >> i_hash_shift
);
515 return tmp
& i_hash_mask
;
519 * __insert_inode_hash - hash an inode
520 * @inode: unhashed inode
521 * @hashval: unsigned long value used to locate this object in the
524 * Add an inode to the inode hash for this superblock.
526 void __insert_inode_hash(struct inode
*inode
, unsigned long hashval
)
528 struct hlist_head
*b
= inode_hashtable
+ hash(inode
->i_sb
, hashval
);
530 spin_lock(&inode_hash_lock
);
531 spin_lock(&inode
->i_lock
);
532 hlist_add_head_rcu(&inode
->i_hash
, b
);
533 spin_unlock(&inode
->i_lock
);
534 spin_unlock(&inode_hash_lock
);
536 EXPORT_SYMBOL(__insert_inode_hash
);
539 * __remove_inode_hash - remove an inode from the hash
540 * @inode: inode to unhash
542 * Remove an inode from the superblock.
544 void __remove_inode_hash(struct inode
*inode
)
546 spin_lock(&inode_hash_lock
);
547 spin_lock(&inode
->i_lock
);
548 hlist_del_init_rcu(&inode
->i_hash
);
549 spin_unlock(&inode
->i_lock
);
550 spin_unlock(&inode_hash_lock
);
552 EXPORT_SYMBOL(__remove_inode_hash
);
554 void dump_mapping(const struct address_space
*mapping
)
557 const struct address_space_operations
*a_ops
;
558 struct hlist_node
*dentry_first
;
559 struct dentry
*dentry_ptr
;
560 struct dentry dentry
;
564 * If mapping is an invalid pointer, we don't want to crash
565 * accessing it, so probe everything depending on it carefully.
567 if (get_kernel_nofault(host
, &mapping
->host
) ||
568 get_kernel_nofault(a_ops
, &mapping
->a_ops
)) {
569 pr_warn("invalid mapping:%px\n", mapping
);
574 pr_warn("aops:%ps\n", a_ops
);
578 if (get_kernel_nofault(dentry_first
, &host
->i_dentry
.first
) ||
579 get_kernel_nofault(ino
, &host
->i_ino
)) {
580 pr_warn("aops:%ps invalid inode:%px\n", a_ops
, host
);
585 pr_warn("aops:%ps ino:%lx\n", a_ops
, ino
);
589 dentry_ptr
= container_of(dentry_first
, struct dentry
, d_u
.d_alias
);
590 if (get_kernel_nofault(dentry
, dentry_ptr
)) {
591 pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
592 a_ops
, ino
, dentry_ptr
);
597 * if dentry is corrupted, the %pd handler may still crash,
598 * but it's unlikely that we reach here with a corrupt mapping
600 pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops
, ino
, &dentry
);
603 void clear_inode(struct inode
*inode
)
606 * We have to cycle the i_pages lock here because reclaim can be in the
607 * process of removing the last page (in __filemap_remove_folio())
608 * and we must not free the mapping under it.
610 xa_lock_irq(&inode
->i_data
.i_pages
);
611 BUG_ON(inode
->i_data
.nrpages
);
613 * Almost always, mapping_empty(&inode->i_data) here; but there are
614 * two known and long-standing ways in which nodes may get left behind
615 * (when deep radix-tree node allocation failed partway; or when THP
616 * collapse_file() failed). Until those two known cases are cleaned up,
617 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
618 * nor even WARN_ON(!mapping_empty).
620 xa_unlock_irq(&inode
->i_data
.i_pages
);
621 BUG_ON(!list_empty(&inode
->i_data
.private_list
));
622 BUG_ON(!(inode
->i_state
& I_FREEING
));
623 BUG_ON(inode
->i_state
& I_CLEAR
);
624 BUG_ON(!list_empty(&inode
->i_wb_list
));
625 /* don't need i_lock here, no concurrent mods to i_state */
626 inode
->i_state
= I_FREEING
| I_CLEAR
;
628 EXPORT_SYMBOL(clear_inode
);
631 * Free the inode passed in, removing it from the lists it is still connected
632 * to. We remove any pages still attached to the inode and wait for any IO that
633 * is still in progress before finally destroying the inode.
635 * An inode must already be marked I_FREEING so that we avoid the inode being
636 * moved back onto lists if we race with other code that manipulates the lists
637 * (e.g. writeback_single_inode). The caller is responsible for setting this.
639 * An inode must already be removed from the LRU list before being evicted from
640 * the cache. This should occur atomically with setting the I_FREEING state
641 * flag, so no inodes here should ever be on the LRU when being evicted.
643 static void evict(struct inode
*inode
)
645 const struct super_operations
*op
= inode
->i_sb
->s_op
;
647 BUG_ON(!(inode
->i_state
& I_FREEING
));
648 BUG_ON(!list_empty(&inode
->i_lru
));
650 if (!list_empty(&inode
->i_io_list
))
651 inode_io_list_del(inode
);
653 inode_sb_list_del(inode
);
656 * Wait for flusher thread to be done with the inode so that filesystem
657 * does not start destroying it while writeback is still running. Since
658 * the inode has I_FREEING set, flusher thread won't start new work on
659 * the inode. We just have to wait for running writeback to finish.
661 inode_wait_for_writeback(inode
);
663 if (op
->evict_inode
) {
664 op
->evict_inode(inode
);
666 truncate_inode_pages_final(&inode
->i_data
);
669 if (S_ISCHR(inode
->i_mode
) && inode
->i_cdev
)
672 remove_inode_hash(inode
);
674 spin_lock(&inode
->i_lock
);
675 wake_up_bit(&inode
->i_state
, __I_NEW
);
676 BUG_ON(inode
->i_state
!= (I_FREEING
| I_CLEAR
));
677 spin_unlock(&inode
->i_lock
);
679 destroy_inode(inode
);
683 * dispose_list - dispose of the contents of a local list
684 * @head: the head of the list to free
686 * Dispose-list gets a local list with local inodes in it, so it doesn't
687 * need to worry about list corruption and SMP locks.
689 static void dispose_list(struct list_head
*head
)
691 while (!list_empty(head
)) {
694 inode
= list_first_entry(head
, struct inode
, i_lru
);
695 list_del_init(&inode
->i_lru
);
703 * evict_inodes - evict all evictable inodes for a superblock
704 * @sb: superblock to operate on
706 * Make sure that no inodes with zero refcount are retained. This is
707 * called by superblock shutdown after having SB_ACTIVE flag removed,
708 * so any inode reaching zero refcount during or after that call will
709 * be immediately evicted.
711 void evict_inodes(struct super_block
*sb
)
713 struct inode
*inode
, *next
;
717 spin_lock(&sb
->s_inode_list_lock
);
718 list_for_each_entry_safe(inode
, next
, &sb
->s_inodes
, i_sb_list
) {
719 if (atomic_read(&inode
->i_count
))
722 spin_lock(&inode
->i_lock
);
723 if (inode
->i_state
& (I_NEW
| I_FREEING
| I_WILL_FREE
)) {
724 spin_unlock(&inode
->i_lock
);
728 inode
->i_state
|= I_FREEING
;
729 inode_lru_list_del(inode
);
730 spin_unlock(&inode
->i_lock
);
731 list_add(&inode
->i_lru
, &dispose
);
734 * We can have a ton of inodes to evict at unmount time given
735 * enough memory, check to see if we need to go to sleep for a
736 * bit so we don't livelock.
738 if (need_resched()) {
739 spin_unlock(&sb
->s_inode_list_lock
);
741 dispose_list(&dispose
);
745 spin_unlock(&sb
->s_inode_list_lock
);
747 dispose_list(&dispose
);
749 EXPORT_SYMBOL_GPL(evict_inodes
);
752 * invalidate_inodes - attempt to free all inodes on a superblock
753 * @sb: superblock to operate on
755 * Attempts to free all inodes (including dirty inodes) for a given superblock.
757 void invalidate_inodes(struct super_block
*sb
)
759 struct inode
*inode
, *next
;
763 spin_lock(&sb
->s_inode_list_lock
);
764 list_for_each_entry_safe(inode
, next
, &sb
->s_inodes
, i_sb_list
) {
765 spin_lock(&inode
->i_lock
);
766 if (inode
->i_state
& (I_NEW
| I_FREEING
| I_WILL_FREE
)) {
767 spin_unlock(&inode
->i_lock
);
770 if (atomic_read(&inode
->i_count
)) {
771 spin_unlock(&inode
->i_lock
);
775 inode
->i_state
|= I_FREEING
;
776 inode_lru_list_del(inode
);
777 spin_unlock(&inode
->i_lock
);
778 list_add(&inode
->i_lru
, &dispose
);
779 if (need_resched()) {
780 spin_unlock(&sb
->s_inode_list_lock
);
782 dispose_list(&dispose
);
786 spin_unlock(&sb
->s_inode_list_lock
);
788 dispose_list(&dispose
);
792 * Isolate the inode from the LRU in preparation for freeing it.
794 * If the inode has the I_REFERENCED flag set, then it means that it has been
795 * used recently - the flag is set in iput_final(). When we encounter such an
796 * inode, clear the flag and move it to the back of the LRU so it gets another
797 * pass through the LRU before it gets reclaimed. This is necessary because of
798 * the fact we are doing lazy LRU updates to minimise lock contention so the
799 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
800 * with this flag set because they are the inodes that are out of order.
802 static enum lru_status
inode_lru_isolate(struct list_head
*item
,
803 struct list_lru_one
*lru
, spinlock_t
*lru_lock
, void *arg
)
805 struct list_head
*freeable
= arg
;
806 struct inode
*inode
= container_of(item
, struct inode
, i_lru
);
809 * We are inverting the lru lock/inode->i_lock here, so use a
810 * trylock. If we fail to get the lock, just skip it.
812 if (!spin_trylock(&inode
->i_lock
))
816 * Inodes can get referenced, redirtied, or repopulated while
817 * they're already on the LRU, and this can make them
818 * unreclaimable for a while. Remove them lazily here; iput,
819 * sync, or the last page cache deletion will requeue them.
821 if (atomic_read(&inode
->i_count
) ||
822 (inode
->i_state
& ~I_REFERENCED
) ||
823 !mapping_shrinkable(&inode
->i_data
)) {
824 list_lru_isolate(lru
, &inode
->i_lru
);
825 spin_unlock(&inode
->i_lock
);
826 this_cpu_dec(nr_unused
);
830 /* Recently referenced inodes get one more pass */
831 if (inode
->i_state
& I_REFERENCED
) {
832 inode
->i_state
&= ~I_REFERENCED
;
833 spin_unlock(&inode
->i_lock
);
838 * On highmem systems, mapping_shrinkable() permits dropping
839 * page cache in order to free up struct inodes: lowmem might
840 * be under pressure before the cache inside the highmem zone.
842 if (inode_has_buffers(inode
) || !mapping_empty(&inode
->i_data
)) {
844 spin_unlock(&inode
->i_lock
);
845 spin_unlock(lru_lock
);
846 if (remove_inode_buffers(inode
)) {
848 reap
= invalidate_mapping_pages(&inode
->i_data
, 0, -1);
849 if (current_is_kswapd())
850 __count_vm_events(KSWAPD_INODESTEAL
, reap
);
852 __count_vm_events(PGINODESTEAL
, reap
);
853 mm_account_reclaimed_pages(reap
);
860 WARN_ON(inode
->i_state
& I_NEW
);
861 inode
->i_state
|= I_FREEING
;
862 list_lru_isolate_move(lru
, &inode
->i_lru
, freeable
);
863 spin_unlock(&inode
->i_lock
);
865 this_cpu_dec(nr_unused
);
870 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
871 * This is called from the superblock shrinker function with a number of inodes
872 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
873 * then are freed outside inode_lock by dispose_list().
875 long prune_icache_sb(struct super_block
*sb
, struct shrink_control
*sc
)
880 freed
= list_lru_shrink_walk(&sb
->s_inode_lru
, sc
,
881 inode_lru_isolate
, &freeable
);
882 dispose_list(&freeable
);
886 static void __wait_on_freeing_inode(struct inode
*inode
);
888 * Called with the inode lock held.
890 static struct inode
*find_inode(struct super_block
*sb
,
891 struct hlist_head
*head
,
892 int (*test
)(struct inode
*, void *),
895 struct inode
*inode
= NULL
;
898 hlist_for_each_entry(inode
, head
, i_hash
) {
899 if (inode
->i_sb
!= sb
)
901 if (!test(inode
, data
))
903 spin_lock(&inode
->i_lock
);
904 if (inode
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
905 __wait_on_freeing_inode(inode
);
908 if (unlikely(inode
->i_state
& I_CREATING
)) {
909 spin_unlock(&inode
->i_lock
);
910 return ERR_PTR(-ESTALE
);
913 spin_unlock(&inode
->i_lock
);
920 * find_inode_fast is the fast path version of find_inode, see the comment at
921 * iget_locked for details.
923 static struct inode
*find_inode_fast(struct super_block
*sb
,
924 struct hlist_head
*head
, unsigned long ino
)
926 struct inode
*inode
= NULL
;
929 hlist_for_each_entry(inode
, head
, i_hash
) {
930 if (inode
->i_ino
!= ino
)
932 if (inode
->i_sb
!= sb
)
934 spin_lock(&inode
->i_lock
);
935 if (inode
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
936 __wait_on_freeing_inode(inode
);
939 if (unlikely(inode
->i_state
& I_CREATING
)) {
940 spin_unlock(&inode
->i_lock
);
941 return ERR_PTR(-ESTALE
);
944 spin_unlock(&inode
->i_lock
);
951 * Each cpu owns a range of LAST_INO_BATCH numbers.
952 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
953 * to renew the exhausted range.
955 * This does not significantly increase overflow rate because every CPU can
956 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
957 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
958 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
959 * overflow rate by 2x, which does not seem too significant.
961 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
962 * error if st_ino won't fit in target struct field. Use 32bit counter
963 * here to attempt to avoid that.
965 #define LAST_INO_BATCH 1024
966 static DEFINE_PER_CPU(unsigned int, last_ino
);
968 unsigned int get_next_ino(void)
970 unsigned int *p
= &get_cpu_var(last_ino
);
971 unsigned int res
= *p
;
974 if (unlikely((res
& (LAST_INO_BATCH
-1)) == 0)) {
975 static atomic_t shared_last_ino
;
976 int next
= atomic_add_return(LAST_INO_BATCH
, &shared_last_ino
);
978 res
= next
- LAST_INO_BATCH
;
983 /* get_next_ino should not provide a 0 inode number */
987 put_cpu_var(last_ino
);
990 EXPORT_SYMBOL(get_next_ino
);
993 * new_inode_pseudo - obtain an inode
996 * Allocates a new inode for given superblock.
997 * Inode wont be chained in superblock s_inodes list
999 * - fs can't be unmount
1000 * - quotas, fsnotify, writeback can't work
1002 struct inode
*new_inode_pseudo(struct super_block
*sb
)
1004 struct inode
*inode
= alloc_inode(sb
);
1007 spin_lock(&inode
->i_lock
);
1009 spin_unlock(&inode
->i_lock
);
1015 * new_inode - obtain an inode
1018 * Allocates a new inode for given superblock. The default gfp_mask
1019 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
1020 * If HIGHMEM pages are unsuitable or it is known that pages allocated
1021 * for the page cache are not reclaimable or migratable,
1022 * mapping_set_gfp_mask() must be called with suitable flags on the
1023 * newly created inode's mapping
1026 struct inode
*new_inode(struct super_block
*sb
)
1028 struct inode
*inode
;
1030 inode
= new_inode_pseudo(sb
);
1032 inode_sb_list_add(inode
);
1035 EXPORT_SYMBOL(new_inode
);
1037 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1038 void lockdep_annotate_inode_mutex_key(struct inode
*inode
)
1040 if (S_ISDIR(inode
->i_mode
)) {
1041 struct file_system_type
*type
= inode
->i_sb
->s_type
;
1043 /* Set new key only if filesystem hasn't already changed it */
1044 if (lockdep_match_class(&inode
->i_rwsem
, &type
->i_mutex_key
)) {
1046 * ensure nobody is actually holding i_mutex
1048 // mutex_destroy(&inode->i_mutex);
1049 init_rwsem(&inode
->i_rwsem
);
1050 lockdep_set_class(&inode
->i_rwsem
,
1051 &type
->i_mutex_dir_key
);
1055 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key
);
1059 * unlock_new_inode - clear the I_NEW state and wake up any waiters
1060 * @inode: new inode to unlock
1062 * Called when the inode is fully initialised to clear the new state of the
1063 * inode and wake up anyone waiting for the inode to finish initialisation.
1065 void unlock_new_inode(struct inode
*inode
)
1067 lockdep_annotate_inode_mutex_key(inode
);
1068 spin_lock(&inode
->i_lock
);
1069 WARN_ON(!(inode
->i_state
& I_NEW
));
1070 inode
->i_state
&= ~I_NEW
& ~I_CREATING
;
1072 wake_up_bit(&inode
->i_state
, __I_NEW
);
1073 spin_unlock(&inode
->i_lock
);
1075 EXPORT_SYMBOL(unlock_new_inode
);
1077 void discard_new_inode(struct inode
*inode
)
1079 lockdep_annotate_inode_mutex_key(inode
);
1080 spin_lock(&inode
->i_lock
);
1081 WARN_ON(!(inode
->i_state
& I_NEW
));
1082 inode
->i_state
&= ~I_NEW
;
1084 wake_up_bit(&inode
->i_state
, __I_NEW
);
1085 spin_unlock(&inode
->i_lock
);
1088 EXPORT_SYMBOL(discard_new_inode
);
1091 * lock_two_inodes - lock two inodes (may be regular files but also dirs)
1093 * Lock any non-NULL argument. The caller must make sure that if he is passing
1094 * in two directories, one is not ancestor of the other. Zero, one or two
1095 * objects may be locked by this function.
1097 * @inode1: first inode to lock
1098 * @inode2: second inode to lock
1099 * @subclass1: inode lock subclass for the first lock obtained
1100 * @subclass2: inode lock subclass for the second lock obtained
1102 void lock_two_inodes(struct inode
*inode1
, struct inode
*inode2
,
1103 unsigned subclass1
, unsigned subclass2
)
1105 if (!inode1
|| !inode2
) {
1107 * Make sure @subclass1 will be used for the acquired lock.
1108 * This is not strictly necessary (no current caller cares) but
1109 * let's keep things consistent.
1112 swap(inode1
, inode2
);
1117 * If one object is directory and the other is not, we must make sure
1118 * to lock directory first as the other object may be its child.
1120 if (S_ISDIR(inode2
->i_mode
) == S_ISDIR(inode1
->i_mode
)) {
1121 if (inode1
> inode2
)
1122 swap(inode1
, inode2
);
1123 } else if (!S_ISDIR(inode1
->i_mode
))
1124 swap(inode1
, inode2
);
1127 inode_lock_nested(inode1
, subclass1
);
1128 if (inode2
&& inode2
!= inode1
)
1129 inode_lock_nested(inode2
, subclass2
);
1133 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1135 * Lock any non-NULL argument. Passed objects must not be directories.
1136 * Zero, one or two objects may be locked by this function.
1138 * @inode1: first inode to lock
1139 * @inode2: second inode to lock
1141 void lock_two_nondirectories(struct inode
*inode1
, struct inode
*inode2
)
1144 WARN_ON_ONCE(S_ISDIR(inode1
->i_mode
));
1146 WARN_ON_ONCE(S_ISDIR(inode2
->i_mode
));
1147 lock_two_inodes(inode1
, inode2
, I_MUTEX_NORMAL
, I_MUTEX_NONDIR2
);
1149 EXPORT_SYMBOL(lock_two_nondirectories
);
1152 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1153 * @inode1: first inode to unlock
1154 * @inode2: second inode to unlock
1156 void unlock_two_nondirectories(struct inode
*inode1
, struct inode
*inode2
)
1159 WARN_ON_ONCE(S_ISDIR(inode1
->i_mode
));
1160 inode_unlock(inode1
);
1162 if (inode2
&& inode2
!= inode1
) {
1163 WARN_ON_ONCE(S_ISDIR(inode2
->i_mode
));
1164 inode_unlock(inode2
);
1167 EXPORT_SYMBOL(unlock_two_nondirectories
);
1170 * inode_insert5 - obtain an inode from a mounted file system
1171 * @inode: pre-allocated inode to use for insert to cache
1172 * @hashval: hash value (usually inode number) to get
1173 * @test: callback used for comparisons between inodes
1174 * @set: callback used to initialize a new struct inode
1175 * @data: opaque data pointer to pass to @test and @set
1177 * Search for the inode specified by @hashval and @data in the inode cache,
1178 * and if present it is return it with an increased reference count. This is
1179 * a variant of iget5_locked() for callers that don't want to fail on memory
1180 * allocation of inode.
1182 * If the inode is not in cache, insert the pre-allocated inode to cache and
1183 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1184 * to fill it in before unlocking it via unlock_new_inode().
1186 * Note both @test and @set are called with the inode_hash_lock held, so can't
1189 struct inode
*inode_insert5(struct inode
*inode
, unsigned long hashval
,
1190 int (*test
)(struct inode
*, void *),
1191 int (*set
)(struct inode
*, void *), void *data
)
1193 struct hlist_head
*head
= inode_hashtable
+ hash(inode
->i_sb
, hashval
);
1197 spin_lock(&inode_hash_lock
);
1198 old
= find_inode(inode
->i_sb
, head
, test
, data
);
1199 if (unlikely(old
)) {
1201 * Uhhuh, somebody else created the same inode under us.
1202 * Use the old inode instead of the preallocated one.
1204 spin_unlock(&inode_hash_lock
);
1208 if (unlikely(inode_unhashed(old
))) {
1215 if (set
&& unlikely(set(inode
, data
))) {
1221 * Return the locked inode with I_NEW set, the
1222 * caller is responsible for filling in the contents
1224 spin_lock(&inode
->i_lock
);
1225 inode
->i_state
|= I_NEW
;
1226 hlist_add_head_rcu(&inode
->i_hash
, head
);
1227 spin_unlock(&inode
->i_lock
);
1230 * Add inode to the sb list if it's not already. It has I_NEW at this
1231 * point, so it should be safe to test i_sb_list locklessly.
1233 if (list_empty(&inode
->i_sb_list
))
1234 inode_sb_list_add(inode
);
1236 spin_unlock(&inode_hash_lock
);
1240 EXPORT_SYMBOL(inode_insert5
);
1243 * iget5_locked - obtain an inode from a mounted file system
1244 * @sb: super block of file system
1245 * @hashval: hash value (usually inode number) to get
1246 * @test: callback used for comparisons between inodes
1247 * @set: callback used to initialize a new struct inode
1248 * @data: opaque data pointer to pass to @test and @set
1250 * Search for the inode specified by @hashval and @data in the inode cache,
1251 * and if present it is return it with an increased reference count. This is
1252 * a generalized version of iget_locked() for file systems where the inode
1253 * number is not sufficient for unique identification of an inode.
1255 * If the inode is not in cache, allocate a new inode and return it locked,
1256 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1257 * before unlocking it via unlock_new_inode().
1259 * Note both @test and @set are called with the inode_hash_lock held, so can't
1262 struct inode
*iget5_locked(struct super_block
*sb
, unsigned long hashval
,
1263 int (*test
)(struct inode
*, void *),
1264 int (*set
)(struct inode
*, void *), void *data
)
1266 struct inode
*inode
= ilookup5(sb
, hashval
, test
, data
);
1269 struct inode
*new = alloc_inode(sb
);
1273 inode
= inode_insert5(new, hashval
, test
, set
, data
);
1274 if (unlikely(inode
!= new))
1280 EXPORT_SYMBOL(iget5_locked
);
1283 * iget_locked - obtain an inode from a mounted file system
1284 * @sb: super block of file system
1285 * @ino: inode number to get
1287 * Search for the inode specified by @ino in the inode cache and if present
1288 * return it with an increased reference count. This is for file systems
1289 * where the inode number is sufficient for unique identification of an inode.
1291 * If the inode is not in cache, allocate a new inode and return it locked,
1292 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1293 * before unlocking it via unlock_new_inode().
1295 struct inode
*iget_locked(struct super_block
*sb
, unsigned long ino
)
1297 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1298 struct inode
*inode
;
1300 spin_lock(&inode_hash_lock
);
1301 inode
= find_inode_fast(sb
, head
, ino
);
1302 spin_unlock(&inode_hash_lock
);
1306 wait_on_inode(inode
);
1307 if (unlikely(inode_unhashed(inode
))) {
1314 inode
= alloc_inode(sb
);
1318 spin_lock(&inode_hash_lock
);
1319 /* We released the lock, so.. */
1320 old
= find_inode_fast(sb
, head
, ino
);
1323 spin_lock(&inode
->i_lock
);
1324 inode
->i_state
= I_NEW
;
1325 hlist_add_head_rcu(&inode
->i_hash
, head
);
1326 spin_unlock(&inode
->i_lock
);
1327 inode_sb_list_add(inode
);
1328 spin_unlock(&inode_hash_lock
);
1330 /* Return the locked inode with I_NEW set, the
1331 * caller is responsible for filling in the contents
1337 * Uhhuh, somebody else created the same inode under
1338 * us. Use the old inode instead of the one we just
1341 spin_unlock(&inode_hash_lock
);
1342 destroy_inode(inode
);
1346 wait_on_inode(inode
);
1347 if (unlikely(inode_unhashed(inode
))) {
1354 EXPORT_SYMBOL(iget_locked
);
1357 * search the inode cache for a matching inode number.
1358 * If we find one, then the inode number we are trying to
1359 * allocate is not unique and so we should not use it.
1361 * Returns 1 if the inode number is unique, 0 if it is not.
1363 static int test_inode_iunique(struct super_block
*sb
, unsigned long ino
)
1365 struct hlist_head
*b
= inode_hashtable
+ hash(sb
, ino
);
1366 struct inode
*inode
;
1368 hlist_for_each_entry_rcu(inode
, b
, i_hash
) {
1369 if (inode
->i_ino
== ino
&& inode
->i_sb
== sb
)
1376 * iunique - get a unique inode number
1378 * @max_reserved: highest reserved inode number
1380 * Obtain an inode number that is unique on the system for a given
1381 * superblock. This is used by file systems that have no natural
1382 * permanent inode numbering system. An inode number is returned that
1383 * is higher than the reserved limit but unique.
1386 * With a large number of inodes live on the file system this function
1387 * currently becomes quite slow.
1389 ino_t
iunique(struct super_block
*sb
, ino_t max_reserved
)
1392 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1393 * error if st_ino won't fit in target struct field. Use 32bit counter
1394 * here to attempt to avoid that.
1396 static DEFINE_SPINLOCK(iunique_lock
);
1397 static unsigned int counter
;
1401 spin_lock(&iunique_lock
);
1403 if (counter
<= max_reserved
)
1404 counter
= max_reserved
+ 1;
1406 } while (!test_inode_iunique(sb
, res
));
1407 spin_unlock(&iunique_lock
);
1412 EXPORT_SYMBOL(iunique
);
1414 struct inode
*igrab(struct inode
*inode
)
1416 spin_lock(&inode
->i_lock
);
1417 if (!(inode
->i_state
& (I_FREEING
|I_WILL_FREE
))) {
1419 spin_unlock(&inode
->i_lock
);
1421 spin_unlock(&inode
->i_lock
);
1423 * Handle the case where s_op->clear_inode is not been
1424 * called yet, and somebody is calling igrab
1425 * while the inode is getting freed.
1431 EXPORT_SYMBOL(igrab
);
1434 * ilookup5_nowait - search for an inode in the inode cache
1435 * @sb: super block of file system to search
1436 * @hashval: hash value (usually inode number) to search for
1437 * @test: callback used for comparisons between inodes
1438 * @data: opaque data pointer to pass to @test
1440 * Search for the inode specified by @hashval and @data in the inode cache.
1441 * If the inode is in the cache, the inode is returned with an incremented
1444 * Note: I_NEW is not waited upon so you have to be very careful what you do
1445 * with the returned inode. You probably should be using ilookup5() instead.
1447 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1449 struct inode
*ilookup5_nowait(struct super_block
*sb
, unsigned long hashval
,
1450 int (*test
)(struct inode
*, void *), void *data
)
1452 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1453 struct inode
*inode
;
1455 spin_lock(&inode_hash_lock
);
1456 inode
= find_inode(sb
, head
, test
, data
);
1457 spin_unlock(&inode_hash_lock
);
1459 return IS_ERR(inode
) ? NULL
: inode
;
1461 EXPORT_SYMBOL(ilookup5_nowait
);
1464 * ilookup5 - search for an inode in the inode cache
1465 * @sb: super block of file system to search
1466 * @hashval: hash value (usually inode number) to search for
1467 * @test: callback used for comparisons between inodes
1468 * @data: opaque data pointer to pass to @test
1470 * Search for the inode specified by @hashval and @data in the inode cache,
1471 * and if the inode is in the cache, return the inode with an incremented
1472 * reference count. Waits on I_NEW before returning the inode.
1473 * returned with an incremented reference count.
1475 * This is a generalized version of ilookup() for file systems where the
1476 * inode number is not sufficient for unique identification of an inode.
1478 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1480 struct inode
*ilookup5(struct super_block
*sb
, unsigned long hashval
,
1481 int (*test
)(struct inode
*, void *), void *data
)
1483 struct inode
*inode
;
1485 inode
= ilookup5_nowait(sb
, hashval
, test
, data
);
1487 wait_on_inode(inode
);
1488 if (unlikely(inode_unhashed(inode
))) {
1495 EXPORT_SYMBOL(ilookup5
);
1498 * ilookup - search for an inode in the inode cache
1499 * @sb: super block of file system to search
1500 * @ino: inode number to search for
1502 * Search for the inode @ino in the inode cache, and if the inode is in the
1503 * cache, the inode is returned with an incremented reference count.
1505 struct inode
*ilookup(struct super_block
*sb
, unsigned long ino
)
1507 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1508 struct inode
*inode
;
1510 spin_lock(&inode_hash_lock
);
1511 inode
= find_inode_fast(sb
, head
, ino
);
1512 spin_unlock(&inode_hash_lock
);
1517 wait_on_inode(inode
);
1518 if (unlikely(inode_unhashed(inode
))) {
1525 EXPORT_SYMBOL(ilookup
);
1528 * find_inode_nowait - find an inode in the inode cache
1529 * @sb: super block of file system to search
1530 * @hashval: hash value (usually inode number) to search for
1531 * @match: callback used for comparisons between inodes
1532 * @data: opaque data pointer to pass to @match
1534 * Search for the inode specified by @hashval and @data in the inode
1535 * cache, where the helper function @match will return 0 if the inode
1536 * does not match, 1 if the inode does match, and -1 if the search
1537 * should be stopped. The @match function must be responsible for
1538 * taking the i_lock spin_lock and checking i_state for an inode being
1539 * freed or being initialized, and incrementing the reference count
1540 * before returning 1. It also must not sleep, since it is called with
1541 * the inode_hash_lock spinlock held.
1543 * This is a even more generalized version of ilookup5() when the
1544 * function must never block --- find_inode() can block in
1545 * __wait_on_freeing_inode() --- or when the caller can not increment
1546 * the reference count because the resulting iput() might cause an
1547 * inode eviction. The tradeoff is that the @match funtion must be
1548 * very carefully implemented.
1550 struct inode
*find_inode_nowait(struct super_block
*sb
,
1551 unsigned long hashval
,
1552 int (*match
)(struct inode
*, unsigned long,
1556 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1557 struct inode
*inode
, *ret_inode
= NULL
;
1560 spin_lock(&inode_hash_lock
);
1561 hlist_for_each_entry(inode
, head
, i_hash
) {
1562 if (inode
->i_sb
!= sb
)
1564 mval
= match(inode
, hashval
, data
);
1572 spin_unlock(&inode_hash_lock
);
1575 EXPORT_SYMBOL(find_inode_nowait
);
1578 * find_inode_rcu - find an inode in the inode cache
1579 * @sb: Super block of file system to search
1580 * @hashval: Key to hash
1581 * @test: Function to test match on an inode
1582 * @data: Data for test function
1584 * Search for the inode specified by @hashval and @data in the inode cache,
1585 * where the helper function @test will return 0 if the inode does not match
1586 * and 1 if it does. The @test function must be responsible for taking the
1587 * i_lock spin_lock and checking i_state for an inode being freed or being
1590 * If successful, this will return the inode for which the @test function
1591 * returned 1 and NULL otherwise.
1593 * The @test function is not permitted to take a ref on any inode presented.
1594 * It is also not permitted to sleep.
1596 * The caller must hold the RCU read lock.
1598 struct inode
*find_inode_rcu(struct super_block
*sb
, unsigned long hashval
,
1599 int (*test
)(struct inode
*, void *), void *data
)
1601 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1602 struct inode
*inode
;
1604 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1605 "suspicious find_inode_rcu() usage");
1607 hlist_for_each_entry_rcu(inode
, head
, i_hash
) {
1608 if (inode
->i_sb
== sb
&&
1609 !(READ_ONCE(inode
->i_state
) & (I_FREEING
| I_WILL_FREE
)) &&
1615 EXPORT_SYMBOL(find_inode_rcu
);
1618 * find_inode_by_ino_rcu - Find an inode in the inode cache
1619 * @sb: Super block of file system to search
1620 * @ino: The inode number to match
1622 * Search for the inode specified by @hashval and @data in the inode cache,
1623 * where the helper function @test will return 0 if the inode does not match
1624 * and 1 if it does. The @test function must be responsible for taking the
1625 * i_lock spin_lock and checking i_state for an inode being freed or being
1628 * If successful, this will return the inode for which the @test function
1629 * returned 1 and NULL otherwise.
1631 * The @test function is not permitted to take a ref on any inode presented.
1632 * It is also not permitted to sleep.
1634 * The caller must hold the RCU read lock.
1636 struct inode
*find_inode_by_ino_rcu(struct super_block
*sb
,
1639 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1640 struct inode
*inode
;
1642 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1643 "suspicious find_inode_by_ino_rcu() usage");
1645 hlist_for_each_entry_rcu(inode
, head
, i_hash
) {
1646 if (inode
->i_ino
== ino
&&
1647 inode
->i_sb
== sb
&&
1648 !(READ_ONCE(inode
->i_state
) & (I_FREEING
| I_WILL_FREE
)))
1653 EXPORT_SYMBOL(find_inode_by_ino_rcu
);
1655 int insert_inode_locked(struct inode
*inode
)
1657 struct super_block
*sb
= inode
->i_sb
;
1658 ino_t ino
= inode
->i_ino
;
1659 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1662 struct inode
*old
= NULL
;
1663 spin_lock(&inode_hash_lock
);
1664 hlist_for_each_entry(old
, head
, i_hash
) {
1665 if (old
->i_ino
!= ino
)
1667 if (old
->i_sb
!= sb
)
1669 spin_lock(&old
->i_lock
);
1670 if (old
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
1671 spin_unlock(&old
->i_lock
);
1677 spin_lock(&inode
->i_lock
);
1678 inode
->i_state
|= I_NEW
| I_CREATING
;
1679 hlist_add_head_rcu(&inode
->i_hash
, head
);
1680 spin_unlock(&inode
->i_lock
);
1681 spin_unlock(&inode_hash_lock
);
1684 if (unlikely(old
->i_state
& I_CREATING
)) {
1685 spin_unlock(&old
->i_lock
);
1686 spin_unlock(&inode_hash_lock
);
1690 spin_unlock(&old
->i_lock
);
1691 spin_unlock(&inode_hash_lock
);
1693 if (unlikely(!inode_unhashed(old
))) {
1700 EXPORT_SYMBOL(insert_inode_locked
);
1702 int insert_inode_locked4(struct inode
*inode
, unsigned long hashval
,
1703 int (*test
)(struct inode
*, void *), void *data
)
1707 inode
->i_state
|= I_CREATING
;
1708 old
= inode_insert5(inode
, hashval
, test
, NULL
, data
);
1716 EXPORT_SYMBOL(insert_inode_locked4
);
1719 int generic_delete_inode(struct inode
*inode
)
1723 EXPORT_SYMBOL(generic_delete_inode
);
1726 * Called when we're dropping the last reference
1729 * Call the FS "drop_inode()" function, defaulting to
1730 * the legacy UNIX filesystem behaviour. If it tells
1731 * us to evict inode, do so. Otherwise, retain inode
1732 * in cache if fs is alive, sync and evict if fs is
1735 static void iput_final(struct inode
*inode
)
1737 struct super_block
*sb
= inode
->i_sb
;
1738 const struct super_operations
*op
= inode
->i_sb
->s_op
;
1739 unsigned long state
;
1742 WARN_ON(inode
->i_state
& I_NEW
);
1745 drop
= op
->drop_inode(inode
);
1747 drop
= generic_drop_inode(inode
);
1750 !(inode
->i_state
& I_DONTCACHE
) &&
1751 (sb
->s_flags
& SB_ACTIVE
)) {
1752 __inode_add_lru(inode
, true);
1753 spin_unlock(&inode
->i_lock
);
1757 state
= inode
->i_state
;
1759 WRITE_ONCE(inode
->i_state
, state
| I_WILL_FREE
);
1760 spin_unlock(&inode
->i_lock
);
1762 write_inode_now(inode
, 1);
1764 spin_lock(&inode
->i_lock
);
1765 state
= inode
->i_state
;
1766 WARN_ON(state
& I_NEW
);
1767 state
&= ~I_WILL_FREE
;
1770 WRITE_ONCE(inode
->i_state
, state
| I_FREEING
);
1771 if (!list_empty(&inode
->i_lru
))
1772 inode_lru_list_del(inode
);
1773 spin_unlock(&inode
->i_lock
);
1779 * iput - put an inode
1780 * @inode: inode to put
1782 * Puts an inode, dropping its usage count. If the inode use count hits
1783 * zero, the inode is then freed and may also be destroyed.
1785 * Consequently, iput() can sleep.
1787 void iput(struct inode
*inode
)
1791 BUG_ON(inode
->i_state
& I_CLEAR
);
1793 if (atomic_dec_and_lock(&inode
->i_count
, &inode
->i_lock
)) {
1794 if (inode
->i_nlink
&& (inode
->i_state
& I_DIRTY_TIME
)) {
1795 atomic_inc(&inode
->i_count
);
1796 spin_unlock(&inode
->i_lock
);
1797 trace_writeback_lazytime_iput(inode
);
1798 mark_inode_dirty_sync(inode
);
1804 EXPORT_SYMBOL(iput
);
1808 * bmap - find a block number in a file
1809 * @inode: inode owning the block number being requested
1810 * @block: pointer containing the block to find
1812 * Replaces the value in ``*block`` with the block number on the device holding
1813 * corresponding to the requested block number in the file.
1814 * That is, asked for block 4 of inode 1 the function will replace the
1815 * 4 in ``*block``, with disk block relative to the disk start that holds that
1816 * block of the file.
1818 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1819 * hole, returns 0 and ``*block`` is also set to 0.
1821 int bmap(struct inode
*inode
, sector_t
*block
)
1823 if (!inode
->i_mapping
->a_ops
->bmap
)
1826 *block
= inode
->i_mapping
->a_ops
->bmap(inode
->i_mapping
, *block
);
1829 EXPORT_SYMBOL(bmap
);
1833 * With relative atime, only update atime if the previous atime is
1834 * earlier than or equal to either the ctime or mtime,
1835 * or if at least a day has passed since the last atime update.
1837 static int relatime_need_update(struct vfsmount
*mnt
, struct inode
*inode
,
1838 struct timespec64 now
)
1840 struct timespec64 ctime
;
1842 if (!(mnt
->mnt_flags
& MNT_RELATIME
))
1845 * Is mtime younger than or equal to atime? If yes, update atime:
1847 if (timespec64_compare(&inode
->i_mtime
, &inode
->i_atime
) >= 0)
1850 * Is ctime younger than or equal to atime? If yes, update atime:
1852 ctime
= inode_get_ctime(inode
);
1853 if (timespec64_compare(&ctime
, &inode
->i_atime
) >= 0)
1857 * Is the previous atime value older than a day? If yes,
1860 if ((long)(now
.tv_sec
- inode
->i_atime
.tv_sec
) >= 24*60*60)
1863 * Good, we can skip the atime update:
1869 * inode_update_timestamps - update the timestamps on the inode
1870 * @inode: inode to be updated
1871 * @flags: S_* flags that needed to be updated
1873 * The update_time function is called when an inode's timestamps need to be
1874 * updated for a read or write operation. This function handles updating the
1875 * actual timestamps. It's up to the caller to ensure that the inode is marked
1876 * dirty appropriately.
1878 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
1879 * attempt to update all three of them. S_ATIME updates can be handled
1880 * independently of the rest.
1882 * Returns a set of S_* flags indicating which values changed.
1884 int inode_update_timestamps(struct inode
*inode
, int flags
)
1887 struct timespec64 now
;
1889 if (flags
& (S_MTIME
|S_CTIME
|S_VERSION
)) {
1890 struct timespec64 ctime
= inode_get_ctime(inode
);
1892 now
= inode_set_ctime_current(inode
);
1893 if (!timespec64_equal(&now
, &ctime
))
1895 if (!timespec64_equal(&now
, &inode
->i_mtime
)) {
1896 inode
->i_mtime
= now
;
1899 if (IS_I_VERSION(inode
) && inode_maybe_inc_iversion(inode
, updated
))
1900 updated
|= S_VERSION
;
1902 now
= current_time(inode
);
1905 if (flags
& S_ATIME
) {
1906 if (!timespec64_equal(&now
, &inode
->i_atime
)) {
1907 inode
->i_atime
= now
;
1913 EXPORT_SYMBOL(inode_update_timestamps
);
1916 * generic_update_time - update the timestamps on the inode
1917 * @inode: inode to be updated
1918 * @flags: S_* flags that needed to be updated
1920 * The update_time function is called when an inode's timestamps need to be
1921 * updated for a read or write operation. In the case where any of S_MTIME, S_CTIME,
1922 * or S_VERSION need to be updated we attempt to update all three of them. S_ATIME
1923 * updates can be handled done independently of the rest.
1925 * Returns a S_* mask indicating which fields were updated.
1927 int generic_update_time(struct inode
*inode
, int flags
)
1929 int updated
= inode_update_timestamps(inode
, flags
);
1930 int dirty_flags
= 0;
1932 if (updated
& (S_ATIME
|S_MTIME
|S_CTIME
))
1933 dirty_flags
= inode
->i_sb
->s_flags
& SB_LAZYTIME
? I_DIRTY_TIME
: I_DIRTY_SYNC
;
1934 if (updated
& S_VERSION
)
1935 dirty_flags
|= I_DIRTY_SYNC
;
1936 __mark_inode_dirty(inode
, dirty_flags
);
1939 EXPORT_SYMBOL(generic_update_time
);
1942 * This does the actual work of updating an inodes time or version. Must have
1943 * had called mnt_want_write() before calling this.
1945 int inode_update_time(struct inode
*inode
, int flags
)
1947 if (inode
->i_op
->update_time
)
1948 return inode
->i_op
->update_time(inode
, flags
);
1949 generic_update_time(inode
, flags
);
1952 EXPORT_SYMBOL(inode_update_time
);
1955 * atime_needs_update - update the access time
1956 * @path: the &struct path to update
1957 * @inode: inode to update
1959 * Update the accessed time on an inode and mark it for writeback.
1960 * This function automatically handles read only file systems and media,
1961 * as well as the "noatime" flag and inode specific "noatime" markers.
1963 bool atime_needs_update(const struct path
*path
, struct inode
*inode
)
1965 struct vfsmount
*mnt
= path
->mnt
;
1966 struct timespec64 now
;
1968 if (inode
->i_flags
& S_NOATIME
)
1971 /* Atime updates will likely cause i_uid and i_gid to be written
1972 * back improprely if their true value is unknown to the vfs.
1974 if (HAS_UNMAPPED_ID(mnt_idmap(mnt
), inode
))
1977 if (IS_NOATIME(inode
))
1979 if ((inode
->i_sb
->s_flags
& SB_NODIRATIME
) && S_ISDIR(inode
->i_mode
))
1982 if (mnt
->mnt_flags
& MNT_NOATIME
)
1984 if ((mnt
->mnt_flags
& MNT_NODIRATIME
) && S_ISDIR(inode
->i_mode
))
1987 now
= current_time(inode
);
1989 if (!relatime_need_update(mnt
, inode
, now
))
1992 if (timespec64_equal(&inode
->i_atime
, &now
))
1998 void touch_atime(const struct path
*path
)
2000 struct vfsmount
*mnt
= path
->mnt
;
2001 struct inode
*inode
= d_inode(path
->dentry
);
2003 if (!atime_needs_update(path
, inode
))
2006 if (!sb_start_write_trylock(inode
->i_sb
))
2009 if (__mnt_want_write(mnt
) != 0)
2012 * File systems can error out when updating inodes if they need to
2013 * allocate new space to modify an inode (such is the case for
2014 * Btrfs), but since we touch atime while walking down the path we
2015 * really don't care if we failed to update the atime of the file,
2016 * so just ignore the return value.
2017 * We may also fail on filesystems that have the ability to make parts
2018 * of the fs read only, e.g. subvolumes in Btrfs.
2020 inode_update_time(inode
, S_ATIME
);
2021 __mnt_drop_write(mnt
);
2023 sb_end_write(inode
->i_sb
);
2025 EXPORT_SYMBOL(touch_atime
);
2028 * Return mask of changes for notify_change() that need to be done as a
2029 * response to write or truncate. Return 0 if nothing has to be changed.
2030 * Negative value on error (change should be denied).
2032 int dentry_needs_remove_privs(struct mnt_idmap
*idmap
,
2033 struct dentry
*dentry
)
2035 struct inode
*inode
= d_inode(dentry
);
2039 if (IS_NOSEC(inode
))
2042 mask
= setattr_should_drop_suidgid(idmap
, inode
);
2043 ret
= security_inode_need_killpriv(dentry
);
2047 mask
|= ATTR_KILL_PRIV
;
2051 static int __remove_privs(struct mnt_idmap
*idmap
,
2052 struct dentry
*dentry
, int kill
)
2054 struct iattr newattrs
;
2056 newattrs
.ia_valid
= ATTR_FORCE
| kill
;
2058 * Note we call this on write, so notify_change will not
2059 * encounter any conflicting delegations:
2061 return notify_change(idmap
, dentry
, &newattrs
, NULL
);
2064 static int __file_remove_privs(struct file
*file
, unsigned int flags
)
2066 struct dentry
*dentry
= file_dentry(file
);
2067 struct inode
*inode
= file_inode(file
);
2071 if (IS_NOSEC(inode
) || !S_ISREG(inode
->i_mode
))
2074 kill
= dentry_needs_remove_privs(file_mnt_idmap(file
), dentry
);
2079 if (flags
& IOCB_NOWAIT
)
2082 error
= __remove_privs(file_mnt_idmap(file
), dentry
, kill
);
2086 inode_has_no_xattr(inode
);
2091 * file_remove_privs - remove special file privileges (suid, capabilities)
2092 * @file: file to remove privileges from
2094 * When file is modified by a write or truncation ensure that special
2095 * file privileges are removed.
2097 * Return: 0 on success, negative errno on failure.
2099 int file_remove_privs(struct file
*file
)
2101 return __file_remove_privs(file
, 0);
2103 EXPORT_SYMBOL(file_remove_privs
);
2105 static int inode_needs_update_time(struct inode
*inode
)
2108 struct timespec64 now
= current_time(inode
);
2109 struct timespec64 ctime
;
2111 /* First try to exhaust all avenues to not sync */
2112 if (IS_NOCMTIME(inode
))
2115 if (!timespec64_equal(&inode
->i_mtime
, &now
))
2118 ctime
= inode_get_ctime(inode
);
2119 if (!timespec64_equal(&ctime
, &now
))
2122 if (IS_I_VERSION(inode
) && inode_iversion_need_inc(inode
))
2123 sync_it
|= S_VERSION
;
2128 static int __file_update_time(struct file
*file
, int sync_mode
)
2131 struct inode
*inode
= file_inode(file
);
2133 /* try to update time settings */
2134 if (!__mnt_want_write_file(file
)) {
2135 ret
= inode_update_time(inode
, sync_mode
);
2136 __mnt_drop_write_file(file
);
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback. Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the _NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server. This can return an error for file systems that need to
 * allocate space in order to update an inode.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_update_time(struct file *file)
{
	int ret;
	struct inode *inode = file_inode(file);

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;

	return __file_update_time(file, ret);
}
EXPORT_SYMBOL(file_update_time);
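
/*
 * Usage sketch (illustration only): a write path that has already stripped
 * privileges bumps mtime/ctime (and i_version where enabled) with a single
 * call; a zero return means either nothing needed updating or the update
 * succeeded. The helper name is hypothetical.
 */
static __maybe_unused int example_post_write(struct file *file)
{
	/* Non-zero only when the filesystem failed to persist the update. */
	return file_update_time(file);
}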
/**
 * file_modified_flags - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 * @flags: kiocb flags
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * If IOCB_NOWAIT is set, special file privileges will not be removed and
 * time settings will not be updated. It will return -EAGAIN.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int file_modified_flags(struct file *file, int flags)
{
	int ret;
	struct inode *inode = file_inode(file);

	/*
	 * Clear the security bits if the process is not being run by root.
	 * This keeps people from modifying setuid and setgid binaries.
	 */
	ret = __file_remove_privs(file, flags);
	if (ret)
		return ret;

	if (unlikely(file->f_mode & FMODE_NOCMTIME))
		return 0;

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;
	if (flags & IOCB_NOWAIT)
		return -EAGAIN;

	return __file_update_time(file, ret);
}
/**
 * file_modified - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_modified(struct file *file)
{
	return file_modified_flags(file, 0);
}
EXPORT_SYMBOL(file_modified);
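
/*
 * Usage sketch (illustration only): the typical calling sequence in a
 * filesystem's ->write_iter() after taking the inode lock and before doing
 * the actual I/O. The function name is hypothetical; the locking shown is
 * the caller's responsibility, as documented above.
 */
static __maybe_unused ssize_t example_write_begin(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	int ret;

	inode_lock(inode);
	ret = file_modified(file);	/* drop suid/sgid, bump mtime/ctime */
	if (ret) {
		inode_unlock(inode);
		return ret;
	}
	/* ... perform the write here ... */
	inode_unlock(inode);
	return 0;
}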
/**
 * kiocb_modified - handle mandated vfs changes when modifying a file
 * @iocb: iocb that was modified
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int kiocb_modified(struct kiocb *iocb)
{
	return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
}
EXPORT_SYMBOL_GPL(kiocb_modified);
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
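
/*
 * Usage sketch (illustration only): filesystems commonly consult
 * inode_needs_sync() after creating a new on-disk object to decide whether
 * it must be written out synchronously (an O_SYNC file or a directory on a
 * dirsync mount). write_inode_now() and mark_inode_dirty() are real
 * helpers; the surrounding function name is hypothetical.
 */
static __maybe_unused int example_commit_new_inode(struct inode *dir,
						   struct inode *inode)
{
	if (inode_needs_sync(dir))
		return write_inode_now(inode, 1);	/* wait for writeback */
	mark_inode_dirty(inode);
	return 0;
}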
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wq_entry);
	spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY | HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}
void __init inode_init(void)
{
	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK))
			inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
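
/*
 * Usage sketch (illustration only): a filesystem's ->mknod() implementation
 * typically allocates the inode, lets the VFS fill in the device-specific
 * file operations and i_rdev via init_special_inode(), and then links the
 * inode into the directory. The function name and the unconditional use of
 * new_inode() here are assumptions made for the example.
 */
static __maybe_unused struct inode *example_mknod_inode(struct super_block *sb,
							umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	init_special_inode(inode, mode, rdev);	/* chr/blk/fifo/sock setup */
	return inode;
}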
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @idmap: idmap of the mount the inode was created from
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 *
 * If the inode has been created through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions
 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
 * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
		      const struct inode *dir, umode_t mode)
{
	inode_fsuid_set(inode, idmap);
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode_fsgid_set(inode, idmap);
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
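
/*
 * Usage sketch (illustration only): a filesystem ->create() path sets the
 * owner, group and mode of a freshly allocated inode in one step, so that
 * setgid parent directories and idmapped mounts are handled consistently.
 * Names other than inode_init_owner() and get_next_ino() are hypothetical;
 * get_next_ino() is typical only of in-memory filesystems.
 */
static __maybe_unused void example_fill_new_inode(struct mnt_idmap *idmap,
						  struct inode *dir,
						  struct inode *inode,
						  umode_t mode)
{
	/* Honour setgid parent dirs and map ids for idmapped mounts. */
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_ino = get_next_ino();
}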
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @idmap: idmap of the mount the inode was found from
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions.
 * On non-idmapped mounts or if permission checking is to be performed on the
 * raw inode simply pass @nop_mnt_idmap.
 */
bool inode_owner_or_capable(struct mnt_idmap *idmap,
			    const struct inode *inode)
{
	vfsuid_t vfsuid;
	struct user_namespace *ns;

	vfsuid = i_uid_into_vfsuid(idmap, inode);
	if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
		return true;

	ns = current_user_ns();
	if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
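
/*
 * Usage sketch (illustration only): an ioctl handler that changes per-inode
 * attributes usually gates the operation on ownership or CAP_FOWNER. The
 * function name is hypothetical; file_mnt_idmap() and file_inode() are the
 * real accessors used elsewhere in this file.
 */
static __maybe_unused int example_ioctl_set_attr(struct file *file)
{
	if (!inode_owner_or_capable(file_mnt_idmap(file), file_inode(file)))
		return -EPERM;
	/* ... apply the attribute change ... */
	return 0;
}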
/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
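
/*
 * Usage sketch (illustration only): a truncate path drains in-flight direct
 * I/O before shrinking the file, holding the inode lock so that no new DIO
 * references can be taken in the meantime. The function name is
 * hypothetical; truncate_setsize() is the real VFS helper.
 */
static __maybe_unused void example_shrink_file(struct inode *inode, loff_t newsize)
{
	inode_lock(inode);
	inode_dio_wait(inode);			/* wait for i_dio_count to reach 0 */
	truncate_setsize(inode, newsize);	/* update i_size and unmap pages */
	inode_unlock(inode);
}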
/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule, is that there is at least one
 * code path which doesn't today so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);
void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);
/**
 * timestamp_truncate - Truncate timespec to a granularity
 * @t: Timespec
 * @inode: inode being updated
 *
 * Truncate a timespec to the granularity supported by the fs
 * containing the inode. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
		t.tv_nsec = 0;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1)
		; /* nothing */
	else if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;
	else if (gran > 1 && gran < NSEC_PER_SEC)
		t.tv_nsec -= t.tv_nsec % gran;
	else
		WARN(1, "invalid file time granularity: %u", gran);
	return t;
}
EXPORT_SYMBOL(timestamp_truncate);
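
/*
 * Usage sketch (illustration only): utimensat()-style code truncates a
 * user-supplied timestamp to what the filesystem can actually store before
 * writing it into the inode, e.g. whole seconds on a filesystem with
 * s_time_gran == NSEC_PER_SEC. The function name is hypothetical.
 */
static __maybe_unused void example_set_mtime(struct inode *inode,
					     struct timespec64 ts)
{
	inode->i_mtime = timestamp_truncate(ts, inode);	/* rounds down */
}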
/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that inode and inode->sb cannot be NULL.
 * Otherwise, the function warns and returns time without truncation.
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
/**
 * inode_set_ctime_current - set the ctime to current_time
 * @inode: inode
 *
 * Set the inode->i_ctime to the current value for the inode. Returns
 * the current value that was assigned to i_ctime.
 */
struct timespec64 inode_set_ctime_current(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
	return now;
}
EXPORT_SYMBOL(inode_set_ctime_current);
/**
 * in_group_or_capable - check whether caller is CAP_FSETID privileged
 * @idmap: idmap of the mount @inode was found from
 * @inode: inode to check
 * @vfsgid: the new/current vfsgid of @inode
 *
 * Check whether @vfsgid is in the caller's group list or if the caller is
 * privileged with CAP_FSETID over @inode. This can be used to determine
 * whether the setgid bit can be kept or must be dropped.
 *
 * Return: true if the caller is sufficiently privileged, false if not.
 */
bool in_group_or_capable(struct mnt_idmap *idmap,
			 const struct inode *inode, vfsgid_t vfsgid)
{
	if (vfsgid_in_group_p(vfsgid))
		return true;
	if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
		return true;
	return false;
}
/**
 * mode_strip_sgid - handle the sgid bit for non-directories
 * @idmap: idmap of the mount the inode was created from
 * @dir: parent directory inode
 * @mode: mode of the file to be created in @dir
 *
 * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
 * raised and @dir has the S_ISGID bit raised ensure that the caller is
 * either in the group of the parent directory or they have CAP_FSETID
 * in their user namespace and are privileged over the parent directory.
 * In all other cases, strip the S_ISGID bit from @mode.
 *
 * Return: the new mode to use for the file
 */
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
			const struct inode *dir, umode_t mode)
{
	if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
		return mode;
	if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
		return mode;
	if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
		return mode;
	return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);
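
/*
 * Usage sketch (illustration only): a create path strips an unwarranted
 * setgid bit from the requested mode before the new inode is initialised,
 * so an unprivileged caller cannot smuggle S_ISGID onto a group-executable
 * file in someone else's setgid directory. The function name is
 * hypothetical.
 */
static __maybe_unused umode_t example_new_file_mode(struct mnt_idmap *idmap,
						    struct inode *dir,
						    umode_t mode)
{
	/* Only group-executable files in setgid directories are affected. */
	return mode_strip_sgid(idmap, dir, mode);
}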