1 // SPDX-License-Identifier: GPL-2.0-only
3 * (C) 1997 Linus Torvalds
4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
6 #include <linux/export.h>
8 #include <linux/filelock.h>
10 #include <linux/backing-dev.h>
11 #include <linux/hash.h>
12 #include <linux/swap.h>
13 #include <linux/security.h>
14 #include <linux/cdev.h>
15 #include <linux/memblock.h>
16 #include <linux/fsnotify.h>
17 #include <linux/mount.h>
18 #include <linux/posix_acl.h>
19 #include <linux/buffer_head.h> /* for inode_has_buffers */
20 #include <linux/ratelimit.h>
21 #include <linux/list_lru.h>
22 #include <linux/iversion.h>
23 #include <trace/events/writeback.h>
27 * Inode locking rules:
29 * inode->i_lock protects:
30 * inode->i_state, inode->i_hash, __iget(), inode->i_io_list
31 * Inode LRU list locks protect:
32 * inode->i_sb->s_inode_lru, inode->i_lru
33 * inode->i_sb->s_inode_list_lock protects:
34 * inode->i_sb->s_inodes, inode->i_sb_list
35 * bdi->wb.list_lock protects:
36 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
37 * inode_hash_lock protects:
38 * inode_hashtable, inode->i_hash
42 * inode->i_sb->s_inode_list_lock
44 * Inode LRU list locks
50 * inode->i_sb->s_inode_list_lock
57 static unsigned int i_hash_mask __ro_after_init
;
58 static unsigned int i_hash_shift __ro_after_init
;
59 static struct hlist_head
*inode_hashtable __ro_after_init
;
60 static __cacheline_aligned_in_smp
DEFINE_SPINLOCK(inode_hash_lock
);
63 * Empty aops. Can be used for the cases where the user does not
64 * define any of the address_space operations.
66 const struct address_space_operations empty_aops
= {
68 EXPORT_SYMBOL(empty_aops
);
70 static DEFINE_PER_CPU(unsigned long, nr_inodes
);
71 static DEFINE_PER_CPU(unsigned long, nr_unused
);
73 static struct kmem_cache
*inode_cachep __ro_after_init
;
75 static long get_nr_inodes(void)
79 for_each_possible_cpu(i
)
80 sum
+= per_cpu(nr_inodes
, i
);
81 return sum
< 0 ? 0 : sum
;
84 static inline long get_nr_inodes_unused(void)
88 for_each_possible_cpu(i
)
89 sum
+= per_cpu(nr_unused
, i
);
90 return sum
< 0 ? 0 : sum
;
93 long get_nr_dirty_inodes(void)
95 /* not actually dirty inodes, but a wild approximation */
96 long nr_dirty
= get_nr_inodes() - get_nr_inodes_unused();
97 return nr_dirty
> 0 ? nr_dirty
: 0;
101 * Handle nr_inode sysctl
105 * Statistics gathering..
107 static struct inodes_stat_t inodes_stat
;
109 static int proc_nr_inodes(struct ctl_table
*table
, int write
, void *buffer
,
110 size_t *lenp
, loff_t
*ppos
)
112 inodes_stat
.nr_inodes
= get_nr_inodes();
113 inodes_stat
.nr_unused
= get_nr_inodes_unused();
114 return proc_doulongvec_minmax(table
, write
, buffer
, lenp
, ppos
);
117 static struct ctl_table inodes_sysctls
[] = {
119 .procname
= "inode-nr",
120 .data
= &inodes_stat
,
121 .maxlen
= 2*sizeof(long),
123 .proc_handler
= proc_nr_inodes
,
126 .procname
= "inode-state",
127 .data
= &inodes_stat
,
128 .maxlen
= 7*sizeof(long),
130 .proc_handler
= proc_nr_inodes
,
135 static int __init
init_fs_inode_sysctls(void)
137 register_sysctl_init("fs", inodes_sysctls
);
140 early_initcall(init_fs_inode_sysctls
);
143 static int no_open(struct inode
*inode
, struct file
*file
)
149 * inode_init_always - perform inode structure initialisation
150 * @sb: superblock inode belongs to
151 * @inode: inode to initialise
153 * These are initializations that need to be done on every inode
154 * allocation as the fields are not initialised by slab allocation.
156 int inode_init_always(struct super_block
*sb
, struct inode
*inode
)
158 static const struct inode_operations empty_iops
;
159 static const struct file_operations no_open_fops
= {.open
= no_open
};
160 struct address_space
*const mapping
= &inode
->i_data
;
163 inode
->i_blkbits
= sb
->s_blocksize_bits
;
165 atomic64_set(&inode
->i_sequence
, 0);
166 atomic_set(&inode
->i_count
, 1);
167 inode
->i_op
= &empty_iops
;
168 inode
->i_fop
= &no_open_fops
;
170 inode
->__i_nlink
= 1;
171 inode
->i_opflags
= 0;
173 inode
->i_opflags
|= IOP_XATTR
;
174 i_uid_write(inode
, 0);
175 i_gid_write(inode
, 0);
176 atomic_set(&inode
->i_writecount
, 0);
178 inode
->i_write_hint
= WRITE_LIFE_NOT_SET
;
181 inode
->i_generation
= 0;
182 inode
->i_pipe
= NULL
;
183 inode
->i_cdev
= NULL
;
184 inode
->i_link
= NULL
;
185 inode
->i_dir_seq
= 0;
187 inode
->dirtied_when
= 0;
189 #ifdef CONFIG_CGROUP_WRITEBACK
190 inode
->i_wb_frn_winner
= 0;
191 inode
->i_wb_frn_avg_time
= 0;
192 inode
->i_wb_frn_history
= 0;
195 spin_lock_init(&inode
->i_lock
);
196 lockdep_set_class(&inode
->i_lock
, &sb
->s_type
->i_lock_key
);
198 init_rwsem(&inode
->i_rwsem
);
199 lockdep_set_class(&inode
->i_rwsem
, &sb
->s_type
->i_mutex_key
);
201 atomic_set(&inode
->i_dio_count
, 0);
203 mapping
->a_ops
= &empty_aops
;
204 mapping
->host
= inode
;
207 atomic_set(&mapping
->i_mmap_writable
, 0);
208 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
209 atomic_set(&mapping
->nr_thps
, 0);
211 mapping_set_gfp_mask(mapping
, GFP_HIGHUSER_MOVABLE
);
212 mapping
->private_data
= NULL
;
213 mapping
->writeback_index
= 0;
214 init_rwsem(&mapping
->invalidate_lock
);
215 lockdep_set_class_and_name(&mapping
->invalidate_lock
,
216 &sb
->s_type
->invalidate_lock_key
,
217 "mapping.invalidate_lock");
218 if (sb
->s_iflags
& SB_I_STABLE_WRITES
)
219 mapping_set_stable_writes(mapping
);
220 inode
->i_private
= NULL
;
221 inode
->i_mapping
= mapping
;
222 INIT_HLIST_HEAD(&inode
->i_dentry
); /* buggered by rcu freeing */
223 #ifdef CONFIG_FS_POSIX_ACL
224 inode
->i_acl
= inode
->i_default_acl
= ACL_NOT_CACHED
;
227 #ifdef CONFIG_FSNOTIFY
228 inode
->i_fsnotify_mask
= 0;
230 inode
->i_flctx
= NULL
;
232 if (unlikely(security_inode_alloc(inode
)))
234 this_cpu_inc(nr_inodes
);
238 EXPORT_SYMBOL(inode_init_always
);
240 void free_inode_nonrcu(struct inode
*inode
)
242 kmem_cache_free(inode_cachep
, inode
);
244 EXPORT_SYMBOL(free_inode_nonrcu
);
246 static void i_callback(struct rcu_head
*head
)
248 struct inode
*inode
= container_of(head
, struct inode
, i_rcu
);
249 if (inode
->free_inode
)
250 inode
->free_inode(inode
);
252 free_inode_nonrcu(inode
);
255 static struct inode
*alloc_inode(struct super_block
*sb
)
257 const struct super_operations
*ops
= sb
->s_op
;
260 if (ops
->alloc_inode
)
261 inode
= ops
->alloc_inode(sb
);
263 inode
= alloc_inode_sb(sb
, inode_cachep
, GFP_KERNEL
);
268 if (unlikely(inode_init_always(sb
, inode
))) {
269 if (ops
->destroy_inode
) {
270 ops
->destroy_inode(inode
);
271 if (!ops
->free_inode
)
274 inode
->free_inode
= ops
->free_inode
;
275 i_callback(&inode
->i_rcu
);
282 void __destroy_inode(struct inode
*inode
)
284 BUG_ON(inode_has_buffers(inode
));
285 inode_detach_wb(inode
);
286 security_inode_free(inode
);
287 fsnotify_inode_delete(inode
);
288 locks_free_lock_context(inode
);
289 if (!inode
->i_nlink
) {
290 WARN_ON(atomic_long_read(&inode
->i_sb
->s_remove_count
) == 0);
291 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
294 #ifdef CONFIG_FS_POSIX_ACL
295 if (inode
->i_acl
&& !is_uncached_acl(inode
->i_acl
))
296 posix_acl_release(inode
->i_acl
);
297 if (inode
->i_default_acl
&& !is_uncached_acl(inode
->i_default_acl
))
298 posix_acl_release(inode
->i_default_acl
);
300 this_cpu_dec(nr_inodes
);
302 EXPORT_SYMBOL(__destroy_inode
);
304 static void destroy_inode(struct inode
*inode
)
306 const struct super_operations
*ops
= inode
->i_sb
->s_op
;
308 BUG_ON(!list_empty(&inode
->i_lru
));
309 __destroy_inode(inode
);
310 if (ops
->destroy_inode
) {
311 ops
->destroy_inode(inode
);
312 if (!ops
->free_inode
)
315 inode
->free_inode
= ops
->free_inode
;
316 call_rcu(&inode
->i_rcu
, i_callback
);
320 * drop_nlink - directly drop an inode's link count
323 * This is a low-level filesystem helper to replace any
324 * direct filesystem manipulation of i_nlink. In cases
325 * where we are attempting to track writes to the
326 * filesystem, a decrement to zero means an imminent
327 * write when the file is truncated and actually unlinked
330 void drop_nlink(struct inode
*inode
)
332 WARN_ON(inode
->i_nlink
== 0);
335 atomic_long_inc(&inode
->i_sb
->s_remove_count
);
337 EXPORT_SYMBOL(drop_nlink
);
340 * clear_nlink - directly zero an inode's link count
343 * This is a low-level filesystem helper to replace any
344 * direct filesystem manipulation of i_nlink. See
345 * drop_nlink() for why we care about i_nlink hitting zero.
347 void clear_nlink(struct inode
*inode
)
349 if (inode
->i_nlink
) {
350 inode
->__i_nlink
= 0;
351 atomic_long_inc(&inode
->i_sb
->s_remove_count
);
354 EXPORT_SYMBOL(clear_nlink
);
357 * set_nlink - directly set an inode's link count
359 * @nlink: new nlink (should be non-zero)
361 * This is a low-level filesystem helper to replace any
362 * direct filesystem manipulation of i_nlink.
364 void set_nlink(struct inode
*inode
, unsigned int nlink
)
369 /* Yes, some filesystems do change nlink from zero to one */
370 if (inode
->i_nlink
== 0)
371 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
373 inode
->__i_nlink
= nlink
;
376 EXPORT_SYMBOL(set_nlink
);
379 * inc_nlink - directly increment an inode's link count
382 * This is a low-level filesystem helper to replace any
383 * direct filesystem manipulation of i_nlink. Currently,
384 * it is only here for parity with dec_nlink().
386 void inc_nlink(struct inode
*inode
)
388 if (unlikely(inode
->i_nlink
== 0)) {
389 WARN_ON(!(inode
->i_state
& I_LINKABLE
));
390 atomic_long_dec(&inode
->i_sb
->s_remove_count
);
395 EXPORT_SYMBOL(inc_nlink
);
397 static void __address_space_init_once(struct address_space
*mapping
)
399 xa_init_flags(&mapping
->i_pages
, XA_FLAGS_LOCK_IRQ
| XA_FLAGS_ACCOUNT
);
400 init_rwsem(&mapping
->i_mmap_rwsem
);
401 INIT_LIST_HEAD(&mapping
->private_list
);
402 spin_lock_init(&mapping
->private_lock
);
403 mapping
->i_mmap
= RB_ROOT_CACHED
;
406 void address_space_init_once(struct address_space
*mapping
)
408 memset(mapping
, 0, sizeof(*mapping
));
409 __address_space_init_once(mapping
);
411 EXPORT_SYMBOL(address_space_init_once
);
414 * These are initializations that only need to be done
415 * once, because the fields are idempotent across use
416 * of the inode, so let the slab aware of that.
418 void inode_init_once(struct inode
*inode
)
420 memset(inode
, 0, sizeof(*inode
));
421 INIT_HLIST_NODE(&inode
->i_hash
);
422 INIT_LIST_HEAD(&inode
->i_devices
);
423 INIT_LIST_HEAD(&inode
->i_io_list
);
424 INIT_LIST_HEAD(&inode
->i_wb_list
);
425 INIT_LIST_HEAD(&inode
->i_lru
);
426 INIT_LIST_HEAD(&inode
->i_sb_list
);
427 __address_space_init_once(&inode
->i_data
);
428 i_size_ordered_init(inode
);
430 EXPORT_SYMBOL(inode_init_once
);
432 static void init_once(void *foo
)
434 struct inode
*inode
= (struct inode
*) foo
;
436 inode_init_once(inode
);
440 * inode->i_lock must be held
442 void __iget(struct inode
*inode
)
444 atomic_inc(&inode
->i_count
);
448 * get additional reference to inode; caller must already hold one.
450 void ihold(struct inode
*inode
)
452 WARN_ON(atomic_inc_return(&inode
->i_count
) < 2);
454 EXPORT_SYMBOL(ihold
);
456 static void __inode_add_lru(struct inode
*inode
, bool rotate
)
458 if (inode
->i_state
& (I_DIRTY_ALL
| I_SYNC
| I_FREEING
| I_WILL_FREE
))
460 if (atomic_read(&inode
->i_count
))
462 if (!(inode
->i_sb
->s_flags
& SB_ACTIVE
))
464 if (!mapping_shrinkable(&inode
->i_data
))
467 if (list_lru_add(&inode
->i_sb
->s_inode_lru
, &inode
->i_lru
))
468 this_cpu_inc(nr_unused
);
470 inode
->i_state
|= I_REFERENCED
;
474 * Add inode to LRU if needed (inode is unused and clean).
476 * Needs inode->i_lock held.
478 void inode_add_lru(struct inode
*inode
)
480 __inode_add_lru(inode
, false);
483 static void inode_lru_list_del(struct inode
*inode
)
485 if (list_lru_del(&inode
->i_sb
->s_inode_lru
, &inode
->i_lru
))
486 this_cpu_dec(nr_unused
);
490 * inode_sb_list_add - add inode to the superblock list of inodes
491 * @inode: inode to add
493 void inode_sb_list_add(struct inode
*inode
)
495 spin_lock(&inode
->i_sb
->s_inode_list_lock
);
496 list_add(&inode
->i_sb_list
, &inode
->i_sb
->s_inodes
);
497 spin_unlock(&inode
->i_sb
->s_inode_list_lock
);
499 EXPORT_SYMBOL_GPL(inode_sb_list_add
);
501 static inline void inode_sb_list_del(struct inode
*inode
)
503 if (!list_empty(&inode
->i_sb_list
)) {
504 spin_lock(&inode
->i_sb
->s_inode_list_lock
);
505 list_del_init(&inode
->i_sb_list
);
506 spin_unlock(&inode
->i_sb
->s_inode_list_lock
);
510 static unsigned long hash(struct super_block
*sb
, unsigned long hashval
)
514 tmp
= (hashval
* (unsigned long)sb
) ^ (GOLDEN_RATIO_PRIME
+ hashval
) /
516 tmp
= tmp
^ ((tmp
^ GOLDEN_RATIO_PRIME
) >> i_hash_shift
);
517 return tmp
& i_hash_mask
;
521 * __insert_inode_hash - hash an inode
522 * @inode: unhashed inode
523 * @hashval: unsigned long value used to locate this object in the
526 * Add an inode to the inode hash for this superblock.
528 void __insert_inode_hash(struct inode
*inode
, unsigned long hashval
)
530 struct hlist_head
*b
= inode_hashtable
+ hash(inode
->i_sb
, hashval
);
532 spin_lock(&inode_hash_lock
);
533 spin_lock(&inode
->i_lock
);
534 hlist_add_head_rcu(&inode
->i_hash
, b
);
535 spin_unlock(&inode
->i_lock
);
536 spin_unlock(&inode_hash_lock
);
538 EXPORT_SYMBOL(__insert_inode_hash
);
541 * __remove_inode_hash - remove an inode from the hash
542 * @inode: inode to unhash
544 * Remove an inode from the superblock.
546 void __remove_inode_hash(struct inode
*inode
)
548 spin_lock(&inode_hash_lock
);
549 spin_lock(&inode
->i_lock
);
550 hlist_del_init_rcu(&inode
->i_hash
);
551 spin_unlock(&inode
->i_lock
);
552 spin_unlock(&inode_hash_lock
);
554 EXPORT_SYMBOL(__remove_inode_hash
);
556 void dump_mapping(const struct address_space
*mapping
)
559 const struct address_space_operations
*a_ops
;
560 struct hlist_node
*dentry_first
;
561 struct dentry
*dentry_ptr
;
562 struct dentry dentry
;
566 * If mapping is an invalid pointer, we don't want to crash
567 * accessing it, so probe everything depending on it carefully.
569 if (get_kernel_nofault(host
, &mapping
->host
) ||
570 get_kernel_nofault(a_ops
, &mapping
->a_ops
)) {
571 pr_warn("invalid mapping:%px\n", mapping
);
576 pr_warn("aops:%ps\n", a_ops
);
580 if (get_kernel_nofault(dentry_first
, &host
->i_dentry
.first
) ||
581 get_kernel_nofault(ino
, &host
->i_ino
)) {
582 pr_warn("aops:%ps invalid inode:%px\n", a_ops
, host
);
587 pr_warn("aops:%ps ino:%lx\n", a_ops
, ino
);
591 dentry_ptr
= container_of(dentry_first
, struct dentry
, d_u
.d_alias
);
592 if (get_kernel_nofault(dentry
, dentry_ptr
)) {
593 pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
594 a_ops
, ino
, dentry_ptr
);
599 * if dentry is corrupted, the %pd handler may still crash,
600 * but it's unlikely that we reach here with a corrupt mapping
602 pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops
, ino
, &dentry
);
605 void clear_inode(struct inode
*inode
)
608 * We have to cycle the i_pages lock here because reclaim can be in the
609 * process of removing the last page (in __filemap_remove_folio())
610 * and we must not free the mapping under it.
612 xa_lock_irq(&inode
->i_data
.i_pages
);
613 BUG_ON(inode
->i_data
.nrpages
);
615 * Almost always, mapping_empty(&inode->i_data) here; but there are
616 * two known and long-standing ways in which nodes may get left behind
617 * (when deep radix-tree node allocation failed partway; or when THP
618 * collapse_file() failed). Until those two known cases are cleaned up,
619 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
620 * nor even WARN_ON(!mapping_empty).
622 xa_unlock_irq(&inode
->i_data
.i_pages
);
623 BUG_ON(!list_empty(&inode
->i_data
.private_list
));
624 BUG_ON(!(inode
->i_state
& I_FREEING
));
625 BUG_ON(inode
->i_state
& I_CLEAR
);
626 BUG_ON(!list_empty(&inode
->i_wb_list
));
627 /* don't need i_lock here, no concurrent mods to i_state */
628 inode
->i_state
= I_FREEING
| I_CLEAR
;
630 EXPORT_SYMBOL(clear_inode
);
633 * Free the inode passed in, removing it from the lists it is still connected
634 * to. We remove any pages still attached to the inode and wait for any IO that
635 * is still in progress before finally destroying the inode.
637 * An inode must already be marked I_FREEING so that we avoid the inode being
638 * moved back onto lists if we race with other code that manipulates the lists
639 * (e.g. writeback_single_inode). The caller is responsible for setting this.
641 * An inode must already be removed from the LRU list before being evicted from
642 * the cache. This should occur atomically with setting the I_FREEING state
643 * flag, so no inodes here should ever be on the LRU when being evicted.
645 static void evict(struct inode
*inode
)
647 const struct super_operations
*op
= inode
->i_sb
->s_op
;
649 BUG_ON(!(inode
->i_state
& I_FREEING
));
650 BUG_ON(!list_empty(&inode
->i_lru
));
652 if (!list_empty(&inode
->i_io_list
))
653 inode_io_list_del(inode
);
655 inode_sb_list_del(inode
);
658 * Wait for flusher thread to be done with the inode so that filesystem
659 * does not start destroying it while writeback is still running. Since
660 * the inode has I_FREEING set, flusher thread won't start new work on
661 * the inode. We just have to wait for running writeback to finish.
663 inode_wait_for_writeback(inode
);
665 if (op
->evict_inode
) {
666 op
->evict_inode(inode
);
668 truncate_inode_pages_final(&inode
->i_data
);
671 if (S_ISCHR(inode
->i_mode
) && inode
->i_cdev
)
674 remove_inode_hash(inode
);
676 spin_lock(&inode
->i_lock
);
677 wake_up_bit(&inode
->i_state
, __I_NEW
);
678 BUG_ON(inode
->i_state
!= (I_FREEING
| I_CLEAR
));
679 spin_unlock(&inode
->i_lock
);
681 destroy_inode(inode
);
685 * dispose_list - dispose of the contents of a local list
686 * @head: the head of the list to free
688 * Dispose-list gets a local list with local inodes in it, so it doesn't
689 * need to worry about list corruption and SMP locks.
691 static void dispose_list(struct list_head
*head
)
693 while (!list_empty(head
)) {
696 inode
= list_first_entry(head
, struct inode
, i_lru
);
697 list_del_init(&inode
->i_lru
);
705 * evict_inodes - evict all evictable inodes for a superblock
706 * @sb: superblock to operate on
708 * Make sure that no inodes with zero refcount are retained. This is
709 * called by superblock shutdown after having SB_ACTIVE flag removed,
710 * so any inode reaching zero refcount during or after that call will
711 * be immediately evicted.
713 void evict_inodes(struct super_block
*sb
)
715 struct inode
*inode
, *next
;
719 spin_lock(&sb
->s_inode_list_lock
);
720 list_for_each_entry_safe(inode
, next
, &sb
->s_inodes
, i_sb_list
) {
721 if (atomic_read(&inode
->i_count
))
724 spin_lock(&inode
->i_lock
);
725 if (inode
->i_state
& (I_NEW
| I_FREEING
| I_WILL_FREE
)) {
726 spin_unlock(&inode
->i_lock
);
730 inode
->i_state
|= I_FREEING
;
731 inode_lru_list_del(inode
);
732 spin_unlock(&inode
->i_lock
);
733 list_add(&inode
->i_lru
, &dispose
);
736 * We can have a ton of inodes to evict at unmount time given
737 * enough memory, check to see if we need to go to sleep for a
738 * bit so we don't livelock.
740 if (need_resched()) {
741 spin_unlock(&sb
->s_inode_list_lock
);
743 dispose_list(&dispose
);
747 spin_unlock(&sb
->s_inode_list_lock
);
749 dispose_list(&dispose
);
751 EXPORT_SYMBOL_GPL(evict_inodes
);
754 * invalidate_inodes - attempt to free all inodes on a superblock
755 * @sb: superblock to operate on
757 * Attempts to free all inodes (including dirty inodes) for a given superblock.
759 void invalidate_inodes(struct super_block
*sb
)
761 struct inode
*inode
, *next
;
765 spin_lock(&sb
->s_inode_list_lock
);
766 list_for_each_entry_safe(inode
, next
, &sb
->s_inodes
, i_sb_list
) {
767 spin_lock(&inode
->i_lock
);
768 if (inode
->i_state
& (I_NEW
| I_FREEING
| I_WILL_FREE
)) {
769 spin_unlock(&inode
->i_lock
);
772 if (atomic_read(&inode
->i_count
)) {
773 spin_unlock(&inode
->i_lock
);
777 inode
->i_state
|= I_FREEING
;
778 inode_lru_list_del(inode
);
779 spin_unlock(&inode
->i_lock
);
780 list_add(&inode
->i_lru
, &dispose
);
781 if (need_resched()) {
782 spin_unlock(&sb
->s_inode_list_lock
);
784 dispose_list(&dispose
);
788 spin_unlock(&sb
->s_inode_list_lock
);
790 dispose_list(&dispose
);
794 * Isolate the inode from the LRU in preparation for freeing it.
796 * If the inode has the I_REFERENCED flag set, then it means that it has been
797 * used recently - the flag is set in iput_final(). When we encounter such an
798 * inode, clear the flag and move it to the back of the LRU so it gets another
799 * pass through the LRU before it gets reclaimed. This is necessary because of
800 * the fact we are doing lazy LRU updates to minimise lock contention so the
801 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
802 * with this flag set because they are the inodes that are out of order.
804 static enum lru_status
inode_lru_isolate(struct list_head
*item
,
805 struct list_lru_one
*lru
, spinlock_t
*lru_lock
, void *arg
)
807 struct list_head
*freeable
= arg
;
808 struct inode
*inode
= container_of(item
, struct inode
, i_lru
);
811 * We are inverting the lru lock/inode->i_lock here, so use a
812 * trylock. If we fail to get the lock, just skip it.
814 if (!spin_trylock(&inode
->i_lock
))
818 * Inodes can get referenced, redirtied, or repopulated while
819 * they're already on the LRU, and this can make them
820 * unreclaimable for a while. Remove them lazily here; iput,
821 * sync, or the last page cache deletion will requeue them.
823 if (atomic_read(&inode
->i_count
) ||
824 (inode
->i_state
& ~I_REFERENCED
) ||
825 !mapping_shrinkable(&inode
->i_data
)) {
826 list_lru_isolate(lru
, &inode
->i_lru
);
827 spin_unlock(&inode
->i_lock
);
828 this_cpu_dec(nr_unused
);
832 /* Recently referenced inodes get one more pass */
833 if (inode
->i_state
& I_REFERENCED
) {
834 inode
->i_state
&= ~I_REFERENCED
;
835 spin_unlock(&inode
->i_lock
);
840 * On highmem systems, mapping_shrinkable() permits dropping
841 * page cache in order to free up struct inodes: lowmem might
842 * be under pressure before the cache inside the highmem zone.
844 if (inode_has_buffers(inode
) || !mapping_empty(&inode
->i_data
)) {
846 spin_unlock(&inode
->i_lock
);
847 spin_unlock(lru_lock
);
848 if (remove_inode_buffers(inode
)) {
850 reap
= invalidate_mapping_pages(&inode
->i_data
, 0, -1);
851 if (current_is_kswapd())
852 __count_vm_events(KSWAPD_INODESTEAL
, reap
);
854 __count_vm_events(PGINODESTEAL
, reap
);
855 mm_account_reclaimed_pages(reap
);
862 WARN_ON(inode
->i_state
& I_NEW
);
863 inode
->i_state
|= I_FREEING
;
864 list_lru_isolate_move(lru
, &inode
->i_lru
, freeable
);
865 spin_unlock(&inode
->i_lock
);
867 this_cpu_dec(nr_unused
);
872 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
873 * This is called from the superblock shrinker function with a number of inodes
874 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
875 * then are freed outside inode_lock by dispose_list().
877 long prune_icache_sb(struct super_block
*sb
, struct shrink_control
*sc
)
882 freed
= list_lru_shrink_walk(&sb
->s_inode_lru
, sc
,
883 inode_lru_isolate
, &freeable
);
884 dispose_list(&freeable
);
888 static void __wait_on_freeing_inode(struct inode
*inode
);
890 * Called with the inode lock held.
892 static struct inode
*find_inode(struct super_block
*sb
,
893 struct hlist_head
*head
,
894 int (*test
)(struct inode
*, void *),
897 struct inode
*inode
= NULL
;
900 hlist_for_each_entry(inode
, head
, i_hash
) {
901 if (inode
->i_sb
!= sb
)
903 if (!test(inode
, data
))
905 spin_lock(&inode
->i_lock
);
906 if (inode
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
907 __wait_on_freeing_inode(inode
);
910 if (unlikely(inode
->i_state
& I_CREATING
)) {
911 spin_unlock(&inode
->i_lock
);
912 return ERR_PTR(-ESTALE
);
915 spin_unlock(&inode
->i_lock
);
922 * find_inode_fast is the fast path version of find_inode, see the comment at
923 * iget_locked for details.
925 static struct inode
*find_inode_fast(struct super_block
*sb
,
926 struct hlist_head
*head
, unsigned long ino
)
928 struct inode
*inode
= NULL
;
931 hlist_for_each_entry(inode
, head
, i_hash
) {
932 if (inode
->i_ino
!= ino
)
934 if (inode
->i_sb
!= sb
)
936 spin_lock(&inode
->i_lock
);
937 if (inode
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
938 __wait_on_freeing_inode(inode
);
941 if (unlikely(inode
->i_state
& I_CREATING
)) {
942 spin_unlock(&inode
->i_lock
);
943 return ERR_PTR(-ESTALE
);
946 spin_unlock(&inode
->i_lock
);
953 * Each cpu owns a range of LAST_INO_BATCH numbers.
954 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
955 * to renew the exhausted range.
957 * This does not significantly increase overflow rate because every CPU can
958 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
959 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
960 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
961 * overflow rate by 2x, which does not seem too significant.
963 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
964 * error if st_ino won't fit in target struct field. Use 32bit counter
965 * here to attempt to avoid that.
967 #define LAST_INO_BATCH 1024
968 static DEFINE_PER_CPU(unsigned int, last_ino
);
970 unsigned int get_next_ino(void)
972 unsigned int *p
= &get_cpu_var(last_ino
);
973 unsigned int res
= *p
;
976 if (unlikely((res
& (LAST_INO_BATCH
-1)) == 0)) {
977 static atomic_t shared_last_ino
;
978 int next
= atomic_add_return(LAST_INO_BATCH
, &shared_last_ino
);
980 res
= next
- LAST_INO_BATCH
;
985 /* get_next_ino should not provide a 0 inode number */
989 put_cpu_var(last_ino
);
992 EXPORT_SYMBOL(get_next_ino
);
995 * new_inode_pseudo - obtain an inode
998 * Allocates a new inode for given superblock.
999 * Inode wont be chained in superblock s_inodes list
1001 * - fs can't be unmount
1002 * - quotas, fsnotify, writeback can't work
1004 struct inode
*new_inode_pseudo(struct super_block
*sb
)
1006 struct inode
*inode
= alloc_inode(sb
);
1009 spin_lock(&inode
->i_lock
);
1011 spin_unlock(&inode
->i_lock
);
1017 * new_inode - obtain an inode
1020 * Allocates a new inode for given superblock. The default gfp_mask
1021 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
1022 * If HIGHMEM pages are unsuitable or it is known that pages allocated
1023 * for the page cache are not reclaimable or migratable,
1024 * mapping_set_gfp_mask() must be called with suitable flags on the
1025 * newly created inode's mapping
1028 struct inode
*new_inode(struct super_block
*sb
)
1030 struct inode
*inode
;
1032 inode
= new_inode_pseudo(sb
);
1034 inode_sb_list_add(inode
);
1037 EXPORT_SYMBOL(new_inode
);
1039 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1040 void lockdep_annotate_inode_mutex_key(struct inode
*inode
)
1042 if (S_ISDIR(inode
->i_mode
)) {
1043 struct file_system_type
*type
= inode
->i_sb
->s_type
;
1045 /* Set new key only if filesystem hasn't already changed it */
1046 if (lockdep_match_class(&inode
->i_rwsem
, &type
->i_mutex_key
)) {
1048 * ensure nobody is actually holding i_mutex
1050 // mutex_destroy(&inode->i_mutex);
1051 init_rwsem(&inode
->i_rwsem
);
1052 lockdep_set_class(&inode
->i_rwsem
,
1053 &type
->i_mutex_dir_key
);
1057 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key
);
1061 * unlock_new_inode - clear the I_NEW state and wake up any waiters
1062 * @inode: new inode to unlock
1064 * Called when the inode is fully initialised to clear the new state of the
1065 * inode and wake up anyone waiting for the inode to finish initialisation.
1067 void unlock_new_inode(struct inode
*inode
)
1069 lockdep_annotate_inode_mutex_key(inode
);
1070 spin_lock(&inode
->i_lock
);
1071 WARN_ON(!(inode
->i_state
& I_NEW
));
1072 inode
->i_state
&= ~I_NEW
& ~I_CREATING
;
1074 wake_up_bit(&inode
->i_state
, __I_NEW
);
1075 spin_unlock(&inode
->i_lock
);
1077 EXPORT_SYMBOL(unlock_new_inode
);
1079 void discard_new_inode(struct inode
*inode
)
1081 lockdep_annotate_inode_mutex_key(inode
);
1082 spin_lock(&inode
->i_lock
);
1083 WARN_ON(!(inode
->i_state
& I_NEW
));
1084 inode
->i_state
&= ~I_NEW
;
1086 wake_up_bit(&inode
->i_state
, __I_NEW
);
1087 spin_unlock(&inode
->i_lock
);
1090 EXPORT_SYMBOL(discard_new_inode
);
1093 * lock_two_inodes - lock two inodes (may be regular files but also dirs)
1095 * Lock any non-NULL argument. The caller must make sure that if he is passing
1096 * in two directories, one is not ancestor of the other. Zero, one or two
1097 * objects may be locked by this function.
1099 * @inode1: first inode to lock
1100 * @inode2: second inode to lock
1101 * @subclass1: inode lock subclass for the first lock obtained
1102 * @subclass2: inode lock subclass for the second lock obtained
1104 void lock_two_inodes(struct inode
*inode1
, struct inode
*inode2
,
1105 unsigned subclass1
, unsigned subclass2
)
1107 if (!inode1
|| !inode2
) {
1109 * Make sure @subclass1 will be used for the acquired lock.
1110 * This is not strictly necessary (no current caller cares) but
1111 * let's keep things consistent.
1114 swap(inode1
, inode2
);
1119 * If one object is directory and the other is not, we must make sure
1120 * to lock directory first as the other object may be its child.
1122 if (S_ISDIR(inode2
->i_mode
) == S_ISDIR(inode1
->i_mode
)) {
1123 if (inode1
> inode2
)
1124 swap(inode1
, inode2
);
1125 } else if (!S_ISDIR(inode1
->i_mode
))
1126 swap(inode1
, inode2
);
1129 inode_lock_nested(inode1
, subclass1
);
1130 if (inode2
&& inode2
!= inode1
)
1131 inode_lock_nested(inode2
, subclass2
);
1135 * lock_two_nondirectories - take two i_mutexes on non-directory objects
1137 * Lock any non-NULL argument. Passed objects must not be directories.
1138 * Zero, one or two objects may be locked by this function.
1140 * @inode1: first inode to lock
1141 * @inode2: second inode to lock
1143 void lock_two_nondirectories(struct inode
*inode1
, struct inode
*inode2
)
1146 WARN_ON_ONCE(S_ISDIR(inode1
->i_mode
));
1148 WARN_ON_ONCE(S_ISDIR(inode2
->i_mode
));
1149 lock_two_inodes(inode1
, inode2
, I_MUTEX_NORMAL
, I_MUTEX_NONDIR2
);
1151 EXPORT_SYMBOL(lock_two_nondirectories
);
1154 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1155 * @inode1: first inode to unlock
1156 * @inode2: second inode to unlock
1158 void unlock_two_nondirectories(struct inode
*inode1
, struct inode
*inode2
)
1161 WARN_ON_ONCE(S_ISDIR(inode1
->i_mode
));
1162 inode_unlock(inode1
);
1164 if (inode2
&& inode2
!= inode1
) {
1165 WARN_ON_ONCE(S_ISDIR(inode2
->i_mode
));
1166 inode_unlock(inode2
);
1169 EXPORT_SYMBOL(unlock_two_nondirectories
);
1172 * inode_insert5 - obtain an inode from a mounted file system
1173 * @inode: pre-allocated inode to use for insert to cache
1174 * @hashval: hash value (usually inode number) to get
1175 * @test: callback used for comparisons between inodes
1176 * @set: callback used to initialize a new struct inode
1177 * @data: opaque data pointer to pass to @test and @set
1179 * Search for the inode specified by @hashval and @data in the inode cache,
1180 * and if present it is return it with an increased reference count. This is
1181 * a variant of iget5_locked() for callers that don't want to fail on memory
1182 * allocation of inode.
1184 * If the inode is not in cache, insert the pre-allocated inode to cache and
1185 * return it locked, hashed, and with the I_NEW flag set. The file system gets
1186 * to fill it in before unlocking it via unlock_new_inode().
1188 * Note both @test and @set are called with the inode_hash_lock held, so can't
1191 struct inode
*inode_insert5(struct inode
*inode
, unsigned long hashval
,
1192 int (*test
)(struct inode
*, void *),
1193 int (*set
)(struct inode
*, void *), void *data
)
1195 struct hlist_head
*head
= inode_hashtable
+ hash(inode
->i_sb
, hashval
);
1199 spin_lock(&inode_hash_lock
);
1200 old
= find_inode(inode
->i_sb
, head
, test
, data
);
1201 if (unlikely(old
)) {
1203 * Uhhuh, somebody else created the same inode under us.
1204 * Use the old inode instead of the preallocated one.
1206 spin_unlock(&inode_hash_lock
);
1210 if (unlikely(inode_unhashed(old
))) {
1217 if (set
&& unlikely(set(inode
, data
))) {
1223 * Return the locked inode with I_NEW set, the
1224 * caller is responsible for filling in the contents
1226 spin_lock(&inode
->i_lock
);
1227 inode
->i_state
|= I_NEW
;
1228 hlist_add_head_rcu(&inode
->i_hash
, head
);
1229 spin_unlock(&inode
->i_lock
);
1232 * Add inode to the sb list if it's not already. It has I_NEW at this
1233 * point, so it should be safe to test i_sb_list locklessly.
1235 if (list_empty(&inode
->i_sb_list
))
1236 inode_sb_list_add(inode
);
1238 spin_unlock(&inode_hash_lock
);
1242 EXPORT_SYMBOL(inode_insert5
);
1245 * iget5_locked - obtain an inode from a mounted file system
1246 * @sb: super block of file system
1247 * @hashval: hash value (usually inode number) to get
1248 * @test: callback used for comparisons between inodes
1249 * @set: callback used to initialize a new struct inode
1250 * @data: opaque data pointer to pass to @test and @set
1252 * Search for the inode specified by @hashval and @data in the inode cache,
1253 * and if present it is return it with an increased reference count. This is
1254 * a generalized version of iget_locked() for file systems where the inode
1255 * number is not sufficient for unique identification of an inode.
1257 * If the inode is not in cache, allocate a new inode and return it locked,
1258 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1259 * before unlocking it via unlock_new_inode().
1261 * Note both @test and @set are called with the inode_hash_lock held, so can't
1264 struct inode
*iget5_locked(struct super_block
*sb
, unsigned long hashval
,
1265 int (*test
)(struct inode
*, void *),
1266 int (*set
)(struct inode
*, void *), void *data
)
1268 struct inode
*inode
= ilookup5(sb
, hashval
, test
, data
);
1271 struct inode
*new = alloc_inode(sb
);
1275 inode
= inode_insert5(new, hashval
, test
, set
, data
);
1276 if (unlikely(inode
!= new))
1282 EXPORT_SYMBOL(iget5_locked
);
1285 * iget_locked - obtain an inode from a mounted file system
1286 * @sb: super block of file system
1287 * @ino: inode number to get
1289 * Search for the inode specified by @ino in the inode cache and if present
1290 * return it with an increased reference count. This is for file systems
1291 * where the inode number is sufficient for unique identification of an inode.
1293 * If the inode is not in cache, allocate a new inode and return it locked,
1294 * hashed, and with the I_NEW flag set. The file system gets to fill it in
1295 * before unlocking it via unlock_new_inode().
1297 struct inode
*iget_locked(struct super_block
*sb
, unsigned long ino
)
1299 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1300 struct inode
*inode
;
1302 spin_lock(&inode_hash_lock
);
1303 inode
= find_inode_fast(sb
, head
, ino
);
1304 spin_unlock(&inode_hash_lock
);
1308 wait_on_inode(inode
);
1309 if (unlikely(inode_unhashed(inode
))) {
1316 inode
= alloc_inode(sb
);
1320 spin_lock(&inode_hash_lock
);
1321 /* We released the lock, so.. */
1322 old
= find_inode_fast(sb
, head
, ino
);
1325 spin_lock(&inode
->i_lock
);
1326 inode
->i_state
= I_NEW
;
1327 hlist_add_head_rcu(&inode
->i_hash
, head
);
1328 spin_unlock(&inode
->i_lock
);
1329 inode_sb_list_add(inode
);
1330 spin_unlock(&inode_hash_lock
);
1332 /* Return the locked inode with I_NEW set, the
1333 * caller is responsible for filling in the contents
1339 * Uhhuh, somebody else created the same inode under
1340 * us. Use the old inode instead of the one we just
1343 spin_unlock(&inode_hash_lock
);
1344 destroy_inode(inode
);
1348 wait_on_inode(inode
);
1349 if (unlikely(inode_unhashed(inode
))) {
1356 EXPORT_SYMBOL(iget_locked
);
1359 * search the inode cache for a matching inode number.
1360 * If we find one, then the inode number we are trying to
1361 * allocate is not unique and so we should not use it.
1363 * Returns 1 if the inode number is unique, 0 if it is not.
1365 static int test_inode_iunique(struct super_block
*sb
, unsigned long ino
)
1367 struct hlist_head
*b
= inode_hashtable
+ hash(sb
, ino
);
1368 struct inode
*inode
;
1370 hlist_for_each_entry_rcu(inode
, b
, i_hash
) {
1371 if (inode
->i_ino
== ino
&& inode
->i_sb
== sb
)
1378 * iunique - get a unique inode number
1380 * @max_reserved: highest reserved inode number
1382 * Obtain an inode number that is unique on the system for a given
1383 * superblock. This is used by file systems that have no natural
1384 * permanent inode numbering system. An inode number is returned that
1385 * is higher than the reserved limit but unique.
1388 * With a large number of inodes live on the file system this function
1389 * currently becomes quite slow.
1391 ino_t
iunique(struct super_block
*sb
, ino_t max_reserved
)
1394 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1395 * error if st_ino won't fit in target struct field. Use 32bit counter
1396 * here to attempt to avoid that.
1398 static DEFINE_SPINLOCK(iunique_lock
);
1399 static unsigned int counter
;
1403 spin_lock(&iunique_lock
);
1405 if (counter
<= max_reserved
)
1406 counter
= max_reserved
+ 1;
1408 } while (!test_inode_iunique(sb
, res
));
1409 spin_unlock(&iunique_lock
);
1414 EXPORT_SYMBOL(iunique
);
1416 struct inode
*igrab(struct inode
*inode
)
1418 spin_lock(&inode
->i_lock
);
1419 if (!(inode
->i_state
& (I_FREEING
|I_WILL_FREE
))) {
1421 spin_unlock(&inode
->i_lock
);
1423 spin_unlock(&inode
->i_lock
);
1425 * Handle the case where s_op->clear_inode is not been
1426 * called yet, and somebody is calling igrab
1427 * while the inode is getting freed.
1433 EXPORT_SYMBOL(igrab
);
1436 * ilookup5_nowait - search for an inode in the inode cache
1437 * @sb: super block of file system to search
1438 * @hashval: hash value (usually inode number) to search for
1439 * @test: callback used for comparisons between inodes
1440 * @data: opaque data pointer to pass to @test
1442 * Search for the inode specified by @hashval and @data in the inode cache.
1443 * If the inode is in the cache, the inode is returned with an incremented
1446 * Note: I_NEW is not waited upon so you have to be very careful what you do
1447 * with the returned inode. You probably should be using ilookup5() instead.
1449 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1451 struct inode
*ilookup5_nowait(struct super_block
*sb
, unsigned long hashval
,
1452 int (*test
)(struct inode
*, void *), void *data
)
1454 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1455 struct inode
*inode
;
1457 spin_lock(&inode_hash_lock
);
1458 inode
= find_inode(sb
, head
, test
, data
);
1459 spin_unlock(&inode_hash_lock
);
1461 return IS_ERR(inode
) ? NULL
: inode
;
1463 EXPORT_SYMBOL(ilookup5_nowait
);
1466 * ilookup5 - search for an inode in the inode cache
1467 * @sb: super block of file system to search
1468 * @hashval: hash value (usually inode number) to search for
1469 * @test: callback used for comparisons between inodes
1470 * @data: opaque data pointer to pass to @test
1472 * Search for the inode specified by @hashval and @data in the inode cache,
1473 * and if the inode is in the cache, return the inode with an incremented
1474 * reference count. Waits on I_NEW before returning the inode.
1475 * returned with an incremented reference count.
1477 * This is a generalized version of ilookup() for file systems where the
1478 * inode number is not sufficient for unique identification of an inode.
1480 * Note: @test is called with the inode_hash_lock held, so can't sleep.
1482 struct inode
*ilookup5(struct super_block
*sb
, unsigned long hashval
,
1483 int (*test
)(struct inode
*, void *), void *data
)
1485 struct inode
*inode
;
1487 inode
= ilookup5_nowait(sb
, hashval
, test
, data
);
1489 wait_on_inode(inode
);
1490 if (unlikely(inode_unhashed(inode
))) {
1497 EXPORT_SYMBOL(ilookup5
);
1500 * ilookup - search for an inode in the inode cache
1501 * @sb: super block of file system to search
1502 * @ino: inode number to search for
1504 * Search for the inode @ino in the inode cache, and if the inode is in the
1505 * cache, the inode is returned with an incremented reference count.
1507 struct inode
*ilookup(struct super_block
*sb
, unsigned long ino
)
1509 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1510 struct inode
*inode
;
1512 spin_lock(&inode_hash_lock
);
1513 inode
= find_inode_fast(sb
, head
, ino
);
1514 spin_unlock(&inode_hash_lock
);
1519 wait_on_inode(inode
);
1520 if (unlikely(inode_unhashed(inode
))) {
1527 EXPORT_SYMBOL(ilookup
);
1530 * find_inode_nowait - find an inode in the inode cache
1531 * @sb: super block of file system to search
1532 * @hashval: hash value (usually inode number) to search for
1533 * @match: callback used for comparisons between inodes
1534 * @data: opaque data pointer to pass to @match
1536 * Search for the inode specified by @hashval and @data in the inode
1537 * cache, where the helper function @match will return 0 if the inode
1538 * does not match, 1 if the inode does match, and -1 if the search
1539 * should be stopped. The @match function must be responsible for
1540 * taking the i_lock spin_lock and checking i_state for an inode being
1541 * freed or being initialized, and incrementing the reference count
1542 * before returning 1. It also must not sleep, since it is called with
1543 * the inode_hash_lock spinlock held.
1545 * This is a even more generalized version of ilookup5() when the
1546 * function must never block --- find_inode() can block in
1547 * __wait_on_freeing_inode() --- or when the caller can not increment
1548 * the reference count because the resulting iput() might cause an
1549 * inode eviction. The tradeoff is that the @match funtion must be
1550 * very carefully implemented.
1552 struct inode
*find_inode_nowait(struct super_block
*sb
,
1553 unsigned long hashval
,
1554 int (*match
)(struct inode
*, unsigned long,
1558 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1559 struct inode
*inode
, *ret_inode
= NULL
;
1562 spin_lock(&inode_hash_lock
);
1563 hlist_for_each_entry(inode
, head
, i_hash
) {
1564 if (inode
->i_sb
!= sb
)
1566 mval
= match(inode
, hashval
, data
);
1574 spin_unlock(&inode_hash_lock
);
1577 EXPORT_SYMBOL(find_inode_nowait
);
1580 * find_inode_rcu - find an inode in the inode cache
1581 * @sb: Super block of file system to search
1582 * @hashval: Key to hash
1583 * @test: Function to test match on an inode
1584 * @data: Data for test function
1586 * Search for the inode specified by @hashval and @data in the inode cache,
1587 * where the helper function @test will return 0 if the inode does not match
1588 * and 1 if it does. The @test function must be responsible for taking the
1589 * i_lock spin_lock and checking i_state for an inode being freed or being
1592 * If successful, this will return the inode for which the @test function
1593 * returned 1 and NULL otherwise.
1595 * The @test function is not permitted to take a ref on any inode presented.
1596 * It is also not permitted to sleep.
1598 * The caller must hold the RCU read lock.
1600 struct inode
*find_inode_rcu(struct super_block
*sb
, unsigned long hashval
,
1601 int (*test
)(struct inode
*, void *), void *data
)
1603 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, hashval
);
1604 struct inode
*inode
;
1606 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1607 "suspicious find_inode_rcu() usage");
1609 hlist_for_each_entry_rcu(inode
, head
, i_hash
) {
1610 if (inode
->i_sb
== sb
&&
1611 !(READ_ONCE(inode
->i_state
) & (I_FREEING
| I_WILL_FREE
)) &&
1617 EXPORT_SYMBOL(find_inode_rcu
);
1620 * find_inode_by_ino_rcu - Find an inode in the inode cache
1621 * @sb: Super block of file system to search
1622 * @ino: The inode number to match
1624 * Search for the inode specified by @hashval and @data in the inode cache,
1625 * where the helper function @test will return 0 if the inode does not match
1626 * and 1 if it does. The @test function must be responsible for taking the
1627 * i_lock spin_lock and checking i_state for an inode being freed or being
1630 * If successful, this will return the inode for which the @test function
1631 * returned 1 and NULL otherwise.
1633 * The @test function is not permitted to take a ref on any inode presented.
1634 * It is also not permitted to sleep.
1636 * The caller must hold the RCU read lock.
1638 struct inode
*find_inode_by_ino_rcu(struct super_block
*sb
,
1641 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1642 struct inode
*inode
;
1644 RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1645 "suspicious find_inode_by_ino_rcu() usage");
1647 hlist_for_each_entry_rcu(inode
, head
, i_hash
) {
1648 if (inode
->i_ino
== ino
&&
1649 inode
->i_sb
== sb
&&
1650 !(READ_ONCE(inode
->i_state
) & (I_FREEING
| I_WILL_FREE
)))
1655 EXPORT_SYMBOL(find_inode_by_ino_rcu
);
1657 int insert_inode_locked(struct inode
*inode
)
1659 struct super_block
*sb
= inode
->i_sb
;
1660 ino_t ino
= inode
->i_ino
;
1661 struct hlist_head
*head
= inode_hashtable
+ hash(sb
, ino
);
1664 struct inode
*old
= NULL
;
1665 spin_lock(&inode_hash_lock
);
1666 hlist_for_each_entry(old
, head
, i_hash
) {
1667 if (old
->i_ino
!= ino
)
1669 if (old
->i_sb
!= sb
)
1671 spin_lock(&old
->i_lock
);
1672 if (old
->i_state
& (I_FREEING
|I_WILL_FREE
)) {
1673 spin_unlock(&old
->i_lock
);
1679 spin_lock(&inode
->i_lock
);
1680 inode
->i_state
|= I_NEW
| I_CREATING
;
1681 hlist_add_head_rcu(&inode
->i_hash
, head
);
1682 spin_unlock(&inode
->i_lock
);
1683 spin_unlock(&inode_hash_lock
);
1686 if (unlikely(old
->i_state
& I_CREATING
)) {
1687 spin_unlock(&old
->i_lock
);
1688 spin_unlock(&inode_hash_lock
);
1692 spin_unlock(&old
->i_lock
);
1693 spin_unlock(&inode_hash_lock
);
1695 if (unlikely(!inode_unhashed(old
))) {
1702 EXPORT_SYMBOL(insert_inode_locked
);
1704 int insert_inode_locked4(struct inode
*inode
, unsigned long hashval
,
1705 int (*test
)(struct inode
*, void *), void *data
)
1709 inode
->i_state
|= I_CREATING
;
1710 old
= inode_insert5(inode
, hashval
, test
, NULL
, data
);
1718 EXPORT_SYMBOL(insert_inode_locked4
);
1721 int generic_delete_inode(struct inode
*inode
)
1725 EXPORT_SYMBOL(generic_delete_inode
);
1728 * Called when we're dropping the last reference
1731 * Call the FS "drop_inode()" function, defaulting to
1732 * the legacy UNIX filesystem behaviour. If it tells
1733 * us to evict inode, do so. Otherwise, retain inode
1734 * in cache if fs is alive, sync and evict if fs is
1737 static void iput_final(struct inode
*inode
)
1739 struct super_block
*sb
= inode
->i_sb
;
1740 const struct super_operations
*op
= inode
->i_sb
->s_op
;
1741 unsigned long state
;
1744 WARN_ON(inode
->i_state
& I_NEW
);
1747 drop
= op
->drop_inode(inode
);
1749 drop
= generic_drop_inode(inode
);
1752 !(inode
->i_state
& I_DONTCACHE
) &&
1753 (sb
->s_flags
& SB_ACTIVE
)) {
1754 __inode_add_lru(inode
, true);
1755 spin_unlock(&inode
->i_lock
);
1759 state
= inode
->i_state
;
1761 WRITE_ONCE(inode
->i_state
, state
| I_WILL_FREE
);
1762 spin_unlock(&inode
->i_lock
);
1764 write_inode_now(inode
, 1);
1766 spin_lock(&inode
->i_lock
);
1767 state
= inode
->i_state
;
1768 WARN_ON(state
& I_NEW
);
1769 state
&= ~I_WILL_FREE
;
1772 WRITE_ONCE(inode
->i_state
, state
| I_FREEING
);
1773 if (!list_empty(&inode
->i_lru
))
1774 inode_lru_list_del(inode
);
1775 spin_unlock(&inode
->i_lock
);
1781 * iput - put an inode
1782 * @inode: inode to put
1784 * Puts an inode, dropping its usage count. If the inode use count hits
1785 * zero, the inode is then freed and may also be destroyed.
1787 * Consequently, iput() can sleep.
1789 void iput(struct inode
*inode
)
1793 BUG_ON(inode
->i_state
& I_CLEAR
);
1795 if (atomic_dec_and_lock(&inode
->i_count
, &inode
->i_lock
)) {
1796 if (inode
->i_nlink
&& (inode
->i_state
& I_DIRTY_TIME
)) {
1797 atomic_inc(&inode
->i_count
);
1798 spin_unlock(&inode
->i_lock
);
1799 trace_writeback_lazytime_iput(inode
);
1800 mark_inode_dirty_sync(inode
);
1806 EXPORT_SYMBOL(iput
);
1810 * bmap - find a block number in a file
1811 * @inode: inode owning the block number being requested
1812 * @block: pointer containing the block to find
1814 * Replaces the value in ``*block`` with the block number on the device holding
1815 * corresponding to the requested block number in the file.
1816 * That is, asked for block 4 of inode 1 the function will replace the
1817 * 4 in ``*block``, with disk block relative to the disk start that holds that
1818 * block of the file.
1820 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1821 * hole, returns 0 and ``*block`` is also set to 0.
1823 int bmap(struct inode
*inode
, sector_t
*block
)
1825 if (!inode
->i_mapping
->a_ops
->bmap
)
1828 *block
= inode
->i_mapping
->a_ops
->bmap(inode
->i_mapping
, *block
);
1831 EXPORT_SYMBOL(bmap
);
1835 * With relative atime, only update atime if the previous atime is
1836 * earlier than or equal to either the ctime or mtime,
1837 * or if at least a day has passed since the last atime update.
1839 static int relatime_need_update(struct vfsmount
*mnt
, struct inode
*inode
,
1840 struct timespec64 now
)
1842 struct timespec64 atime
, mtime
, ctime
;
1844 if (!(mnt
->mnt_flags
& MNT_RELATIME
))
1847 * Is mtime younger than or equal to atime? If yes, update atime:
1849 atime
= inode_get_atime(inode
);
1850 mtime
= inode_get_mtime(inode
);
1851 if (timespec64_compare(&mtime
, &atime
) >= 0)
1854 * Is ctime younger than or equal to atime? If yes, update atime:
1856 ctime
= inode_get_ctime(inode
);
1857 if (timespec64_compare(&ctime
, &atime
) >= 0)
1861 * Is the previous atime value older than a day? If yes,
1864 if ((long)(now
.tv_sec
- atime
.tv_sec
) >= 24*60*60)
1867 * Good, we can skip the atime update:
1873 * inode_update_timestamps - update the timestamps on the inode
1874 * @inode: inode to be updated
1875 * @flags: S_* flags that needed to be updated
1877 * The update_time function is called when an inode's timestamps need to be
1878 * updated for a read or write operation. This function handles updating the
1879 * actual timestamps. It's up to the caller to ensure that the inode is marked
1880 * dirty appropriately.
1882 * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
1883 * attempt to update all three of them. S_ATIME updates can be handled
1884 * independently of the rest.
1886 * Returns a set of S_* flags indicating which values changed.
1888 int inode_update_timestamps(struct inode
*inode
, int flags
)
1891 struct timespec64 now
;
1893 if (flags
& (S_MTIME
|S_CTIME
|S_VERSION
)) {
1894 struct timespec64 ctime
= inode_get_ctime(inode
);
1895 struct timespec64 mtime
= inode_get_mtime(inode
);
1897 now
= inode_set_ctime_current(inode
);
1898 if (!timespec64_equal(&now
, &ctime
))
1900 if (!timespec64_equal(&now
, &mtime
)) {
1901 inode_set_mtime_to_ts(inode
, now
);
1904 if (IS_I_VERSION(inode
) && inode_maybe_inc_iversion(inode
, updated
))
1905 updated
|= S_VERSION
;
1907 now
= current_time(inode
);
1910 if (flags
& S_ATIME
) {
1911 struct timespec64 atime
= inode_get_atime(inode
);
1913 if (!timespec64_equal(&now
, &atime
)) {
1914 inode_set_atime_to_ts(inode
, now
);
1920 EXPORT_SYMBOL(inode_update_timestamps
);
1923 * generic_update_time - update the timestamps on the inode
1924 * @inode: inode to be updated
1925 * @flags: S_* flags that needed to be updated
1927 * The update_time function is called when an inode's timestamps need to be
1928 * updated for a read or write operation. In the case where any of S_MTIME, S_CTIME,
1929 * or S_VERSION need to be updated we attempt to update all three of them. S_ATIME
1930 * updates can be handled done independently of the rest.
1932 * Returns a S_* mask indicating which fields were updated.
1934 int generic_update_time(struct inode
*inode
, int flags
)
1936 int updated
= inode_update_timestamps(inode
, flags
);
1937 int dirty_flags
= 0;
1939 if (updated
& (S_ATIME
|S_MTIME
|S_CTIME
))
1940 dirty_flags
= inode
->i_sb
->s_flags
& SB_LAZYTIME
? I_DIRTY_TIME
: I_DIRTY_SYNC
;
1941 if (updated
& S_VERSION
)
1942 dirty_flags
|= I_DIRTY_SYNC
;
1943 __mark_inode_dirty(inode
, dirty_flags
);
1946 EXPORT_SYMBOL(generic_update_time
);
1949 * This does the actual work of updating an inodes time or version. Must have
1950 * had called mnt_want_write() before calling this.
1952 int inode_update_time(struct inode
*inode
, int flags
)
1954 if (inode
->i_op
->update_time
)
1955 return inode
->i_op
->update_time(inode
, flags
);
1956 generic_update_time(inode
, flags
);
1959 EXPORT_SYMBOL(inode_update_time
);
1962 * atime_needs_update - update the access time
1963 * @path: the &struct path to update
1964 * @inode: inode to update
1966 * Update the accessed time on an inode and mark it for writeback.
1967 * This function automatically handles read only file systems and media,
1968 * as well as the "noatime" flag and inode specific "noatime" markers.
1970 bool atime_needs_update(const struct path
*path
, struct inode
*inode
)
1972 struct vfsmount
*mnt
= path
->mnt
;
1973 struct timespec64 now
, atime
;
1975 if (inode
->i_flags
& S_NOATIME
)
1978 /* Atime updates will likely cause i_uid and i_gid to be written
1979 * back improprely if their true value is unknown to the vfs.
1981 if (HAS_UNMAPPED_ID(mnt_idmap(mnt
), inode
))
1984 if (IS_NOATIME(inode
))
1986 if ((inode
->i_sb
->s_flags
& SB_NODIRATIME
) && S_ISDIR(inode
->i_mode
))
1989 if (mnt
->mnt_flags
& MNT_NOATIME
)
1991 if ((mnt
->mnt_flags
& MNT_NODIRATIME
) && S_ISDIR(inode
->i_mode
))
1994 now
= current_time(inode
);
1996 if (!relatime_need_update(mnt
, inode
, now
))
1999 atime
= inode_get_atime(inode
);
2000 if (timespec64_equal(&atime
, &now
))
2006 void touch_atime(const struct path
*path
)
2008 struct vfsmount
*mnt
= path
->mnt
;
2009 struct inode
*inode
= d_inode(path
->dentry
);
2011 if (!atime_needs_update(path
, inode
))
2014 if (!sb_start_write_trylock(inode
->i_sb
))
2017 if (mnt_get_write_access(mnt
) != 0)
2020 * File systems can error out when updating inodes if they need to
2021 * allocate new space to modify an inode (such is the case for
2022 * Btrfs), but since we touch atime while walking down the path we
2023 * really don't care if we failed to update the atime of the file,
2024 * so just ignore the return value.
2025 * We may also fail on filesystems that have the ability to make parts
2026 * of the fs read only, e.g. subvolumes in Btrfs.
2028 inode_update_time(inode
, S_ATIME
);
2029 mnt_put_write_access(mnt
);
2031 sb_end_write(inode
->i_sb
);
2033 EXPORT_SYMBOL(touch_atime
);
2036 * Return mask of changes for notify_change() that need to be done as a
2037 * response to write or truncate. Return 0 if nothing has to be changed.
2038 * Negative value on error (change should be denied).
2040 int dentry_needs_remove_privs(struct mnt_idmap
*idmap
,
2041 struct dentry
*dentry
)
2043 struct inode
*inode
= d_inode(dentry
);
2047 if (IS_NOSEC(inode
))
2050 mask
= setattr_should_drop_suidgid(idmap
, inode
);
2051 ret
= security_inode_need_killpriv(dentry
);
2055 mask
|= ATTR_KILL_PRIV
;
2059 static int __remove_privs(struct mnt_idmap
*idmap
,
2060 struct dentry
*dentry
, int kill
)
2062 struct iattr newattrs
;
2064 newattrs
.ia_valid
= ATTR_FORCE
| kill
;
2066 * Note we call this on write, so notify_change will not
2067 * encounter any conflicting delegations:
2069 return notify_change(idmap
, dentry
, &newattrs
, NULL
);
2072 static int __file_remove_privs(struct file
*file
, unsigned int flags
)
2074 struct dentry
*dentry
= file_dentry(file
);
2075 struct inode
*inode
= file_inode(file
);
2079 if (IS_NOSEC(inode
) || !S_ISREG(inode
->i_mode
))
2082 kill
= dentry_needs_remove_privs(file_mnt_idmap(file
), dentry
);
2087 if (flags
& IOCB_NOWAIT
)
2090 error
= __remove_privs(file_mnt_idmap(file
), dentry
, kill
);
2094 inode_has_no_xattr(inode
);
2099 * file_remove_privs - remove special file privileges (suid, capabilities)
2100 * @file: file to remove privileges from
2102 * When file is modified by a write or truncation ensure that special
2103 * file privileges are removed.
2105 * Return: 0 on success, negative errno on failure.
2107 int file_remove_privs(struct file
*file
)
2109 return __file_remove_privs(file
, 0);
2111 EXPORT_SYMBOL(file_remove_privs
);
2113 static int inode_needs_update_time(struct inode
*inode
)
2116 struct timespec64 now
= current_time(inode
);
2117 struct timespec64 ts
;
2119 /* First try to exhaust all avenues to not sync */
2120 if (IS_NOCMTIME(inode
))
2123 ts
= inode_get_mtime(inode
);
2124 if (!timespec64_equal(&ts
, &now
))
2127 ts
= inode_get_ctime(inode
);
2128 if (!timespec64_equal(&ts
, &now
))
2131 if (IS_I_VERSION(inode
) && inode_iversion_need_inc(inode
))
2132 sync_it
|= S_VERSION
;

static int __file_update_time(struct file *file, int sync_mode)
{
	int ret = 0;
	struct inode *inode = file_inode(file);

	/* try to update time settings */
	if (!mnt_get_write_access_file(file)) {
		ret = inode_update_time(inode, sync_mode);
		mnt_put_write_access_file(file);
	}

	return ret;
}

/**
 * file_update_time - update mtime and ctime
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback. Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the S_NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server. This can return an error for filesystems that need to
 * allocate space in order to update an inode.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_update_time(struct file *file)
{
	int ret;
	struct inode *inode = file_inode(file);

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;

	return __file_update_time(file, ret);
}
EXPORT_SYMBOL(file_update_time);

/**
 * file_modified_flags - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 * @flags: kiocb flags
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * If IOCB_NOWAIT is set, special file privileges will not be removed and
 * time settings will not be updated. It will return -EAGAIN.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int file_modified_flags(struct file *file, int flags)
{
	int ret;
	struct inode *inode = file_inode(file);

	/*
	 * Clear the security bits if the process is not being run by root.
	 * This keeps people from modifying setuid and setgid binaries.
	 */
	ret = __file_remove_privs(file, flags);
	if (ret)
		return ret;

	if (unlikely(file->f_mode & FMODE_NOCMTIME))
		return 0;

	ret = inode_needs_update_time(inode);
	if (ret <= 0)
		return ret;
	if (flags & IOCB_NOWAIT)
		return -EAGAIN;

	return __file_update_time(file, ret);
}

/**
 * file_modified - handle mandated vfs changes when modifying a file
 * @file: file that was modified
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int file_modified(struct file *file)
{
	return file_modified_flags(file, 0);
}
EXPORT_SYMBOL(file_modified);

/**
 * kiocb_modified - handle mandated vfs changes when modifying a file
 * @iocb: iocb that was modified
 *
 * When file has been modified ensure that special
 * file privileges are removed and time settings are updated.
 *
 * Context: Caller must hold the file's inode lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
int kiocb_modified(struct kiocb *iocb)
{
	return file_modified_flags(iocb->ki_filp, iocb->ki_flags);
}
EXPORT_SYMBOL_GPL(kiocb_modified);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wq_entry);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY | HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0, 0);
}

void __init inode_init(void)
{
	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0, 0);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK))
			inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @idmap: idmap of the mount the inode was created from
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 *
 * If the inode has been created through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions
 * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
 * checking is to be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
		      const struct inode *dir, umode_t mode)
{
	inode_fsuid_set(inode, idmap);
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode_fsgid_set(inode, idmap);
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @idmap: idmap of the mount the inode was found from
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then take
 * care to map the inode according to @idmap before checking permissions.
 * On non-idmapped mounts or if permission checking is to be performed on the
 * raw inode simply pass @nop_mnt_idmap.
 */
bool inode_owner_or_capable(struct mnt_idmap *idmap,
			    const struct inode *inode)
{
	vfsuid_t vfsuid;
	struct user_namespace *ns;

	vfsuid = i_uid_into_vfsuid(idmap, inode);
	if (vfsuid_eq_kuid(vfsuid, current_fsuid()))
		return true;

	ns = current_user_ns();
	if (vfsuid_has_mapping(ns, vfsuid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * (which wouldn't be necessary if all code paths that modify i_flags
 * actually followed this rule) is that there is at least one code path
 * that doesn't today, so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

/**
 * timestamp_truncate - Truncate timespec to a granularity
 * @t: Timespec
 * @inode: inode being updated
 *
 * Truncate a timespec to the granularity supported by the fs
 * containing the inode. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
		t.tv_nsec = 0;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1)
		; /* nothing */
	else if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;
	else if (gran > 1 && gran < NSEC_PER_SEC)
		t.tv_nsec -= t.tv_nsec % gran;
	else
		WARN(1, "invalid file time granularity: %u", gran);
	return t;
}
EXPORT_SYMBOL(timestamp_truncate);

/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that inode and inode->sb cannot be NULL.
 * Otherwise, the function warns and returns time without truncation.
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);

/**
 * inode_set_ctime_current - set the ctime to current_time
 * @inode: inode
 *
 * Set the inode->i_ctime to the current value for the inode. Returns
 * the current value that was assigned to i_ctime.
 */
struct timespec64 inode_set_ctime_current(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
	return now;
}
EXPORT_SYMBOL(inode_set_ctime_current);

/**
 * in_group_or_capable - check whether caller is CAP_FSETID privileged
 * @idmap: idmap of the mount @inode was found from
 * @inode: inode to check
 * @vfsgid: the new/current vfsgid of @inode
 *
 * Check whether @vfsgid is in the caller's group list or if the caller is
 * privileged with CAP_FSETID over @inode. This can be used to determine
 * whether the setgid bit can be kept or must be dropped.
 *
 * Return: true if the caller is sufficiently privileged, false if not.
 */
bool in_group_or_capable(struct mnt_idmap *idmap,
			 const struct inode *inode, vfsgid_t vfsgid)
{
	if (vfsgid_in_group_p(vfsgid))
		return true;
	if (capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
		return true;
	return false;
}

/**
 * mode_strip_sgid - handle the sgid bit for non-directories
 * @idmap: idmap of the mount the inode was created from
 * @dir: parent directory inode
 * @mode: mode of the file to be created in @dir
 *
 * If the @mode of the new file has both the S_ISGID and S_IXGRP bit
 * raised and @dir has the S_ISGID bit raised ensure that the caller is
 * either in the group of the parent directory or they have CAP_FSETID
 * in their user namespace and are privileged over the parent directory.
 * In all other cases, strip the S_ISGID bit from @mode.
 *
 * Return: the new mode to use for the file
 */
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
			const struct inode *dir, umode_t mode)
{
	if ((mode & (S_ISGID | S_IXGRP)) != (S_ISGID | S_IXGRP))
		return mode;
	if (S_ISDIR(mode) || !dir || !(dir->i_mode & S_ISGID))
		return mode;
	if (in_group_or_capable(idmap, dir, i_gid_into_vfsgid(idmap, dir)))
		return mode;
	return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);