// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001,2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "../internal.h" /* ugh */

#include <linux/uaccess.h>
/*
 * There are five quota SMP locks:
 * * dq_list_lock protects all lists with quotas and quota formats.
 * * dquot->dq_dqb_lock protects data from dq_dqb
 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
 *   consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
 *   dquot_transfer() can stabilize the amount it transfers
 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
 *   pointers in the inode
 * * dq_state_lock protects modifications of quota state (on quotaon and
 *   quotaoff) and readers who care about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading the pointers needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
 * inode and before dropping dquot references to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
 * inode is a quota file). Functions adding pointers from inode to dquots have
 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
 * have to do all pointer modifications before dropping dq_data_lock. This makes
 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
 * then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
 * memory (or space for it is being allocated) on the first dqget(), when it is
 * being written out, and when it is being released on the last dqput(). The
 * allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
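
/*
 * Illustrative sketch (not code that is called anywhere in this file): a
 * hypothetical update that needs several of the spinlocks above must take
 * them in the documented order, e.g.:
 *
 *	spin_lock(&dq_data_lock);		outermost of the spinlocks
 *	spin_lock(&inode->i_lock);		guards i_blocks/i_bytes
 *	spin_lock(&dquot->dq_dqb_lock);		innermost, guards dq_dqb
 *	...update dquot->dq_dqb and inode->i_bytes together...
 *	spin_unlock(&dquot->dq_dqb_lock);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_data_lock);
 *
 * __dquot_transfer() below follows exactly this nesting.
 */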
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);

static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);

void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats;	/* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);

static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
/*
 * Dquot List Management:
 * The quota code uses five lists for dquot management: the inuse_list,
 * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
 * A single dquot structure may be on some of those lists, depending on
 * its current state.
 *
 * All dquots are placed to the end of inuse_list when first created, and this
 * list is used for invalidate operation, which must look at every dquot.
 *
 * When the last reference of a dquot is dropped, the dquot is added to
 * releasing_dquots. We'll then queue work item which will call
 * synchronize_srcu() and after that perform the final cleanup of all the
 * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
 * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
 * struct.
 *
 * Unused and cleaned up dquots are in the free_dquots list and this list is
 * searched whenever we need an available dquot. Dquots are removed from the
 * list as soon as they are used again and dqstats.free_dquots gives the number
 * of dquots on the list. When dquot is invalidated it's completely released
 * from memory.
 *
 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
 * dirty, and this list is searched when writing dirty dquots back to
 * quota file. Note that some filesystems do dirty dquot tracking on their
 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
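
/*
 * Illustrative lifecycle sketch (a summary of the rules above, not extra
 * state): a dquot moves between the lists roughly as follows:
 *
 *	dqget()			-> inuse_list + dquot_hash[]  (first ref)
 *	dqput()			-> releasing_dquots           (last ref dropped)
 *	quota_release_workfn()	-> free_dquots                (after synchronize_srcu())
 *	shrinker / invalidate	-> destroyed                  (off all lists)
 *
 * dqi_dirty_list is orthogonal to this: an active dquot is added there by
 * dquot_mark_dquot_dirty() and removed by clear_dquot_dirty().
 */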
static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);

static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);

static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);

static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}

static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;

	return NULL;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}

static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}

static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}

static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}

/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}

static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}

static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}

/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	if (!dquot_active(dquot))
		return 0;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int ret, err, cnt;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquot[cnt])
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot[cnt]);
		if (!err)
			err = ret;
	}
	return err;
}

static inline void dqput_all(struct dquot **dquot)
{
	unsigned int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		dqput(dquot[cnt]);
}

static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 *	Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
/*
 *	Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* A dquot can be inactive only if there was an error during read/init
	 * => we'd better not write it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
/*
 *	Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);

void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);

static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);

static inline int dquot_write_dquot(struct dquot *dquot)
{
	int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't write quota structure "
			    "(error %d). Quota may get out of sync!", ret);
		/* Clear dirty bit anyway to avoid infinite loop. */
		clear_dquot_dirty(dquot);
	}
	return ret;
}
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	while (!list_empty(&free_dquots) && sc->nr_to_scan) {
		dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}

static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
	struct dquot *dquot;

	dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
		return NULL;

	mutex_init(&dquot->dq_lock);
	INIT_LIST_HEAD(&dquot->dq_free);
	INIT_LIST_HEAD(&dquot->dq_inuse);
	INIT_HLIST_NODE(&dquot->dq_hash);
	INIT_LIST_HEAD(&dquot->dq_dirty);
	dquot->dq_sb = sb;
	dquot->dq_id = make_kqid_invalid(type);
	atomic_set(&dquot->dq_count, 1);
	spin_lock_init(&dquot->dq_dqb_lock);

	return dquot;
}
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
#ifdef CONFIG_QUOTA_DEBUG
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
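
/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 * a reference obtained from dqget() must always be balanced by dqput(), e.g.:
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(inode->i_uid));
 *
 *	if (!IS_ERR(dquot)) {
 *		...inspect or update dquot->dq_dqb under dq_dqb_lock...
 *		dqput(dquot);
 *	}
 *
 * dquot_transfer() below follows this pattern for uid/gid changes.
 */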
static inline struct dquot **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}

static int dqinit_needed(struct inode *inode, int type)
{
	struct dquot * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return 0;

	dquots = i_dquot(inode);
	if (type != -1)
		return !dquots[type];
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!dquots[cnt])
			return 1;
	return 0;
}
/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 *  We have to scan also I_NEW inodes because they can already
		 *  have quota pointer initialized. Luckily, we need to touch
		 *  only quota pointers and these have separate locking
		 *  (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot **dquots = i_dquot(inode);
			struct dquot *dquot = dquots[type];

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			dquots[type] = NULL;
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	if (dquot->dq_dqb.dqb_rsvspace >= number)
		dquot->dq_dqb.dqb_rsvspace -= number;
	else {
		WARN_ON_ONCE(1);
		dquot->dq_dqb.dqb_rsvspace = 0;
	}
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
	    dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
struct dquot_warn {
	struct super_block *w_sb;
	struct kqid w_dq_id;
	short w_type;
};

static int warning_issued(struct dquot *dquot, const int warntype)
{
	int flag = (warntype == QUOTA_NL_BHARDWARN ||
		warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
		((warntype == QUOTA_NL_IHARDWARN ||
		warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);

	if (!flag)
		return 0;
	return test_and_set_bit(flag, &dquot->dq_flags);
}
#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;

static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}

/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif
static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
			    int warntype)
{
	if (warning_issued(dquot, warntype))
		return;
	warn->w_type = warntype;
	warn->w_sb = dquot->dq_sb;
	warn->w_dq_id = dquot->dq_id;
}

/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}
static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so
			 * exceeding will be always printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here.
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}

static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	qsize_t tspace;

	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    tspace <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
	    tspace - space < dquot->dq_dqb.dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot **dquots, *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			dquots[cnt] = got[cnt];
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}

int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
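
/*
 * Illustrative usage sketch (hypothetical caller): filesystems call
 * dquot_initialize() before operations that account against quota, e.g.
 * in their ->create, ->unlink or ->setattr handlers:
 *
 *	static int fs_example_create(struct inode *dir, ...)
 *	{
 *		int err = dquot_initialize(dir);
 *
 *		if (err)
 *			return err;
 *		...allocate the new inode; that path then calls
 *		   dquot_alloc_inode() and dquot_alloc_space()...
 *	}
 *
 * "fs_example_create" is a made-up name used only for illustration.
 */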
bool dquot_initialize_needed(struct inode *inode)
{
	struct dquot **dquots;
	int i;

	if (!inode_quota_active(inode))
		return false;

	dquots = i_dquot(inode);
	for (i = 0; i < MAXQUOTAS; i++)
		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
			return true;
	return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);
/*
 * Release all quotas referenced by inode.
 *
 * This function is only called on inode free or when converting
 * a file to a quota file; there are no other users of the i_dquot in
 * either case, so we needn't call synchronize_srcu() after
 * clearing the pointers.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = dquots[cnt];
		dquots[cnt] = NULL;
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}

void dquot_drop(struct inode *inode)
{
	struct dquot * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode *inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}

static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}

static qsize_t inode_get_rsv_space(struct inode *inode)
{
	qsize_t ret;

	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	spin_lock(&inode->i_lock);
	ret = __inode_get_rsv_space(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * This function updates the i_blocks+i_bytes fields and the quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve) {
			ret = dquot_add_space(dquots[cnt], 0, number, flags,
					      &warn[cnt]);
		} else {
			ret = dquot_add_space(dquots[cnt], number, 0, flags,
					      &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				if (!dquots[cnt])
					continue;
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquots[cnt],
								  number);
				else
					dquot_decr_space(dquots[cnt], number);
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_flush_warn;
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
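
/*
 * Illustrative note: callers normally use the wrappers from
 * <linux/quotaops.h> (dquot_alloc_space(), dquot_alloc_block(),
 * dquot_reserve_block(), ...) which pass the right DQUOT_SPACE_* flags
 * and mark the inode dirty. A sketch of the common pairing, with a
 * hypothetical filesystem error path:
 *
 *	err = dquot_alloc_block(inode, nr);	charges quota and i_blocks
 *	if (err)
 *		return err;
 *	if (fs_allocation_failed)		hypothetical condition
 *		dquot_free_block(inode, nr);	must undo the charge
 */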
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				if (!dquots[cnt])
					continue;
				/* Back out changes we already did */
				spin_lock(&dquots[cnt]->dq_dqb_lock);
				dquot_decr_inodes(dquots[cnt], 1);
				spin_unlock(&dquots[cnt]->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt]) {
			struct dquot *dquot = dquots[cnt];

			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt]) {
			struct dquot *dquot = dquots[cnt];

			spin_lock(&dquot->dq_dqb_lock);
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!inode_quota_active(inode)) {
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		spin_lock(&dquots[cnt]->dq_dqb_lock);
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
		spin_unlock(&dquots[cnt]->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;
	int index;

	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		spin_lock(&dquots[cnt]->dq_dqb_lock);
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
		spin_unlock(&dquots[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = i_dquot(inode)[cnt];
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		i_dquot(inode)[cnt] = transfer_to[cnt];
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
		   struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!inode_quota_active(inode))
		return 0;

	if (i_uid_needs_update(idmap, iattr, inode)) {
		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
					  iattr->ia_vfsuid);

		dquot = dqget(sb, make_kqid_uid(kuid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	if (i_gid_needs_update(idmap, iattr, inode)) {
		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
					  iattr->ia_vfsgid);

		dquot = dqget(sb, make_kqid_gid(kgid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	return dqopt->ops[type]->write_file_info(sb, type);
}
EXPORT_SYMBOL(dquot_commit_info);

int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!sb_has_quota_active(sb, qid->type))
		return -ESRCH;
	if (!dqopt->ops[qid->type]->get_next_id)
		return -ENOSYS;
	return dqopt->ops[qid->type]->get_next_id(sb, qid);
}
EXPORT_SYMBOL(dquot_get_next_id);
/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);

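/*
 * Example (illustrative, hypothetical "myfs"): a filesystem that uses the
 * generic dquot implementation wires these operations into its super_block
 * during fill_super, before any quota can be turned on:
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &myfs_quotactl_ops;  // or dquot_quotactl_sysfile_ops
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */
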
/*
 * Generic helper for ->open on filesystems supporting disk quotas.
 */
int dquot_file_open(struct inode *inode, struct file *file)
{
	int error;

	error = generic_file_open(inode, file);
	if (!error && (file->f_mode & FMODE_WRITE))
		error = dquot_initialize(inode);
	return error;
}
EXPORT_SYMBOL(dquot_file_open);

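/*
 * Example (illustrative): filesystems typically plug this helper straight
 * into their regular-file operations so dquots are attached before the
 * first write is accounted; "myfs" is hypothetical:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.open	= dquot_file_open,
 *		// ... other methods ...
 *	};
 */
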
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode = dqopt->files[type];

	if (!inode)
		return;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		inode_lock(inode);
		inode->i_flags &= ~S_NOQUOTA;
		inode_unlock(inode);
	}
	dqopt->files[type] = NULL;
	iput(inode);
}

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;

		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

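/*
 * Example (illustrative): a filesystem keeping its quota files as regular
 * files usually turns quotas off for every type from its ->put_super
 * before tearing down the rest of the superblock; "myfs" is hypothetical:
 *
 *	static void myfs_put_super(struct super_block *sb)
 *	{
 *		dquot_disable(sb, -1,
 *			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 *		// ... release filesystem-private state ...
 *	}
 */
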
/*
 * Turn quotas on on a device
 */

static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted. They should be thought of as
	 * filesystem metadata, not user data. New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check. Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}

int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
			unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);

	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);

	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);

/*
 * More powerful function for turning on quotas on given quota inode allowing
 * setting of individual quota flags
 */
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
			   unsigned int flags)
{
	int err;

	err = vfs_setup_quota_inode(inode, type);
	if (err < 0)
		return err;
	err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
	if (err < 0)
		vfs_cleanup_quota_inode(inode->i_sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);

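/*
 * Example (illustrative): this is typically invoked from a filesystem's
 * remount/reconfigure path once the superblock goes read-only to
 * read-write again, e.g.:
 *
 *	if (!sb_rdonly(sb))
 *		dquot_resume(sb, -1);
 */
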
int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);

	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = dquot_load_quota_inode(d_inode(path->dentry), type,
					       format_id, DQUOT_USAGE_ENABLED |
					       DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);

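/*
 * Example (illustrative): filesystems with visible (old-style) quota
 * files usually expose this helper directly through their quotactl
 * operations; "myfs" is hypothetical:
 *
 *	static const struct quotactl_ops myfs_quotactl_ops = {
 *		.quota_on	= dquot_quota_on,
 *		.quota_off	= dquot_quota_off,
 *		.quota_sync	= dquot_quota_sync,
 *		.get_state	= dquot_get_state,
 *		.set_info	= dquot_set_dqinfo,
 *		.get_dqblk	= dquot_get_dqblk,
 *		.set_dqblk	= dquot_set_dqblk,
 *	};
 */
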
/*
 * This function is used when filesystem needs to initialize quotas
 * during mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
			 int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = security_quota_on(dentry);
	if (!error)
		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
					DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type)) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
			spin_unlock(&dq_state_lock);
		}
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);

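/*
 * Example (illustrative): a caller sets only the fields named in
 * d_fieldmask; everything else in the qc_dqblk is ignored. Space limits
 * are in bytes, not filesystem blocks. The uid and limit values below
 * are arbitrary:
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 900 << 20,
 *		.d_spc_hardlimit = 1024 << 20,
 *	};
 *	err = dquot_set_dqblk(sb, make_kqid_uid(KUIDT_INIT(1000)), &di);
 */
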
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		if (dqopt->files[type]) {
			tstate->ino = dqopt->files[type]->i_ino;
			tstate->blocks = dqopt->files[type]->i_blocks;
		}
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if ((ii->i_flags & QCI_ROOT_SQUASH &&
		     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);

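/*
 * Example (illustrative sketch, assuming a filesystem that stores its
 * quota files as internal metadata rather than visible files): such a
 * filesystem marks the quota files as system files and uses this table,
 * so userspace can only toggle enforcement, never accounting:
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 *	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
 */
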
static int do_proc_dqstats(struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (unsigned long *)table->data - dqstats.stat;
	s64 value = percpu_counter_sum(&dqstats.counter[type]);

	/* Filter negative values for non-monotonic counters */
	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
			  type == DQST_FREE_DQUOTS))
		value = 0;

	/* Update global table */
	dqstats.stat[type] = value;
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(unsigned long),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;
	struct shrinker *dqcache_shrinker;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_init("fs/quota", fs_dqstats_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = ilog2(nr_hash);

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
	if (!dqcache_shrinker)
		panic("Cannot allocate dquot shrinker");

	dqcache_shrinker->count_objects = dqcache_shrink_count;
	dqcache_shrinker->scan_objects = dqcache_shrink_scan;

	shrinker_register(dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);