// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
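
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * with quota_scale = 1, a user well below their limit syncs on the normal
 * quota_quantum cadence; as the locally accumulated change pushes the
 * cached value toward the limit, need_sync() below starts returning true
 * on unlock, so syncs become more frequent.  The tunables live in each
 * filesystem's gfs2_tune structure (gt_quota_quantum, gt_quota_scale_num /
 * gt_quota_scale_den); how they are exposed to the administrator (e.g.
 * via sysfs) varies by kernel version.
 */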

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                                                 -> sd_bitmap_lock  */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}
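
/*
 * Added commentary: the hash mixes in the superblock pointer as well as the
 * kqid because qd_hash_table is a single global table shared by all mounted
 * gfs2 filesystems; gfs2_qd_search_bucket() below also compares qd_sbd so
 * that two mounts tracking the same ID never alias one another's entries.
 */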

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_name.ln_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
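
/*
 * Worked example (added commentary, not in the original source): user and
 * group records are interleaved in the quota file.  For uid 1000,
 * qd2index() yields 2 * 1000 + 0 = 2000; for gid 1000 it yields
 * 2 * 1000 + 1 = 2001.  qd2offset() then scales the index by
 * sizeof(struct gfs2_quota) to get the byte offset of the record.
 */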

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
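
/*
 * Added commentary: the bucket walk runs under rcu_read_lock(), so entries
 * may be freed concurrently via call_rcu(); lockref_get_not_dead() is what
 * makes the lookup safe, refusing to resurrect a qd that gfs2_qd_isolate()
 * has already marked dead.
 */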

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}
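
/*
 * Added commentary: each slot corresponds to one struct gfs2_quota_change
 * entry in this node's quota_change file; sd_quota_bitmap simply tracks
 * which entries are in use.  Note the goto into the if-body above: a qd
 * that already owns a slot only bumps qd_slot_count.
 */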

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
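
/*
 * Added commentary: a qd is eligible for syncing only if it is not already
 * being synced (QDF_LOCKED), actually carries a pending change (QDF_CHANGE),
 * and has not already been written in this sync generation.  On success the
 * caller inherits one lockref and one slot reference.
 */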

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sb_rdonly(sdp->sd_vfs))
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
	int error = 0;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata == NULL) {
		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
		if (!ip->i_qadata) {
			error = -ENOMEM;
			goto out;
		}
	}
	ip->i_qadata->qa_ref++;
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
		ip->i_qadata = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	qd = ip->i_qadata->qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
		error = -EIO;
		goto out;
	}

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out_unhold;
	ip->i_qadata->qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out_unhold;
		ip->i_qadata->qa_qd_num++;
		qd++;
	}

out_unhold:
	if (error)
		gfs2_quota_unhold(ip);
out:
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 x;

	if (ip->i_qadata == NULL)
		return;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qdsb_put(ip->i_qadata->qa_qd[x]);
		ip->i_qadata->qa_qd[x] = NULL;
	}
	ip->i_qadata->qa_qd_num = 0;
	gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	if (change < 0) /* Reset quiet flag if we freed some blocks */
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
	mutex_unlock(&sdp->sd_quota_mutex);
}
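
/*
 * Added commentary: do_qc() is the per-node accumulation step described in
 * the header comment.  It adjusts the on-disk gfs2_quota_change entry for
 * this node and mirrors the running total in qd->qd_change; when the delta
 * returns to zero, the slot and qd references taken for QDF_CHANGE are
 * dropped again.
 */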

static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
				  unsigned off, void *buf, unsigned bytes)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct buffer_head *bh;
	void *kaddr;
	u64 blk;
	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
	unsigned to_write = bytes, pg_off = off;
	int done = 0;

	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
	boff = off % bsize;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;
	if (!page_has_buffers(page))
		create_empty_buffers(page, bsize, 0);

	bh = page_buffers(page);
	while (!done) {
		/* Find the beginning block within the page */
		if (pg_off >= ((bnum * bsize) + bsize)) {
			bh = bh->b_this_page;
			bnum++;
			blk++;
			continue;
		}
		if (!buffer_mapped(bh)) {
			gfs2_block_map(inode, blk, bh, 1);
			if (!buffer_mapped(bh))
				goto unlock_out;
			/* If it's a newly allocated disk block, zero it */
			if (buffer_new(bh))
				zero_user(page, bnum * bsize, bh->b_size);
		}
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh)) {
			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				goto unlock_out;
		}
		if (gfs2_is_jdata(ip))
			gfs2_trans_add_data(ip->i_gl, bh);
		else
			gfs2_ordered_add_inode(ip);

		/* If we need to write to the next block as well */
		if (to_write > (bsize - boff)) {
			pg_off += (bsize - boff);
			to_write -= (bsize - boff);
			boff = pg_off % bsize;
			continue;
		}
		done = 1;
	}

	/* Write to the page, now that we have setup the buffer(s) */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + off, buf, bytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);

	return 0;

unlock_out:
	unlock_page(page);
	put_page(page);
	return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
				 loff_t loc)
{
	unsigned long pg_beg;
	unsigned pg_off, nbytes, overflow = 0;
	int pg_oflow = 0, error;
	void *ptr;

	nbytes = sizeof(struct gfs2_quota);

	pg_beg = loc >> PAGE_SHIFT;
	pg_off = offset_in_page(loc);

	/* If the quota straddles a page boundary, split the write in two */
	if ((pg_off + nbytes) > PAGE_SIZE) {
		pg_oflow = 1;
		overflow = (pg_off + nbytes) - PAGE_SIZE;
	}

	ptr = qp;
	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
				       nbytes - overflow);
	/* If there's an overflow, write the remaining bytes to the next page */
	if (!error && pg_oflow)
		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
					       ptr + nbytes - overflow,
					       overflow);
	return error;
}
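
/*
 * Worked example (added commentary; the numbers are illustrative): with
 * PAGE_SIZE = 4096 and a quota record of, say, 64 bytes starting at
 * loc = 4064, pg_off = 4064 and pg_off + nbytes = 4128 > 4096, so
 * overflow = 32: the first 32 bytes go to page pg_beg and the remaining
 * 32 bytes go to offset 0 of page pg_beg + 1.
 */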

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_quota q;
	int err;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	if (((s64)be64_to_cpu(q.qu_value)) < 0)
		q.qu_value = 0; /* Never go negative on quota usage */
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	err = gfs2_write_disk_quota(ip, &q, loc);
	if (!err) {
		size = loc + sizeof(struct gfs2_quota);
		if (size > inode->i_size)
			i_size_write(inode, size);
		inode->i_mtime = inode->i_atime = current_time(inode);
		mark_inode_dirty(inode);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	return err;
}

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_qa_get(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs) {
		error = -ENOMEM;
		goto out;
	}

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	inode_lock(&ip->i_inode);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out_dq;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_dq;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out_dq:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	inode_unlock(&ip->i_inode);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
		       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
	gfs2_qa_put(ip);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	u32 x;
	int error = 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
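
/*
 * Worked example (added commentary; numbers are illustrative): with a limit
 * of 1000 blocks, a cached qb_value of 900, a local qd_change of 20, four
 * journals (nodes) in the jindex, and quota_scale = num/den = 1/1, the test
 * computes 20 * 4 * 1 / 1 + 900 = 980 < 1000, so no sync yet.  The pending
 * change is scaled by the node count on the pessimistic assumption that
 * every other node may be accumulating a similar delta.
 */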

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	u32 x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		return;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_qadata->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *          min_req = ap->min_target ? ap->min_target : ap->target;
 *          quota must allow at least min_req blks for success and
 *          ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *          of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	u32 x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				if (!test_and_set_bit(QDF_QMSG_QUIET,
						      &qd->qd_flags)) {
					print_message(qd, "exceeded");
					quota_send_warning(qd->qd_id,
							   sdp->sd_vfs->s_dev,
							   QUOTA_NL_BHARDWARN);
				}
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	u32 x;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
	    gfs2_assert_warn(sdp, change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
				 ip->i_qadata->qa_ref > 0))
		return;
	for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
		qd = ip->i_qadata->qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
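
/*
 * Added commentary: gfs2_quota_init() runs at mount time and replays this
 * node's quota_change file, recreating an in-core gfs2_quota_data (with
 * QDF_CHANGE set and its slot marked in sd_quota_bitmap) for every entry
 * whose delta never made it back to the quota file before the last unmount
 * or crash.
 */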

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_last_entry(head, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!gfs2_withdrawn(sdp)) {
		if (!cmpxchg(&sdp->sd_log_error, 0, error))
			fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
		wake_up(&sdp->sd_logd_waitq);
	}
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
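
/*
 * Added commentary: quotad's main loop sleeps for min(quotad_timeo,
 * statfs_timeo) jiffies; on wakeup, t is the time actually slept, so each
 * periodic job either fires (and re-arms from its tunable quantum) or has
 * its remaining timeout reduced by t.
 */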

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_first_entry(&sdp->sd_trunc_list,
					      struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp))
			goto bypass;
		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

bypass:
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_qa_get(ip);
	if (error)
		goto out_put;

	inode_lock(&ip->i_inode);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
	if (!error)
		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	gfs2_qa_put(ip);
	inode_unlock(&ip->i_inode);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}