From: Greg Kroah-Hartman
Date: Tue, 3 Feb 2015 03:23:29 +0000 (-0800)
Subject: 3.18-stable patches
X-Git-Tag: v3.18.6~10
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d0c210d09ffcbfa5b81543cfd0d6a7a4b869425b;p=thirdparty%2Fkernel%2Fstable-queue.git

3.18-stable patches

added patches:
      quota-switch-get_dqblk-and-set_dqblk-to-use-bytes-as-space-units.patch
---

diff --git a/queue-3.18/quota-switch-get_dqblk-and-set_dqblk-to-use-bytes-as-space-units.patch b/queue-3.18/quota-switch-get_dqblk-and-set_dqblk-to-use-bytes-as-space-units.patch
new file mode 100644
index 00000000000..6c6ccb2c11d
--- /dev/null
+++ b/queue-3.18/quota-switch-get_dqblk-and-set_dqblk-to-use-bytes-as-space-units.patch
@@ -0,0 +1,946 @@
+From 14bf61ffe6ac54afcd1e888a4407fe16054483db Mon Sep 17 00:00:00 2001
+From: Jan Kara
+Date: Thu, 9 Oct 2014 16:03:13 +0200
+Subject: quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as space units
+
+From: Jan Kara
+
+commit 14bf61ffe6ac54afcd1e888a4407fe16054483db upstream.
+
+Currently ->get_dqblk() and ->set_dqblk() use struct fs_disk_quota, which
+tracks space limits and usage in 512-byte blocks. However, VFS quotas
+track usage in bytes (as some filesystems require that) and we need to
+somehow pass this information. Up to now it wasn't a problem because we
+didn't do any unit conversion (thus VFS quota routines happily stuck the
+number of bytes into the d_bcount field of struct fs_disk_quota). Only if
+you tried to use Q_XGETQUOTA or Q_XSETQLIM for VFS quotas (or Q_GETQUOTA
+/ Q_SETQUOTA for XFS quotas) did you get bogus results. Hardly anyone
+tried this, but reportedly some Samba users hit the problem in practice.
+So if we want the interfaces to be compatible, we need to fix this.
+
+We bite the bullet and define another quota structure used for passing
+information from/to ->get_dqblk()/->set_dqblk(). It's somewhat sad that we
+need more conversion routines in fs/quota/quota.c, and that another copy
+of the quota structure slows down getting quota information by about 2%,
+but it seems cleaner than overloading e.g. the units of d_bcount to bytes.
+ +Reviewed-by: Christoph Hellwig +Signed-off-by: Jan Kara +Signed-off-by: Greg Kroah-Hartman + +--- + fs/gfs2/quota.c | 53 +++++++-------- + fs/quota/dquot.c | 83 +++++++++++------------- + fs/quota/quota.c | 162 +++++++++++++++++++++++++++++++++++++++-------- + fs/xfs/xfs_qm.h | 4 - + fs/xfs/xfs_qm_syscalls.c | 156 +++++++++++++++++++-------------------------- + fs/xfs/xfs_quotaops.c | 8 +- + include/linux/quota.h | 47 +++++++++++++ + include/linux/quotaops.h | 4 - + 8 files changed, 320 insertions(+), 197 deletions(-) + +--- a/fs/gfs2/quota.c ++++ b/fs/gfs2/quota.c +@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data + + static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, + s64 change, struct gfs2_quota_data *qd, +- struct fs_disk_quota *fdq) ++ struct qc_dqblk *fdq) + { + struct inode *inode = &ip->i_inode; + struct gfs2_sbd *sdp = GFS2_SB(inode); +@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2 + be64_add_cpu(&q.qu_value, change); + qd->qd_qb.qb_value = q.qu_value; + if (fdq) { +- if (fdq->d_fieldmask & FS_DQ_BSOFT) { +- q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); ++ if (fdq->d_fieldmask & QC_SPC_SOFT) { ++ q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); + qd->qd_qb.qb_warn = q.qu_warn; + } +- if (fdq->d_fieldmask & FS_DQ_BHARD) { +- q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); ++ if (fdq->d_fieldmask & QC_SPC_HARD) { ++ q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); + qd->qd_qb.qb_limit = q.qu_limit; + } +- if (fdq->d_fieldmask & FS_DQ_BCOUNT) { +- q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); ++ if (fdq->d_fieldmask & QC_SPACE) { ++ q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); + qd->qd_qb.qb_value = q.qu_value; + } + } +@@ -1502,7 +1502,7 @@ static int gfs2_quota_get_xstate(struct + } + + static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, +- struct fs_disk_quota *fdq) ++ struct qc_dqblk *fdq) + { + struct gfs2_sbd *sdp = sb->s_fs_info; + struct gfs2_quota_lvb *qlvb; +@@ -1510,7 +1510,7 @@ static int gfs2_get_dqblk(struct super_b + struct gfs2_holder q_gh; + int error; + +- memset(fdq, 0, sizeof(struct fs_disk_quota)); ++ memset(fdq, 0, sizeof(*fdq)); + + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + return -ESRCH; /* Crazy XFS error code */ +@@ -1527,12 +1527,9 @@ static int gfs2_get_dqblk(struct super_b + goto out; + + qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; +- fdq->d_version = FS_DQUOT_VERSION; +- fdq->d_flags = (qid.type == USRQUOTA) ? 
FS_USER_QUOTA : FS_GROUP_QUOTA; +- fdq->d_id = from_kqid_munged(current_user_ns(), qid); +- fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; +- fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; +- fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; ++ fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; ++ fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; ++ fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; + + gfs2_glock_dq_uninit(&q_gh); + out: +@@ -1541,10 +1538,10 @@ out: + } + + /* GFS2 only supports a subset of the XFS fields */ +-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) ++#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE) + + static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, +- struct fs_disk_quota *fdq) ++ struct qc_dqblk *fdq) + { + struct gfs2_sbd *sdp = sb->s_fs_info; + struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); +@@ -1588,17 +1585,17 @@ static int gfs2_set_dqblk(struct super_b + goto out_i; + + /* If nothing has changed, this is a no-op */ +- if ((fdq->d_fieldmask & FS_DQ_BSOFT) && +- ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) +- fdq->d_fieldmask ^= FS_DQ_BSOFT; +- +- if ((fdq->d_fieldmask & FS_DQ_BHARD) && +- ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) +- fdq->d_fieldmask ^= FS_DQ_BHARD; +- +- if ((fdq->d_fieldmask & FS_DQ_BCOUNT) && +- ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value))) +- fdq->d_fieldmask ^= FS_DQ_BCOUNT; ++ if ((fdq->d_fieldmask & QC_SPC_SOFT) && ++ ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) ++ fdq->d_fieldmask ^= QC_SPC_SOFT; ++ ++ if ((fdq->d_fieldmask & QC_SPC_HARD) && ++ ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) ++ fdq->d_fieldmask ^= QC_SPC_HARD; ++ ++ if ((fdq->d_fieldmask & QC_SPACE) && ++ ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) ++ fdq->d_fieldmask ^= QC_SPACE; + + if (fdq->d_fieldmask == 0) + goto out_i; +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c +@@ -2391,30 +2391,25 @@ static inline qsize_t stoqb(qsize_t spac + } + + /* Generic routine for getting common part of quota structure */ +-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) ++static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) + { + struct mem_dqblk *dm = &dquot->dq_dqb; + + memset(di, 0, sizeof(*di)); +- di->d_version = FS_DQUOT_VERSION; +- di->d_flags = dquot->dq_id.type == USRQUOTA ? 
+- FS_USER_QUOTA : FS_GROUP_QUOTA; +- di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); +- + spin_lock(&dq_data_lock); +- di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); +- di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit); ++ di->d_spc_hardlimit = dm->dqb_bhardlimit; ++ di->d_spc_softlimit = dm->dqb_bsoftlimit; + di->d_ino_hardlimit = dm->dqb_ihardlimit; + di->d_ino_softlimit = dm->dqb_isoftlimit; +- di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace; +- di->d_icount = dm->dqb_curinodes; +- di->d_btimer = dm->dqb_btime; +- di->d_itimer = dm->dqb_itime; ++ di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; ++ di->d_ino_count = dm->dqb_curinodes; ++ di->d_spc_timer = dm->dqb_btime; ++ di->d_ino_timer = dm->dqb_itime; + spin_unlock(&dq_data_lock); + } + + int dquot_get_dqblk(struct super_block *sb, struct kqid qid, +- struct fs_disk_quota *di) ++ struct qc_dqblk *di) + { + struct dquot *dquot; + +@@ -2428,70 +2423,70 @@ int dquot_get_dqblk(struct super_block * + } + EXPORT_SYMBOL(dquot_get_dqblk); + +-#define VFS_FS_DQ_MASK \ +- (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ +- FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \ +- FS_DQ_BTIMER | FS_DQ_ITIMER) ++#define VFS_QC_MASK \ ++ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ ++ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ ++ QC_SPC_TIMER | QC_INO_TIMER) + + /* Generic routine for setting common part of quota structure */ +-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) ++static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) + { + struct mem_dqblk *dm = &dquot->dq_dqb; + int check_blim = 0, check_ilim = 0; + struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; + +- if (di->d_fieldmask & ~VFS_FS_DQ_MASK) ++ if (di->d_fieldmask & ~VFS_QC_MASK) + return -EINVAL; + +- if (((di->d_fieldmask & FS_DQ_BSOFT) && +- (di->d_blk_softlimit > dqi->dqi_maxblimit)) || +- ((di->d_fieldmask & FS_DQ_BHARD) && +- (di->d_blk_hardlimit > dqi->dqi_maxblimit)) || +- ((di->d_fieldmask & FS_DQ_ISOFT) && ++ if (((di->d_fieldmask & QC_SPC_SOFT) && ++ stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || ++ ((di->d_fieldmask & QC_SPC_HARD) && ++ stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || ++ ((di->d_fieldmask & QC_INO_SOFT) && + (di->d_ino_softlimit > dqi->dqi_maxilimit)) || +- ((di->d_fieldmask & FS_DQ_IHARD) && ++ ((di->d_fieldmask & QC_INO_HARD) && + (di->d_ino_hardlimit > dqi->dqi_maxilimit))) + return -ERANGE; + + spin_lock(&dq_data_lock); +- if (di->d_fieldmask & FS_DQ_BCOUNT) { +- dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace; ++ if (di->d_fieldmask & QC_SPACE) { ++ dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; + check_blim = 1; + set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); + } + +- if (di->d_fieldmask & FS_DQ_BSOFT) +- dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit); +- if (di->d_fieldmask & FS_DQ_BHARD) +- dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit); +- if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) { ++ if (di->d_fieldmask & QC_SPC_SOFT) ++ dm->dqb_bsoftlimit = di->d_spc_softlimit; ++ if (di->d_fieldmask & QC_SPC_HARD) ++ dm->dqb_bhardlimit = di->d_spc_hardlimit; ++ if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { + check_blim = 1; + set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); + } + +- if (di->d_fieldmask & FS_DQ_ICOUNT) { +- dm->dqb_curinodes = di->d_icount; ++ if (di->d_fieldmask & QC_INO_COUNT) { ++ dm->dqb_curinodes = di->d_ino_count; + check_ilim = 1; + set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); + } + +- if (di->d_fieldmask & 
FS_DQ_ISOFT) ++ if (di->d_fieldmask & QC_INO_SOFT) + dm->dqb_isoftlimit = di->d_ino_softlimit; +- if (di->d_fieldmask & FS_DQ_IHARD) ++ if (di->d_fieldmask & QC_INO_HARD) + dm->dqb_ihardlimit = di->d_ino_hardlimit; +- if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) { ++ if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { + check_ilim = 1; + set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); + } + +- if (di->d_fieldmask & FS_DQ_BTIMER) { +- dm->dqb_btime = di->d_btimer; ++ if (di->d_fieldmask & QC_SPC_TIMER) { ++ dm->dqb_btime = di->d_spc_timer; + check_blim = 1; + set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); + } + +- if (di->d_fieldmask & FS_DQ_ITIMER) { +- dm->dqb_itime = di->d_itimer; ++ if (di->d_fieldmask & QC_INO_TIMER) { ++ dm->dqb_itime = di->d_ino_timer; + check_ilim = 1; + set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); + } +@@ -2501,7 +2496,7 @@ static int do_set_dqblk(struct dquot *dq + dm->dqb_curspace < dm->dqb_bsoftlimit) { + dm->dqb_btime = 0; + clear_bit(DQ_BLKS_B, &dquot->dq_flags); +- } else if (!(di->d_fieldmask & FS_DQ_BTIMER)) ++ } else if (!(di->d_fieldmask & QC_SPC_TIMER)) + /* Set grace only if user hasn't provided his own... */ + dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; + } +@@ -2510,7 +2505,7 @@ static int do_set_dqblk(struct dquot *dq + dm->dqb_curinodes < dm->dqb_isoftlimit) { + dm->dqb_itime = 0; + clear_bit(DQ_INODES_B, &dquot->dq_flags); +- } else if (!(di->d_fieldmask & FS_DQ_ITIMER)) ++ } else if (!(di->d_fieldmask & QC_INO_TIMER)) + /* Set grace only if user hasn't provided his own... */ + dm->dqb_itime = get_seconds() + dqi->dqi_igrace; + } +@@ -2526,7 +2521,7 @@ static int do_set_dqblk(struct dquot *dq + } + + int dquot_set_dqblk(struct super_block *sb, struct kqid qid, +- struct fs_disk_quota *di) ++ struct qc_dqblk *di) + { + struct dquot *dquot; + int rc; +--- a/fs/quota/quota.c ++++ b/fs/quota/quota.c +@@ -115,17 +115,27 @@ static int quota_setinfo(struct super_bl + return sb->s_qcop->set_info(sb, type, &info); + } + +-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src) ++static inline qsize_t qbtos(qsize_t blocks) ++{ ++ return blocks << QIF_DQBLKSIZE_BITS; ++} ++ ++static inline qsize_t stoqb(qsize_t space) ++{ ++ return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; ++} ++ ++static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) + { + memset(dst, 0, sizeof(*dst)); +- dst->dqb_bhardlimit = src->d_blk_hardlimit; +- dst->dqb_bsoftlimit = src->d_blk_softlimit; +- dst->dqb_curspace = src->d_bcount; ++ dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); ++ dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); ++ dst->dqb_curspace = src->d_space; + dst->dqb_ihardlimit = src->d_ino_hardlimit; + dst->dqb_isoftlimit = src->d_ino_softlimit; +- dst->dqb_curinodes = src->d_icount; +- dst->dqb_btime = src->d_btimer; +- dst->dqb_itime = src->d_itimer; ++ dst->dqb_curinodes = src->d_ino_count; ++ dst->dqb_btime = src->d_spc_timer; ++ dst->dqb_itime = src->d_ino_timer; + dst->dqb_valid = QIF_ALL; + } + +@@ -133,7 +143,7 @@ static int quota_getquota(struct super_b + void __user *addr) + { + struct kqid qid; +- struct fs_disk_quota fdq; ++ struct qc_dqblk fdq; + struct if_dqblk idq; + int ret; + +@@ -151,36 +161,36 @@ static int quota_getquota(struct super_b + return 0; + } + +-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src) ++static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) + { +- dst->d_blk_hardlimit = src->dqb_bhardlimit; +- 
dst->d_blk_softlimit = src->dqb_bsoftlimit; +- dst->d_bcount = src->dqb_curspace; ++ dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); ++ dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); ++ dst->d_space = src->dqb_curspace; + dst->d_ino_hardlimit = src->dqb_ihardlimit; + dst->d_ino_softlimit = src->dqb_isoftlimit; +- dst->d_icount = src->dqb_curinodes; +- dst->d_btimer = src->dqb_btime; +- dst->d_itimer = src->dqb_itime; ++ dst->d_ino_count = src->dqb_curinodes; ++ dst->d_spc_timer = src->dqb_btime; ++ dst->d_ino_timer = src->dqb_itime; + + dst->d_fieldmask = 0; + if (src->dqb_valid & QIF_BLIMITS) +- dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD; ++ dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; + if (src->dqb_valid & QIF_SPACE) +- dst->d_fieldmask |= FS_DQ_BCOUNT; ++ dst->d_fieldmask |= QC_SPACE; + if (src->dqb_valid & QIF_ILIMITS) +- dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD; ++ dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; + if (src->dqb_valid & QIF_INODES) +- dst->d_fieldmask |= FS_DQ_ICOUNT; ++ dst->d_fieldmask |= QC_INO_COUNT; + if (src->dqb_valid & QIF_BTIME) +- dst->d_fieldmask |= FS_DQ_BTIMER; ++ dst->d_fieldmask |= QC_SPC_TIMER; + if (src->dqb_valid & QIF_ITIME) +- dst->d_fieldmask |= FS_DQ_ITIMER; ++ dst->d_fieldmask |= QC_INO_TIMER; + } + + static int quota_setquota(struct super_block *sb, int type, qid_t id, + void __user *addr) + { +- struct fs_disk_quota fdq; ++ struct qc_dqblk fdq; + struct if_dqblk idq; + struct kqid qid; + +@@ -244,10 +254,78 @@ static int quota_getxstatev(struct super + return ret; + } + ++/* ++ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them ++ * out of there as xfsprogs rely on definitions being in that header file. So ++ * just define same functions here for quota purposes. ++ */ ++#define XFS_BB_SHIFT 9 ++ ++static inline u64 quota_bbtob(u64 blocks) ++{ ++ return blocks << XFS_BB_SHIFT; ++} ++ ++static inline u64 quota_btobb(u64 bytes) ++{ ++ return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; ++} ++ ++static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) ++{ ++ dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); ++ dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); ++ dst->d_ino_hardlimit = src->d_ino_hardlimit; ++ dst->d_ino_softlimit = src->d_ino_softlimit; ++ dst->d_space = quota_bbtob(src->d_bcount); ++ dst->d_ino_count = src->d_icount; ++ dst->d_ino_timer = src->d_itimer; ++ dst->d_spc_timer = src->d_btimer; ++ dst->d_ino_warns = src->d_iwarns; ++ dst->d_spc_warns = src->d_bwarns; ++ dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); ++ dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); ++ dst->d_rt_space = quota_bbtob(src->d_rtbcount); ++ dst->d_rt_spc_timer = src->d_rtbtimer; ++ dst->d_rt_spc_warns = src->d_rtbwarns; ++ dst->d_fieldmask = 0; ++ if (src->d_fieldmask & FS_DQ_ISOFT) ++ dst->d_fieldmask |= QC_INO_SOFT; ++ if (src->d_fieldmask & FS_DQ_IHARD) ++ dst->d_fieldmask |= QC_INO_HARD; ++ if (src->d_fieldmask & FS_DQ_BSOFT) ++ dst->d_fieldmask |= QC_SPC_SOFT; ++ if (src->d_fieldmask & FS_DQ_BHARD) ++ dst->d_fieldmask |= QC_SPC_HARD; ++ if (src->d_fieldmask & FS_DQ_RTBSOFT) ++ dst->d_fieldmask |= QC_RT_SPC_SOFT; ++ if (src->d_fieldmask & FS_DQ_RTBHARD) ++ dst->d_fieldmask |= QC_RT_SPC_HARD; ++ if (src->d_fieldmask & FS_DQ_BTIMER) ++ dst->d_fieldmask |= QC_SPC_TIMER; ++ if (src->d_fieldmask & FS_DQ_ITIMER) ++ dst->d_fieldmask |= QC_INO_TIMER; ++ if (src->d_fieldmask & FS_DQ_RTBTIMER) ++ dst->d_fieldmask |= QC_RT_SPC_TIMER; ++ if 
(src->d_fieldmask & FS_DQ_BWARNS) ++ dst->d_fieldmask |= QC_SPC_WARNS; ++ if (src->d_fieldmask & FS_DQ_IWARNS) ++ dst->d_fieldmask |= QC_INO_WARNS; ++ if (src->d_fieldmask & FS_DQ_RTBWARNS) ++ dst->d_fieldmask |= QC_RT_SPC_WARNS; ++ if (src->d_fieldmask & FS_DQ_BCOUNT) ++ dst->d_fieldmask |= QC_SPACE; ++ if (src->d_fieldmask & FS_DQ_ICOUNT) ++ dst->d_fieldmask |= QC_INO_COUNT; ++ if (src->d_fieldmask & FS_DQ_RTBCOUNT) ++ dst->d_fieldmask |= QC_RT_SPACE; ++} ++ + static int quota_setxquota(struct super_block *sb, int type, qid_t id, + void __user *addr) + { + struct fs_disk_quota fdq; ++ struct qc_dqblk qdq; + struct kqid qid; + + if (copy_from_user(&fdq, addr, sizeof(fdq))) +@@ -257,13 +335,44 @@ static int quota_setxquota(struct super_ + qid = make_kqid(current_user_ns(), type, id); + if (!qid_valid(qid)) + return -EINVAL; +- return sb->s_qcop->set_dqblk(sb, qid, &fdq); ++ copy_from_xfs_dqblk(&qdq, &fdq); ++ return sb->s_qcop->set_dqblk(sb, qid, &qdq); ++} ++ ++static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, ++ int type, qid_t id) ++{ ++ memset(dst, 0, sizeof(*dst)); ++ dst->d_version = FS_DQUOT_VERSION; ++ dst->d_id = id; ++ if (type == USRQUOTA) ++ dst->d_flags = FS_USER_QUOTA; ++ else if (type == PRJQUOTA) ++ dst->d_flags = FS_PROJ_QUOTA; ++ else ++ dst->d_flags = FS_GROUP_QUOTA; ++ dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); ++ dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); ++ dst->d_ino_hardlimit = src->d_ino_hardlimit; ++ dst->d_ino_softlimit = src->d_ino_softlimit; ++ dst->d_bcount = quota_btobb(src->d_space); ++ dst->d_icount = src->d_ino_count; ++ dst->d_itimer = src->d_ino_timer; ++ dst->d_btimer = src->d_spc_timer; ++ dst->d_iwarns = src->d_ino_warns; ++ dst->d_bwarns = src->d_spc_warns; ++ dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); ++ dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); ++ dst->d_rtbcount = quota_btobb(src->d_rt_space); ++ dst->d_rtbtimer = src->d_rt_spc_timer; ++ dst->d_rtbwarns = src->d_rt_spc_warns; + } + + static int quota_getxquota(struct super_block *sb, int type, qid_t id, + void __user *addr) + { + struct fs_disk_quota fdq; ++ struct qc_dqblk qdq; + struct kqid qid; + int ret; + +@@ -272,8 +381,11 @@ static int quota_getxquota(struct super_ + qid = make_kqid(current_user_ns(), type, id); + if (!qid_valid(qid)) + return -EINVAL; +- ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); +- if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) ++ ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); ++ if (ret) ++ return ret; ++ copy_to_xfs_dqblk(&fdq, &qdq, type, id); ++ if (copy_to_user(addr, &fdq, sizeof(fdq))) + return -EFAULT; + return ret; + } +--- a/fs/xfs/xfs_qm.h ++++ b/fs/xfs/xfs_qm.h +@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(st + /* quota ops */ + extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); + extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, +- uint, struct fs_disk_quota *); ++ uint, struct qc_dqblk *); + extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, +- struct fs_disk_quota *); ++ struct qc_dqblk *); + extern int xfs_qm_scall_getqstat(struct xfs_mount *, + struct fs_quota_stat *); + extern int xfs_qm_scall_getqstatv(struct xfs_mount *, +--- a/fs/xfs/xfs_qm_syscalls.c ++++ b/fs/xfs/xfs_qm_syscalls.c +@@ -40,7 +40,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount + STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, + uint); + STATIC uint xfs_qm_export_flags(uint); +-STATIC uint 
xfs_qm_export_qtype_flags(uint); + + /* + * Turn off quota accounting and/or enforcement for all udquots and/or +@@ -574,8 +573,8 @@ xfs_qm_scall_getqstatv( + return 0; + } + +-#define XFS_DQ_MASK \ +- (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) ++#define XFS_QC_MASK \ ++ (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) + + /* + * Adjust quota limits, and start/stop timers accordingly. +@@ -585,7 +584,7 @@ xfs_qm_scall_setqlim( + struct xfs_mount *mp, + xfs_dqid_t id, + uint type, +- fs_disk_quota_t *newlim) ++ struct qc_dqblk *newlim) + { + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_disk_dquot *ddq; +@@ -594,9 +593,9 @@ xfs_qm_scall_setqlim( + int error; + xfs_qcnt_t hard, soft; + +- if (newlim->d_fieldmask & ~XFS_DQ_MASK) ++ if (newlim->d_fieldmask & ~XFS_QC_MASK) + return -EINVAL; +- if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) ++ if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) + return 0; + + /* +@@ -634,11 +633,11 @@ xfs_qm_scall_setqlim( + /* + * Make sure that hardlimits are >= soft limits before changing. + */ +- hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? +- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : ++ hard = (newlim->d_fieldmask & QC_SPC_HARD) ? ++ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : + be64_to_cpu(ddq->d_blk_hardlimit); +- soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? +- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : ++ soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? ++ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : + be64_to_cpu(ddq->d_blk_softlimit); + if (hard == 0 || hard >= soft) { + ddq->d_blk_hardlimit = cpu_to_be64(hard); +@@ -651,11 +650,11 @@ xfs_qm_scall_setqlim( + } else { + xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); + } +- hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? +- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : ++ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? ++ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : + be64_to_cpu(ddq->d_rtb_hardlimit); +- soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? +- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : ++ soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? ++ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : + be64_to_cpu(ddq->d_rtb_softlimit); + if (hard == 0 || hard >= soft) { + ddq->d_rtb_hardlimit = cpu_to_be64(hard); +@@ -668,10 +667,10 @@ xfs_qm_scall_setqlim( + xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); + } + +- hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? ++ hard = (newlim->d_fieldmask & QC_INO_HARD) ? + (xfs_qcnt_t) newlim->d_ino_hardlimit : + be64_to_cpu(ddq->d_ino_hardlimit); +- soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? ++ soft = (newlim->d_fieldmask & QC_INO_SOFT) ? 
+ (xfs_qcnt_t) newlim->d_ino_softlimit : + be64_to_cpu(ddq->d_ino_softlimit); + if (hard == 0 || hard >= soft) { +@@ -688,12 +687,12 @@ xfs_qm_scall_setqlim( + /* + * Update warnings counter(s) if requested + */ +- if (newlim->d_fieldmask & FS_DQ_BWARNS) +- ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); +- if (newlim->d_fieldmask & FS_DQ_IWARNS) +- ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); +- if (newlim->d_fieldmask & FS_DQ_RTBWARNS) +- ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); ++ if (newlim->d_fieldmask & QC_SPC_WARNS) ++ ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); ++ if (newlim->d_fieldmask & QC_INO_WARNS) ++ ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); ++ if (newlim->d_fieldmask & QC_RT_SPC_WARNS) ++ ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); + + if (id == 0) { + /* +@@ -703,24 +702,24 @@ xfs_qm_scall_setqlim( + * soft and hard limit values (already done, above), and + * for warnings. + */ +- if (newlim->d_fieldmask & FS_DQ_BTIMER) { +- q->qi_btimelimit = newlim->d_btimer; +- ddq->d_btimer = cpu_to_be32(newlim->d_btimer); ++ if (newlim->d_fieldmask & QC_SPC_TIMER) { ++ q->qi_btimelimit = newlim->d_spc_timer; ++ ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); + } +- if (newlim->d_fieldmask & FS_DQ_ITIMER) { +- q->qi_itimelimit = newlim->d_itimer; +- ddq->d_itimer = cpu_to_be32(newlim->d_itimer); ++ if (newlim->d_fieldmask & QC_INO_TIMER) { ++ q->qi_itimelimit = newlim->d_ino_timer; ++ ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); + } +- if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { +- q->qi_rtbtimelimit = newlim->d_rtbtimer; +- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); ++ if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { ++ q->qi_rtbtimelimit = newlim->d_rt_spc_timer; ++ ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); + } +- if (newlim->d_fieldmask & FS_DQ_BWARNS) +- q->qi_bwarnlimit = newlim->d_bwarns; +- if (newlim->d_fieldmask & FS_DQ_IWARNS) +- q->qi_iwarnlimit = newlim->d_iwarns; +- if (newlim->d_fieldmask & FS_DQ_RTBWARNS) +- q->qi_rtbwarnlimit = newlim->d_rtbwarns; ++ if (newlim->d_fieldmask & QC_SPC_WARNS) ++ q->qi_bwarnlimit = newlim->d_spc_warns; ++ if (newlim->d_fieldmask & QC_INO_WARNS) ++ q->qi_iwarnlimit = newlim->d_ino_warns; ++ if (newlim->d_fieldmask & QC_RT_SPC_WARNS) ++ q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; + } else { + /* + * If the user is now over quota, start the timelimit. 
+@@ -831,7 +830,7 @@ xfs_qm_scall_getquota( + struct xfs_mount *mp, + xfs_dqid_t id, + uint type, +- struct fs_disk_quota *dst) ++ struct qc_dqblk *dst) + { + struct xfs_dquot *dqp; + int error; +@@ -855,28 +854,25 @@ xfs_qm_scall_getquota( + } + + memset(dst, 0, sizeof(*dst)); +- dst->d_version = FS_DQUOT_VERSION; +- dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); +- dst->d_id = be32_to_cpu(dqp->q_core.d_id); +- dst->d_blk_hardlimit = +- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); +- dst->d_blk_softlimit = +- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); ++ dst->d_spc_hardlimit = ++ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); ++ dst->d_spc_softlimit = ++ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); + dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); + dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); +- dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); +- dst->d_icount = dqp->q_res_icount; +- dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); +- dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); +- dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); +- dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); +- dst->d_rtb_hardlimit = +- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); +- dst->d_rtb_softlimit = +- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); +- dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); +- dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); +- dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); ++ dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); ++ dst->d_ino_count = dqp->q_res_icount; ++ dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); ++ dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); ++ dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); ++ dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); ++ dst->d_rt_spc_hardlimit = ++ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); ++ dst->d_rt_spc_softlimit = ++ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); ++ dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); ++ dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); ++ dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); + + /* + * Internally, we don't reset all the timers when quota enforcement +@@ -889,23 +885,23 @@ xfs_qm_scall_getquota( + dqp->q_core.d_flags == XFS_DQ_GROUP) || + (!XFS_IS_PQUOTA_ENFORCED(mp) && + dqp->q_core.d_flags == XFS_DQ_PROJ)) { +- dst->d_btimer = 0; +- dst->d_itimer = 0; +- dst->d_rtbtimer = 0; ++ dst->d_spc_timer = 0; ++ dst->d_ino_timer = 0; ++ dst->d_rt_spc_timer = 0; + } + + #ifdef DEBUG +- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || +- (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || +- (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && +- dst->d_id != 0) { +- if ((dst->d_bcount > dst->d_blk_softlimit) && +- (dst->d_blk_softlimit > 0)) { +- ASSERT(dst->d_btimer != 0); ++ if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || ++ (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || ++ (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && ++ id != 0) { ++ if ((dst->d_space > dst->d_spc_softlimit) && ++ (dst->d_spc_softlimit > 0)) { ++ ASSERT(dst->d_spc_timer != 0); + } +- if ((dst->d_icount > dst->d_ino_softlimit) && ++ if ((dst->d_ino_count > dst->d_ino_softlimit) && + (dst->d_ino_softlimit > 0)) { +- ASSERT(dst->d_itimer != 0); ++ ASSERT(dst->d_ino_timer != 0); + } + } + 
#endif +@@ -915,26 +911,6 @@ out_put: + } + + STATIC uint +-xfs_qm_export_qtype_flags( +- uint flags) +-{ +- /* +- * Can't be more than one, or none. +- */ +- ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != +- (FS_PROJ_QUOTA | FS_USER_QUOTA)); +- ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != +- (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); +- ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != +- (FS_USER_QUOTA | FS_GROUP_QUOTA)); +- ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); +- +- return (flags & XFS_DQ_USER) ? +- FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? +- FS_PROJ_QUOTA : FS_GROUP_QUOTA; +-} +- +-STATIC uint + xfs_qm_export_flags( + uint flags) + { +--- a/fs/xfs/xfs_quotaops.c ++++ b/fs/xfs/xfs_quotaops.c +@@ -133,7 +133,7 @@ STATIC int + xfs_fs_get_dqblk( + struct super_block *sb, + struct kqid qid, +- struct fs_disk_quota *fdq) ++ struct qc_dqblk *qdq) + { + struct xfs_mount *mp = XFS_M(sb); + +@@ -143,14 +143,14 @@ xfs_fs_get_dqblk( + return -ESRCH; + + return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), +- xfs_quota_type(qid.type), fdq); ++ xfs_quota_type(qid.type), qdq); + } + + STATIC int + xfs_fs_set_dqblk( + struct super_block *sb, + struct kqid qid, +- struct fs_disk_quota *fdq) ++ struct qc_dqblk *qdq) + { + struct xfs_mount *mp = XFS_M(sb); + +@@ -162,7 +162,7 @@ xfs_fs_set_dqblk( + return -ESRCH; + + return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), +- xfs_quota_type(qid.type), fdq); ++ xfs_quota_type(qid.type), qdq); + } + + const struct quotactl_ops xfs_quotactl_operations = { +--- a/include/linux/quota.h ++++ b/include/linux/quota.h +@@ -316,6 +316,49 @@ struct dquot_operations { + + struct path; + ++/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ ++struct qc_dqblk { ++ int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ ++ u64 d_spc_hardlimit; /* absolute limit on used space */ ++ u64 d_spc_softlimit; /* preferred limit on used space */ ++ u64 d_ino_hardlimit; /* maximum # allocated inodes */ ++ u64 d_ino_softlimit; /* preferred inode limit */ ++ u64 d_space; /* Space owned by the user */ ++ u64 d_ino_count; /* # inodes owned by the user */ ++ s64 d_ino_timer; /* zero if within inode limits */ ++ /* if not, we refuse service */ ++ s64 d_spc_timer; /* similar to above; for space */ ++ int d_ino_warns; /* # warnings issued wrt num inodes */ ++ int d_spc_warns; /* # warnings issued wrt used space */ ++ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ ++ u64 d_rt_spc_softlimit; /* preferred limit on RT space */ ++ u64 d_rt_space; /* realtime space owned */ ++ s64 d_rt_spc_timer; /* similar to above; for RT space */ ++ int d_rt_spc_warns; /* # warnings issued wrt RT space */ ++}; ++ ++/* Field specifiers for ->set_dqblk() in struct qc_dqblk */ ++#define QC_INO_SOFT (1<<0) ++#define QC_INO_HARD (1<<1) ++#define QC_SPC_SOFT (1<<2) ++#define QC_SPC_HARD (1<<3) ++#define QC_RT_SPC_SOFT (1<<4) ++#define QC_RT_SPC_HARD (1<<5) ++#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ ++ QC_RT_SPC_SOFT | QC_RT_SPC_HARD) ++#define QC_SPC_TIMER (1<<6) ++#define QC_INO_TIMER (1<<7) ++#define QC_RT_SPC_TIMER (1<<8) ++#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) ++#define QC_SPC_WARNS (1<<9) ++#define QC_INO_WARNS (1<<10) ++#define QC_RT_SPC_WARNS (1<<11) ++#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) ++#define QC_SPACE (1<<12) ++#define QC_INO_COUNT (1<<13) ++#define QC_RT_SPACE (1<<14) ++#define QC_ACCT_MASK (QC_SPACE 
| QC_INO_COUNT | QC_RT_SPACE) ++ + /* Operations handling requests from userspace */ + struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, struct path *); +@@ -324,8 +367,8 @@ struct quotactl_ops { + int (*quota_sync)(struct super_block *, int); + int (*get_info)(struct super_block *, int, struct if_dqinfo *); + int (*set_info)(struct super_block *, int, struct if_dqinfo *); +- int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); +- int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); ++ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); ++ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_xstate)(struct super_block *, struct fs_quota_stat *); + int (*set_xstate)(struct super_block *, unsigned int, int); + int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); +--- a/include/linux/quotaops.h ++++ b/include/linux/quotaops.h +@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block + int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); + int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); + int dquot_get_dqblk(struct super_block *sb, struct kqid id, +- struct fs_disk_quota *di); ++ struct qc_dqblk *di); + int dquot_set_dqblk(struct super_block *sb, struct kqid id, +- struct fs_disk_quota *di); ++ struct qc_dqblk *di); + + int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); + int dquot_transfer(struct inode *inode, struct iattr *iattr); diff --git a/queue-3.18/series b/queue-3.18/series index 2c8db3e2256..d7f0ccb798a 100644 --- a/queue-3.18/series +++ b/queue-3.18/series @@ -47,3 +47,4 @@ drm-i915-only-fence-tiled-region-of-object.patch drm-i915-bdw-fix-halo-pci-ids-marked-as-ult.patch drm-i915-init-ppgtt-before-context-enable.patch drm-i915-fix-inconsistent-brightness-after-resume.patch +quota-switch-get_dqblk-and-set_dqblk-to-use-bytes-as-space-units.patch
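
For reference, the unit mismatch the changelog describes comes down to one
conversion: struct fs_disk_quota counts space in 512-byte blocks, while the
VFS tracks bytes. The standalone sketch below is not part of the patch; it
only mirrors the arithmetic of the quota_bbtob()/quota_btobb() helpers the
patch adds to fs/quota/quota.c, with a made-up sample byte count, to show
why a byte value stored unconverted in the block-based d_bcount field reads
back roughly 512 times too large.

/*
 * Standalone illustration (not part of the patch) of the byte vs.
 * 512-byte-block unit handling described in the changelog above.
 * The conversion helpers mirror quota_bbtob()/quota_btobb() from
 * fs/quota/quota.c; the sample value is for demonstration only.
 */
#include <stdio.h>
#include <stdint.h>

#define XFS_BB_SHIFT 9			/* 512-byte "basic blocks" */

/* basic blocks -> bytes, as in quota_bbtob() */
static uint64_t quota_bbtob(uint64_t blocks)
{
	return blocks << XFS_BB_SHIFT;
}

/* bytes -> basic blocks, rounding up, as in quota_btobb() */
static uint64_t quota_btobb(uint64_t bytes)
{
	return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

int main(void)
{
	uint64_t used_bytes = 1063256064ULL;	/* hypothetical charged space */

	/*
	 * Old behaviour for VFS quotas: the byte count was stored in
	 * d_bcount (defined in 512-byte blocks) with no conversion, so
	 * Q_XGETQUOTA consumers multiplying by 512 saw ~512x the usage.
	 */
	printf("bogus usage if bytes are read as blocks:  %llu bytes\n",
	       (unsigned long long)quota_bbtob(used_bytes));

	/*
	 * With the patch, ->get_dqblk() reports bytes via qc_dqblk and
	 * fs/quota/quota.c converts explicitly at the syscall boundary.
	 */
	printf("correct d_bcount in 512-byte blocks:      %llu blocks\n",
	       (unsigned long long)quota_btobb(used_bytes));
	printf("round-tripped back to bytes (rounded up): %llu bytes\n",
	       (unsigned long long)quota_bbtob(quota_btobb(used_bytes)));
	return 0;
}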