// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};

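/*
 * Editor's sketch (not part of the upstream header): one way a caller might
 * honour these settings, assuming the config came from xfs_error_get_cfg()
 * declared at the bottom of this file.  "first_fail_time" is a hypothetical
 * timestamp recorded when the error first occurred.
 *
 *	struct xfs_error_cfg *cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, error);
 *	bool give_up = false;
 *
 *	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
 *	    time_after(jiffies, first_fail_time + cfg->retry_timeout))
 *		give_up = true;		// retry window (in jiffies) has expired
 */
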
/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct llist_head	list;
	struct work_struct	work;

	/* approximate count of inodes in the list */
	unsigned int		items;
};

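/*
 * Editor's sketch (not part of the upstream header): how the per-cpu
 * structure above is meant to be used together with the m_inodegc percpu
 * pointer and m_inodegc_wq workqueue in struct xfs_mount below.  The inode
 * member "i_gclist" is assumed here for illustration only.
 *
 *	struct xfs_inodegc *gc = this_cpu_ptr(mp->m_inodegc);
 *
 *	llist_add(&ip->i_gclist, &gc->list);
 *	gc->items++;
 *	if (xfs_is_inodegc_enabled(mp))
 *		queue_work(mp->m_inodegc_wq, &gc->work);
 */
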
/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly
 * variables first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
	struct list_head	m_mount_list;	/* global mount list */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */

	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] <= the minimum i for which
	 * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
	 * inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_cil_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;
	struct workqueue_struct	*m_inodegc_wq;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_flags;	/* global mount flags */
	int64_t			m_low_space[XFS_LOWSP_MAX];
						/* low free space thresholds */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	unsigned long		m_opstate;	/* dynamic state flags */
	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores;	/* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables. Frequently written variables and locks
	 * should be placed below this comment from now on. The first variable
	 * here is marked as cacheline aligned so that it is separated from
	 * the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;
	/*
	 * Global count of allocation btree blocks in use across all AGs. Only
	 * used when perag reservation is enabled. Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	xfs_agnumber_t		m_agirotor;	/* last ag dir inode alloced */
	spinlock_t		m_agirotor_lock;/* .. and lock protecting it */

	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif
} xfs_mount_t;

#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_flags.
 */
#define XFS_MOUNT_WSYNC		(1ULL << 0)	/* for nfs - all metadata ops
						   must be synchronous except
						   for space allocations */
#define XFS_MOUNT_UNMOUNTING	(1ULL << 1)	/* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
#define XFS_MOUNT_DISCARD	(1ULL << 5)	/* discard unused blocks */
#define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
						   allocations */
#define XFS_MOUNT_ATTR2		(1ULL << 8)	/* allow use of attr2 format */
#define XFS_MOUNT_GRPID		(1ULL << 9)	/* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY	(1ULL << 10)	/* no recovery - dirty fs */
#define XFS_MOUNT_ALLOCSIZE	(1ULL << 12)	/* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS	(1ULL << 14)	/* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES	(1ULL << 15)	/* inode32 allocator active */
#define XFS_MOUNT_NOUUID	(1ULL << 16)	/* ignore uuid during mount */
#define XFS_MOUNT_IKEEP		(1ULL << 18)	/* keep empty inode clusters */
#define XFS_MOUNT_SWALLOC	(1ULL << 19)	/* turn on stripe width
						   allocation */
#define XFS_MOUNT_RDONLY	(1ULL << 20)	/* read-only fs */
#define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
#define XFS_MOUNT_LARGEIO	(1ULL << 22)	/* report large preferred
						   I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS	(1ULL << 24)	/* enable the filestreams
						   allocator */
#define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
#define XFS_MOUNT_DAX_ALWAYS	(1ULL << 26)
#define XFS_MOUNT_DAX_NEVER	(1ULL << 27)

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work.  If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED	0

#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}

__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)

#define XFS_OPSTATE_STRINGS \
	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }

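/*
 * Editor's sketch (not part of the upstream header): for the INODEGC_ENABLED
 * bit, the macro above generates xfs_is_inodegc_enabled(),
 * xfs_clear_inodegc_enabled() and xfs_set_inodegc_enabled(); the set/clear
 * variants return the previous state of the bit.  XFS_OPSTATE_STRINGS looks
 * intended for flag decoding in tracepoints.  Typical usage:
 *
 *	if (xfs_set_inodegc_enabled(mp))
 *		return;		// was already enabled, nothing to do
 *	// ... schedule any queued inactivation work ...
 */
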
/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
				((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp)	((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	0x0004	/* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	0x0008	/* corrupt in-memory data structures */

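/*
 * Editor's sketch (not part of the upstream header): a caller that detects a
 * fatal metadata write error would shut the filesystem down with one of the
 * SHUTDOWN_* reason flags above, e.g.:
 *
 *	if (error)
 *		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
 *
 * The xfs_force_shutdown() macro records __FILE__/__LINE__ and forwards to
 * xfs_do_force_shutdown().
 */
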
/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}

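/*
 * Editor's note, worked example (not part of the upstream header): with a
 * 4096-byte block size there are 8 basic blocks per fs block, so a disk
 * address d = 262144 converts to fs block 32768.  With sb_agblocks = 16384,
 * xfs_daddr_to_agno() returns 32768 / 16384 = 2 and xfs_daddr_to_agbno()
 * returns 32768 % 16384 = 0, i.e. the first block of AG 2.
 */
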
int xfs_buf_hash_init(struct xfs_perag *pag);
void xfs_buf_hash_destroy(struct xfs_perag *pag);

extern void	xfs_uuid_table_free(void);
extern uint64_t	xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern void	xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page a time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024

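/*
 * Editor's sketch (not part of the upstream header): the batch size above is
 * meant for the percpu_counter batch APIs, so that small deltas stay
 * CPU-local until free space runs low and accuracy matters, e.g.:
 *
 *	percpu_counter_add_batch(&mp->m_fdblocks, delta, XFS_FDBLOCKS_BATCH);
 *	if (__percpu_counter_compare(&mp->m_fdblocks, 0,
 *				     XFS_FDBLOCKS_BATCH) < 0) {
 *		// back the change out and return -ENOSPC (sketch only)
 *	}
 */
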
extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
				 bool reserved);
extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
			xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);

#endif	/* __XFS_MOUNT_H__ */