// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log_priv.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax,
};
static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	{}
};
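/*
 * Note on the table above (illustrative, not taken from the original
 * source): fs_parse() matches each incoming "key" or "key=value" mount
 * token against this table and hands back the Opt_* enumerator, so e.g.
 * mounting with "-o logbufs=8,noalign" reaches xfs_fc_parse_param() below
 * as Opt_logbufs (with result.uint_32 == 8) followed by Opt_noalign.
 */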
struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
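/*
 * Worked example (illustrative, not from the original source): assuming
 * 4k blocks and 512-byte inodes there are 8 inodes per block, so the last
 * 32-bit-addressable inode (XFS_MAXINUMBER_32 == 2^32 - 1) falls at about
 * fs block 2^32 / 8 = 2^29, i.e. roughly the first 2TB of the data
 * device.  Under inode32, AGs lying wholly above that point get
 * pagi_inodeok == 0 in the loop above and never receive new inodes.
 */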
STATIC int
xfs_blkdev_get(
	struct xfs_mount	*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}
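/*
 * Design note (illustrative, not from the original source): the unwind
 * labels above release resources in strict reverse order of acquisition,
 * which is why the log device is opened first and torn down last among
 * the optional devices, and why xfs_close_devices() mirrors the same
 * ordering at unmount time.
 */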
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}
/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while
 * waiting for IO to complete so that we effectively throttle multiple callers
 * to the rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}
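/*
 * Usage note (illustrative, not from the original source): callers such
 * as the buffered-write ENOSPC retry path may invoke xfs_flush_inodes()
 * concurrently; the flush_work() pairing above means each caller waits
 * for exactly one full flush pass while never queueing more than one
 * scan at a time.
 */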
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}
#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to take the flush
	 * lock. The background reclaim path handles this more efficiently
	 * than we can here, so simply let background reclaim tear down all
	 * inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}
static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every
 * inode we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
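/*
 * Note (illustrative, not from the original source): this save/restore
 * pair brackets freeze/thaw and the rw/ro remount transitions below.
 * Saving sets the in-memory reserve pool to zero so the free block count
 * written to the on-disk superblock is not polluted by reserved blocks
 * while the filesystem is quiesced; restoring puts the pool back.
 */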
/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to their location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that
 * we will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_stop_block_reaping(mp);
	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}
STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_start_block_reaping(mp);
	return 0;
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}
static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};
STATIC int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	*res = _res << shift_left_factor;
	kfree(value);

	return ret;
}
/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(mp->m_logname);
		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(mp->m_rtname);
		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		mp->m_allocsize_log = ffs(size) - 1;
		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_ikeep:
		mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_largeio:
		mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_attr2:
		mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		mp->m_flags &= ~XFS_MOUNT_ATTR2;
		mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	case Opt_filestreams:
		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
				 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
				 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
				 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		mp->m_flags |= XFS_MOUNT_DAX;
		return 0;
#endif
	default:
		xfs_warn(mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}
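/*
 * Illustrative example (not from the original source): mounting with
 * "-o sunit=512,swidth=4096" reaches this function as two separate
 * calls, storing mp->m_dalign = 512 and mp->m_swidth = 4096 (512-byte
 * units at this point); the combination is cross-checked later in
 * xfs_fc_validate_params() below.
 */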
static int
xfs_fc_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
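/*
 * Illustrative example (not from the original source): "logbufs=1" fails
 * the range check above, since in this era of the code XLOG_MIN_ICLOGS
 * and XLOG_MAX_ICLOGS bound the value to 2..8, while "logbufs=8" passes;
 * similarly "logbsize=48k" is rejected for not being a power of two.
 */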
static int
xfs_fc_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fc_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	sb->s_time_min = S32_MIN;
	sb->s_time_max = S32_MAX;
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
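/*
 * Note (illustrative, not from the original source): there are two unwind
 * paths above - the cascading out_* labels undo setup done before
 * xfs_mountfs(), while out_unmount handles the case where the full mount
 * succeeded but the root inode could not be grabbed, so the filesystem
 * must be unmounted before joining the common teardown chain.
 */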
static int
xfs_fc_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fc_fill_super);
}
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_start_block_reaping(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_stop_block_reaping(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_icache_free_cowblocks(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_quiesce_attr(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}
/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fc_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	error = xfs_fc_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}
static void xfs_fc_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fc_parse_param,
	.get_tree    = xfs_fc_get_tree,
	.reconfigure = xfs_fc_reconfigure,
	.free        = xfs_fc_free,
};
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xf_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}
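/*
 * Design note (illustrative, not from the original source): the cascading
 * labels above are the idiomatic kernel unwind pattern - each allocation
 * failure jumps to the label that frees everything allocated before it,
 * so the caches are always destroyed in exact reverse order of creation.
 */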
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysfs_del(&xfs_dbg_kobj);
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");