/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
18 #include "libxfs_priv.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_mount.h"
25 #include "xfs_defer.h"
26 #include "xfs_inode.h"
27 #include "xfs_errortag.h"
28 #include "xfs_cksum.h"
29 #include "xfs_trans.h"
30 #include "xfs_ialloc.h"
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.  A zero di_next_unlinked is invalid on disk
 * (the list terminator is NULLAGINO), so finding one indicates a
 * corrupted inode cluster buffer.  Debug builds only.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	/* number of inodes in one cluster buffer */
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif
61 xfs_dinode_good_version(
65 if (xfs_sb_version_hascrc(&mp
->m_sb
))
68 return version
== 1 || version
== 2;
72 * If we are doing readahead on an inode buffer, we might be in log recovery
73 * reading an inode allocation buffer that hasn't yet been replayed, and hence
74 * has not had the inode cores stamped into it. Hence for readahead, the buffer
75 * may be potentially invalid.
77 * If the readahead buffer is invalid, we need to mark it with an error and
78 * clear the DONE status of the buffer so that a followup read will re-read it
79 * from disk. We don't report the error otherwise to avoid warnings during log
80 * recovery and we don't get unnecssary panics on debug kernels. We use EIO here
81 * because all we want to do is say readahead failed; there is no-one to report
82 * the error to, so this will distinguish it from a non-ra verifier failure.
83 * Changes to this readahead error behavour also need to be reflected in
84 * xfs_dquot_buf_readahead_verify().
91 struct xfs_mount
*mp
= bp
->b_target
->bt_mount
;
96 * Validate the magic number and version of every inode in the buffer
98 ni
= XFS_BB_TO_FSB(mp
, bp
->b_length
) * mp
->m_sb
.sb_inopblock
;
99 for (i
= 0; i
< ni
; i
++) {
103 dip
= xfs_buf_offset(bp
, (i
<< mp
->m_sb
.sb_inodelog
));
104 di_ok
= dip
->di_magic
== cpu_to_be16(XFS_DINODE_MAGIC
) &&
105 xfs_dinode_good_version(mp
, dip
->di_version
);
106 if (unlikely(XFS_TEST_ERROR(!di_ok
, mp
,
107 XFS_ERRTAG_ITOBP_INOTOBP
))) {
109 bp
->b_flags
&= ~XBF_DONE
;
110 xfs_buf_ioerror(bp
, -EIO
);
114 xfs_verifier_error(bp
, -EFSCORRUPTED
, __this_address
);
117 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
118 (unsigned long long)bp
->b_bn
, i
,
119 be16_to_cpu(dip
->di_magic
));
123 xfs_inobp_check(mp
, bp
);
128 xfs_inode_buf_read_verify(
131 xfs_inode_buf_verify(bp
, false);
135 xfs_inode_buf_readahead_verify(
138 xfs_inode_buf_verify(bp
, true);
142 xfs_inode_buf_write_verify(
145 xfs_inode_buf_verify(bp
, false);
148 const struct xfs_buf_ops xfs_inode_buf_ops
= {
150 .verify_read
= xfs_inode_buf_read_verify
,
151 .verify_write
= xfs_inode_buf_write_verify
,
154 const struct xfs_buf_ops xfs_inode_buf_ra_ops
= {
155 .name
= "xxfs_inode_ra",
156 .verify_read
= xfs_inode_buf_readahead_verify
,
157 .verify_write
= xfs_inode_buf_write_verify
,
162 * This routine is called to map an inode to the buffer containing the on-disk
163 * version of the inode. It returns a pointer to the buffer containing the
164 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
165 * pointer to the on-disk inode within that buffer.
167 * If a non-zero error is returned, then the contents of bpp and dipp are
172 struct xfs_mount
*mp
,
173 struct xfs_trans
*tp
,
174 struct xfs_imap
*imap
,
175 struct xfs_dinode
**dipp
,
176 struct xfs_buf
**bpp
,
183 buf_flags
|= XBF_UNMAPPED
;
184 error
= xfs_trans_read_buf(mp
, tp
, mp
->m_ddev_targp
, imap
->im_blkno
,
185 (int)imap
->im_len
, buf_flags
, &bp
,
188 if (error
== -EAGAIN
) {
189 ASSERT(buf_flags
& XBF_TRYLOCK
);
193 if (error
== -EFSCORRUPTED
&&
194 (iget_flags
& XFS_IGET_UNTRUSTED
))
197 xfs_warn(mp
, "%s: xfs_trans_read_buf() returned error %d.",
203 *dipp
= xfs_buf_offset(bp
, imap
->im_boffset
);
209 struct xfs_inode
*ip
,
210 struct xfs_dinode
*from
)
212 struct xfs_icdinode
*to
= &ip
->i_d
;
213 struct inode
*inode
= VFS_I(ip
);
217 * Convert v1 inodes immediately to v2 inode format as this is the
218 * minimum inode version format we support in the rest of the code.
220 to
->di_version
= from
->di_version
;
221 if (to
->di_version
== 1) {
222 set_nlink(inode
, be16_to_cpu(from
->di_onlink
));
223 to
->di_projid_lo
= 0;
224 to
->di_projid_hi
= 0;
227 set_nlink(inode
, be32_to_cpu(from
->di_nlink
));
228 to
->di_projid_lo
= be16_to_cpu(from
->di_projid_lo
);
229 to
->di_projid_hi
= be16_to_cpu(from
->di_projid_hi
);
232 to
->di_format
= from
->di_format
;
233 to
->di_uid
= be32_to_cpu(from
->di_uid
);
234 to
->di_gid
= be32_to_cpu(from
->di_gid
);
235 to
->di_flushiter
= be16_to_cpu(from
->di_flushiter
);
238 * Time is signed, so need to convert to signed 32 bit before
239 * storing in inode timestamp which may be 64 bit. Otherwise
240 * a time before epoch is converted to a time long after epoch
243 inode
->i_atime
.tv_sec
= (int)be32_to_cpu(from
->di_atime
.t_sec
);
244 inode
->i_atime
.tv_nsec
= (int)be32_to_cpu(from
->di_atime
.t_nsec
);
245 inode
->i_mtime
.tv_sec
= (int)be32_to_cpu(from
->di_mtime
.t_sec
);
246 inode
->i_mtime
.tv_nsec
= (int)be32_to_cpu(from
->di_mtime
.t_nsec
);
247 inode
->i_ctime
.tv_sec
= (int)be32_to_cpu(from
->di_ctime
.t_sec
);
248 inode
->i_ctime
.tv_nsec
= (int)be32_to_cpu(from
->di_ctime
.t_nsec
);
249 inode
->i_generation
= be32_to_cpu(from
->di_gen
);
250 inode
->i_mode
= be16_to_cpu(from
->di_mode
);
252 to
->di_size
= be64_to_cpu(from
->di_size
);
253 to
->di_nblocks
= be64_to_cpu(from
->di_nblocks
);
254 to
->di_extsize
= be32_to_cpu(from
->di_extsize
);
255 to
->di_nextents
= be32_to_cpu(from
->di_nextents
);
256 to
->di_anextents
= be16_to_cpu(from
->di_anextents
);
257 to
->di_forkoff
= from
->di_forkoff
;
258 to
->di_aformat
= from
->di_aformat
;
259 to
->di_dmevmask
= be32_to_cpu(from
->di_dmevmask
);
260 to
->di_dmstate
= be16_to_cpu(from
->di_dmstate
);
261 to
->di_flags
= be16_to_cpu(from
->di_flags
);
263 if (to
->di_version
== 3) {
264 inode
->i_version
= be64_to_cpu(from
->di_changecount
);
265 to
->di_crtime
.t_sec
= be32_to_cpu(from
->di_crtime
.t_sec
);
266 to
->di_crtime
.t_nsec
= be32_to_cpu(from
->di_crtime
.t_nsec
);
267 to
->di_flags2
= be64_to_cpu(from
->di_flags2
);
268 to
->di_cowextsize
= be32_to_cpu(from
->di_cowextsize
);
274 struct xfs_inode
*ip
,
275 struct xfs_dinode
*to
,
278 struct xfs_icdinode
*from
= &ip
->i_d
;
279 struct inode
*inode
= VFS_I(ip
);
281 to
->di_magic
= cpu_to_be16(XFS_DINODE_MAGIC
);
284 to
->di_version
= from
->di_version
;
285 to
->di_format
= from
->di_format
;
286 to
->di_uid
= cpu_to_be32(from
->di_uid
);
287 to
->di_gid
= cpu_to_be32(from
->di_gid
);
288 to
->di_projid_lo
= cpu_to_be16(from
->di_projid_lo
);
289 to
->di_projid_hi
= cpu_to_be16(from
->di_projid_hi
);
291 memset(to
->di_pad
, 0, sizeof(to
->di_pad
));
292 to
->di_atime
.t_sec
= cpu_to_be32(inode
->i_atime
.tv_sec
);
293 to
->di_atime
.t_nsec
= cpu_to_be32(inode
->i_atime
.tv_nsec
);
294 to
->di_mtime
.t_sec
= cpu_to_be32(inode
->i_mtime
.tv_sec
);
295 to
->di_mtime
.t_nsec
= cpu_to_be32(inode
->i_mtime
.tv_nsec
);
296 to
->di_ctime
.t_sec
= cpu_to_be32(inode
->i_ctime
.tv_sec
);
297 to
->di_ctime
.t_nsec
= cpu_to_be32(inode
->i_ctime
.tv_nsec
);
298 to
->di_nlink
= cpu_to_be32(inode
->i_nlink
);
299 to
->di_gen
= cpu_to_be32(inode
->i_generation
);
300 to
->di_mode
= cpu_to_be16(inode
->i_mode
);
302 to
->di_size
= cpu_to_be64(from
->di_size
);
303 to
->di_nblocks
= cpu_to_be64(from
->di_nblocks
);
304 to
->di_extsize
= cpu_to_be32(from
->di_extsize
);
305 to
->di_nextents
= cpu_to_be32(from
->di_nextents
);
306 to
->di_anextents
= cpu_to_be16(from
->di_anextents
);
307 to
->di_forkoff
= from
->di_forkoff
;
308 to
->di_aformat
= from
->di_aformat
;
309 to
->di_dmevmask
= cpu_to_be32(from
->di_dmevmask
);
310 to
->di_dmstate
= cpu_to_be16(from
->di_dmstate
);
311 to
->di_flags
= cpu_to_be16(from
->di_flags
);
313 if (from
->di_version
== 3) {
314 to
->di_changecount
= cpu_to_be64(inode
->i_version
);
315 to
->di_crtime
.t_sec
= cpu_to_be32(from
->di_crtime
.t_sec
);
316 to
->di_crtime
.t_nsec
= cpu_to_be32(from
->di_crtime
.t_nsec
);
317 to
->di_flags2
= cpu_to_be64(from
->di_flags2
);
318 to
->di_cowextsize
= cpu_to_be32(from
->di_cowextsize
);
319 to
->di_ino
= cpu_to_be64(ip
->i_ino
);
320 to
->di_lsn
= cpu_to_be64(lsn
);
321 memset(to
->di_pad2
, 0, sizeof(to
->di_pad2
));
322 uuid_copy(&to
->di_uuid
, &ip
->i_mount
->m_sb
.sb_meta_uuid
);
323 to
->di_flushiter
= 0;
325 to
->di_flushiter
= cpu_to_be16(from
->di_flushiter
);
330 xfs_log_dinode_to_disk(
331 struct xfs_log_dinode
*from
,
332 struct xfs_dinode
*to
)
334 to
->di_magic
= cpu_to_be16(from
->di_magic
);
335 to
->di_mode
= cpu_to_be16(from
->di_mode
);
336 to
->di_version
= from
->di_version
;
337 to
->di_format
= from
->di_format
;
339 to
->di_uid
= cpu_to_be32(from
->di_uid
);
340 to
->di_gid
= cpu_to_be32(from
->di_gid
);
341 to
->di_nlink
= cpu_to_be32(from
->di_nlink
);
342 to
->di_projid_lo
= cpu_to_be16(from
->di_projid_lo
);
343 to
->di_projid_hi
= cpu_to_be16(from
->di_projid_hi
);
344 memcpy(to
->di_pad
, from
->di_pad
, sizeof(to
->di_pad
));
346 to
->di_atime
.t_sec
= cpu_to_be32(from
->di_atime
.t_sec
);
347 to
->di_atime
.t_nsec
= cpu_to_be32(from
->di_atime
.t_nsec
);
348 to
->di_mtime
.t_sec
= cpu_to_be32(from
->di_mtime
.t_sec
);
349 to
->di_mtime
.t_nsec
= cpu_to_be32(from
->di_mtime
.t_nsec
);
350 to
->di_ctime
.t_sec
= cpu_to_be32(from
->di_ctime
.t_sec
);
351 to
->di_ctime
.t_nsec
= cpu_to_be32(from
->di_ctime
.t_nsec
);
353 to
->di_size
= cpu_to_be64(from
->di_size
);
354 to
->di_nblocks
= cpu_to_be64(from
->di_nblocks
);
355 to
->di_extsize
= cpu_to_be32(from
->di_extsize
);
356 to
->di_nextents
= cpu_to_be32(from
->di_nextents
);
357 to
->di_anextents
= cpu_to_be16(from
->di_anextents
);
358 to
->di_forkoff
= from
->di_forkoff
;
359 to
->di_aformat
= from
->di_aformat
;
360 to
->di_dmevmask
= cpu_to_be32(from
->di_dmevmask
);
361 to
->di_dmstate
= cpu_to_be16(from
->di_dmstate
);
362 to
->di_flags
= cpu_to_be16(from
->di_flags
);
363 to
->di_gen
= cpu_to_be32(from
->di_gen
);
365 if (from
->di_version
== 3) {
366 to
->di_changecount
= cpu_to_be64(from
->di_changecount
);
367 to
->di_crtime
.t_sec
= cpu_to_be32(from
->di_crtime
.t_sec
);
368 to
->di_crtime
.t_nsec
= cpu_to_be32(from
->di_crtime
.t_nsec
);
369 to
->di_flags2
= cpu_to_be64(from
->di_flags2
);
370 to
->di_cowextsize
= cpu_to_be32(from
->di_cowextsize
);
371 to
->di_ino
= cpu_to_be64(from
->di_ino
);
372 to
->di_lsn
= cpu_to_be64(from
->di_lsn
);
373 memcpy(to
->di_pad2
, from
->di_pad2
, sizeof(to
->di_pad2
));
374 uuid_copy(&to
->di_uuid
, &from
->di_uuid
);
375 to
->di_flushiter
= 0;
377 to
->di_flushiter
= cpu_to_be16(from
->di_flushiter
);
383 struct xfs_mount
*mp
,
385 struct xfs_dinode
*dip
)
391 if (dip
->di_magic
!= cpu_to_be16(XFS_DINODE_MAGIC
))
392 return __this_address
;
394 /* Verify v3 integrity information first */
395 if (dip
->di_version
>= 3) {
396 if (!xfs_sb_version_hascrc(&mp
->m_sb
))
397 return __this_address
;
398 if (!xfs_verify_cksum((char *)dip
, mp
->m_sb
.sb_inodesize
,
400 return __this_address
;
401 if (be64_to_cpu(dip
->di_ino
) != ino
)
402 return __this_address
;
403 if (!uuid_equal(&dip
->di_uuid
, &mp
->m_sb
.sb_meta_uuid
))
404 return __this_address
;
407 /* don't allow invalid i_size */
408 if (be64_to_cpu(dip
->di_size
) & (1ULL << 63))
409 return __this_address
;
411 mode
= be16_to_cpu(dip
->di_mode
);
412 if (mode
&& xfs_mode_to_ftype(mode
) == XFS_DIR3_FT_UNKNOWN
)
413 return __this_address
;
415 /* No zero-length symlinks/dirs. */
416 if ((S_ISLNK(mode
) || S_ISDIR(mode
)) && dip
->di_size
== 0)
417 return __this_address
;
419 /* only version 3 or greater inodes are extensively verified here */
420 if (dip
->di_version
< 3)
423 flags
= be16_to_cpu(dip
->di_flags
);
424 flags2
= be64_to_cpu(dip
->di_flags2
);
426 /* don't allow reflink/cowextsize if we don't have reflink */
427 if ((flags2
& (XFS_DIFLAG2_REFLINK
| XFS_DIFLAG2_COWEXTSIZE
)) &&
428 !xfs_sb_version_hasreflink(&mp
->m_sb
))
429 return __this_address
;
431 /* don't let reflink and realtime mix */
432 if ((flags2
& XFS_DIFLAG2_REFLINK
) && (flags
& XFS_DIFLAG_REALTIME
))
433 return __this_address
;
435 /* don't let reflink and dax mix */
436 if ((flags2
& XFS_DIFLAG2_REFLINK
) && (flags2
& XFS_DIFLAG2_DAX
))
437 return __this_address
;
444 struct xfs_mount
*mp
,
445 struct xfs_dinode
*dip
)
449 if (dip
->di_version
< 3)
452 ASSERT(xfs_sb_version_hascrc(&mp
->m_sb
));
453 crc
= xfs_start_cksum_update((char *)dip
, mp
->m_sb
.sb_inodesize
,
455 dip
->di_crc
= xfs_end_cksum(crc
);
459 * Read the disk inode attributes into the in-core inode structure.
461 * For version 5 superblocks, if we are initialising a new inode and we are not
462 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
463 * inode core with a random generation number. If we are keeping inodes around,
464 * we need to read the inode cluster to get the existing generation number off
465 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
466 * format) then log recovery is dependent on the di_flushiter field being
467 * initialised from the current on-disk value and hence we must also read the
483 * Fill in the location information in the in-core inode.
485 error
= xfs_imap(mp
, tp
, ip
->i_ino
, &ip
->i_imap
, iget_flags
);
489 /* shortcut IO on inode allocation if possible */
490 if ((iget_flags
& XFS_IGET_CREATE
) &&
491 xfs_sb_version_hascrc(&mp
->m_sb
) &&
492 !(mp
->m_flags
& XFS_MOUNT_IKEEP
)) {
493 /* initialise the on-disk inode core */
494 memset(&ip
->i_d
, 0, sizeof(ip
->i_d
));
495 VFS_I(ip
)->i_generation
= prandom_u32();
496 if (xfs_sb_version_hascrc(&mp
->m_sb
))
497 ip
->i_d
.di_version
= 3;
499 ip
->i_d
.di_version
= 2;
504 * Get pointers to the on-disk inode and the buffer containing it.
506 error
= xfs_imap_to_bp(mp
, tp
, &ip
->i_imap
, &dip
, &bp
, 0, iget_flags
);
510 /* even unallocated inodes are verified */
511 fa
= xfs_dinode_verify(mp
, ip
->i_ino
, dip
);
513 xfs_alert(mp
, "%s: validation failed for inode %lld at %pS",
514 __func__
, ip
->i_ino
, fa
);
516 XFS_CORRUPTION_ERROR(__func__
, XFS_ERRLEVEL_LOW
, mp
, dip
);
517 error
= -EFSCORRUPTED
;
522 * If the on-disk inode is already linked to a directory
523 * entry, copy all of the inode into the in-core inode.
524 * xfs_iformat_fork() handles copying in the inode format
525 * specific information.
526 * Otherwise, just get the truly permanent information.
529 xfs_inode_from_disk(ip
, dip
);
530 error
= xfs_iformat_fork(ip
, dip
);
533 xfs_alert(mp
, "%s: xfs_iformat() returned error %d",
540 * Partial initialisation of the in-core inode. Just the bits
541 * that xfs_ialloc won't overwrite or relies on being correct.
543 ip
->i_d
.di_version
= dip
->di_version
;
544 VFS_I(ip
)->i_generation
= be32_to_cpu(dip
->di_gen
);
545 ip
->i_d
.di_flushiter
= be16_to_cpu(dip
->di_flushiter
);
548 * Make sure to pull in the mode here as well in
549 * case the inode is released without being used.
550 * This ensures that xfs_inactive() will see that
551 * the inode is already free and not try to mess
552 * with the uninitialized part of it.
554 VFS_I(ip
)->i_mode
= 0;
557 ASSERT(ip
->i_d
.di_version
>= 2);
558 ip
->i_delayed_blks
= 0;
561 * Mark the buffer containing the inode as something to keep
562 * around for a while. This helps to keep recently accessed
563 * meta-data in-core longer.
565 xfs_buf_set_ref(bp
, XFS_INO_REF
);
568 * Use xfs_trans_brelse() to release the buffer containing the on-disk
569 * inode, because it was acquired with xfs_trans_read_buf() in
570 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
571 * brelse(). If we're within a transaction, then xfs_trans_brelse()
572 * will only release the buffer if it is not dirty within the
573 * transaction. It will be OK to release the buffer in this case,
574 * because inodes on disk are never destroyed and we will be locking the
575 * new in-core inode before putting it in the cache where other
576 * processes can find it. Thus we don't have to worry about the inode
577 * being changed just because we released the buffer.
580 xfs_trans_brelse(tp
, bp
);