/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp)
{
        int             i;
        int             j;
        xfs_dinode_t    *dip;

        j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

        for (i = 0; i < j; i++) {
                dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
                if (!dip->di_next_unlinked) {
                        xfs_alert(mp,
        "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
                                i, (long long)bp->b_bn);
                }
        }
}
#endif

bool
xfs_dinode_good_version(
        struct xfs_mount *mp,
        __u8            version)
{
        if (xfs_sb_version_hascrc(&mp->m_sb))
                return version == 3;

        return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
        struct xfs_buf  *bp,
        bool            readahead)
{
        struct xfs_mount *mp = bp->b_target->bt_mount;
        int             i;
        int             ni;

        /*
         * Validate the magic number and version of every inode in the buffer
         */
        ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
        for (i = 0; i < ni; i++) {
                int             di_ok;
                xfs_dinode_t    *dip;

                dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
                di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
                        xfs_dinode_good_version(mp, dip->di_version);
                if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                                                XFS_ERRTAG_ITOBP_INOTOBP,
                                                XFS_RANDOM_ITOBP_INOTOBP))) {
                        if (readahead) {
                                bp->b_flags &= ~XBF_DONE;
                                xfs_buf_ioerror(bp, -EIO);
                                return;
                        }

                        xfs_buf_ioerror(bp, -EFSCORRUPTED);
                        xfs_verifier_error(bp);
#ifdef DEBUG
                        xfs_alert(mp,
                                "bad inode magic/vsn daddr %lld #%d (magic=%x)",
                                (unsigned long long)bp->b_bn, i,
                                be16_to_cpu(dip->di_magic));
#endif
                }
        }
        xfs_inobp_check(mp, bp);
}


static void
xfs_inode_buf_read_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
        struct xfs_buf  *bp)
{
        xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
        .name = "xfs_inode",
        .verify_read = xfs_inode_buf_read_verify,
        .verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
        .name = "xfs_inode_ra",
        .verify_read = xfs_inode_buf_readahead_verify,
        .verify_write = xfs_inode_buf_write_verify,
};
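
/*
 * Readahead callers are expected to attach xfs_inode_buf_ra_ops rather than
 * xfs_inode_buf_ops, so that a not-yet-replayed inode cluster only clears
 * XBF_DONE and carries -EIO instead of being reported as corruption.  A
 * minimal sketch of issuing such a readahead (assuming the generic
 * xfs_buf_readahead() wrapper and an already-populated struct xfs_imap):
 *
 *      xfs_buf_readahead(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
 *                        &xfs_inode_buf_ra_ops);
 */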

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_imap         *imap,
        struct xfs_dinode       **dipp,
        struct xfs_buf          **bpp,
        uint                    buf_flags,
        uint                    iget_flags)
{
        struct xfs_buf          *bp;
        int                     error;

        buf_flags |= XBF_UNMAPPED;
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
                                   (int)imap->im_len, buf_flags, &bp,
                                   &xfs_inode_buf_ops);
        if (error) {
                if (error == -EAGAIN) {
                        ASSERT(buf_flags & XBF_TRYLOCK);
                        return error;
                }

                if (error == -EFSCORRUPTED &&
                    (iget_flags & XFS_IGET_UNTRUSTED))
                        return -EINVAL;

                xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
                        __func__, error);
                return error;
        }

        *bpp = bp;
        *dipp = xfs_buf_offset(bp, imap->im_boffset);
        return 0;
}
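
/*
 * Typical use (sketch, mirroring xfs_iread() below): pass the incore inode's
 * imap, get back both the buffer and the on-disk inode pointer, and release
 * the buffer with xfs_trans_brelse() when done:
 *
 *      error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
 *      if (error)
 *              return error;
 *      ...
 *      xfs_trans_brelse(tp, bp);
 */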

void
xfs_inode_from_disk(
        struct xfs_inode        *ip,
        struct xfs_dinode       *from)
{
        struct xfs_icdinode     *to = &ip->i_d;
        struct inode            *inode = VFS_I(ip);

        to->di_mode = be16_to_cpu(from->di_mode);
        to->di_version = from->di_version;

        /*
         * Convert v1 inodes immediately to v2 inode format as this is the
         * minimum inode version format we support in the rest of the code.
         */
        if (to->di_version == 1) {
                set_nlink(inode, be16_to_cpu(from->di_onlink));
                to->di_projid_lo = 0;
                to->di_projid_hi = 0;
                to->di_version = 2;
        } else {
                set_nlink(inode, be32_to_cpu(from->di_nlink));
                to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
                to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
        }

        to->di_format = from->di_format;
        to->di_uid = be32_to_cpu(from->di_uid);
        to->di_gid = be32_to_cpu(from->di_gid);
        to->di_flushiter = be16_to_cpu(from->di_flushiter);

        /*
         * Time is signed, so we need to convert to signed 32 bit before
         * storing it in the inode timestamp, which may be 64 bit. Otherwise
         * a time before the epoch is converted to a time long after the
         * epoch on 64 bit systems.
         */
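        /*
         * Worked example (hypothetical value): an on-disk di_atime.t_sec of
         * 0xffffffff is one second before the epoch.  The (int) cast below
         * yields tv_sec == -1, whereas assigning the raw be32_to_cpu() result
         * to a 64 bit tv_sec would store 4294967295 instead.
         */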
        inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
        inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
        inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
        inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
        inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
        inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
        inode->i_generation = be32_to_cpu(from->di_gen);

        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
        to->di_nextents = be32_to_cpu(from->di_nextents);
        to->di_anextents = be16_to_cpu(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate = be16_to_cpu(from->di_dmstate);
        to->di_flags = be16_to_cpu(from->di_flags);

        if (to->di_version == 3) {
                inode->i_version = be64_to_cpu(from->di_changecount);
                to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
                to->di_flags2 = be64_to_cpu(from->di_flags2);
        }
}

void
xfs_inode_to_disk(
        struct xfs_inode        *ip,
        struct xfs_dinode       *to,
        xfs_lsn_t               lsn)
{
        struct xfs_icdinode     *from = &ip->i_d;
        struct inode            *inode = VFS_I(ip);

        to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
        to->di_onlink = 0;

        to->di_mode = cpu_to_be16(from->di_mode);
        to->di_version = from->di_version;
        to->di_format = from->di_format;
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

        memset(to->di_pad, 0, sizeof(to->di_pad));
        to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
        to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
        to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
        to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
        to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
        to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
        to->di_nlink = cpu_to_be32(inode->i_nlink);
        to->di_gen = cpu_to_be32(inode->i_generation);

        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(from->di_nextents);
        to->di_anextents = cpu_to_be16(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);

        if (from->di_version == 3) {
                to->di_changecount = cpu_to_be64(inode->i_version);
                to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
                to->di_flags2 = cpu_to_be64(from->di_flags2);

                to->di_ino = cpu_to_be64(ip->i_ino);
                to->di_lsn = cpu_to_be64(lsn);
                memset(to->di_pad2, 0, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
                to->di_flushiter = 0;
        } else {
                to->di_flushiter = cpu_to_be16(from->di_flushiter);
        }
}
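
/*
 * Sketch of a typical caller (the inode flush path): the incore inode is
 * formatted into its slot in the cluster buffer and, for v3 inodes, the CRC
 * is then recomputed before the buffer is written back.  Variable names here
 * are illustrative only:
 *
 *      xfs_inode_to_disk(ip, dip, lsn);
 *      xfs_dinode_calc_crc(mp, dip);
 */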

void
xfs_log_dinode_to_disk(
        struct xfs_log_dinode   *from,
        struct xfs_dinode       *to)
{
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
        to->di_version = from->di_version;
        to->di_format = from->di_format;
        to->di_onlink = 0;
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_nlink = cpu_to_be32(from->di_nlink);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

        to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
        to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
        to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(from->di_nextents);
        to->di_anextents = cpu_to_be16(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);
        to->di_gen = cpu_to_be32(from->di_gen);

        if (from->di_version == 3) {
                to->di_changecount = cpu_to_be64(from->di_changecount);
                to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_ino = cpu_to_be64(from->di_ino);
                to->di_lsn = cpu_to_be64(from->di_lsn);
                memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &from->di_uuid);
                to->di_flushiter = 0;
        } else {
                to->di_flushiter = cpu_to_be16(from->di_flushiter);
        }
}

bool
xfs_dinode_verify(
        struct xfs_mount        *mp,
        xfs_ino_t               ino,
        struct xfs_dinode       *dip)
{
        if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
                return false;

        /* only version 3 or greater inodes are extensively verified here */
        if (dip->di_version < 3)
                return true;

        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return false;
        if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
                              XFS_DINODE_CRC_OFF))
                return false;
        if (be64_to_cpu(dip->di_ino) != ino)
                return false;
        if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
                return false;
        return true;
}

void
xfs_dinode_calc_crc(
        struct xfs_mount        *mp,
        struct xfs_dinode       *dip)
{
        __uint32_t              crc;

        if (dip->di_version < 3)
                return;

        ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
        crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
                              XFS_DINODE_CRC_OFF);
        dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            iget_flags)
{
        xfs_buf_t       *bp;
        xfs_dinode_t    *dip;
        int             error;

        /*
         * Fill in the location information in the in-core inode.
         */
        error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
        if (error)
                return error;

        /* shortcut IO on inode allocation if possible */
        if ((iget_flags & XFS_IGET_CREATE) &&
            xfs_sb_version_hascrc(&mp->m_sb) &&
            !(mp->m_flags & XFS_MOUNT_IKEEP)) {
                /* initialise the on-disk inode core */
                memset(&ip->i_d, 0, sizeof(ip->i_d));
                VFS_I(ip)->i_generation = prandom_u32();
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        ip->i_d.di_version = 3;
                else
                        ip->i_d.di_version = 2;
                return 0;
        }

        /*
         * Get pointers to the on-disk inode and the buffer containing it.
         */
        error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
        if (error)
                return error;

        /* even unallocated inodes are verified */
        if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
                xfs_alert(mp, "%s: validation failed for inode %lld",
                                __func__, ip->i_ino);

                XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
                error = -EFSCORRUPTED;
                goto out_brelse;
        }

        /*
         * If the on-disk inode is already linked to a directory
         * entry, copy all of the inode into the in-core inode.
         * xfs_iformat_fork() handles copying in the inode format
         * specific information.
         * Otherwise, just get the truly permanent information.
         */
        if (dip->di_mode) {
                xfs_inode_from_disk(ip, dip);
                error = xfs_iformat_fork(ip, dip);
                if (error) {
#ifdef DEBUG
                        xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
                                __func__, error);
#endif /* DEBUG */
                        goto out_brelse;
                }
        } else {
                /*
                 * Partial initialisation of the in-core inode. Just the bits
                 * that xfs_ialloc won't overwrite or relies on being correct.
                 */
                ip->i_d.di_version = dip->di_version;
                VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
                ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

                /*
                 * Make sure to pull in the mode here as well in
                 * case the inode is released without being used.
                 * This ensures that xfs_inactive() will see that
                 * the inode is already free and not try to mess
                 * with the uninitialized part of it.
                 */
                ip->i_d.di_mode = 0;
        }

        ASSERT(ip->i_d.di_version >= 2);

        ip->i_delayed_blks = 0;

        /*
         * Mark the buffer containing the inode as something to keep
         * around for a while.  This helps to keep recently accessed
         * meta-data in-core longer.
         */
        xfs_buf_set_ref(bp, XFS_INO_REF);

        /*
         * Use xfs_trans_brelse() to release the buffer containing the on-disk
         * inode, because it was acquired with xfs_trans_read_buf() in
         * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
         * brelse().  If we're within a transaction, then xfs_trans_brelse()
         * will only release the buffer if it is not dirty within the
         * transaction.  It will be OK to release the buffer in this case,
         * because inodes on disk are never destroyed and we will be locking
         * the new in-core inode before putting it in the cache where other
         * processes can find it.  Thus we don't have to worry about the inode
         * being changed just because we released the buffer.
         */
 out_brelse:
        xfs_trans_brelse(tp, bp);
        return error;
}