/* libxfs/xfs_inode_buf.c */
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	/* number of inodes held in this cluster buffer */
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
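	/*
	 * The inode count is derived from the buffer size: filesystem blocks
	 * covered by the buffer times inodes per block (sb_inopblock), so
	 * every inode slot in the cluster gets checked.
	 */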
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
					    XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
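
/*
 * Note on the two ops tables above: ordinary metadata reads attach
 * xfs_inode_buf_ops, so a bad inode cluster is reported as -EFSCORRUPTED via
 * xfs_verifier_error(). Speculative readahead attaches xfs_inode_buf_ra_ops
 * instead, so a not-yet-valid buffer is quietly marked -EIO and re-read
 * later, as described above xfs_inode_buf_verify().
 *
 * Illustrative (kernel-side) readahead usage, not part of this file:
 *
 *	xfs_buf_readahead(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
 *			  &xfs_inode_buf_ra_ops);
 */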

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

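	/*
	 * Inode buffers are read unmapped: in the kernel, XBF_UNMAPPED means
	 * the pages of the cluster buffer are not vmapped, and individual
	 * inodes are reached through xfs_buf_offset() instead of a flat
	 * mapping.
	 */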
	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
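	/*
	 * For example (illustrative values): an on-disk t_sec of 0xfffffff0
	 * is -16, i.e. 16 seconds before the epoch; without the cast to a
	 * signed int it would widen to 4294967280 in a 64 bit tv_sec.
	 */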
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode->i_version = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode->i_version);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
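
/*
 * Note: xfs_inode_to_disk() only formats the inode fields; it does not stamp
 * the v3 inode CRC. The expectation (an observation about the callers, not
 * something enforced here) is that the CRC is recomputed afterwards with
 * xfs_dinode_calc_crc() once the on-disk inode is otherwise complete.
 */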
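
/*
 * Copy an inode core that was captured in the log (struct xfs_log_dinode)
 * back into on-disk format. In the kernel this is what log recovery uses to
 * replay inode log items; it is assumed to mirror xfs_inode_to_disk() above.
 */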
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* don't allow invalid i_size */
	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
		return false;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return false;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
		return false;

	flags = be16_to_cpu(dip->di_flags);
	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return false;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return false;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return false;

	return true;
}

void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
				     XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}
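
/*
 * Note on the checksum above (a description of the shared xfs_cksum helpers,
 * not something enforced in this file): xfs_start_cksum_update() zeroes the
 * di_crc field before running crc32c over the whole on-disk inode, so the
 * stored checksum covers every byte of the inode except itself.
 */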

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb))
			ip->i_d.di_version = 3;
		else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}

	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}