/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libxfs_priv.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

bool
xfs_dinode_good_version(
	struct xfs_mount	*mp,
	__u8			version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			(unlinked_ino == NULLAGINO ||
			 xfs_verify_agino(mp, agno, unlinked_ino));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}


static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};


/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp,
	uint			buf_flags,
	uint			iget_flags)
{
	struct xfs_buf		*bp;
	int			error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
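
/*
 * A minimal usage sketch, modelled on xfs_iread() below: a caller maps the
 * inode with xfs_imap() first and releases the buffer with xfs_trans_brelse()
 * once the on-disk inode has been copied out. mp, tp and ino are assumed to
 * be in scope, and error handling is trimmed down to the essentials.
 *
 *	struct xfs_imap		imap;
 *	struct xfs_dinode	*dip;
 *	struct xfs_buf		*bp;
 *	int			error;
 *
 *	error = xfs_imap(mp, tp, ino, &imap, 0);
 *	if (!error)
 *		error = xfs_imap_to_bp(mp, tp, &imap, &dip, &bp, 0, 0);
 *	if (!error) {
 *		... read fields out of *dip here ...
 *		xfs_trans_brelse(tp, bp);
 *	}
 */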

void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);


	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (S_ISREG(mode))
				return __this_address;
			if (di_size > XFS_DFORK_DSIZE(dip, mp))
				return __this_address;
			if (dip->di_nextents)
				return __this_address;
			/* fall through */
		case XFS_DINODE_FMT_EXTENTS:
		case XFS_DINODE_FMT_BTREE:
			break;
		default:
			return __this_address;
		}
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (XFS_DFORK_Q(dip)) {
		switch (dip->di_aformat) {
		case XFS_DINODE_FMT_LOCAL:
			if (dip->di_anextents)
				return __this_address;
			/* fall through */
		case XFS_DINODE_FMT_EXTENTS:
		case XFS_DINODE_FMT_BTREE:
			break;
		default:
			return __this_address;
		}
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;

	return NULL;
}
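
/*
 * A minimal sketch of how a caller typically acts on a non-NULL failure
 * address, following the pattern used by xfs_iread() below; ip, mp and dip
 * are assumed to be in scope:
 *
 *	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
 *	if (fa) {
 *		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
 *				sizeof(*dip), fa);
 *		return -EFSCORRUPTED;
 *	}
 */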

void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
				     XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_failaddr_t	fa;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		ip->i_d.di_version = 3;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
				sizeof(*dip), fa);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}

	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while. This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
	 * brelse(). If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction. It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it. Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Validate di_extsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_extsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	if (rt_flag)
		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	if (!(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > MAXEXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}
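
/*
 * A minimal sketch of the verifier-side call, as made from xfs_dinode_verify()
 * above; mode and flags are decoded from the on-disk inode (dip, assumed to be
 * in scope) before the call:
 *
 *	mode = be16_to_cpu(dip->di_mode);
 *	flags = be16_to_cpu(dip->di_flags);
 *	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
 *			mode, flags);
 *	if (fa)
 *		return fa;
 */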

/*
 * Validate di_cowextsize hint.
 *
 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
 * These functions must be kept in sync with each other.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	if (!hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > MAXEXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}