+[cvs]
+ - Update xfs_io command to allow reading from non-XFS files.
+ - Sync up user/kernel source in libxfs, libxlog and headers.
+
xfsprogs-2.5.4 (23 July 2003)
- Update xfs_io bmap command to report unwritten extent flag
if it is set on an extent (in verbose mode only).
#define XFS_ITRUNC_DEFINITE 0x1
#define XFS_ITRUNC_MAYBE 0x2
-/*
- * max file offset is 2^(31+PAGE_SHIFT) - 1 (due to linux page cache)
- *
- * NOTE: XFS itself can handle 2^63 - 1 (largest positive value of xfs_fsize_t)
- * but this is the Linux limit.
- */
-#define XFS_MAX_FILE_OFFSET MAX_LFS_FILESIZE
-
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_ITOV)
struct vnode *xfs_itov(xfs_inode_t *ip);
#define XFS_ITOV(ip) xfs_itov(ip)
* Macros, structures, prototypes for internal log manager use.
*/
-#define XLOG_NUM_ICLOGS 2
+#define XLOG_MIN_ICLOGS 2
+#define XLOG_MED_ICLOGS 4
#define XLOG_MAX_ICLOGS 8
#define XLOG_CALLBACK_SIZE 10
-#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* Illegal cycle number */
+#define XLOG_HEADER_MAGIC_NUM 0xFEEDbabe /* Invalid cycle number */
#define XLOG_VERSION_1 1
#define XLOG_VERSION_2 2 /* Large IClogs, Log sunit */
#define XLOG_VERSION_OKBITS (XLOG_VERSION_1 | XLOG_VERSION_2)
* that round off problems won't occur when releasing partial reservations.
*/
typedef struct log {
- /* The following block of fields are changed while holding icloglock */
- sema_t l_flushsema; /* iclog flushing semaphore */
- int l_flushcnt; /* # of procs waiting on this sema */
- int l_ticket_cnt; /* free ticket count */
- int l_ticket_tcnt; /* total ticket count */
- int l_covered_state;/* state of "covering disk log entries" */
- xlog_ticket_t *l_freelist; /* free list of tickets */
- xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */
- xlog_ticket_t *l_tail; /* free list of tickets */
- xlog_in_core_t *l_iclog; /* head log queue */
- lock_t l_icloglock; /* grab to change iclog state */
- xfs_lsn_t l_tail_lsn; /* lsn of 1st LR w/ unflush buffers */
- xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */
- struct xfs_mount *l_mp; /* mount point */
- struct xfs_buf *l_xbuf; /* extra buffer for log wrapping */
- dev_t l_dev; /* dev_t of log */
- xfs_daddr_t l_logBBstart; /* start block of log */
- int l_logsize; /* size of log in bytes */
- int l_logBBsize; /* size of log in 512 byte chunks */
- int l_roundoff; /* round off error of all iclogs */
- int l_curr_cycle; /* Cycle number of log writes */
- int l_prev_cycle; /* Cycle # b4 last block increment */
- int l_curr_block; /* current logical block of log */
- int l_prev_block; /* previous logical block of log */
- int l_iclog_size; /* size of log in bytes */
- int l_iclog_size_log;/* log power size of log */
- int l_iclog_bufs; /* number of iclog buffers */
-
- /* The following field are used for debugging; need to hold icloglock */
- char *l_iclog_bak[XLOG_MAX_ICLOGS];
-
- /* The following block of fields are changed while holding grant_lock */
- lock_t l_grant_lock; /* protects below fields */
- xlog_ticket_t *l_reserve_headq; /* */
- xlog_ticket_t *l_write_headq; /* */
- int l_grant_reserve_cycle; /* */
- int l_grant_reserve_bytes; /* */
- int l_grant_write_cycle; /* */
- int l_grant_write_bytes; /* */
-
- /* The following fields don't need locking */
+ /* The following block of fields are changed while holding icloglock */
+ sema_t l_flushsema; /* iclog flushing semaphore */
+ int l_flushcnt; /* # of procs waiting on this
+ * sema */
+ int l_ticket_cnt; /* free ticket count */
+ int l_ticket_tcnt; /* total ticket count */
+ int l_covered_state;/* state of "covering disk
+ * log entries" */
+ xlog_ticket_t *l_freelist; /* free list of tickets */
+ xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */
+ xlog_ticket_t *l_tail; /* free list of tickets */
+ xlog_in_core_t *l_iclog; /* head log queue */
+ lock_t l_icloglock; /* grab to change iclog state */
+ xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed
+ * buffers */
+ xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */
+ struct xfs_mount *l_mp; /* mount point */
+ struct xfs_buf *l_xbuf; /* extra buffer for log
+ * wrapping */
+ dev_t l_dev; /* dev_t of log */
+ xfs_daddr_t l_logBBstart; /* start block of log */
+ int l_logsize; /* size of log in bytes */
+ int l_logBBsize; /* size of log in BB chunks */
+ int l_roundoff; /* round off error of iclogs */
+ int l_curr_cycle; /* Cycle number of log writes */
+ int l_prev_cycle; /* Cycle number before last
+ * block increment */
+ int l_curr_block; /* current logical log block */
+ int l_prev_block; /* previous logical log block */
+ int l_iclog_size; /* size of log in bytes */
+ int l_iclog_size_log; /* log power size of log */
+ int l_iclog_bufs; /* number of iclog buffers */
+
+	/* The following fields are used for debugging; need to hold icloglock */
+ char *l_iclog_bak[XLOG_MAX_ICLOGS];
+
+ /* The following block of fields are changed while holding grant_lock */
+ lock_t l_grant_lock;
+ xlog_ticket_t *l_reserve_headq;
+ xlog_ticket_t *l_write_headq;
+ int l_grant_reserve_cycle;
+ int l_grant_reserve_bytes;
+ int l_grant_write_cycle;
+ int l_grant_write_bytes;
+
+ /* The following fields don't need locking */
#ifdef DEBUG
- struct ktrace *l_trace;
- struct ktrace *l_grant_trace;
+ struct ktrace *l_trace;
+ struct ktrace *l_grant_trace;
#endif
- uint l_flags;
- uint l_quotaoffs_flag;/* XFS_DQ_*, if QUOTAOFFs found */
- struct xfs_buf_cancel **l_buf_cancel_table;
- int l_stripemask; /* log stripe mask */
- int l_iclog_hsize; /* size of iclog header */
- int l_iclog_heads; /* number of iclog header sectors */
- uint l_sectbb_log; /* log2 of sector size in bbs */
- uint l_sectbb_mask; /* sector size in bbs alignment mask */
+ uint l_flags;
+ uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
+ struct xfs_buf_cancel **l_buf_cancel_table;
+ int l_stripemask; /* log stripe mask */
+ int l_iclog_hsize; /* size of iclog header */
+ int l_iclog_heads; /* # of iclog header sectors */
+ uint l_sectbb_log; /* log2 of sector size in BBs */
+ uint l_sectbb_mask; /* sector size (in BBs)
+ * alignment mask */
} xlog_t;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
-#define SPLDECL(s) unsigned long s
#define AIL_LOCK_T lock_t
#define AIL_LOCKINIT(x,y) spinlock_init(x,y)
#define AIL_LOCK_DESTROY(x) spinlock_destroy(x)
uint m_qflags; /* quota status flags */
xfs_trans_reservations_t m_reservations;/* precomputed res values */
__uint64_t m_maxicount; /* maximum inode count */
+ __uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
#if XFS_BIG_FILESYSTEMS
* 32 bits in size */
#define XFS_MOUNT_NOLOGFLUSH 0x00010000
-#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
-
/*
* Default minimum read and write sizes.
*/
#define XFS_WSYNC_READIO_LOG 15 /* 32K */
#define XFS_WSYNC_WRITEIO_LOG 14 /* 16K */
+#define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset)
+
+#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
#define xfs_force_shutdown(m,f) \
VFS_FORCE_SHUTDOWN((XFS_MTOVFS(m)), f, __FILE__, __LINE__)
xfs_trans_header_t t_header; /* header for in-log trans */
unsigned int t_busy_free; /* busy descs free */
xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */
+ xfs_pflags_t t_pflags; /* saved pflags state */
} xfs_trans_t;
#endif /* __KERNEL__ */
xfs_extlen_t ralen=0; /* realtime allocation length */
#endif
-#define ISLEGAL(x,y) \
+#define ISVALID(x,y) \
(rt ? \
(x) < mp->m_sb.sb_rblocks : \
XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
/*
* If we're now overlapping the next or previous extent that
* means we can't fit an extsz piece in this hole. Just move
- * the start forward to the first legal spot and set
+ * the start forward to the first valid spot and set
* the length so we hit the end.
*/
if ((ap->off != orig_off && ap->off < prevo) ||
ralen = ap->alen / mp->m_sb.sb_rextsize;
/*
* If the old value was close enough to MAXEXTLEN that
- * we rounded up to it, cut it back so it's legal again.
+ * we rounded up to it, cut it back so it's valid again.
* Note that if it's a really large request (bigger than
* MAXEXTLEN), we don't hear about that number, and can't
* adjust the starting point to match it.
*/
if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
!ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
- ISLEGAL(ap->prevp->br_startblock + ap->prevp->br_blockcount,
+ ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
ap->prevp->br_startblock)) {
ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
/*
adjust = ap->off -
(ap->prevp->br_startoff + ap->prevp->br_blockcount);
if (adjust &&
- ISLEGAL(ap->rval + adjust, ap->prevp->br_startblock))
+ ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
ap->rval += adjust;
}
/*
!ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
(prevbno = ap->prevp->br_startblock +
ap->prevp->br_blockcount) &&
- ISLEGAL(prevbno, ap->prevp->br_startblock)) {
+ ISVALID(prevbno, ap->prevp->br_startblock)) {
/*
* Calculate gap to end of previous block.
*/
* end and the gap size.
* Heuristic!
* If the gap is large relative to the piece we're
- * allocating, or using it gives us an illegal block
+ * allocating, or using it gives us an invalid block
* number, then just use the end of the previous block.
*/
if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
- ISLEGAL(prevbno + prevdiff,
+ ISVALID(prevbno + prevdiff,
ap->prevp->br_startblock))
prevbno += adjust;
else
/*
* Heuristic!
* If the gap is large relative to the piece we're
- * allocating, or using it gives us an illegal block
+ * allocating, or using it gives us an invalid block
* number, then just use the start of the next block
* offset by our length.
*/
if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
- ISLEGAL(gotbno - gotdiff, gotbno))
+ ISVALID(gotbno - gotdiff, gotbno))
gotbno -= adjust;
- else if (ISLEGAL(gotbno - ap->alen, gotbno)) {
+ else if (ISVALID(gotbno - ap->alen, gotbno)) {
gotbno -= ap->alen;
gotdiff += adjust - ap->alen;
} else
}
}
return 0;
-#undef ISLEGAL
+#undef ISVALID
}
/*
/*
* We don't want to deal with the case of keeping inode data inline yet.
- * So sending the data fork of a regular inode is illegal.
+ * So sending the data fork of a regular inode is invalid.
*/
ASSERT(!((ip->i_d.di_mode & IFMT) == IFREG &&
whichfork == XFS_DATA_FORK));
size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t);
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
- * We know that the size is legal (it's checked in iformat_btree)
+ * We know that the size is valid (it's checked in iformat_btree)
*/
ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP);
ASSERT(ifp->if_u1.if_extents != NULL);
for (i = (*last_blk) - 1; i >= 0; i--) {
if (i < start_blk) {
- /* legal log record not found */
+ /* valid log record not found */
xlog_warn(
"XFS: Log inconsistent (didn't find previous header)");
ASSERT(0);
* then the entire log is stamped with the same cycle number. In this
* case, head_blk can't be set to zero (which makes sense). The below
* math doesn't work out properly with head_blk equal to zero. Instead,
- * we set it to log_bbnum which is an illegal block number, but this
+ * we set it to log_bbnum which is an invalid block number, but this
 * value makes the math correct. If head_blk doesn't change through
* all the tests below, *head_blk is set to zero at the very end rather
* than log_bbnum. In a sense, log_bbnum and zero are the same block