cluster_count * sizeof(xfs_buf_t*));
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
-#ifdef XR_PF_TRACE
pftrace("about to read off %llu in AG %d",
(long long)XFS_AGB_TO_DADDR(mp, agno, agbno), agno);
-#endif
+
bplist[bp_index] = libxfs_readbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, agbno),
XFS_FSB_TO_BB(mp, blks_per_cluster), 0);
}
agbno += blks_per_cluster;
-#ifdef XR_PF_TRACE
pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
(long long)XFS_BUF_ADDR(bplist[bp_index]),
XFS_BUF_COUNT(bplist[bp_index]), agno);
-#endif
}
agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);
* done! - finished up irec and block simultaneously
*/
for (bp_index = 0; bp_index < cluster_count; bp_index++) {
-#ifdef XR_PF_TRACE
- pftrace("put/writebuf %p (%llu) in AG %d", bplist[bp_index],
- (long long)XFS_BUF_ADDR(bplist[bp_index]), agno);
-#endif
+ pftrace("put/writebuf %p (%llu) in AG %d",
+ bplist[bp_index], (long long)
+ XFS_BUF_ADDR(bplist[bp_index]), agno);
+
if (dirty && !no_modify)
libxfs_writebuf(bplist[bp_index], 0);
else
bplist = bparray;
}
for (i = 0; i < nex; i++) {
-#ifdef XR_PF_TRACE
pftrace("about to read off %llu (len = %d)",
(long long)XFS_FSB_TO_DADDR(mp, bmp[i].startblock),
XFS_FSB_TO_BB(mp, bmp[i].blockcount));
-#endif
+
bplist[i] = libxfs_readbuf(mp->m_dev,
XFS_FSB_TO_DADDR(mp, bmp[i].startblock),
XFS_FSB_TO_BB(mp, bmp[i].blockcount), 0);
if (!bplist[i])
goto failed;
-#ifdef XR_PF_TRACE
+
pftrace("readbuf %p (%llu, %d)", bplist[i],
(long long)XFS_BUF_ADDR(bplist[i]),
XFS_BUF_COUNT(bplist[i]));
-#endif
}
dabuf = malloc(XFS_DA_BUF_SIZE(nex));
if (dabuf == NULL) {
}
da_buf_done(dabuf);
for (i = 0; i < nbuf; i++) {
-#ifdef XR_PF_TRACE
pftrace("putbuf %p (%llu)", bplist[i],
(long long)XFS_BUF_ADDR(bplist[i]));
-#endif
libxfs_putbuf(bplist[i]);
}
if (bplist != &bp)
/*
* bail out if this is the root block (top of tree)
*/
- if (this_level >= cursor->active)
+ if (this_level >= cursor->active)
return(0);
/*
* set hashvalue to correctly reflect the now-validated
* numbers. Do NOT touch the name until after we've computed
* the hashvalue and done a namecheck() on the name.
*
- * Conditions must either set clearino to zero or set
+ * Conditions must either set clearino to zero or set
* clearreason why it's being cleared.
*/
if (!ino_discovery && ent_ino == BADFSINO) {
if (ino_discovery) {
add_inode_uncertain(mp, ent_ino, 0);
clearino = 0;
- } else
+ } else
clearreason = _("non-existent");
} else {
/*
prefetch_args_t *args)
{
if (!args->can_start_processing) {
-#ifdef XR_PF_TRACE
pftrace("signalling processing for AG %d", args->agno);
-#endif
+
args->can_start_processing = 1;
pthread_cond_signal(&args->start_processing);
}
prefetch_args_t *args)
{
if (!args->can_start_reading) {
-#ifdef XR_PF_TRACE
pftrace("signalling reading for AG %d", args->agno);
-#endif
+
args->can_start_reading = 1;
pthread_cond_broadcast(&args->start_reading);
}
if (args->inode_bufs_queued == IO_THRESHOLD)
pf_start_io_workers(args);
}
-#ifdef XR_PF_TRACE
- pftrace("getbuf %c %p (%llu) in AG %d (fsbno = %lu) added to "
- "primary queue (inode_bufs_queued = %d, last_bno = %lu)",
- B_IS_INODE(flag) ? 'I' : 'M', bp,
- (long long)XFS_BUF_ADDR(bp), args->agno, fsbno,
- args->inode_bufs_queued, args->last_bno_read);
-#endif
} else {
-#ifdef XR_PF_TRACE
- pftrace("getbuf %c %p (%llu) in AG %d (fsbno = %lu) added to "
- "secondary queue (last_bno = %lu)",
- B_IS_INODE(flag) ? 'I' : 'M', bp,
- (long long)XFS_BUF_ADDR(bp), args->agno, fsbno,
- args->last_bno_read);
-#endif
ASSERT(!B_IS_INODE(flag));
XFS_BUF_SET_PRIORITY(bp, B_DIR_META_2);
}
+ pftrace("getbuf %c %p (%llu) in AG %d (fsbno = %lu) added to queue "
+ "(inode_bufs_queued = %d, last_bno = %lu)", B_IS_INODE(flag) ?
+ 'I' : 'M', bp, (long long)XFS_BUF_ADDR(bp), args->agno, fsbno,
+ args->inode_bufs_queued, args->last_bno_read);
+
pf_start_processing(args);
pthread_mutex_unlock(&args->lock);
while (irec.br_blockcount) {
unsigned int len;
-#ifdef XR_PF_TRACE
+
pftrace("queuing dir extent in AG %d", args->agno);
-#endif
+
len = (irec.br_blockcount > mp->m_dirblkfsbs) ?
mp->m_dirblkfsbs : irec.br_blockcount;
pf_queue_io(args, irec.br_startblock, len, B_DIR_META);
}
}
for (i = 0; i < num; i++) {
-#ifdef XR_PF_TRACE
pftrace("putbuf %c %p (%llu) in AG %d",
B_IS_INODE(XFS_BUF_PRIORITY(bplist[i])) ? 'I' : 'M',
bplist[i], (long long)XFS_BUF_ADDR(bplist[i]),
args->agno);
-#endif
libxfs_putbuf(bplist[i]);
}
pthread_mutex_lock(&args->lock);
if (which != PF_SECONDARY) {
-#ifdef XR_PF_TRACE
pftrace("inode_bufs_queued for AG %d = %d", args->agno,
args->inode_bufs_queued);
-#endif
/*
* if primary inode queue running low, process metadata
in both queues to avoid I/O starvation as the
*/
if (which == PF_PRIMARY && !args->queuing_done &&
args->inode_bufs_queued < IO_THRESHOLD) {
-#ifdef XR_PF_TRACE
pftrace("reading metadata bufs from primary queue for AG %d",
args->agno);
-#endif
+
pf_batch_read(args, PF_META_ONLY, buf);
-#ifdef XR_PF_TRACE
+
pftrace("reading bufs from secondary queue for AG %d",
args->agno);
-#endif
+
pf_batch_read(args, PF_SECONDARY, buf);
}
}
pthread_mutex_lock(&args->lock);
while (!args->queuing_done || !btree_is_empty(args->io_queue)) {
-#ifdef XR_PF_TRACE
pftrace("waiting to start prefetch I/O for AG %d", args->agno);
-#endif
+
while (!args->can_start_reading && !args->queuing_done)
pthread_cond_wait(&args->start_reading, &args->lock);
-#ifdef XR_PF_TRACE
+
pftrace("starting prefetch I/O for AG %d", args->agno);
-#endif
+
pf_batch_read(args, PF_PRIMARY, buf);
pf_batch_read(args, PF_SECONDARY, buf);
-#ifdef XR_PF_TRACE
pftrace("ran out of bufs to prefetch for AG %d", args->agno);
-#endif
+
if (!args->queuing_done)
args->can_start_reading = 0;
}
free(buf);
-#ifdef XR_PF_TRACE
pftrace("finished prefetch I/O for AG %d", args->agno);
-#endif
+
return NULL;
}
break;
}
}
-
-#ifdef XR_PF_TRACE
pftrace("starting prefetch for AG %d", args->agno);
-#endif
for (irec = findfirst_inode_rec(args->agno); irec != NULL;
irec = next_ino_rec(irec)) {
pthread_mutex_lock(&args->lock);
-#ifdef XR_PF_TRACE
pftrace("finished queuing inodes for AG %d (inode_bufs_queued = %d)",
args->agno, args->inode_bufs_queued);
-#endif
+
args->queuing_done = 1;
pf_start_io_workers(args);
pf_start_processing(args);
if (args->io_threads[i])
pthread_join(args->io_threads[i], NULL);
-#ifdef XR_PF_TRACE
pftrace("prefetch for AG %d finished", args->agno);
-#endif
+
pthread_mutex_lock(&args->lock);
ASSERT(btree_is_empty(args->io_queue));
{
int err;
-#ifdef XR_PF_TRACE
pftrace("creating queue thread for AG %d", args->agno);
-#endif
+
err = pthread_create(&args->queuing_thread, NULL,
pf_queuing_worker, args);
if (err != 0) {
pthread_mutex_lock(&args->lock);
while (!args->can_start_processing) {
-#ifdef XR_PF_TRACE
pftrace("waiting to start processing AG %d", args->agno);
-#endif
+
pthread_cond_wait(&args->start_processing, &args->lock);
}
-#ifdef XR_PF_TRACE
pftrace("can start processing AG %d", args->agno);
-#endif
+
pthread_mutex_unlock(&args->lock);
}
if (args == NULL)
return;
-#ifdef XR_PF_TRACE
pftrace("waiting AG %d prefetch to finish", args->agno);
-#endif
+
if (args->queuing_thread)
pthread_join(args->queuing_thread, NULL);
-#ifdef XR_PF_TRACE
pftrace("AG %d prefetch done", args->agno);
-#endif
+
pthread_mutex_destroy(&args->lock);
pthread_cond_destroy(&args->start_reading);
pthread_cond_destroy(&args->start_processing);
#ifdef XR_PF_TRACE
+static FILE *pf_trace_file;
+
+void
+pftrace_init(void)
+{
+ pf_trace_file = fopen("/tmp/xfs_repair_prefetch.trace", "w");
+ setvbuf(pf_trace_file, NULL, _IOLBF, 1024);
+}
+
+void
+pftrace_done(void)
+{
+ fclose(pf_trace_file);
+}
+
void
_pftrace(const char *func, const char *msg, ...)
{
buf[sizeof(buf)-1] = '\0';
va_end(args);
- fprintf(pf_trace_file, "%lu.%06lu %s: %s\n", tv.tv_sec, tv.tv_usec, func, buf);
+ fprintf(pf_trace_file, "%lu.%06lu %s: %s\n", tv.tv_sec, tv.tv_usec,
+ func, buf);
}
#endif