pthread_mutex_lock(&args->lock);
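+ /*
+  * There is now a single prefetch queue per AG, ordered by start
+  * block.  Every buffer is inserted here; its position relative to
+  * last_bno_read determines which read pass will pick it up.
+  */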
+ btree_insert(args->io_queue, fsbno, bp);
+
if (fsbno > args->last_bno_read) {
- btree_insert(args->primary_io_queue, fsbno, bp);
if (B_IS_INODE(flag)) {
args->inode_bufs_queued++;
if (args->inode_bufs_queued == IO_THRESHOLD)
pf_start_io_workers(args);
}
} else {
ASSERT(!B_IS_INODE(flag));
XFS_BUF_SET_PRIORITY(bp, B_DIR_META_2);
- btree_insert(args->secondary_io_queue, fsbno, bp);
}
pf_start_processing(args);

pthread_mutex_unlock(&args->lock);
}
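+ /*
+  * Gather queued buffers into runs and issue them as the smallest
+  * possible number of large reads.
+  */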
static void
pf_batch_read(
prefetch_args_t *args,
pf_which_t which,
void *buf)
{
- struct btree_root *queue;
xfs_buf_t *bplist[MAX_BUFS];
unsigned int num;
off64_t first_off, last_off, next_off;
int i;
unsigned long fsbno;
unsigned long max_fsbno;
char *pbuf;
- queue = (which != PF_SECONDARY) ? args->primary_io_queue
- : args->secondary_io_queue;
-
- while (btree_find(queue, 0, &fsbno) != NULL) {
- max_fsbno = fsbno + pf_max_fsbs;
+ for (;;) {
num = 0;
-
- bplist[0] = btree_lookup(queue, fsbno);
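+ /*
+  * The two passes share one queue and are distinguished by the
+  * range they scan: the secondary pass walks buffers from the start
+  * of the AG up to last_bno_read, while the primary pass starts its
+  * search at last_bno_read and works forward.
+  */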
+ if (which == PF_SECONDARY) {
+ bplist[0] = btree_find(args->io_queue, 0, &fsbno);
+ max_fsbno = MIN(fsbno + pf_max_fsbs,
+ args->last_bno_read);
+ } else {
+ bplist[0] = btree_find(args->io_queue,
+ args->last_bno_read, &fsbno);
+ max_fsbno = fsbno + pf_max_fsbs;
+ }
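+ /*
+  * Collect up to MAX_BUFS queued buffers that fall inside the
+  * current window.  PF_META_ONLY skips inode buffers so that only
+  * directory metadata is read.
+  */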
while (bplist[num] && num < MAX_BUFS && fsbno < max_fsbno) {
if (which != PF_META_ONLY ||
!B_IS_INODE(XFS_BUF_PRIORITY(bplist[num])))
num++;
- bplist[num] = btree_lookup_next(queue, &fsbno);
+ if (num == MAX_BUFS)
+ break;
+ bplist[num] = btree_lookup_next(args->io_queue, &fsbno);
}
if (!num)
return;

/*
 * do a big read if 25% of the potential buffer is useful,
 * otherwise, find as many close together blocks and
 * read them in one read
 */
first_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[0]));
last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
- XFS_BUF_SIZE(bplist[num-1]);
+ XFS_BUF_SIZE(bplist[num-1]);
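+ /* trim buffers off the tail until the run fits in one read buffer */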
while (last_off - first_off > pf_max_bytes) {
num--;
- last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
- XFS_BUF_SIZE(bplist[num-1]);
+ last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(
+ bplist[num-1])) + XFS_BUF_SIZE(bplist[num-1]);
}
- if (num < ((last_off - first_off) >> (mp->m_sb.sb_blocklog + 3))) {
+ if (num < ((last_off - first_off) >>
+ (mp->m_sb.sb_blocklog + 3))) {
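+ /*
+  * The shift converts the byte span to filesystem blocks and
+  * divides by 8: only issue one large read if the number of queued
+  * buffers is at least 1/8th of the span measured in blocks.
+  */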
/*
* not enough blocks for one big read, so determine
* the number of blocks that are close enough.
*/
last_off = first_off + XFS_BUF_SIZE(bplist[0]);
for (i = 1; i < num; i++) {
- next_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[i])) +
- XFS_BUF_SIZE(bplist[i]);
+ next_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(
+ bplist[i])) + XFS_BUF_SIZE(bplist[i]);
if (next_off - last_off > pf_batch_bytes)
break;
last_off = next_off;
}
num = i;
}

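+ /*
+  * Pull the buffers we are about to read off the queue; a missing
+  * entry here means the queue no longer matches the buffer list.
+  */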
for (i = 0; i < num; i++) {
- if (btree_delete(queue, XFS_DADDR_TO_FSB(mp,
+ if (btree_delete(args->io_queue, XFS_DADDR_TO_FSB(mp,
XFS_BUF_ADDR(bplist[i]))) == NULL)
do_error(_("prefetch corruption\n"));
}

static void *
pf_io_worker(
void *param)
{
prefetch_args_t *args = param;
char *buf = memalign(libxfs_device_alignment(), pf_max_bytes);

if (buf == NULL)
return NULL;

pthread_mutex_lock(&args->lock);
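+ /* keep issuing I/O until queuing has finished and the queue is empty */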
- while (!args->queuing_done || btree_find(args->primary_io_queue, 0, NULL)) {
+ while (!args->queuing_done || btree_find(args->io_queue, 0, NULL)) {
#ifdef XR_PF_TRACE
pftrace("waiting to start prefetch I/O for AG %d", args->agno);
#endif
pthread_mutex_lock(&args->lock);
- ASSERT(btree_find(args->primary_io_queue, 0, NULL) == NULL);
- ASSERT(btree_find(args->secondary_io_queue, 0, NULL) == NULL);
+ ASSERT(btree_find(args->io_queue, 0, NULL) == NULL);
args->prefetch_done = 1;
if (args->next_args)
pf_create_prefetch_thread(args->next_args);

args = calloc(1, sizeof(prefetch_args_t));
- btree_init(&args->primary_io_queue);
- btree_init(&args->secondary_io_queue);
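+ /* one queue now feeds both the primary and secondary read passes */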
+ btree_init(&args->io_queue);
if (pthread_mutex_init(&args->lock, NULL) != 0)
do_error(_("failed to initialize prefetch mutex\n"));
if (pthread_cond_init(&args->start_reading, NULL) != 0)
do_error(_("failed to initialize prefetch cond var\n"));

pthread_cond_destroy(&args->start_reading);
pthread_cond_destroy(&args->start_processing);
sem_destroy(&args->ra_count);
- btree_destroy(args->primary_io_queue);
- btree_destroy(args->secondary_io_queue);
+ btree_destroy(args->io_queue);
free(args);
}