git.ipfire.org Git - thirdparty/xfsprogs-dev.git/commitdiff
repair: use single prefetch queue
author: Christoph Hellwig <hch@infradead.org>
Wed, 2 Sep 2009 17:55:38 +0000 (17:55 +0000)
committer: Alex Elder <aelder@sgi.com>
Fri, 23 Oct 2009 17:06:00 +0000 (12:06 -0500)
We don't need two prefetch queues as we guarantee execution in order anyway.

Instead of inserting a buffer into either the primary or the secondary
queue depending on whether its block number is ahead of last_bno_read,
pf_queue_io now always inserts it into a single io_queue.  The split is
moved to the reader side in pf_batch_read: a PF_SECONDARY reader starts
its btree_find at key 0 and caps max_fsbno at last_bno_read, while the
other readers start searching from last_bno_read onward, so the two
ranges of the one ordered btree serve the roles the two queues used to.

Signed-off-by: Barry Naujok <bnaujok@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
repair/prefetch.c
repair/prefetch.h

index 5d27249902ba74fcbe8fa560b8bab609cc8eb0e6..7354c4d8b615bbe55a09c5074a93e44e128973c1 100644 (file)
@@ -128,8 +128,9 @@ pf_queue_io(
 
        pthread_mutex_lock(&args->lock);
 
+       btree_insert(args->io_queue, fsbno, bp);
+
        if (fsbno > args->last_bno_read) {
-               btree_insert(args->primary_io_queue, fsbno, bp);
                if (B_IS_INODE(flag)) {
                        args->inode_bufs_queued++;
                        if (args->inode_bufs_queued == IO_THRESHOLD)
@@ -152,7 +153,6 @@ pf_queue_io(
 #endif
                ASSERT(!B_IS_INODE(flag));
                XFS_BUF_SET_PRIORITY(bp, B_DIR_META_2);
-               btree_insert(args->secondary_io_queue, fsbno, bp);
        }
 
        pf_start_processing(args);
@@ -405,7 +405,6 @@ pf_batch_read(
        pf_which_t              which,
        void                    *buf)
 {
-       struct btree_root       *queue;
        xfs_buf_t               *bplist[MAX_BUFS];
        unsigned int            num;
        off64_t                 first_off, last_off, next_off;
@@ -416,19 +415,22 @@ pf_batch_read(
        unsigned long           max_fsbno;
        char                    *pbuf;
 
-       queue = (which != PF_SECONDARY) ? args->primary_io_queue
-                               : args->secondary_io_queue;
-
-       while (btree_find(queue, 0, &fsbno) != NULL) {
-               max_fsbno = fsbno + pf_max_fsbs;
+       for (;;) {
                num = 0;
-
-               bplist[0] = btree_lookup(queue, fsbno);
+               if (which == PF_SECONDARY) {
+                       bplist[0] = btree_find(args->io_queue, 0, &fsbno);
+                       max_fsbno = MIN(fsbno + pf_max_fsbs,
+                                                       args->last_bno_read);
+               } else {
+                       bplist[0] = btree_find(args->io_queue,
+                                               args->last_bno_read, &fsbno);
+                       max_fsbno = fsbno + pf_max_fsbs;
+               }
                while (bplist[num] && num < MAX_BUFS && fsbno < max_fsbno) {
                        if (which != PF_META_ONLY ||
                            !B_IS_INODE(XFS_BUF_PRIORITY(bplist[num])))
                                num++;
-                       bplist[num] = btree_lookup_next(queue, &fsbno);
+                       bplist[num] = btree_lookup_next(args->io_queue, &fsbno);
                }
                if (!num)
                        return;
@@ -440,21 +442,22 @@ pf_batch_read(
                 */
                first_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[0]));
                last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
-                       XFS_BUF_SIZE(bplist[num-1]);
+                                               XFS_BUF_SIZE(bplist[num-1]);
                while (last_off - first_off > pf_max_bytes) {
                        num--;
-                       last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
-                               XFS_BUF_SIZE(bplist[num-1]);
+                       last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(
+                               bplist[num-1])) + XFS_BUF_SIZE(bplist[num-1]);
                }
-               if (num < ((last_off - first_off) >> (mp->m_sb.sb_blocklog + 3))) {
+               if (num < ((last_off - first_off) >>
+                                               (mp->m_sb.sb_blocklog + 3))) {
                        /*
                         * not enough blocks for one big read, so determine
                         * the number of blocks that are close enough.
                         */
                        last_off = first_off + XFS_BUF_SIZE(bplist[0]);
                        for (i = 1; i < num; i++) {
-                               next_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[i])) +
-                                               XFS_BUF_SIZE(bplist[i]);
+                               next_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(
+                                       bplist[i])) + XFS_BUF_SIZE(bplist[i]);
                                if (next_off - last_off > pf_batch_bytes)
                                        break;
                                last_off = next_off;
@@ -463,7 +466,7 @@ pf_batch_read(
                }
 
                for (i = 0; i < num; i++) {
-                       if (btree_delete(queue, XFS_DADDR_TO_FSB(mp,
+                       if (btree_delete(args->io_queue, XFS_DADDR_TO_FSB(mp,
                                        XFS_BUF_ADDR(bplist[i]))) == NULL)
                                do_error(_("prefetch corruption\n"));
                }
@@ -566,7 +569,7 @@ pf_io_worker(
                return NULL;
 
        pthread_mutex_lock(&args->lock);
-       while (!args->queuing_done || btree_find(args->primary_io_queue, 0, NULL)) {
+       while (!args->queuing_done || btree_find(args->io_queue, 0, NULL)) {
 
 #ifdef XR_PF_TRACE
                pftrace("waiting to start prefetch I/O for AG %d", args->agno);
@@ -692,8 +695,7 @@ pf_queuing_worker(
 #endif
        pthread_mutex_lock(&args->lock);
 
-       ASSERT(btree_find(args->primary_io_queue, 0, NULL) == NULL);
-       ASSERT(btree_find(args->secondary_io_queue, 0, NULL) == NULL);
+       ASSERT(btree_find(args->io_queue, 0, NULL) == NULL);
 
        args->prefetch_done = 1;
        if (args->next_args)
@@ -751,8 +753,7 @@ start_inode_prefetch(
 
        args = calloc(1, sizeof(prefetch_args_t));
 
-       btree_init(&args->primary_io_queue);
-       btree_init(&args->secondary_io_queue);
+       btree_init(&args->io_queue);
        if (pthread_mutex_init(&args->lock, NULL) != 0)
                do_error(_("failed to initialize prefetch mutex\n"));
        if (pthread_cond_init(&args->start_reading, NULL) != 0)
@@ -831,8 +832,7 @@ cleanup_inode_prefetch(
        pthread_cond_destroy(&args->start_reading);
        pthread_cond_destroy(&args->start_processing);
        sem_destroy(&args->ra_count);
-       btree_destroy(args->primary_io_queue);
-       btree_destroy(args->secondary_io_queue);
+       btree_destroy(args->io_queue);
 
        free(args);
 }
index ff3b6b0bc2a0bd1c888148556f73d5fe70d36ca7..17ef46b0621182ee442ece94a8ab245a883896c5 100644 (file)
@@ -13,8 +13,7 @@ typedef struct prefetch_args {
        pthread_mutex_t         lock;
        pthread_t               queuing_thread;
        pthread_t               io_threads[PF_THREAD_COUNT];
-       struct btree_root       *primary_io_queue;
-       struct btree_root       *secondary_io_queue;
+       struct btree_root       *io_queue;
        pthread_cond_t          start_reading;
        pthread_cond_t          start_processing;
        int                     agno;