Fix VACUUM so that it always updates pg_class.reltuples/relpages.
author     Tom Lane <tgl@sss.pgh.pa.us>
           Mon, 30 May 2011 21:05:40 +0000 (17:05 -0400)
committer  Tom Lane <tgl@sss.pgh.pa.us>
           Mon, 30 May 2011 21:07:19 +0000 (17:07 -0400)
When we added the ability for vacuum to skip heap pages by consulting the
visibility map, we made it just not update the reltuples/relpages
statistics if it skipped any pages.  But this could leave us with extremely
out-of-date stats for a table that contains any unchanging areas,
especially for TOAST tables which never get processed by ANALYZE.  In
particular this could result in autovacuum making poor decisions about when
to process the table, as in a recent report from Florian Helmberger.  And in
general it's a bad idea to not update the stats at all.  Instead, use the
previous values of reltuples/relpages as an estimate of the tuple density
in unvisited pages.  This approach results in a "moving average" estimate
of reltuples, which should converge to the correct value over multiple
VACUUM and ANALYZE cycles even when individual measurements aren't very
good.
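
To make the estimation rule concrete, here is a minimal standalone C sketch of the moving-average update (the real implementation is vac_estimate_reltuples() in vacuum.c, shown in the diff below); the numbers in main() are purely illustrative:

#include <math.h>
#include <stdio.h>

/*
 * Sketch of the moving-average reltuples estimate: treat the old pg_class
 * values as the tuple density of the unscanned pages, and blend in the
 * density observed on the pages that were scanned.  Corner cases follow the
 * same shape as vac_estimate_reltuples() below, slightly simplified.
 */
static double
estimate_reltuples(double old_rel_pages, double old_rel_tuples,
				   double total_pages, double scanned_pages,
				   double scanned_tuples)
{
	double		old_density;
	double		new_density;
	double		multiplier;

	if (scanned_pages >= total_pages)	/* scanned everything: trust the count */
		return scanned_tuples;
	if (scanned_pages == 0)				/* scanned nothing: keep the old value */
		return old_rel_tuples;
	if (old_rel_pages == 0)				/* no old density: scale up what we saw */
		return floor((scanned_tuples / scanned_pages) * total_pages + 0.5);

	old_density = old_rel_tuples / old_rel_pages;
	new_density = scanned_tuples / scanned_pages;
	multiplier = scanned_pages / total_pages;
	return floor((old_density + (new_density - old_density) * multiplier)
				 * total_pages + 0.5);
}

int
main(void)
{
	/*
	 * Illustrative numbers only: a 1000-page table with old reltuples =
	 * 100000, where VACUUM scanned 100 pages and saw 5000 live tuples.
	 */
	printf("%.0f\n", estimate_reltuples(1000, 100000, 1000, 100, 5000));
	/*
	 * Prints 95000: 90% of the pages keep the old density (100 tuples/page)
	 * while the scanned 10% contribute the new density (50 tuples/page).
	 */
	return 0;
}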

This new method for updating reltuples is used by both VACUUM and ANALYZE,
with the result that we no longer need the grotty interconnections that
caused ANALYZE to not update the stats depending on what had happened
in the parent VACUUM command.

Also, fix the logic for skipping all-visible pages during VACUUM so that it
looks ahead rather than behind to decide what to do, as per a suggestion
from Greg Stark.  This eliminates useless scanning of all-visible pages at
the start of the relation or just after a not-all-visible page.  In
particular, the first few pages of the relation will not be invariably
included in the scanned pages, which seems to help in not overweighting
them in the reltuples estimate.
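
The look-ahead rule can be sketched in isolation as follows.  This is not the patched lazy_scan_heap(); it is a self-contained illustration that uses a plain boolean array in place of the visibility map and keeps only the skip decision.  SKIP_PAGES_THRESHOLD is the threshold the patch uses; everything else is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define SKIP_PAGES_THRESHOLD 32		/* same threshold as the patch */

/*
 * Sketch of the look-ahead skip logic: vm[] stands in for the visibility
 * map.  A run of all-visible pages is skipped only when the run is at least
 * SKIP_PAGES_THRESHOLD pages long, and the decision is made by looking ahead
 * to the next not-all-visible page rather than counting a streak behind us.
 */
static void
scan_with_lookahead(const bool *vm, int nblocks)
{
	int			next_not_all_visible = 0;
	bool		skipping;

	/* establish the invariant before the main loop */
	while (next_not_all_visible < nblocks && vm[next_not_all_visible])
		next_not_all_visible++;
	skipping = (next_not_all_visible >= SKIP_PAGES_THRESHOLD);

	for (int blkno = 0; blkno < nblocks; blkno++)
	{
		if (blkno == next_not_all_visible)
		{
			/* current block is not all-visible; advance the look-ahead */
			next_not_all_visible++;
			while (next_not_all_visible < nblocks && vm[next_not_all_visible])
				next_not_all_visible++;
			skipping = (next_not_all_visible - blkno > SKIP_PAGES_THRESHOLD);
		}
		else if (skipping)
			continue;			/* inside a long all-visible run: skip it */

		printf("scanning block %d\n", blkno);
	}
}

int
main(void)
{
	bool		vm[100];

	/* hypothetical map: pages 10..79 are all-visible, the rest are not */
	for (int i = 0; i < 100; i++)
		vm[i] = (i >= 10 && i < 80);
	scan_with_lookahead(vm, 100);	/* scans blocks 0..9 and 80..99 */
	return 0;
}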

Back-patch to 8.4, where the visibility map was introduced.

src/backend/commands/analyze.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/postmaster/pgstat.c
src/include/commands/vacuum.h
src/include/pgstat.h

diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 7b18fadc1a913c02f67861f320af022cbcf4330f..16169788576a479157900f9475c80c9744db0e1f 100644
@@ -102,18 +102,9 @@ static bool std_typanalyze(VacAttrStats *stats);
 
 /*
  *     analyze_rel() -- analyze one relation
- *
- * If update_reltuples is true, we update reltuples and relpages columns
- * in pg_class.  Caller should pass false if we're part of VACUUM ANALYZE,
- * and the VACUUM didn't skip any pages.  We only have an approximate count,
- * so we don't want to overwrite the accurate values already inserted by the
- * VACUUM in that case.  VACUUM always scans all indexes, however, so the
- * pg_class entries for indexes are never updated if we're part of VACUUM
- * ANALYZE.
  */
 void
-analyze_rel(Oid relid, VacuumStmt *vacstmt,
-                       BufferAccessStrategy bstrategy, bool update_reltuples)
+analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
 {
        Relation        onerel;
        int                     attr_cnt,
@@ -368,9 +359,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
        }
 
        /*
-        * Quit if no analyzable columns and no pg_class update needed.
+        * Quit if no analyzable columns.
         */
-       if (attr_cnt <= 0 && !analyzableindex && !update_reltuples)
+       if (attr_cnt <= 0 && !analyzableindex)
                goto cleanup;
 
        /*
@@ -461,14 +452,11 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
        /*
         * Update pages/tuples stats in pg_class.
         */
-       if (update_reltuples)
-       {
-               vac_update_relstats(onerel,
-                                                       RelationGetNumberOfBlocks(onerel),
-                                                       totalrows, hasindex, InvalidTransactionId);
-               /* report results to the stats collector, too */
-               pgstat_report_analyze(onerel, totalrows, totaldeadrows);
-       }
+       vac_update_relstats(onerel,
+                                               RelationGetNumberOfBlocks(onerel),
+                                               totalrows, hasindex, InvalidTransactionId);
+       /* report results to the stats collector, too */
+       pgstat_report_analyze(onerel, totalrows, totaldeadrows);
 
        /*
         * Same for indexes. Vacuum always scans all indexes, so if we're part of
@@ -1122,18 +1110,19 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
                qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
 
        /*
-        * Estimate total numbers of rows in relation.
+        * Estimate total numbers of rows in relation.  For live rows, use
+        * vac_estimate_reltuples; for dead rows, we have no source of old
+        * information, so we have to assume the density is the same in unseen
+        * pages as in the pages we scanned.
         */
+       *totalrows = vac_estimate_reltuples(onerel, true,
+                                                                               totalblocks,
+                                                                               bs.m,
+                                                                               liverows);
        if (bs.m > 0)
-       {
-               *totalrows = floor((liverows * totalblocks) / bs.m + 0.5);
-               *totaldeadrows = floor((deadrows * totalblocks) / bs.m + 0.5);
-       }
+               *totaldeadrows = floor((deadrows / bs.m) * totalblocks + 0.5);
        else
-       {
-               *totalrows = 0.0;
                *totaldeadrows = 0.0;
-       }
 
        /*
         * Emit some interesting relation info
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 9d2c7234b7e4142fb27d172871f632a5b850a520..e6a7cdcf901071841098bdaedb33f9605c2fbacb 100644
@@ -22,6 +22,8 @@
 #include <sys/time.h>
 #include <unistd.h>
 
+#include <math.h>
+
 #include "access/clog.h"
 #include "access/genam.h"
 #include "access/heapam.h"
@@ -218,7 +220,7 @@ static List *get_rel_oids(Oid relid, const RangeVar *vacrel,
                         const char *stmttype);
 static void vac_truncate_clog(TransactionId frozenXID);
 static void vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast,
-                  bool for_wraparound, bool *scanned_all);
+                  bool for_wraparound);
 static bool full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt);
 static void scan_heap(VRelStats *vacrelstats, Relation onerel,
                  VacPageList vacuum_pages, VacPageList fraged_pages);
@@ -438,11 +440,9 @@ vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
                foreach(cur, relations)
                {
                        Oid                     relid = lfirst_oid(cur);
-                       bool            scanned_all = false;
 
                        if (vacstmt->vacuum)
-                               vacuum_rel(relid, vacstmt, do_toast, for_wraparound,
-                                                  &scanned_all);
+                               vacuum_rel(relid, vacstmt, do_toast, for_wraparound);
 
                        if (vacstmt->analyze)
                        {
@@ -464,7 +464,7 @@ vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
                                else
                                        old_context = MemoryContextSwitchTo(anl_context);
 
-                               analyze_rel(relid, vacstmt, vac_strategy, !scanned_all);
+                               analyze_rel(relid, vacstmt, vac_strategy);
 
                                if (use_own_xacts)
                                {
@@ -684,6 +684,79 @@ vacuum_set_xid_limits(int freeze_min_age,
 }
 
 
+/*
+ * vac_estimate_reltuples() -- estimate the new value for pg_class.reltuples
+ *
+ *             If we scanned the whole relation then we should just use the count of
+ *             live tuples seen; but if we did not, we should not trust the count
+ *             unreservedly, especially not in VACUUM, which may have scanned a quite
+ *             nonrandom subset of the table.  When we have only partial information,
+ *             we take the old value of pg_class.reltuples as a measurement of the
+ *             tuple density in the unscanned pages.
+ *
+ *             This routine is shared by VACUUM and ANALYZE.
+ */
+double
+vac_estimate_reltuples(Relation relation, bool is_analyze,
+                                          BlockNumber total_pages,
+                                          BlockNumber scanned_pages,
+                                          double scanned_tuples)
+{
+       BlockNumber     old_rel_pages = relation->rd_rel->relpages;
+       double          old_rel_tuples = relation->rd_rel->reltuples;
+       double          old_density;
+       double          new_density;
+       double          multiplier;
+       double          updated_density;
+
+       /* If we did scan the whole table, just use the count as-is */
+       if (scanned_pages >= total_pages)
+               return scanned_tuples;
+
+       /*
+        * If scanned_pages is zero but total_pages isn't, keep the existing
+        * value of reltuples.
+        */
+       if (scanned_pages == 0)
+               return old_rel_tuples;
+
+       /*
+        * If old value of relpages is zero, old density is indeterminate; we
+        * can't do much except scale up scanned_tuples to match total_pages.
+        */
+       if (old_rel_pages == 0)
+               return floor((scanned_tuples / scanned_pages) * total_pages + 0.5);
+
+       /*
+        * Okay, we've covered the corner cases.  The normal calculation is to
+        * convert the old measurement to a density (tuples per page), then
+        * update the density using an exponential-moving-average approach,
+        * and finally compute reltuples as updated_density * total_pages.
+        *
+        * For ANALYZE, the moving average multiplier is just the fraction of
+        * the table's pages we scanned.  This is equivalent to assuming
+        * that the tuple density in the unscanned pages didn't change.  Of
+        * course, it probably did, if the new density measurement is different.
+        * But over repeated cycles, the value of reltuples will converge towards
+        * the correct value, if repeated measurements show the same new density.
+        *
+        * For VACUUM, the situation is a bit different: we have looked at a
+        * nonrandom sample of pages, but we know for certain that the pages we
+        * didn't look at are precisely the ones that haven't changed lately.
+        * Thus, there is a reasonable argument for doing exactly the same thing
+        * as for the ANALYZE case, that is use the old density measurement as
+        * the value for the unscanned pages.
+        *
+        * This logic could probably use further refinement.
+        */
+       old_density = old_rel_tuples / old_rel_pages;
+       new_density = scanned_tuples / scanned_pages;
+       multiplier = (double) scanned_pages / (double) total_pages;
+       updated_density = old_density + (new_density - old_density) * multiplier;
+       return floor(updated_density * total_pages + 0.5);
+}
+
+
 /*
  *     vac_update_relstats() -- update statistics for one relation
  *
@@ -1009,14 +1082,10 @@ vac_truncate_clog(TransactionId frozenXID)
  *             many small transactions.  Otherwise, two-phase locking would require
  *             us to lock the entire database during one pass of the vacuum cleaner.
  *
- *             We'll return true in *scanned_all if the vacuum scanned all heap
- *             pages, and updated pg_class.
- *
  *             At entry and exit, we are not inside a transaction.
  */
 static void
-vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
-                  bool *scanned_all)
+vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
 {
        LOCKMODE        lmode;
        Relation        onerel;
@@ -1027,9 +1096,6 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
        int                     save_nestlevel;
        bool            heldoff;
 
-       if (scanned_all)
-               *scanned_all = false;
-
        /* Begin a transaction for vacuuming this relation */
        StartTransactionCommand();
 
@@ -1198,7 +1264,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
        if (vacstmt->full)
                heldoff = full_vacuum_rel(onerel, vacstmt);
        else
-               heldoff = lazy_vacuum_rel(onerel, vacstmt, vac_strategy, scanned_all);
+               heldoff = lazy_vacuum_rel(onerel, vacstmt, vac_strategy);
 
        /* Roll back any GUC changes executed by index functions */
        AtEOXact_GUC(false, save_nestlevel);
@@ -1227,7 +1293,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
         * totally unimportant for toast relations.
         */
        if (toast_relid != InvalidOid)
-               vacuum_rel(toast_relid, vacstmt, false, for_wraparound, NULL);
+               vacuum_rel(toast_relid, vacstmt, false, for_wraparound);
 
        /*
         * Now release the session-level lock on the master table.
@@ -1344,7 +1410,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
 
        /* report results to the stats collector, too */
        pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared,
-                                                true, vacstmt->analyze, vacrelstats->rel_tuples);
+                                                vacstmt->analyze, vacrelstats->rel_tuples);
 
        return heldoff;
 }
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index ad90e9d443b8c71d2c56249190c170196a24ffc7..5c9c7bc3087255878a48d427089e01035d2d79c5 100644
  * Before we consider skipping a page that's marked as clean in
  * visibility map, we must've seen at least this many clean pages.
  */
-#define SKIP_PAGES_THRESHOLD   32
+#define SKIP_PAGES_THRESHOLD   ((BlockNumber) 32)
 
 typedef struct LVRelStats
 {
        /* hasindex = true means two-pass strategy; false means one-pass */
        bool            hasindex;
-       bool            scanned_all;    /* have we scanned all pages (this far)? */
        /* Overall statistics about rel */
-       BlockNumber rel_pages;
+       BlockNumber rel_pages;          /* total number of pages */
+       BlockNumber scanned_pages;      /* number of pages we examined */
+       double          scanned_tuples; /* counts only tuples on scanned pages */
        double          old_rel_tuples; /* previous value of pg_class.reltuples */
-       double          rel_tuples;             /* counts only tuples on scanned pages */
+       double          new_rel_tuples; /* new estimated total # of tuples */
        BlockNumber pages_removed;
        double          tuples_deleted;
        BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
@@ -146,7 +147,7 @@ static int  vac_cmp_itemptr(const void *left, const void *right);
  */
 bool
 lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
-                               BufferAccessStrategy bstrategy, bool *scanned_all)
+                               BufferAccessStrategy bstrategy)
 {
        LVRelStats *vacrelstats;
        Relation   *Irel;
@@ -179,7 +180,6 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
 
        vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
 
-       vacrelstats->scanned_all = true;        /* will be cleared if we skip a page */
        vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples;
        vacrelstats->num_index_scans = 0;
 
@@ -219,24 +219,21 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
        FreeSpaceMapVacuum(onerel);
 
        /*
-        * Update statistics in pg_class.  But only if we didn't skip any pages;
-        * the tuple count only includes tuples from the pages we've visited, and
-        * we haven't frozen tuples in unvisited pages either.  The page count is
-        * accurate in any case, but because we use the reltuples / relpages ratio
-        * in the planner, it's better to not update relpages either if we can't
-        * update reltuples.
+        * Update statistics in pg_class.  But don't change relfrozenxid if we
+        * skipped any pages.
         */
-       if (vacrelstats->scanned_all)
-               vac_update_relstats(onerel,
-                                                       vacrelstats->rel_pages, vacrelstats->rel_tuples,
-                                                       vacrelstats->hasindex,
-                                                       FreezeLimit);
+       vac_update_relstats(onerel,
+                                               vacrelstats->rel_pages, vacrelstats->new_rel_tuples,
+                                               vacrelstats->hasindex,
+                                               (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
+                                               InvalidTransactionId :
+                                               FreezeLimit);
 
        /* report results to the stats collector, too */
        pgstat_report_vacuum(RelationGetRelid(onerel),
                                                 onerel->rd_rel->relisshared,
-                                                vacrelstats->scanned_all,
-                                                vacstmt->analyze, vacrelstats->rel_tuples);
+                                                vacstmt->analyze,
+                                                vacrelstats->new_rel_tuples);
 
        /* and log the action if appropriate */
        if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -253,14 +250,13 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
                                                        get_namespace_name(RelationGetNamespace(onerel)),
                                                        RelationGetRelationName(onerel),
                                                        vacrelstats->num_index_scans,
-                                                 vacrelstats->pages_removed, vacrelstats->rel_pages,
-                                               vacrelstats->tuples_deleted, vacrelstats->rel_tuples,
+                                                       vacrelstats->pages_removed,
+                                                       vacrelstats->rel_pages,
+                                                       vacrelstats->tuples_deleted,
+                                                       vacrelstats->new_rel_tuples,
                                                        pg_rusage_show(&ru0))));
        }
 
-       if (scanned_all)
-               *scanned_all = vacrelstats->scanned_all;
-
        return heldoff;
 }
 
@@ -285,7 +281,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        HeapTupleData tuple;
        char       *relname;
        BlockNumber empty_pages,
-                               scanned_pages,
                                vacuumed_pages;
        double          num_tuples,
                                tups_vacuumed,
@@ -295,7 +290,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        int                     i;
        PGRUsage        ru0;
        Buffer          vmbuffer = InvalidBuffer;
-       BlockNumber all_visible_streak;
+       BlockNumber next_not_all_visible_block;
+       bool            skipping_all_visible_blocks;
 
        pg_rusage_init(&ru0);
 
@@ -305,7 +301,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                                        get_namespace_name(RelationGetNamespace(onerel)),
                                        relname)));
 
-       empty_pages = vacuumed_pages = scanned_pages = 0;
+       empty_pages = vacuumed_pages = 0;
        num_tuples = tups_vacuumed = nkeep = nunused = 0;
 
        indstats = (IndexBulkDeleteResult **)
@@ -313,11 +309,46 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 
        nblocks = RelationGetNumberOfBlocks(onerel);
        vacrelstats->rel_pages = nblocks;
+       vacrelstats->scanned_pages = 0;
        vacrelstats->nonempty_pages = 0;
 
        lazy_space_alloc(vacrelstats, nblocks);
 
-       all_visible_streak = 0;
+       /*
+        * We want to skip pages that don't require vacuuming according to the
+        * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD
+        * consecutive pages.  Since we're reading sequentially, the OS should be
+        * doing readahead for us, so there's no gain in skipping a page now and
+        * then; that's likely to disable readahead and so be counterproductive.
+        * Also, skipping even a single page means that we can't update
+        * relfrozenxid, so we only want to do it if we can skip a goodly number
+        * of pages.
+        *
+        * Before entering the main loop, establish the invariant that
+        * next_not_all_visible_block is the next block number >= blkno that's
+        * not all-visible according to the visibility map, or nblocks if there's
+        * no such block.  Also, we set up the skipping_all_visible_blocks flag,
+        * which is needed because we need hysteresis in the decision: once we've
+        * started skipping blocks, we may as well skip everything up to the next
+        * not-all-visible block.
+        *
+        * Note: if scan_all is true, we won't actually skip any pages; but we
+        * maintain next_not_all_visible_block anyway, so as to set up the
+        * all_visible_according_to_vm flag correctly for each page.
+        */
+       for (next_not_all_visible_block = 0;
+                next_not_all_visible_block < nblocks;
+                next_not_all_visible_block++)
+       {
+               if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))
+                       break;
+               vacuum_delay_point();
+       }
+       if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD)
+               skipping_all_visible_blocks = true;
+       else
+               skipping_all_visible_blocks = false;
+
        for (blkno = 0; blkno < nblocks; blkno++)
        {
                Buffer          buf;
@@ -330,41 +361,45 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                OffsetNumber frozen[MaxOffsetNumber];
                int                     nfrozen;
                Size            freespace;
-               bool            all_visible_according_to_vm = false;
+               bool            all_visible_according_to_vm;
                bool            all_visible;
                bool            has_dead_tuples;
 
-               /*
-                * Skip pages that don't require vacuuming according to the visibility
-                * map. But only if we've seen a streak of at least
-                * SKIP_PAGES_THRESHOLD pages marked as clean. Since we're reading
-                * sequentially, the OS should be doing readahead for us and there's
-                * no gain in skipping a page now and then. You need a longer run of
-                * consecutive skipped pages before it's worthwhile. Also, skipping
-                * even a single page means that we can't update relfrozenxid or
-                * reltuples, so we only want to do it if there's a good chance to
-                * skip a goodly number of pages.
-                */
-               if (!scan_all)
+               if (blkno == next_not_all_visible_block)
                {
-                       all_visible_according_to_vm =
-                               visibilitymap_test(onerel, blkno, &vmbuffer);
-                       if (all_visible_according_to_vm)
+                       /* Time to advance next_not_all_visible_block */
+                       for (next_not_all_visible_block++;
+                                next_not_all_visible_block < nblocks;
+                                next_not_all_visible_block++)
                        {
-                               all_visible_streak++;
-                               if (all_visible_streak >= SKIP_PAGES_THRESHOLD)
-                               {
-                                       vacrelstats->scanned_all = false;
-                                       continue;
-                               }
+                               if (!visibilitymap_test(onerel, next_not_all_visible_block,
+                                                                               &vmbuffer))
+                                       break;
+                               vacuum_delay_point();
                        }
+
+                       /*
+                        * We know we can't skip the current block.  But set up
+                        * skipping_all_visible_blocks to do the right thing at the
+                        * following blocks.
+                        */
+                       if (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD)
+                               skipping_all_visible_blocks = true;
                        else
-                               all_visible_streak = 0;
+                               skipping_all_visible_blocks = false;
+                       all_visible_according_to_vm = false;
+               }
+               else
+               {
+                       /* Current block is all-visible */
+                       if (skipping_all_visible_blocks && !scan_all)
+                               continue;
+                       all_visible_according_to_vm = true;
                }
 
                vacuum_delay_point();
 
-               scanned_pages++;
+               vacrelstats->scanned_pages++;
 
                /*
                 * If we are close to overrunning the available space for dead-tuple
@@ -732,9 +767,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        }
 
        /* save stats for use later */
-       vacrelstats->rel_tuples = num_tuples;
+       vacrelstats->scanned_tuples = num_tuples;
        vacrelstats->tuples_deleted = tups_vacuumed;
 
+       /* now we can compute the new value for pg_class.reltuples */
+       vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
+                                                                                                                nblocks,
+                                                                                                                vacrelstats->scanned_pages,
+                                                                                                                num_tuples);
+
        /* If any tuples need to be deleted, perform final vacuum cycle */
        /* XXX put a threshold on min number of tuples here? */
        if (vacrelstats->num_dead_tuples > 0)
@@ -770,7 +811,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        ereport(elevel,
                        (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
                                        RelationGetRelationName(onerel),
-                                       tups_vacuumed, num_tuples, scanned_pages, nblocks),
+                                       tups_vacuumed, num_tuples,
+                                       vacrelstats->scanned_pages, nblocks),
                         errdetail("%.0f dead row versions cannot be removed yet.\n"
                                           "There were %.0f unused item pointers.\n"
                                           "%u pages are entirely empty.\n"
@@ -944,10 +986,9 @@ lazy_cleanup_index(Relation indrel,
        ivinfo.index = indrel;
        ivinfo.vacuum_full = false;
        ivinfo.analyze_only = false;
-       ivinfo.estimated_count = !vacrelstats->scanned_all;
+       ivinfo.estimated_count = (vacrelstats->scanned_pages < vacrelstats->rel_pages);
        ivinfo.message_level = elevel;
-       /* use rel_tuples only if we scanned all pages, else fall back */
-       ivinfo.num_heap_tuples = vacrelstats->scanned_all ? vacrelstats->rel_tuples : vacrelstats->old_rel_tuples;
+       ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
        ivinfo.strategy = vac_strategy;
 
        stats = index_vacuum_cleanup(&ivinfo, stats);
@@ -1008,8 +1049,13 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
        new_rel_pages = RelationGetNumberOfBlocks(onerel);
        if (new_rel_pages != old_rel_pages)
        {
-               /* might as well use the latest news when we update pg_class stats */
-               vacrelstats->rel_pages = new_rel_pages;
+               /*
+                * Note: we intentionally don't update vacrelstats->rel_pages with
+                * the new rel size here.  If we did, it would amount to assuming that
+                * the new pages are empty, which is unlikely.  Leaving the numbers
+                * alone amounts to assuming that the new pages have the same tuple
+                * density as existing ones, which is less unlikely.
+                */
                UnlockRelation(onerel, AccessExclusiveLock);
                return;
        }
@@ -1044,7 +1090,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
         * can safely access the table again.
         */
 
-       /* update statistics */
+       /*
+        * Update statistics.  Here, it *is* correct to adjust rel_pages without
+        * also touching reltuples, since the tuple count wasn't changed by the
+        * truncation.
+        */
        vacrelstats->rel_pages = new_rel_pages;
        vacrelstats->pages_removed = old_rel_pages - new_rel_pages;
 
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 05140c42791e69332e6d65afd3e398aba07cc47e..19b1f183166fedbac29317828be53ae2487fbd6e 100644
@@ -1180,7 +1180,7 @@ pgstat_report_autovac(Oid dboid)
  * ---------
  */
 void
-pgstat_report_vacuum(Oid tableoid, bool shared, bool scanned_all,
+pgstat_report_vacuum(Oid tableoid, bool shared,
                                         bool analyze, PgStat_Counter tuples)
 {
        PgStat_MsgVacuum msg;
@@ -1191,7 +1191,6 @@ pgstat_report_vacuum(Oid tableoid, bool shared, bool scanned_all,
        pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_VACUUM);
        msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
        msg.m_tableoid = tableoid;
-       msg.m_scanned_all = scanned_all;
        msg.m_analyze = analyze;
        msg.m_autovacuum = IsAutoVacuumWorkerProcess();         /* is this autovacuum? */
        msg.m_vacuumtime = GetCurrentTimestamp();
@@ -3773,21 +3772,12 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
                tabentry->autovac_vacuum_timestamp = msg->m_vacuumtime;
        else
                tabentry->vacuum_timestamp = msg->m_vacuumtime;
-       if (msg->m_scanned_all)
-               tabentry->n_live_tuples = msg->m_tuples;
+       tabentry->n_live_tuples = msg->m_tuples;
        /* Resetting dead_tuples to 0 is an approximation ... */
        tabentry->n_dead_tuples = 0;
        if (msg->m_analyze)
        {
-               if (msg->m_scanned_all)
-                       tabentry->last_anl_tuples = msg->m_tuples;
-               else
-               {
-                       /* last_anl_tuples must never exceed n_live_tuples+n_dead_tuples */
-                       tabentry->last_anl_tuples = Min(tabentry->last_anl_tuples,
-                                                                                       tabentry->n_live_tuples);
-               }
-
+               tabentry->last_anl_tuples = msg->m_tuples;
                if (msg->m_autovacuum)
                        tabentry->autovac_analyze_timestamp = msg->m_vacuumtime;
                else
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 22008e0b47c59c6e7be34a96848f7eeac6e90aca..9012b4e2e38e04287e2ac11af2e092a6bcb970b1 100644
@@ -131,6 +131,10 @@ extern void vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
 extern void vac_open_indexes(Relation relation, LOCKMODE lockmode,
                                 int *nindexes, Relation **Irel);
 extern void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode);
+extern double vac_estimate_reltuples(Relation relation, bool is_analyze,
+                                          BlockNumber total_pages,
+                                          BlockNumber scanned_pages,
+                                          double scanned_tuples);
 extern void vac_update_relstats(Relation relation,
                                        BlockNumber num_pages,
                                        double num_tuples,
@@ -147,10 +151,10 @@ extern void vacuum_delay_point(void);
 
 /* in commands/vacuumlazy.c */
 extern bool lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
-                               BufferAccessStrategy bstrategy, bool *scanned_all);
+                               BufferAccessStrategy bstrategy);
 
 /* in commands/analyze.c */
 extern void analyze_rel(Oid relid, VacuumStmt *vacstmt,
-                       BufferAccessStrategy bstrategy, bool update_reltuples);
+                       BufferAccessStrategy bstrategy);
 
 #endif   /* VACUUM_H */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 7fa25175f44fdf742591fed0cdcbda8cf26fed9b..bac5262ed799a2ff744cae4c340e1a5c74a5924c 100644
@@ -284,7 +284,6 @@ typedef struct PgStat_MsgVacuum
        Oid                     m_tableoid;
        bool            m_analyze;
        bool            m_autovacuum;
-       bool            m_scanned_all;
        TimestampTz m_vacuumtime;
        PgStat_Counter m_tuples;
 } PgStat_MsgVacuum;
@@ -632,7 +631,7 @@ extern void pgstat_clear_snapshot(void);
 extern void pgstat_reset_counters(void);
 
 extern void pgstat_report_autovac(Oid dboid);
-extern void pgstat_report_vacuum(Oid tableoid, bool shared, bool scanned_all,
+extern void pgstat_report_vacuum(Oid tableoid, bool shared,
                                         bool analyze, PgStat_Counter tuples);
 extern void pgstat_report_analyze(Relation rel,
                                          PgStat_Counter livetuples,