int num_index_scans;
int num_dead_items_resets;
Size total_dead_items_bytes;
+
+ /*
+ * Total number of planned and actually launched parallel workers for
+ * index vacuuming and index cleanup.
+ */
+ PVWorkerUsage worker_usage;
+
/* Counters that follow are only for scanned_pages */
int64 tuples_deleted; /* # deleted from table */
int64 tuples_frozen; /* # newly frozen */
vacrel->new_all_visible_all_frozen_pages = 0;
vacrel->new_all_frozen_pages = 0;
+ vacrel->worker_usage.vacuum.nlaunched = 0;
+ vacrel->worker_usage.vacuum.nplanned = 0;
+ vacrel->worker_usage.cleanup.nlaunched = 0;
+ vacrel->worker_usage.cleanup.nplanned = 0;
+
/*
* Get cutoffs that determine which deleted tuples are considered DEAD,
* not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
orig_rel_pages == 0 ? 100.0 :
100.0 * vacrel->lpdead_item_pages / orig_rel_pages,
vacrel->lpdead_items);
+
+ if (vacrel->worker_usage.vacuum.nplanned > 0)
+ appendStringInfo(&buf,
+ _("parallel workers: index vacuum: %d planned, %d launched in total\n"),
+ vacrel->worker_usage.vacuum.nplanned,
+ vacrel->worker_usage.vacuum.nlaunched);
+
+ if (vacrel->worker_usage.cleanup.nplanned > 0)
+ appendStringInfo(&buf,
+ _("parallel workers: index cleanup: %d planned, %d launched\n"),
+ vacrel->worker_usage.cleanup.nplanned,
+ vacrel->worker_usage.cleanup.nlaunched);
+
for (int i = 0; i < vacrel->nindexes; i++)
{
IndexBulkDeleteResult *istat = vacrel->indstats[i];
{
/* Outsource everything to parallel variant */
parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
- vacrel->num_index_scans);
+ vacrel->num_index_scans,
+ &(vacrel->worker_usage.vacuum));
/*
* Do a postcheck to consider applying wraparound failsafe now. Note
/* Outsource everything to parallel variant */
parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
vacrel->num_index_scans,
- estimated_count);
+ estimated_count,
+ &(vacrel->worker_usage.cleanup));
}
/* Reset the progress counters */
static int parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested,
bool *will_parallel_vacuum);
static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans,
- bool vacuum);
+ bool vacuum, PVWorkerStats *wstats);
static void parallel_vacuum_process_safe_indexes(ParallelVacuumState *pvs);
static void parallel_vacuum_process_unsafe_indexes(ParallelVacuumState *pvs);
static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel,
*/
void
parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples,
- int num_index_scans)
+ int num_index_scans, PVWorkerStats *wstats)
{
Assert(!IsParallelWorker());
pvs->shared->reltuples = num_table_tuples;
pvs->shared->estimated_count = true;
- parallel_vacuum_process_all_indexes(pvs, num_index_scans, true);
+ parallel_vacuum_process_all_indexes(pvs, num_index_scans, true, wstats);
}
/*
*/
void
parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples,
- int num_index_scans, bool estimated_count)
+ int num_index_scans, bool estimated_count,
+ PVWorkerStats *wstats)
{
Assert(!IsParallelWorker());
pvs->shared->reltuples = num_table_tuples;
pvs->shared->estimated_count = estimated_count;
- parallel_vacuum_process_all_indexes(pvs, num_index_scans, false);
+ parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wstats);
}
/*
/*
* Perform index vacuum or index cleanup with parallel workers. This function
* must be used by the parallel vacuum leader process.
+ *
+ * If wstats is not NULL, the parallel worker statistics are updated.
*/
static void
parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans,
- bool vacuum)
+ bool vacuum, PVWorkerStats *wstats)
{
int nworkers;
PVIndVacStatus new_status;
*/
nworkers = Min(nworkers, pvs->pcxt->nworkers);
+ /* Update the statistics, if we were asked to */
+ if (wstats != NULL && nworkers > 0)
+ wstats->nplanned += nworkers;
+
/*
* Set index vacuum status and mark whether parallel vacuum worker can
* process it.
/* Enable shared cost balance for leader backend */
VacuumSharedCostBalance = &(pvs->shared->cost_balance);
VacuumActiveNWorkers = &(pvs->shared->active_nworkers);
+
+ /* Update the statistics, if we were asked to */
+ if (wstats != NULL)
+ wstats->nlaunched += pvs->pcxt->nworkers_launched;
}
if (vacuum)
int64 num_items; /* current # of entries */
} VacDeadItemsInfo;
+/*
+ * Statistics for parallel vacuum workers: the number of workers the
+ * leader planned to launch versus the number that actually started,
+ * accumulated for one phase (index vacuum or index cleanup).
+ */
+typedef struct PVWorkerStats
+{
+ /* Number of parallel workers planned to launch */
+ int nplanned;
+
+ /* Number of parallel workers that were successfully launched */
+ int nlaunched;
+} PVWorkerStats;
+
+/*
+ * PVWorkerUsage stores information about total number of launched and
+ * planned workers during parallel vacuum (both for index vacuum and cleanup).
+ */
+typedef struct PVWorkerUsage
+{
+ PVWorkerStats vacuum; /* workers used for index vacuuming */
+ PVWorkerStats cleanup; /* workers used for index cleanup */
+} PVWorkerUsage;
+
/* GUC parameters */
extern PGDLLIMPORT int default_statistics_target; /* PGDLLIMPORT for PostGIS */
extern PGDLLIMPORT int vacuum_freeze_min_age;
extern void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs);
extern void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs,
long num_table_tuples,
- int num_index_scans);
+ int num_index_scans,
+ PVWorkerStats *wstats);
extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs,
long num_table_tuples,
int num_index_scans,
- bool estimated_count);
+ bool estimated_count,
+ PVWorkerStats *wstats);
extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc);
/* in commands/analyze.c */
PVIndVacStatus
PVOID
PVShared
+PVWorkerStats
+PVWorkerUsage
PX_Alias
PX_Cipher
PX_Combo