Future commits will use the visibility map during on-access pruning, both to
repair VM corruption and to set the VM bit when the page is all-visible.
Saving the vmbuffer in the scan descriptor lets successive calls reuse the
same pin, reducing the number of times the VM page must be pinned and
unpinned and making that overhead negligible.
Author: Melanie Plageman <melanieplageman@gmail.com>
Reviewed-by: Chao Li <li.evan.chao@gmail.com>
Discussion: https://postgr.es/m/C3AB3F5B-626E-4AAA-9529-23E9A20C727F%40gmail.com
/*
* Prune and repair fragmentation for the whole page, if possible.
*/
- heap_page_prune_opt(scan->rs_base.rs_rd, buffer);
+ heap_page_prune_opt(scan->rs_base.rs_rd, buffer, &scan->rs_vmbuffer);
/*
* We must hold share lock on the buffer content while examining tuple
sizeof(TBMIterateResult));
}
+ scan->rs_vmbuffer = InvalidBuffer;
return (TableScanDesc) scan;
}
scan->rs_cbuf = InvalidBuffer;
}
+ if (BufferIsValid(scan->rs_vmbuffer))
+ {
+ ReleaseBuffer(scan->rs_vmbuffer);
+ scan->rs_vmbuffer = InvalidBuffer;
+ }
+
/*
* SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
* additional data vs a normal HeapScan
if (BufferIsValid(scan->rs_cbuf))
ReleaseBuffer(scan->rs_cbuf);
+ if (BufferIsValid(scan->rs_vmbuffer))
+ ReleaseBuffer(scan->rs_vmbuffer);
+
/*
* Must free the read stream before freeing the BufferAccessStrategy.
*/
hscan->xs_base.rel = rel;
hscan->xs_cbuf = InvalidBuffer;
+ hscan->xs_vmbuffer = InvalidBuffer;
return &hscan->xs_base;
}
ReleaseBuffer(hscan->xs_cbuf);
hscan->xs_cbuf = InvalidBuffer;
}
+
+ if (BufferIsValid(hscan->xs_vmbuffer))
+ {
+ ReleaseBuffer(hscan->xs_vmbuffer);
+ hscan->xs_vmbuffer = InvalidBuffer;
+ }
}
static void
* Prune page, but only if we weren't already on this page
*/
if (prev_buf != hscan->xs_cbuf)
- heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf);
+ heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf,
+ &hscan->xs_vmbuffer);
}
/* Obtain share-lock on the buffer so we can examine visibility */
/*
* Prune and repair fragmentation for the whole page, if possible.
*/
- heap_page_prune_opt(scan->rs_rd, buffer);
+ heap_page_prune_opt(scan->rs_rd, buffer, &hscan->rs_vmbuffer);
/*
* We must hold share lock on the buffer content while examining tuple
* if there's not any use in pruning.
*
* Caller must have pin on the buffer, and must *not* have a lock on it.
+ *
+ * This function may pin *vmbuffer. It's passed by reference so the caller can
+ * reuse the pin across calls, avoiding repeated pin/unpin cycles. Caller is
+ * responsible for unpinning it.
*/
void
-heap_page_prune_opt(Relation relation, Buffer buffer)
+heap_page_prune_opt(Relation relation, Buffer buffer, Buffer *vmbuffer)
{
Page page = BufferGetPage(buffer);
TransactionId prune_xid;
*/
ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
+ /*
+ * For sequential scans and bitmap heap scans. The current heap block's
+ * corresponding page in the visibility map.
+ */
+ Buffer rs_vmbuffer;
+
/* these fields only used in page-at-a-time mode and for bitmap scans */
uint32 rs_cindex; /* current tuple's index in vistuples */
uint32 rs_ntuples; /* number of visible tuples on page */
{
IndexFetchTableData xs_base; /* AM independent part of the descriptor */
- Buffer xs_cbuf; /* current heap buffer in scan, if any */
- /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
+ /*
+ * Current heap buffer in scan, if any. NB: if xs_cbuf is not
+ * InvalidBuffer, we hold a pin on that buffer.
+ */
+ Buffer xs_cbuf;
+
+ /* Current heap block's corresponding page in the visibility map */
+ Buffer xs_vmbuffer;
} IndexFetchHeapData;
/* Result codes for HeapTupleSatisfiesVacuum */
TM_IndexDeleteOp *delstate);
/* in heap/pruneheap.c */
-extern void heap_page_prune_opt(Relation relation, Buffer buffer);
+extern void heap_page_prune_opt(Relation relation, Buffer buffer,
+ Buffer *vmbuffer);
extern void heap_page_prune_and_freeze(PruneFreezeParams *params,
PruneFreezeResult *presult,
OffsetNumber *off_loc,