OBJS = \
heapam.o \
heapam_handler.o \
+ heapam_indexscan.o \
heapam_visibility.o \
heapam_xlog.o \
heaptoast.o \
return false;
}
-/*
- * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
- *
- * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
- * of a HOT chain), and buffer is the buffer holding this tuple. We search
- * for the first chain member satisfying the given snapshot. If one is
- * found, we update *tid to reference that tuple's offset number, and
- * return true. If no match, return false without modifying *tid.
- *
- * heapTuple is a caller-supplied buffer. When a match is found, we return
- * the tuple here, in addition to updating *tid. If no match is found, the
- * contents of this buffer on return are undefined.
- *
- * If all_dead is not NULL, we check non-visible tuples to see if they are
- * globally dead; *all_dead is set true if all members of the HOT chain
- * are vacuumable, false if not.
- *
- * Unlike heap_fetch, the caller must already have pin and (at least) share
- * lock on the buffer; it is still pinned/locked at exit.
- */
-bool
-heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
- Snapshot snapshot, HeapTuple heapTuple,
- bool *all_dead, bool first_call)
-{
- Page page = BufferGetPage(buffer);
- TransactionId prev_xmax = InvalidTransactionId;
- BlockNumber blkno;
- OffsetNumber offnum;
- bool at_chain_start;
- bool valid;
- bool skip;
- GlobalVisState *vistest = NULL;
-
- /* If this is not the first call, previous call returned a (live!) tuple */
- if (all_dead)
- *all_dead = first_call;
-
- blkno = ItemPointerGetBlockNumber(tid);
- offnum = ItemPointerGetOffsetNumber(tid);
- at_chain_start = first_call;
- skip = !first_call;
-
- /* XXX: we should assert that a snapshot is pushed or registered */
- Assert(TransactionIdIsValid(RecentXmin));
- Assert(BufferGetBlockNumber(buffer) == blkno);
-
- /* Scan through possible multiple members of HOT-chain */
- for (;;)
- {
- ItemId lp;
-
- /* check for bogus TID */
- if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
- break;
-
- lp = PageGetItemId(page, offnum);
-
- /* check for unused, dead, or redirected items */
- if (!ItemIdIsNormal(lp))
- {
- /* We should only see a redirect at start of chain */
- if (ItemIdIsRedirected(lp) && at_chain_start)
- {
- /* Follow the redirect */
- offnum = ItemIdGetRedirect(lp);
- at_chain_start = false;
- continue;
- }
- /* else must be end of chain */
- break;
- }
-
- /*
- * Update heapTuple to point to the element of the HOT chain we're
- * currently investigating. Having t_self set correctly is important
- * because the SSI checks and the *Satisfies routine for historical
- * MVCC snapshots need the correct tid to decide about the visibility.
- */
- heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
- heapTuple->t_len = ItemIdGetLength(lp);
- heapTuple->t_tableOid = RelationGetRelid(relation);
- ItemPointerSet(&heapTuple->t_self, blkno, offnum);
-
- /*
- * Shouldn't see a HEAP_ONLY tuple at chain start.
- */
- if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
- break;
-
- /*
- * The xmin should match the previous xmax value, else chain is
- * broken.
- */
- if (TransactionIdIsValid(prev_xmax) &&
- !TransactionIdEquals(prev_xmax,
- HeapTupleHeaderGetXmin(heapTuple->t_data)))
- break;
-
- /*
- * When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
- * will initially be pointing to the tuple we returned last time.
- * Returning it again would be incorrect (and would loop forever), so
- * we skip it and return the next match we find.
- */
- if (!skip)
- {
- /* If it's visible per the snapshot, we must return it */
- valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
- HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
- buffer, snapshot);
-
- if (valid)
- {
- ItemPointerSetOffsetNumber(tid, offnum);
- PredicateLockTID(relation, &heapTuple->t_self, snapshot,
- HeapTupleHeaderGetXmin(heapTuple->t_data));
- if (all_dead)
- *all_dead = false;
- return true;
- }
- }
- skip = false;
-
- /*
- * If we can't see it, maybe no one else can either. At caller
- * request, check whether all chain members are dead to all
- * transactions.
- *
- * Note: if you change the criterion here for what is "dead", fix the
- * planner's get_actual_variable_range() function to match.
- */
- if (all_dead && *all_dead)
- {
- if (!vistest)
- vistest = GlobalVisTestFor(relation);
-
- if (!HeapTupleIsSurelyDead(heapTuple, vistest))
- *all_dead = false;
- }
-
- /*
- * Check to see if HOT chain continues past this tuple; if so fetch
- * the next offnum and loop around.
- */
- if (HeapTupleIsHotUpdated(heapTuple))
- {
- Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
- blkno);
- offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
- at_chain_start = false;
- prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
- }
- else
- break; /* end of chain */
- }
-
- return false;
-}
-
/*
* heap_get_latest_tid - get the latest tid of a specified tuple
*
}
-/* ------------------------------------------------------------------------
- * Index Scan Callbacks for heap AM
- * ------------------------------------------------------------------------
- */
-
-static IndexFetchTableData *
-heapam_index_fetch_begin(Relation rel, uint32 flags)
-{
- IndexFetchHeapData *hscan = palloc0_object(IndexFetchHeapData);
-
- hscan->xs_base.rel = rel;
- hscan->xs_base.flags = flags;
- hscan->xs_cbuf = InvalidBuffer;
- hscan->xs_vmbuffer = InvalidBuffer;
-
- return &hscan->xs_base;
-}
-
-static void
-heapam_index_fetch_reset(IndexFetchTableData *scan)
-{
- IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
-
- if (BufferIsValid(hscan->xs_cbuf))
- {
- ReleaseBuffer(hscan->xs_cbuf);
- hscan->xs_cbuf = InvalidBuffer;
- }
-
- if (BufferIsValid(hscan->xs_vmbuffer))
- {
- ReleaseBuffer(hscan->xs_vmbuffer);
- hscan->xs_vmbuffer = InvalidBuffer;
- }
-}
-
-static void
-heapam_index_fetch_end(IndexFetchTableData *scan)
-{
- IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
-
- heapam_index_fetch_reset(scan);
-
- pfree(hscan);
-}
-
-static bool
-heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
- ItemPointer tid,
- Snapshot snapshot,
- TupleTableSlot *slot,
- bool *heap_continue, bool *all_dead)
-{
- IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
- BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
- bool got_heap_tuple;
-
- Assert(TTS_IS_BUFFERTUPLE(slot));
-
- /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
- if (!*heap_continue)
- {
- /* Switch to correct buffer if we don't have it already */
- Buffer prev_buf = hscan->xs_cbuf;
-
- hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
- hscan->xs_base.rel,
- ItemPointerGetBlockNumber(tid));
-
- /*
- * Prune page, but only if we weren't already on this page
- */
- if (prev_buf != hscan->xs_cbuf)
- heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf,
- &hscan->xs_vmbuffer,
- hscan->xs_base.flags & SO_HINT_REL_READ_ONLY);
- }
-
- /* Obtain share-lock on the buffer so we can examine visibility */
- LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
- got_heap_tuple = heap_hot_search_buffer(tid,
- hscan->xs_base.rel,
- hscan->xs_cbuf,
- snapshot,
- &bslot->base.tupdata,
- all_dead,
- !*heap_continue);
- bslot->base.tupdata.t_self = *tid;
- LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
-
- if (got_heap_tuple)
- {
- /*
- * Only in a non-MVCC snapshot can more than one member of the HOT
- * chain be visible.
- */
- *heap_continue = !IsMVCCLikeSnapshot(snapshot);
-
- slot->tts_tableOid = RelationGetRelid(scan->rel);
- ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
- }
- else
- {
- /* We've reached the end of the HOT chain. */
- *heap_continue = false;
- }
-
- return got_heap_tuple;
-}
-
-
/* ------------------------------------------------------------------------
* Callbacks for non-modifying operations on individual tuples for heap AM
* ------------------------------------------------------------------------
--- /dev/null
+++ b/src/backend/access/heap/heapam_indexscan.c
+/*-------------------------------------------------------------------------
+ *
+ * heapam_indexscan.c
+ * heap table plain index scan and index-only scan code
+ *
+ * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/access/heap/heapam_indexscan.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/relscan.h"
+#include "access/transam.h"
+#include "executor/tuptable.h"
+#include "storage/bufmgr.h"
+#include "storage/predicate.h"
+#include "utils/snapmgr.h"
+
+
+/* ------------------------------------------------------------------------
+ * Index Scan Callbacks for heap AM
+ * ------------------------------------------------------------------------
+ */
+
+IndexFetchTableData *
+heapam_index_fetch_begin(Relation rel, uint32 flags)
+{
+ IndexFetchHeapData *hscan = palloc0_object(IndexFetchHeapData);
+
+ hscan->xs_base.rel = rel;
+ hscan->xs_base.flags = flags;
+ hscan->xs_cbuf = InvalidBuffer;
+ hscan->xs_vmbuffer = InvalidBuffer;
+
+ return &hscan->xs_base;
+}
+
+void
+heapam_index_fetch_reset(IndexFetchTableData *scan)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+
+ if (BufferIsValid(hscan->xs_cbuf))
+ {
+ ReleaseBuffer(hscan->xs_cbuf);
+ hscan->xs_cbuf = InvalidBuffer;
+ }
+
+ if (BufferIsValid(hscan->xs_vmbuffer))
+ {
+ ReleaseBuffer(hscan->xs_vmbuffer);
+ hscan->xs_vmbuffer = InvalidBuffer;
+ }
+}
+
+void
+heapam_index_fetch_end(IndexFetchTableData *scan)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+
+ heapam_index_fetch_reset(scan);
+
+ pfree(hscan);
+}
+
+/*
+ * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
+ *
+ * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
+ * of a HOT chain), and buffer is the buffer holding this tuple. We search
+ * for the first chain member satisfying the given snapshot. If one is
+ * found, we update *tid to reference that tuple's offset number, and
+ * return true. If no match, return false without modifying *tid.
+ *
+ * heapTuple is a caller-supplied buffer. When a match is found, we return
+ * the tuple here, in addition to updating *tid. If no match is found, the
+ * contents of this buffer on return are undefined.
+ *
+ * If all_dead is not NULL, we check non-visible tuples to see if they are
+ * globally dead; *all_dead is set true if all members of the HOT chain
+ * are vacuumable, false if not.
+ *
+ * Unlike heap_fetch, the caller must already have pin and (at least) share
+ * lock on the buffer; it is still pinned/locked at exit.
+ */
+bool
+heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
+ Snapshot snapshot, HeapTuple heapTuple,
+ bool *all_dead, bool first_call)
+{
+ Page page = BufferGetPage(buffer);
+ TransactionId prev_xmax = InvalidTransactionId;
+ BlockNumber blkno;
+ OffsetNumber offnum;
+ bool at_chain_start;
+ bool valid;
+ bool skip;
+ GlobalVisState *vistest = NULL;
+
+ /* If this is not the first call, previous call returned a (live!) tuple */
+ if (all_dead)
+ *all_dead = first_call;
+
+ blkno = ItemPointerGetBlockNumber(tid);
+ offnum = ItemPointerGetOffsetNumber(tid);
+ at_chain_start = first_call;
+ skip = !first_call;
+
+ /* XXX: we should assert that a snapshot is pushed or registered */
+ Assert(TransactionIdIsValid(RecentXmin));
+ Assert(BufferGetBlockNumber(buffer) == blkno);
+
+ /* Scan through possible multiple members of HOT-chain */
+ for (;;)
+ {
+ ItemId lp;
+
+ /* check for bogus TID */
+ if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
+ break;
+
+ lp = PageGetItemId(page, offnum);
+
+ /* check for unused, dead, or redirected items */
+ if (!ItemIdIsNormal(lp))
+ {
+ /* We should only see a redirect at start of chain */
+ if (ItemIdIsRedirected(lp) && at_chain_start)
+ {
+ /* Follow the redirect */
+ offnum = ItemIdGetRedirect(lp);
+ at_chain_start = false;
+ continue;
+ }
+ /* else must be end of chain */
+ break;
+ }
+
+ /*
+ * Update heapTuple to point to the element of the HOT chain we're
+ * currently investigating. Having t_self set correctly is important
+ * because the SSI checks and the *Satisfies routine for historical
+ * MVCC snapshots need the correct tid to decide about the visibility.
+ */
+ heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ heapTuple->t_len = ItemIdGetLength(lp);
+ heapTuple->t_tableOid = RelationGetRelid(relation);
+ ItemPointerSet(&heapTuple->t_self, blkno, offnum);
+
+ /*
+ * Shouldn't see a HEAP_ONLY tuple at chain start.
+ */
+ if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
+ break;
+
+ /*
+ * The xmin should match the previous xmax value, else chain is
+ * broken.
+ */
+ if (TransactionIdIsValid(prev_xmax) &&
+ !TransactionIdEquals(prev_xmax,
+ HeapTupleHeaderGetXmin(heapTuple->t_data)))
+ break;
+
+ /*
+ * When first_call is true (and thus, skip is initially false) we'll
+ * return the first tuple we find. But on later passes, heapTuple
+ * will initially be pointing to the tuple we returned last time.
+ * Returning it again would be incorrect (and would loop forever), so
+ * we skip it and return the next match we find.
+ */
+ if (!skip)
+ {
+ /* If it's visible per the snapshot, we must return it */
+ valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
+ HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
+ buffer, snapshot);
+
+ if (valid)
+ {
+ ItemPointerSetOffsetNumber(tid, offnum);
+ PredicateLockTID(relation, &heapTuple->t_self, snapshot,
+ HeapTupleHeaderGetXmin(heapTuple->t_data));
+ if (all_dead)
+ *all_dead = false;
+ return true;
+ }
+ }
+ skip = false;
+
+ /*
+ * If we can't see it, maybe no one else can either. At caller
+ * request, check whether all chain members are dead to all
+ * transactions.
+ *
+ * Note: if you change the criterion here for what is "dead", fix the
+ * planner's get_actual_variable_range() function to match.
+ */
+ if (all_dead && *all_dead)
+ {
+ if (!vistest)
+ vistest = GlobalVisTestFor(relation);
+
+ if (!HeapTupleIsSurelyDead(heapTuple, vistest))
+ *all_dead = false;
+ }
+
+ /*
+ * Check to see if HOT chain continues past this tuple; if so fetch
+ * the next offnum and loop around.
+ */
+ if (HeapTupleIsHotUpdated(heapTuple))
+ {
+ Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
+ blkno);
+ offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
+ at_chain_start = false;
+ prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
+ }
+ else
+ break; /* end of chain */
+ }
+
+ return false;
+}
+
+bool
+heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
+ ItemPointer tid,
+ Snapshot snapshot,
+ TupleTableSlot *slot,
+ bool *heap_continue, bool *all_dead)
+{
+ IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan;
+ BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
+ bool got_heap_tuple;
+
+ Assert(TTS_IS_BUFFERTUPLE(slot));
+
+ /* We can skip the buffer-switching logic if we're in mid-HOT chain. */
+ if (!*heap_continue)
+ {
+ /* Switch to correct buffer if we don't have it already */
+ Buffer prev_buf = hscan->xs_cbuf;
+
+ hscan->xs_cbuf = ReleaseAndReadBuffer(hscan->xs_cbuf,
+ hscan->xs_base.rel,
+ ItemPointerGetBlockNumber(tid));
+
+ /*
+ * Prune page, but only if we weren't already on this page
+ */
+ if (prev_buf != hscan->xs_cbuf)
+ heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf,
+ &hscan->xs_vmbuffer,
+ hscan->xs_base.flags & SO_HINT_REL_READ_ONLY);
+ }
+
+ /* Obtain share-lock on the buffer so we can examine visibility */
+ LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE);
+ got_heap_tuple = heap_hot_search_buffer(tid,
+ hscan->xs_base.rel,
+ hscan->xs_cbuf,
+ snapshot,
+ &bslot->base.tupdata,
+ all_dead,
+ !*heap_continue);
+ bslot->base.tupdata.t_self = *tid;
+ LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK);
+
+ if (got_heap_tuple)
+ {
+ /*
+ * Only in a non-MVCC snapshot can more than one member of the HOT
+ * chain be visible.
+ */
+ *heap_continue = !IsMVCCLikeSnapshot(snapshot);
+
+ slot->tts_tableOid = RelationGetRelid(scan->rel);
+ ExecStoreBufferHeapTuple(&bslot->base.tupdata, slot, hscan->xs_cbuf);
+ }
+ else
+ {
+ /* We've reached the end of the HOT chain. */
+ *heap_continue = false;
+ }
+
+ return got_heap_tuple;
+}
backend_sources += files(
'heapam.c',
'heapam_handler.c',
+ 'heapam_indexscan.c',
'heapam_visibility.c',
'heapam_xlog.c',
'heaptoast.c',
TupleTableSlot *slot);
extern bool heap_fetch(Relation relation, Snapshot snapshot,
HeapTuple tuple, Buffer *userbuf, bool keep_buf);
-extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
- Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
- bool *all_dead, bool first_call);
extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid);
extern TransactionId heap_index_delete_tuples(Relation rel,
TM_IndexDeleteOp *delstate);
+
+/* in heap/heapam_indexscan.c */
+extern IndexFetchTableData *heapam_index_fetch_begin(Relation rel, uint32 flags);
+extern void heapam_index_fetch_reset(IndexFetchTableData *scan);
+extern void heapam_index_fetch_end(IndexFetchTableData *scan);
+extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation,
+ Buffer buffer, Snapshot snapshot, HeapTuple heapTuple,
+ bool *all_dead, bool first_call);
+extern bool heapam_index_fetch_tuple(struct IndexFetchTableData *scan,
+ ItemPointer tid, Snapshot snapshot,
+ TupleTableSlot *slot, bool *heap_continue,
+ bool *all_dead);
+
/* in heap/pruneheap.c */
extern void heap_page_prune_opt(Relation relation, Buffer buffer,
Buffer *vmbuffer, bool rel_read_only);