From: Heikki Linnakangas Date: Sun, 5 Apr 2026 23:13:02 +0000 (+0300) Subject: Convert SLRUs to use the new shmem allocation functions X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2e0943a8597e1eeaec184465ba8b1f2f453a69e0;p=thirdparty%2Fpostgresql.git Convert SLRUs to use the new shmem allocation functions I replaced the old SimpleLruInit() function without a backwards compatibility wrapper, because few extensions define their own SLRUs. Reviewed-by: Ashutosh Bapat Reviewed-by: Matthias van de Meent Reviewed-by: Daniel Gustafsson Discussion: https://www.postgresql.org/message-id/CAExHW5vM1bneLYfg0wGeAa=52UiJ3z4vKd3AJ72X8Fw6k3KKrg@mail.gmail.com --- diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index c654e0929b3..75012d4b8f0 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -43,6 +43,7 @@ #include "pg_trace.h" #include "pgstat.h" #include "storage/proc.h" +#include "storage/subsystems.h" #include "storage/sync.h" #include "utils/guc_hooks.h" #include "utils/wait_event.h" @@ -106,13 +107,21 @@ TransactionIdToPage(TransactionId xid) /* * Link to shared-memory data structures for CLOG control */ -static SlruCtlData XactCtlData; +static void CLOGShmemRequest(void *arg); +static void CLOGShmemInit(void *arg); +static bool CLOGPagePrecedes(int64 page1, int64 page2); +static int clog_errdetail_for_io_error(const void *opaque_data); -#define XactCtl (&XactCtlData) +const ShmemCallbacks CLOGShmemCallbacks = { + .request_fn = CLOGShmemRequest, + .init_fn = CLOGShmemInit, +}; + +static SlruDesc XactSlruDesc; + +#define XactCtl (&XactSlruDesc) -static bool CLOGPagePrecedes(int64 page1, int64 page2); -static int clog_errdetail_for_io_error(const void *opaque_data); static void WriteTruncateXlogRec(int64 pageno, TransactionId oldestXact, Oid oldestXactDb); static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids, @@ -775,16 +784,10 @@ CLOGShmemBuffers(void) } /* - 
* Initialization of shared memory for CLOG + * Register shared memory for CLOG */ -Size -CLOGShmemSize(void) -{ - return SimpleLruShmemSize(CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE); -} - -void -CLOGShmemInit(void) +static void +CLOGShmemRequest(void *arg) { /* If auto-tuning is requested, now is the time to do it */ if (transaction_buffers == 0) @@ -806,12 +809,26 @@ CLOGShmemInit(void) PGC_S_OVERRIDE); } Assert(transaction_buffers != 0); + SimpleLruRequest(.desc = &XactSlruDesc, + .name = "transaction", + .Dir = "pg_xact", + .long_segment_names = false, + + .nslots = CLOGShmemBuffers(), + .nlsns = CLOG_LSNS_PER_PAGE, + + .sync_handler = SYNC_HANDLER_CLOG, + .PagePrecedes = CLOGPagePrecedes, + .errdetail_for_io_error = clog_errdetail_for_io_error, - XactCtl->PagePrecedes = CLOGPagePrecedes; - XactCtl->errdetail_for_io_error = clog_errdetail_for_io_error; - SimpleLruInit(XactCtl, "transaction", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE, - "pg_xact", LWTRANCHE_XACT_BUFFER, - LWTRANCHE_XACT_SLRU, SYNC_HANDLER_CLOG, false); + .buffer_tranche_id = LWTRANCHE_XACT_BUFFER, + .bank_tranche_id = LWTRANCHE_XACT_SLRU, + ); +} + +static void +CLOGShmemInit(void *arg) +{ SlruPagePrecedesUnitTests(XactCtl, CLOG_XACTS_PER_PAGE); } diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 36219dd13cc..2625cbf93bf 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -30,6 +30,7 @@ #include "funcapi.h" #include "miscadmin.h" #include "storage/shmem.h" +#include "storage/subsystems.h" #include "utils/fmgrprotos.h" #include "utils/guc_hooks.h" #include "utils/timestamp.h" @@ -80,9 +81,19 @@ TransactionIdToCTsPage(TransactionId xid) /* * Link to shared-memory data structures for CommitTs control */ -static SlruCtlData CommitTsCtlData; +static void CommitTsShmemRequest(void *arg); +static void CommitTsShmemInit(void *arg); +static bool CommitTsPagePrecedes(int64 page1, int64 page2); +static int 
commit_ts_errdetail_for_io_error(const void *opaque_data); + +const ShmemCallbacks CommitTsShmemCallbacks = { + .request_fn = CommitTsShmemRequest, + .init_fn = CommitTsShmemInit, +}; + +static SlruDesc CommitTsSlruDesc; -#define CommitTsCtl (&CommitTsCtlData) +#define CommitTsCtl (&CommitTsSlruDesc) /* * We keep a cache of the last value set in shared memory. @@ -104,6 +115,7 @@ typedef struct CommitTimestampShared static CommitTimestampShared *commitTsShared; +static void CommitTsShmemInit(void *arg); /* GUC variable */ bool track_commit_timestamp; @@ -114,8 +126,6 @@ static void SetXidCommitTsInPage(TransactionId xid, int nsubxids, static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts, ReplOriginId nodeid, int slotno); static void error_commit_ts_disabled(void); -static bool CommitTsPagePrecedes(int64 page1, int64 page2); -static int commit_ts_errdetail_for_io_error(const void *opaque_data); static void ActivateCommitTs(void); static void DeactivateCommitTs(void); static void WriteTruncateXlogRec(int64 pageno, TransactionId oldestXid); @@ -512,24 +522,12 @@ CommitTsShmemBuffers(void) } /* - * Shared memory sizing for CommitTs + * Register CommitTs shared memory needs at system startup (postmaster start + * or standalone backend) */ -Size -CommitTsShmemSize(void) -{ - return SimpleLruShmemSize(CommitTsShmemBuffers(), 0) + - sizeof(CommitTimestampShared); -} - -/* - * Initialize CommitTs at system startup (postmaster start or standalone - * backend) - */ -void -CommitTsShmemInit(void) +static void +CommitTsShmemRequest(void *arg) { - bool found; - /* If auto-tuning is requested, now is the time to do it */ if (commit_timestamp_buffers == 0) { @@ -550,31 +548,36 @@ CommitTsShmemInit(void) PGC_S_OVERRIDE); } Assert(commit_timestamp_buffers != 0); + SimpleLruRequest(.desc = &CommitTsSlruDesc, + .name = "commit_timestamp", + .Dir = "pg_commit_ts", + .long_segment_names = false, - CommitTsCtl->PagePrecedes = CommitTsPagePrecedes; - 
CommitTsCtl->errdetail_for_io_error = commit_ts_errdetail_for_io_error; - SimpleLruInit(CommitTsCtl, "commit_timestamp", CommitTsShmemBuffers(), 0, - "pg_commit_ts", LWTRANCHE_COMMITTS_BUFFER, - LWTRANCHE_COMMITTS_SLRU, - SYNC_HANDLER_COMMIT_TS, - false); - SlruPagePrecedesUnitTests(CommitTsCtl, COMMIT_TS_XACTS_PER_PAGE); + .nslots = CommitTsShmemBuffers(), - commitTsShared = ShmemInitStruct("CommitTs shared", - sizeof(CommitTimestampShared), - &found); + .PagePrecedes = CommitTsPagePrecedes, + .errdetail_for_io_error = commit_ts_errdetail_for_io_error, - if (!IsUnderPostmaster) - { - Assert(!found); + .sync_handler = SYNC_HANDLER_COMMIT_TS, + .buffer_tranche_id = LWTRANCHE_COMMITTS_BUFFER, + .bank_tranche_id = LWTRANCHE_COMMITTS_SLRU, + ); - commitTsShared->xidLastCommit = InvalidTransactionId; - TIMESTAMP_NOBEGIN(commitTsShared->dataLastCommit.time); - commitTsShared->dataLastCommit.nodeid = InvalidReplOriginId; - commitTsShared->commitTsActive = false; - } - else - Assert(found); + ShmemRequestStruct(.name = "CommitTs shared", + .size = sizeof(CommitTimestampShared), + .ptr = (void **) &commitTsShared, + ); +} + +static void +CommitTsShmemInit(void *arg) +{ + commitTsShared->xidLastCommit = InvalidTransactionId; + TIMESTAMP_NOBEGIN(commitTsShared->dataLastCommit.time); + commitTsShared->dataLastCommit.nodeid = InvalidReplOriginId; + commitTsShared->commitTsActive = false; + + SlruPagePrecedesUnitTests(CommitTsCtl, COMMIT_TS_XACTS_PER_PAGE); } /* diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 9f8d542c098..cb78ba0842d 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -83,6 +83,7 @@ #include "storage/pmsignal.h" #include "storage/proc.h" #include "storage/procarray.h" +#include "storage/subsystems.h" #include "utils/guc_hooks.h" #include "utils/injection_point.h" #include "utils/lsyscache.h" @@ -113,11 +114,16 @@ PreviousMultiXactId(MultiXactId multi) /* * Links to 
shared-memory data structures for MultiXact control */ -static SlruCtlData MultiXactOffsetCtlData; -static SlruCtlData MultiXactMemberCtlData; +static bool MultiXactOffsetPagePrecedes(int64 page1, int64 page2); +static int MultiXactOffsetIoErrorDetail(const void *opaque_data); +static bool MultiXactMemberPagePrecedes(int64 page1, int64 page2); +static int MultiXactMemberIoErrorDetail(const void *opaque_data); + +static SlruDesc MultiXactOffsetSlruDesc; +static SlruDesc MultiXactMemberSlruDesc; -#define MultiXactOffsetCtl (&MultiXactOffsetCtlData) -#define MultiXactMemberCtl (&MultiXactMemberCtlData) +#define MultiXactOffsetCtl (&MultiXactOffsetSlruDesc) +#define MultiXactMemberCtl (&MultiXactMemberSlruDesc) /* * MultiXact state shared across all backends. All this state is protected @@ -220,6 +226,15 @@ static MultiXactStateData *MultiXactState; static MultiXactId *OldestMemberMXactId; static MultiXactId *OldestVisibleMXactId; +static void MultiXactShmemRequest(void *arg); +static void MultiXactShmemInit(void *arg); +static void MultiXactShmemAttach(void *arg); + +const ShmemCallbacks MultiXactShmemCallbacks = { + .request_fn = MultiXactShmemRequest, + .init_fn = MultiXactShmemInit, + .attach_fn = MultiXactShmemAttach, +}; static inline MultiXactId * MyOldestMemberMXactIdSlot(void) @@ -321,10 +336,6 @@ typedef struct MultiXactMemberSlruReadContext MultiXactOffset offset; } MultiXactMemberSlruReadContext; -static bool MultiXactOffsetPagePrecedes(int64 page1, int64 page2); -static bool MultiXactMemberPagePrecedes(int64 page1, int64 page2); -static int MultiXactOffsetIoErrorDetail(const void *opaque_data); -static int MultiXactMemberIoErrorDetail(const void *opaque_data); static void ExtendMultiXactOffset(MultiXactId multi); static void ExtendMultiXactMember(MultiXactOffset offset, int nmembers); static void SetOldestOffset(void); @@ -1747,83 +1758,80 @@ multixact_twophase_postabort(FullTransactionId fxid, uint16 info, multixact_twophase_postcommit(fxid, info, 
recdata, len); } + /* - * Initialization of shared memory for MultiXact. - * - * MultiXactSharedStateShmemSize() calculates the size of the MultiXactState - * struct, and the two per-backend MultiXactId arrays. They are carved out of - * the same allocation. MultiXactShmemSize() additionally includes the memory - * needed for the two SLRU areas. + * Register shared memory needs for MultiXact. */ -static Size -MultiXactSharedStateShmemSize(void) +static void +MultiXactShmemRequest(void *arg) { Size size; + /* + * Calculate the size of the MultiXactState struct, and the two + * per-backend MultiXactId arrays. They are carved out of the same + * allocation. + */ size = offsetof(MultiXactStateData, perBackendXactIds); size = add_size(size, mul_size(sizeof(MultiXactId), NumMemberSlots)); size = add_size(size, mul_size(sizeof(MultiXactId), NumVisibleSlots)); - return size; -} + ShmemRequestStruct(.name = "Shared MultiXact State", + .size = size, + .ptr = (void **) &MultiXactState, + ); -Size -MultiXactShmemSize(void) -{ - Size size; + SimpleLruRequest(.desc = &MultiXactOffsetSlruDesc, + .name = "multixact_offset", + .Dir = "pg_multixact/offsets", + .long_segment_names = false, - size = MultiXactSharedStateShmemSize(); - size = add_size(size, SimpleLruShmemSize(multixact_offset_buffers, 0)); - size = add_size(size, SimpleLruShmemSize(multixact_member_buffers, 0)); + .nslots = multixact_offset_buffers, - return size; -} + .sync_handler = SYNC_HANDLER_MULTIXACT_OFFSET, + .PagePrecedes = MultiXactOffsetPagePrecedes, + .errdetail_for_io_error = MultiXactOffsetIoErrorDetail, -void -MultiXactShmemInit(void) -{ - bool found; + .buffer_tranche_id = LWTRANCHE_MULTIXACTOFFSET_BUFFER, + .bank_tranche_id = LWTRANCHE_MULTIXACTOFFSET_SLRU, + ); - debug_elog2(DEBUG2, "Shared Memory Init for MultiXact"); + SimpleLruRequest(.desc = &MultiXactMemberSlruDesc, + .name = "multixact_member", + .Dir = "pg_multixact/members", + .long_segment_names = true, - MultiXactOffsetCtl->PagePrecedes = 
MultiXactOffsetPagePrecedes; - MultiXactMemberCtl->PagePrecedes = MultiXactMemberPagePrecedes; - MultiXactOffsetCtl->errdetail_for_io_error = MultiXactOffsetIoErrorDetail; - MultiXactMemberCtl->errdetail_for_io_error = MultiXactMemberIoErrorDetail; + .nslots = multixact_member_buffers, - SimpleLruInit(MultiXactOffsetCtl, - "multixact_offset", multixact_offset_buffers, 0, - "pg_multixact/offsets", LWTRANCHE_MULTIXACTOFFSET_BUFFER, - LWTRANCHE_MULTIXACTOFFSET_SLRU, - SYNC_HANDLER_MULTIXACT_OFFSET, - false); - SlruPagePrecedesUnitTests(MultiXactOffsetCtl, MULTIXACT_OFFSETS_PER_PAGE); - SimpleLruInit(MultiXactMemberCtl, - "multixact_member", multixact_member_buffers, 0, - "pg_multixact/members", LWTRANCHE_MULTIXACTMEMBER_BUFFER, - LWTRANCHE_MULTIXACTMEMBER_SLRU, - SYNC_HANDLER_MULTIXACT_MEMBER, - true); - /* doesn't call SimpleLruTruncate() or meet criteria for unit tests */ - - /* Initialize our shared state struct */ - MultiXactState = ShmemInitStruct("Shared MultiXact State", - MultiXactSharedStateShmemSize(), - &found); - if (!IsUnderPostmaster) - { - Assert(!found); + .sync_handler = SYNC_HANDLER_MULTIXACT_MEMBER, + .PagePrecedes = MultiXactMemberPagePrecedes, + .errdetail_for_io_error = MultiXactMemberIoErrorDetail, - /* Make sure we zero out the per-backend state */ - MemSet(MultiXactState, 0, MultiXactSharedStateShmemSize()); - } - else - Assert(found); + .buffer_tranche_id = LWTRANCHE_MULTIXACTMEMBER_BUFFER, + .bank_tranche_id = LWTRANCHE_MULTIXACTMEMBER_SLRU, + ); +} + +static void +MultiXactShmemInit(void *arg) +{ + SlruPagePrecedesUnitTests(MultiXactOffsetCtl, MULTIXACT_OFFSETS_PER_PAGE); /* - * Set up array pointers. 
+ * members SLRU doesn't call SimpleLruTruncate() or meet criteria for unit + * tests */ + + /* Set up array pointers */ + OldestMemberMXactId = MultiXactState->perBackendXactIds; + OldestVisibleMXactId = OldestMemberMXactId + NumMemberSlots; +} + +static void +MultiXactShmemAttach(void *arg) +{ + /* Set up array pointers */ OldestMemberMXactId = MultiXactState->perBackendXactIds; OldestVisibleMXactId = OldestMemberMXactId + NumMemberSlots; } diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index a2bb8fa8033..47dd52d6749 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -70,7 +70,9 @@ #include "pgstat.h" #include "storage/fd.h" #include "storage/shmem.h" +#include "storage/shmem_internal.h" #include "utils/guc.h" +#include "utils/memutils.h" #include "utils/wait_event.h" /* @@ -89,9 +91,9 @@ * dir/123456 for [2^20, 2^24-1] */ static inline int -SlruFileName(SlruCtl ctl, char *path, int64 segno) +SlruFileName(SlruDesc *ctl, char *path, int64 segno) { - if (ctl->long_segment_names) + if (ctl->options.long_segment_names) { /* * We could use 16 characters here but the disadvantage would be that @@ -101,7 +103,7 @@ SlruFileName(SlruCtl ctl, char *path, int64 segno) * that in the future we can't decrease SLRU_PAGES_PER_SEGMENT easily. */ Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFFFFFFFFFFF)); - return snprintf(path, MAXPGPATH, "%s/%015" PRIX64, ctl->Dir, segno); + return snprintf(path, MAXPGPATH, "%s/%015" PRIX64, ctl->options.Dir, segno); } else { @@ -110,7 +112,7 @@ SlruFileName(SlruCtl ctl, char *path, int64 segno) * integers are allowed. 
See SlruCorrectSegmentFilenameLength() */ Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFF)); - return snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, + return snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->options.Dir, (unsigned int) segno); } } @@ -176,19 +178,19 @@ static SlruErrorCause slru_errcause; static int slru_errno; -static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno); -static void SimpleLruWaitIO(SlruCtl ctl, int slotno); -static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata); -static bool SlruPhysicalReadPage(SlruCtl ctl, int64 pageno, int slotno); -static bool SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, +static void SimpleLruZeroLSNs(SlruDesc *ctl, int slotno); +static void SimpleLruWaitIO(SlruDesc *ctl, int slotno); +static void SlruInternalWritePage(SlruDesc *ctl, int slotno, SlruWriteAll fdata); +static bool SlruPhysicalReadPage(SlruDesc *ctl, int64 pageno, int slotno); +static bool SlruPhysicalWritePage(SlruDesc *ctl, int64 pageno, int slotno, SlruWriteAll fdata); -static void SlruReportIOError(SlruCtl ctl, int64 pageno, +static void SlruReportIOError(SlruDesc *ctl, int64 pageno, const void *opaque_data); -static int SlruSelectLRUPage(SlruCtl ctl, int64 pageno); +static int SlruSelectLRUPage(SlruDesc *ctl, int64 pageno); -static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, +static bool SlruScanDirCbDeleteCutoff(SlruDesc *ctl, char *filename, int64 segpage, void *data); -static void SlruInternalDeleteSegment(SlruCtl ctl, int64 segno); +static void SlruInternalDeleteSegment(SlruDesc *ctl, int64 segno); static inline void SlruRecentlyUsed(SlruShared shared, int slotno); @@ -196,7 +198,7 @@ static inline void SlruRecentlyUsed(SlruShared shared, int slotno); * Initialization of shared memory */ -Size +static Size SimpleLruShmemSize(int nslots, int nlsns) { int nbanks = nslots / SLRU_BANK_SIZE; @@ -238,120 +240,135 @@ SimpleLruAutotuneBuffers(int divisor, int max) } /* - * Initialize, or attach 
to, a simple LRU cache in shared memory. - * - * ctl: address of local (unshared) control structure. - * name: name of SLRU. (This is user-visible, pick with care!) - * nslots: number of page slots to use. - * nlsns: number of LSN groups per page (set to zero if not relevant). - * subdir: PGDATA-relative subdirectory that will contain the files. - * buffer_tranche_id: tranche ID to use for the SLRU's per-buffer LWLocks. - * bank_tranche_id: tranche ID to use for the bank LWLocks. - * sync_handler: which set of functions to use to handle sync requests - * long_segment_names: use short or long segment names + * Register a simple LRU cache in shared memory. */ void -SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, - const char *subdir, int buffer_tranche_id, int bank_tranche_id, - SyncRequestHandler sync_handler, bool long_segment_names) +SimpleLruRequestWithOpts(const SlruOpts *options) { + SlruOpts *options_copy; + + Assert(options->name != NULL); + Assert(options->nslots > 0); + Assert(options->PagePrecedes != NULL); + Assert(options->errdetail_for_io_error != NULL); + + options_copy = MemoryContextAlloc(TopMemoryContext, + sizeof(SlruOpts)); + memcpy(options_copy, options, sizeof(SlruOpts)); + + options_copy->base.name = options->name; + options_copy->base.size = SimpleLruShmemSize(options_copy->nslots, options_copy->nlsns); + + ShmemRequestInternal(&options_copy->base, SHMEM_KIND_SLRU); +} + +/* Initialize locks and shared memory area */ +void +shmem_slru_init(void *location, ShmemStructOpts *base_options) +{ + SlruOpts *options = (SlruOpts *) base_options; + SlruDesc *desc = (SlruDesc *) options->desc; + char namebuf[NAMEDATALEN]; SlruShared shared; - bool found; + int nslots = options->nslots; int nbanks = nslots / SLRU_BANK_SIZE; + int nlsns = options->nlsns; + char *ptr; + Size offset; + + shared = (SlruShared) location; + desc->shared = shared; + desc->nbanks = nbanks; + memcpy(&desc->options, options, sizeof(SlruOpts)); + + /* assign new 
tranche IDs, if not given */ + if (desc->options.buffer_tranche_id == 0) + { + snprintf(namebuf, sizeof(namebuf), "%s buffer", desc->options.name); + desc->options.buffer_tranche_id = LWLockNewTrancheId(namebuf); + } + if (desc->options.bank_tranche_id == 0) + { + snprintf(namebuf, sizeof(namebuf), "%s bank", desc->options.name); + desc->options.bank_tranche_id = LWLockNewTrancheId(namebuf); + } Assert(nslots <= SLRU_MAX_ALLOWED_BUFFERS); - Assert(ctl->PagePrecedes != NULL); - Assert(ctl->errdetail_for_io_error != NULL); + memset(shared, 0, sizeof(SlruSharedData)); - shared = (SlruShared) ShmemInitStruct(name, - SimpleLruShmemSize(nslots, nlsns), - &found); + shared->num_slots = nslots; + shared->lsn_groups_per_page = nlsns; - if (!IsUnderPostmaster) - { - /* Initialize locks and shared memory area */ - char *ptr; - Size offset; - - Assert(!found); - - memset(shared, 0, sizeof(SlruSharedData)); - - shared->num_slots = nslots; - shared->lsn_groups_per_page = nlsns; - - pg_atomic_init_u64(&shared->latest_page_number, 0); - - shared->slru_stats_idx = pgstat_get_slru_index(name); - - ptr = (char *) shared; - offset = MAXALIGN(sizeof(SlruSharedData)); - shared->page_buffer = (char **) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(char *)); - shared->page_status = (SlruPageStatus *) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(SlruPageStatus)); - shared->page_dirty = (bool *) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(bool)); - shared->page_number = (int64 *) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(int64)); - shared->page_lru_count = (int *) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(int)); - - /* Initialize LWLocks */ - shared->buffer_locks = (LWLockPadded *) (ptr + offset); - offset += MAXALIGN(nslots * sizeof(LWLockPadded)); - shared->bank_locks = (LWLockPadded *) (ptr + offset); - offset += MAXALIGN(nbanks * sizeof(LWLockPadded)); - shared->bank_cur_lru_count = (int *) (ptr + offset); - offset += MAXALIGN(nbanks * 
sizeof(int)); - - if (nlsns > 0) - { - shared->group_lsn = (XLogRecPtr *) (ptr + offset); - offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); - } + pg_atomic_init_u64(&shared->latest_page_number, 0); - ptr += BUFFERALIGN(offset); - for (int slotno = 0; slotno < nslots; slotno++) - { - LWLockInitialize(&shared->buffer_locks[slotno].lock, - buffer_tranche_id); + shared->slru_stats_idx = pgstat_get_slru_index(desc->options.name); - shared->page_buffer[slotno] = ptr; - shared->page_status[slotno] = SLRU_PAGE_EMPTY; - shared->page_dirty[slotno] = false; - shared->page_lru_count[slotno] = 0; - ptr += BLCKSZ; - } + ptr = (char *) shared; + offset = MAXALIGN(sizeof(SlruSharedData)); + shared->page_buffer = (char **) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(char *)); + shared->page_status = (SlruPageStatus *) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(SlruPageStatus)); + shared->page_dirty = (bool *) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(bool)); + shared->page_number = (int64 *) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(int64)); + shared->page_lru_count = (int *) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(int)); - /* Initialize the slot banks. 
*/ - for (int bankno = 0; bankno < nbanks; bankno++) - { - LWLockInitialize(&shared->bank_locks[bankno].lock, bank_tranche_id); - shared->bank_cur_lru_count[bankno] = 0; - } + /* Initialize LWLocks */ + shared->buffer_locks = (LWLockPadded *) (ptr + offset); + offset += MAXALIGN(nslots * sizeof(LWLockPadded)); + shared->bank_locks = (LWLockPadded *) (ptr + offset); + offset += MAXALIGN(nbanks * sizeof(LWLockPadded)); + shared->bank_cur_lru_count = (int *) (ptr + offset); + offset += MAXALIGN(nbanks * sizeof(int)); - /* Should fit to estimated shmem size */ - Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns)); + if (nlsns > 0) + { + shared->group_lsn = (XLogRecPtr *) (ptr + offset); + offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); } - else + + ptr += BUFFERALIGN(offset); + for (int slotno = 0; slotno < nslots; slotno++) { - Assert(found); - Assert(shared->num_slots == nslots); + LWLockInitialize(&shared->buffer_locks[slotno].lock, + desc->options.buffer_tranche_id); + + shared->page_buffer[slotno] = ptr; + shared->page_status[slotno] = SLRU_PAGE_EMPTY; + shared->page_dirty[slotno] = false; + shared->page_lru_count[slotno] = 0; + ptr += BLCKSZ; } - /* - * Initialize the unshared control struct, including directory path. We - * assume caller set PagePrecedes. - */ - ctl->shared = shared; - ctl->sync_handler = sync_handler; - ctl->long_segment_names = long_segment_names; - ctl->nbanks = nbanks; - strlcpy(ctl->Dir, subdir, sizeof(ctl->Dir)); + /* Initialize the slot banks. 
*/ + for (int bankno = 0; bankno < nbanks; bankno++) + { + LWLockInitialize(&shared->bank_locks[bankno].lock, desc->options.bank_tranche_id); + shared->bank_cur_lru_count[bankno] = 0; + } + + /* Should fit to estimated shmem size */ + Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns)); +} + +void +shmem_slru_attach(void *location, ShmemStructOpts *base_options) +{ + SlruOpts *options = (SlruOpts *) base_options; + SlruDesc *desc = (SlruDesc *) options->desc; + int nslots = options->nslots; + int nbanks = nslots / SLRU_BANK_SIZE; + + desc->shared = (SlruShared) location; + desc->nbanks = nbanks; + memcpy(&desc->options, options, sizeof(SlruOpts)); } + /* * Helper function for GUC check_hook to check whether slru buffers are in * multiples of SLRU_BANK_SIZE. @@ -377,7 +394,7 @@ check_slru_buffers(const char *name, int *newval) * Bank lock must be held at entry, and will be held at exit. */ int -SimpleLruZeroPage(SlruCtl ctl, int64 pageno) +SimpleLruZeroPage(SlruDesc *ctl, int64 pageno) { SlruShared shared = ctl->shared; int slotno; @@ -430,7 +447,7 @@ SimpleLruZeroPage(SlruCtl ctl, int64 pageno) * This assumes that InvalidXLogRecPtr is bitwise-all-0. */ static void -SimpleLruZeroLSNs(SlruCtl ctl, int slotno) +SimpleLruZeroLSNs(SlruDesc *ctl, int slotno) { SlruShared shared = ctl->shared; @@ -446,7 +463,7 @@ SimpleLruZeroLSNs(SlruCtl ctl, int slotno) * SLRU bank lock is acquired and released here. */ void -SimpleLruZeroAndWritePage(SlruCtl ctl, int64 pageno) +SimpleLruZeroAndWritePage(SlruDesc *ctl, int64 pageno) { int slotno; LWLock *lock; @@ -472,7 +489,7 @@ SimpleLruZeroAndWritePage(SlruCtl ctl, int64 pageno) * Bank lock must be held at entry, and will be held at exit. 
*/ static void -SimpleLruWaitIO(SlruCtl ctl, int slotno) +SimpleLruWaitIO(SlruDesc *ctl, int slotno) { SlruShared shared = ctl->shared; int bankno = SlotGetBankNumber(slotno); @@ -530,7 +547,7 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno) * The correct bank lock must be held at entry, and will be held at exit. */ int -SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok, +SimpleLruReadPage(SlruDesc *ctl, int64 pageno, bool write_ok, const void *opaque_data) { SlruShared shared = ctl->shared; @@ -634,7 +651,7 @@ SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok, * It is unspecified whether the lock will be shared or exclusive. */ int -SimpleLruReadPage_ReadOnly(SlruCtl ctl, int64 pageno, const void *opaque_data) +SimpleLruReadPage_ReadOnly(SlruDesc *ctl, int64 pageno, const void *opaque_data) { SlruShared shared = ctl->shared; LWLock *banklock = SimpleLruGetBankLock(ctl, pageno); @@ -681,7 +698,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int64 pageno, const void *opaque_data) * Bank lock must be held at entry, and will be held at exit. */ static void -SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata) +SlruInternalWritePage(SlruDesc *ctl, int slotno, SlruWriteAll fdata) { SlruShared shared = ctl->shared; int64 pageno = shared->page_number[slotno]; @@ -761,7 +778,7 @@ SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata) * fdata is always passed a NULL here. */ void -SimpleLruWritePage(SlruCtl ctl, int slotno) +SimpleLruWritePage(SlruDesc *ctl, int slotno) { Assert(ctl->shared->page_status[slotno] != SLRU_PAGE_EMPTY); @@ -775,7 +792,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno) * large enough to contain the given page. 
*/ bool -SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int64 pageno) +SimpleLruDoesPhysicalPageExist(SlruDesc *ctl, int64 pageno) { int64 segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; @@ -833,7 +850,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int64 pageno) * read/write operations. We could cache one virtual file pointer ... */ static bool -SlruPhysicalReadPage(SlruCtl ctl, int64 pageno, int slotno) +SlruPhysicalReadPage(SlruDesc *ctl, int64 pageno, int slotno) { SlruShared shared = ctl->shared; int64 segno = pageno / SLRU_PAGES_PER_SEGMENT; @@ -905,7 +922,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int64 pageno, int slotno) * SimpleLruWriteAll. */ static bool -SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, SlruWriteAll fdata) +SlruPhysicalWritePage(SlruDesc *ctl, int64 pageno, int slotno, SlruWriteAll fdata) { SlruShared shared = ctl->shared; int64 segno = pageno / SLRU_PAGES_PER_SEGMENT; @@ -1037,11 +1054,11 @@ SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, SlruWriteAll fdata) pgstat_report_wait_end(); /* Queue up a sync request for the checkpointer. */ - if (ctl->sync_handler != SYNC_HANDLER_NONE) + if (ctl->options.sync_handler != SYNC_HANDLER_NONE) { FileTag tag; - INIT_SLRUFILETAG(tag, ctl->sync_handler, segno); + INIT_SLRUFILETAG(tag, ctl->options.sync_handler, segno); if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false)) { /* No space to enqueue sync request. Do it synchronously. */ @@ -1077,7 +1094,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, SlruWriteAll fdata) * SlruPhysicalWritePage. Call this after cleaning up shared-memory state. 
*/ static void -SlruReportIOError(SlruCtl ctl, int64 pageno, const void *opaque_data) +SlruReportIOError(SlruDesc *ctl, int64 pageno, const void *opaque_data) { int64 segno = pageno / SLRU_PAGES_PER_SEGMENT; int rpageno = pageno % SLRU_PAGES_PER_SEGMENT; @@ -1092,14 +1109,14 @@ SlruReportIOError(SlruCtl ctl, int64 pageno, const void *opaque_data) ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; case SLRU_SEEK_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not seek in file \"%s\" to offset %d: %m", path, offset), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; case SLRU_READ_FAILED: if (errno) @@ -1107,12 +1124,12 @@ SlruReportIOError(SlruCtl ctl, int64 pageno, const void *opaque_data) (errcode_for_file_access(), errmsg("could not read from file \"%s\" at offset %d: %m", path, offset), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); else ereport(ERROR, (errmsg("could not read from file \"%s\" at offset %d: read too few bytes", path, offset), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; case SLRU_WRITE_FAILED: if (errno) @@ -1120,26 +1137,26 @@ SlruReportIOError(SlruCtl ctl, int64 pageno, const void *opaque_data) (errcode_for_file_access(), errmsg("Could not write to file \"%s\" at offset %d: %m", path, offset), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); else ereport(ERROR, (errmsg("Could not write to file \"%s\" at offset %d: wrote too few bytes.", path, offset), - opaque_data ? 
ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; case SLRU_FSYNC_FAILED: ereport(data_sync_elevel(ERROR), (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", path), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; case SLRU_CLOSE_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", path), - opaque_data ? ctl->errdetail_for_io_error(opaque_data) : 0)); + opaque_data ? ctl->options.errdetail_for_io_error(opaque_data) : 0)); break; default: /* can't get here, we trust */ @@ -1199,7 +1216,7 @@ SlruRecentlyUsed(SlruShared shared, int slotno) * The correct bank lock must be held at entry, and will be held at exit. */ static int -SlruSelectLRUPage(SlruCtl ctl, int64 pageno) +SlruSelectLRUPage(SlruDesc *ctl, int64 pageno) { SlruShared shared = ctl->shared; @@ -1291,8 +1308,8 @@ SlruSelectLRUPage(SlruCtl ctl, int64 pageno) { if (this_delta > best_valid_delta || (this_delta == best_valid_delta && - ctl->PagePrecedes(this_page_number, - best_valid_page_number))) + ctl->options.PagePrecedes(this_page_number, + best_valid_page_number))) { bestvalidslot = slotno; best_valid_delta = this_delta; @@ -1303,8 +1320,8 @@ SlruSelectLRUPage(SlruCtl ctl, int64 pageno) { if (this_delta > best_invalid_delta || (this_delta == best_invalid_delta && - ctl->PagePrecedes(this_page_number, - best_invalid_page_number))) + ctl->options.PagePrecedes(this_page_number, + best_invalid_page_number))) { bestinvalidslot = slotno; best_invalid_delta = this_delta; @@ -1352,7 +1369,7 @@ SlruSelectLRUPage(SlruCtl ctl, int64 pageno) * entries are on disk. 
*/ void -SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied) +SimpleLruWriteAll(SlruDesc *ctl, bool allow_redirtied) { SlruShared shared = ctl->shared; SlruWriteAllData fdata; @@ -1422,8 +1439,8 @@ SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied) SlruReportIOError(ctl, pageno, NULL); /* Ensure that directory entries for new files are on disk. */ - if (ctl->sync_handler != SYNC_HANDLER_NONE) - fsync_fname(ctl->Dir, true); + if (ctl->options.sync_handler != SYNC_HANDLER_NONE) + fsync_fname(ctl->options.Dir, true); } /* @@ -1438,7 +1455,7 @@ SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied) * after it has accrued freshly-written data. */ void -SimpleLruTruncate(SlruCtl ctl, int64 cutoffPage) +SimpleLruTruncate(SlruDesc *ctl, int64 cutoffPage) { SlruShared shared = ctl->shared; int prevbank; @@ -1460,12 +1477,12 @@ restart: * bugs elsewhere in SLRU handling, so we don't care if we read a slightly * outdated value; therefore we don't add a memory barrier. */ - if (ctl->PagePrecedes(pg_atomic_read_u64(&shared->latest_page_number), - cutoffPage)) + if (ctl->options.PagePrecedes(pg_atomic_read_u64(&shared->latest_page_number), + cutoffPage)) { ereport(LOG, (errmsg("could not truncate directory \"%s\": apparent wraparound", - ctl->Dir))); + ctl->options.Dir))); return; } @@ -1488,7 +1505,7 @@ restart: if (shared->page_status[slotno] == SLRU_PAGE_EMPTY) continue; - if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage)) + if (!ctl->options.PagePrecedes(shared->page_number[slotno], cutoffPage)) continue; /* @@ -1533,16 +1550,16 @@ restart: * they either can't yet contain anything, or have already been cleaned out. */ static void -SlruInternalDeleteSegment(SlruCtl ctl, int64 segno) +SlruInternalDeleteSegment(SlruDesc *ctl, int64 segno) { char path[MAXPGPATH]; /* Forget any fsync requests queued for this segment. 
*/ - if (ctl->sync_handler != SYNC_HANDLER_NONE) + if (ctl->options.sync_handler != SYNC_HANDLER_NONE) { FileTag tag; - INIT_SLRUFILETAG(tag, ctl->sync_handler, segno); + INIT_SLRUFILETAG(tag, ctl->options.sync_handler, segno); RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true); } @@ -1556,7 +1573,7 @@ SlruInternalDeleteSegment(SlruCtl ctl, int64 segno) * Delete an individual SLRU segment, identified by the segment number. */ void -SlruDeleteSegment(SlruCtl ctl, int64 segno) +SlruDeleteSegment(SlruDesc *ctl, int64 segno) { SlruShared shared = ctl->shared; int prevbank = SlotGetBankNumber(0); @@ -1633,19 +1650,19 @@ restart: * first>=cutoff && last>=cutoff: no; every page of this segment is too young */ static bool -SlruMayDeleteSegment(SlruCtl ctl, int64 segpage, int64 cutoffPage) +SlruMayDeleteSegment(SlruDesc *ctl, int64 segpage, int64 cutoffPage) { int64 seg_last_page = segpage + SLRU_PAGES_PER_SEGMENT - 1; Assert(segpage % SLRU_PAGES_PER_SEGMENT == 0); - return (ctl->PagePrecedes(segpage, cutoffPage) && - ctl->PagePrecedes(seg_last_page, cutoffPage)); + return (ctl->options.PagePrecedes(segpage, cutoffPage) && + ctl->options.PagePrecedes(seg_last_page, cutoffPage)); } #ifdef USE_ASSERT_CHECKING static void -SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset) +SlruPagePrecedesTestOffset(SlruDesc *ctl, int per_page, uint32 offset) { TransactionId lhs, rhs; @@ -1654,6 +1671,9 @@ SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset) TransactionId newestXact, oldestXact; + /* This must be called after the Slru has been initialized */ + Assert(ctl->options.PagePrecedes); + /* * Compare an XID pair having undefined order (see RFC 1982), a pair at * "opposite ends" of the XID space. 
TransactionIdPrecedes() treats each @@ -1670,19 +1690,19 @@ SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset) Assert(!TransactionIdPrecedes(rhs, lhs + 1)); Assert(!TransactionIdFollowsOrEquals(lhs, rhs)); Assert(!TransactionIdFollowsOrEquals(rhs, lhs)); - Assert(!ctl->PagePrecedes(lhs / per_page, lhs / per_page)); - Assert(!ctl->PagePrecedes(lhs / per_page, rhs / per_page)); - Assert(!ctl->PagePrecedes(rhs / per_page, lhs / per_page)); - Assert(!ctl->PagePrecedes((lhs - per_page) / per_page, rhs / per_page)); - Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 3 * per_page) / per_page)); - Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 2 * per_page) / per_page)); - Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 1 * per_page) / per_page) + Assert(!ctl->options.PagePrecedes(lhs / per_page, lhs / per_page)); + Assert(!ctl->options.PagePrecedes(lhs / per_page, rhs / per_page)); + Assert(!ctl->options.PagePrecedes(rhs / per_page, lhs / per_page)); + Assert(!ctl->options.PagePrecedes((lhs - per_page) / per_page, rhs / per_page)); + Assert(ctl->options.PagePrecedes(rhs / per_page, (lhs - 3 * per_page) / per_page)); + Assert(ctl->options.PagePrecedes(rhs / per_page, (lhs - 2 * per_page) / per_page)); + Assert(ctl->options.PagePrecedes(rhs / per_page, (lhs - 1 * per_page) / per_page) || (1U << 31) % per_page != 0); /* See CommitTsPagePrecedes() */ - Assert(ctl->PagePrecedes((lhs + 1 * per_page) / per_page, rhs / per_page) + Assert(ctl->options.PagePrecedes((lhs + 1 * per_page) / per_page, rhs / per_page) || (1U << 31) % per_page != 0); - Assert(ctl->PagePrecedes((lhs + 2 * per_page) / per_page, rhs / per_page)); - Assert(ctl->PagePrecedes((lhs + 3 * per_page) / per_page, rhs / per_page)); - Assert(!ctl->PagePrecedes(rhs / per_page, (lhs + per_page) / per_page)); + Assert(ctl->options.PagePrecedes((lhs + 2 * per_page) / per_page, rhs / per_page)); + Assert(ctl->options.PagePrecedes((lhs + 3 * per_page) / per_page, rhs / per_page)); + 
Assert(!ctl->options.PagePrecedes(rhs / per_page, (lhs + per_page) / per_page)); /* * GetNewTransactionId() has assigned the last XID it can safely use, and @@ -1727,7 +1747,7 @@ SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset) * do not apply to them.) */ void -SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page) +SlruPagePrecedesUnitTests(SlruDesc *ctl, int per_page) { /* Test first, middle and last entries of a page. */ SlruPagePrecedesTestOffset(ctl, per_page, 0); @@ -1742,7 +1762,7 @@ SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page) * one containing the page passed as "data". */ bool -SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int64 segpage, +SlruScanDirCbReportPresence(SlruDesc *ctl, char *filename, int64 segpage, void *data) { int64 cutoffPage = *(int64 *) data; @@ -1758,7 +1778,7 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int64 segpage, * This callback deletes segments prior to the one passed in as "data". */ static bool -SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int64 segpage, +SlruScanDirCbDeleteCutoff(SlruDesc *ctl, char *filename, int64 segpage, void *data) { int64 cutoffPage = *(int64 *) data; @@ -1774,7 +1794,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int64 segpage, * This callback deletes all segments. */ bool -SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int64 segpage, void *data) +SlruScanDirCbDeleteAll(SlruDesc *ctl, char *filename, int64 segpage, void *data) { SlruInternalDeleteSegment(ctl, segpage / SLRU_PAGES_PER_SEGMENT); @@ -1788,9 +1808,9 @@ SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int64 segpage, void *data) * SLRU segment. 
*/ static inline bool -SlruCorrectSegmentFilenameLength(SlruCtl ctl, size_t len) +SlruCorrectSegmentFilenameLength(SlruDesc *ctl, size_t len) { - if (ctl->long_segment_names) + if (ctl->options.long_segment_names) return (len == 15); /* see SlruFileName() */ else @@ -1821,7 +1841,7 @@ SlruCorrectSegmentFilenameLength(SlruCtl ctl, size_t len) * Note that no locking is applied. */ bool -SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) +SlruScanDirectory(SlruDesc *ctl, SlruScanCallback callback, void *data) { bool retval = false; DIR *cldir; @@ -1829,8 +1849,8 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) int64 segno; int64 segpage; - cldir = AllocateDir(ctl->Dir); - while ((clde = ReadDir(cldir, ctl->Dir)) != NULL) + cldir = AllocateDir(ctl->options.Dir); + while ((clde = ReadDir(cldir, ctl->options.Dir)) != NULL) { size_t len; @@ -1843,7 +1863,7 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) segpage = segno * SLRU_PAGES_PER_SEGMENT; elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s", - ctl->Dir, clde->d_name); + ctl->options.Dir, clde->d_name); retval = callback(ctl, clde->d_name, segpage, data); if (retval) break; @@ -1861,7 +1881,7 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) * performs the fsync. 
*/ int -SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path) +SlruSyncFileTag(SlruDesc *ctl, const FileTag *ftag, char *path) { int fd; int save_errno; diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index c6ce71fc703..b79e648b899 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -33,6 +33,7 @@ #include "access/transam.h" #include "miscadmin.h" #include "pg_trace.h" +#include "storage/subsystems.h" #include "utils/guc_hooks.h" #include "utils/snapmgr.h" @@ -66,16 +67,22 @@ TransactionIdToPage(TransactionId xid) #define TransactionIdToEntry(xid) ((xid) % (TransactionId) SUBTRANS_XACTS_PER_PAGE) +static void SUBTRANSShmemRequest(void *arg); +static void SUBTRANSShmemInit(void *arg); +static bool SubTransPagePrecedes(int64 page1, int64 page2); +static int subtrans_errdetail_for_io_error(const void *opaque_data); + +const ShmemCallbacks SUBTRANSShmemCallbacks = { + .request_fn = SUBTRANSShmemRequest, + .init_fn = SUBTRANSShmemInit, +}; + /* * Link to shared-memory data structures for SUBTRANS control */ -static SlruCtlData SubTransCtlData; - -#define SubTransCtl (&SubTransCtlData) +static SlruDesc SubTransSlruDesc; - -static bool SubTransPagePrecedes(int64 page1, int64 page2); -static int subtrans_errdetail_for_io_error(const void *opaque_data); +#define SubTransCtl (&SubTransSlruDesc) /* @@ -207,17 +214,13 @@ SUBTRANSShmemBuffers(void) return Min(Max(16, subtransaction_buffers), SLRU_MAX_ALLOWED_BUFFERS); } + + /* - * Initialization of shared memory for SUBTRANS + * Register shared memory for SUBTRANS */ -Size -SUBTRANSShmemSize(void) -{ - return SimpleLruShmemSize(SUBTRANSShmemBuffers(), 0); -} - -void -SUBTRANSShmemInit(void) +static void +SUBTRANSShmemRequest(void *arg) { /* If auto-tuning is requested, now is the time to do it */ if (subtransaction_buffers == 0) @@ -240,11 +243,25 @@ SUBTRANSShmemInit(void) } Assert(subtransaction_buffers != 0); - 
SubTransCtl->PagePrecedes = SubTransPagePrecedes; - SubTransCtl->errdetail_for_io_error = subtrans_errdetail_for_io_error; - SimpleLruInit(SubTransCtl, "subtransaction", SUBTRANSShmemBuffers(), 0, - "pg_subtrans", LWTRANCHE_SUBTRANS_BUFFER, - LWTRANCHE_SUBTRANS_SLRU, SYNC_HANDLER_NONE, false); + SimpleLruRequest(.desc = &SubTransSlruDesc, + .name = "subtransaction", + .Dir = "pg_subtrans", + .long_segment_names = false, + + .nslots = SUBTRANSShmemBuffers(), + + .sync_handler = SYNC_HANDLER_NONE, + .PagePrecedes = SubTransPagePrecedes, + .errdetail_for_io_error = subtrans_errdetail_for_io_error, + + .buffer_tranche_id = LWTRANCHE_SUBTRANS_BUFFER, + .bank_tranche_id = LWTRANCHE_SUBTRANS_SLRU, + ); +} + +static void +SUBTRANSShmemInit(void *arg) +{ SlruPagePrecedesUnitTests(SubTransCtl, SUBTRANS_XACTS_PER_PAGE); } diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index e91a62ff42a..db6a9a6561b 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -179,6 +179,7 @@ #include "storage/latch.h" #include "storage/lmgr.h" #include "storage/procsignal.h" +#include "storage/subsystems.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/dsa.h" @@ -345,6 +346,15 @@ typedef struct AsyncQueueControl static AsyncQueueControl *asyncQueueControl; +static void AsyncShmemRequest(void *arg); +static void AsyncShmemInit(void *arg); + +const ShmemCallbacks AsyncShmemCallbacks = { + .request_fn = AsyncShmemRequest, + .init_fn = AsyncShmemInit, +}; + + #define QUEUE_HEAD (asyncQueueControl->head) #define QUEUE_TAIL (asyncQueueControl->tail) #define QUEUE_STOP_PAGE (asyncQueueControl->stopPage) @@ -359,9 +369,13 @@ static AsyncQueueControl *asyncQueueControl; /* * The SLRU buffer area through which we access the notification queue */ -static SlruCtlData NotifyCtlData; +static inline bool asyncQueuePagePrecedes(int64 p, int64 q); +static int asyncQueueErrdetailForIoError(const void *opaque_data); + +static SlruDesc 
NotifySlruDesc; -#define NotifyCtl (&NotifyCtlData) + +#define NotifyCtl (&NotifySlruDesc) #define QUEUE_PAGESIZE BLCKSZ #define QUEUE_FULL_WARN_INTERVAL 5000 /* warn at most once every 5s */ @@ -570,9 +584,7 @@ bool Trace_notify = false; int max_notify_queue_pages = 1048576; /* local function prototypes */ -static int asyncQueueErrdetailForIoError(const void *opaque_data); static inline int64 asyncQueuePageDiff(int64 p, int64 q); -static inline bool asyncQueuePagePrecedes(int64 p, int64 q); static inline void GlobalChannelKeyInit(GlobalChannelKey *key, Oid dboid, const char *channel); static dshash_hash globalChannelTableHash(const void *key, size_t size, @@ -780,78 +792,63 @@ initPendingListenActions(void) } /* - * Report space needed for our shared memory area + * Register our shared memory needs */ -Size -AsyncShmemSize(void) +static void +AsyncShmemRequest(void *arg) { Size size; - /* This had better match AsyncShmemInit */ size = mul_size(MaxBackends, sizeof(QueueBackendStatus)); size = add_size(size, offsetof(AsyncQueueControl, backend)); - size = add_size(size, SimpleLruShmemSize(notify_buffers, 0)); + ShmemRequestStruct(.name = "Async Queue Control", + .size = size, + .ptr = (void **) &asyncQueueControl, + ); - return size; -} + SimpleLruRequest(.desc = &NotifySlruDesc, + .name = "notify", + .Dir = "pg_notify", -/* - * Initialize our shared memory area - */ -void -AsyncShmemInit(void) -{ - bool found; - Size size; + /* long segment names are used in order to avoid wraparound */ + .long_segment_names = true, - /* - * Create or attach to the AsyncQueueControl structure. 
- */ - size = mul_size(MaxBackends, sizeof(QueueBackendStatus)); - size = add_size(size, offsetof(AsyncQueueControl, backend)); + .nslots = notify_buffers, - asyncQueueControl = (AsyncQueueControl *) - ShmemInitStruct("Async Queue Control", size, &found); + .sync_handler = SYNC_HANDLER_NONE, + .PagePrecedes = asyncQueuePagePrecedes, + .errdetail_for_io_error = asyncQueueErrdetailForIoError, - if (!found) + .buffer_tranche_id = LWTRANCHE_NOTIFY_BUFFER, + .bank_tranche_id = LWTRANCHE_NOTIFY_SLRU, + ); +} + +static void +AsyncShmemInit(void *arg) +{ + SET_QUEUE_POS(QUEUE_HEAD, 0, 0); + SET_QUEUE_POS(QUEUE_TAIL, 0, 0); + QUEUE_STOP_PAGE = 0; + QUEUE_FIRST_LISTENER = INVALID_PROC_NUMBER; + asyncQueueControl->lastQueueFillWarn = 0; + asyncQueueControl->globalChannelTableDSA = DSA_HANDLE_INVALID; + asyncQueueControl->globalChannelTableDSH = DSHASH_HANDLE_INVALID; + for (int i = 0; i < MaxBackends; i++) { - /* First time through, so initialize it */ - SET_QUEUE_POS(QUEUE_HEAD, 0, 0); - SET_QUEUE_POS(QUEUE_TAIL, 0, 0); - QUEUE_STOP_PAGE = 0; - QUEUE_FIRST_LISTENER = INVALID_PROC_NUMBER; - asyncQueueControl->lastQueueFillWarn = 0; - asyncQueueControl->globalChannelTableDSA = DSA_HANDLE_INVALID; - asyncQueueControl->globalChannelTableDSH = DSHASH_HANDLE_INVALID; - for (int i = 0; i < MaxBackends; i++) - { - QUEUE_BACKEND_PID(i) = InvalidPid; - QUEUE_BACKEND_DBOID(i) = InvalidOid; - QUEUE_NEXT_LISTENER(i) = INVALID_PROC_NUMBER; - SET_QUEUE_POS(QUEUE_BACKEND_POS(i), 0, 0); - QUEUE_BACKEND_WAKEUP_PENDING(i) = false; - QUEUE_BACKEND_IS_ADVANCING(i) = false; - } + QUEUE_BACKEND_PID(i) = InvalidPid; + QUEUE_BACKEND_DBOID(i) = InvalidOid; + QUEUE_NEXT_LISTENER(i) = INVALID_PROC_NUMBER; + SET_QUEUE_POS(QUEUE_BACKEND_POS(i), 0, 0); + QUEUE_BACKEND_WAKEUP_PENDING(i) = false; + QUEUE_BACKEND_IS_ADVANCING(i) = false; } /* - * Set up SLRU management of the pg_notify data. Note that long segment - * names are used in order to avoid wraparound. 
+ * During start or reboot, clean out the pg_notify directory. */ - NotifyCtl->PagePrecedes = asyncQueuePagePrecedes; - NotifyCtl->errdetail_for_io_error = asyncQueueErrdetailForIoError; - SimpleLruInit(NotifyCtl, "notify", notify_buffers, 0, - "pg_notify", LWTRANCHE_NOTIFY_BUFFER, LWTRANCHE_NOTIFY_SLRU, - SYNC_HANDLER_NONE, true); - - if (!found) - { - /* - * During start or reboot, clean out the pg_notify directory. - */ - (void) SlruScanDirectory(NotifyCtl, SlruScanDirCbDeleteAll, NULL); - } + (void) SlruScanDirectory(NotifyCtl, SlruScanDirCbDeleteAll, NULL); } diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c index 4f707158303..7a8c69de802 100644 --- a/src/backend/storage/ipc/ipci.c +++ b/src/backend/storage/ipc/ipci.c @@ -101,16 +101,11 @@ CalculateShmemSize(void) /* legacy subsystems */ size = add_size(size, BufferManagerShmemSize()); size = add_size(size, LockManagerShmemSize()); - size = add_size(size, PredicateLockShmemSize()); size = add_size(size, XLogPrefetchShmemSize()); size = add_size(size, XLOGShmemSize()); size = add_size(size, XLogRecoveryShmemSize()); - size = add_size(size, CLOGShmemSize()); - size = add_size(size, CommitTsShmemSize()); - size = add_size(size, SUBTRANSShmemSize()); size = add_size(size, TwoPhaseShmemSize()); size = add_size(size, BackgroundWorkerShmemSize()); - size = add_size(size, MultiXactShmemSize()); size = add_size(size, BackendStatusShmemSize()); size = add_size(size, CheckpointerShmemSize()); size = add_size(size, AutoVacuumShmemSize()); @@ -123,7 +118,6 @@ CalculateShmemSize(void) size = add_size(size, ApplyLauncherShmemSize()); size = add_size(size, BTreeShmemSize()); size = add_size(size, SyncScanShmemSize()); - size = add_size(size, AsyncShmemSize()); size = add_size(size, StatsShmemSize()); size = add_size(size, WaitEventCustomShmemSize()); size = add_size(size, InjectionPointShmemSize()); @@ -270,10 +264,6 @@ CreateOrAttachShmemStructs(void) XLOGShmemInit(); XLogPrefetchShmemInit(); 
XLogRecoveryShmemInit(); - CLOGShmemInit(); - CommitTsShmemInit(); - SUBTRANSShmemInit(); - MultiXactShmemInit(); BufferManagerShmemInit(); /* @@ -281,11 +271,6 @@ CreateOrAttachShmemStructs(void) */ LockManagerShmemInit(); - /* - * Set up predicate lock manager - */ - PredicateLockShmemInit(); - /* * Set up process table */ @@ -313,7 +298,6 @@ CreateOrAttachShmemStructs(void) */ BTreeShmemInit(); SyncScanShmemInit(); - AsyncShmemInit(); StatsShmemInit(); WaitEventCustomShmemInit(); InjectionPointShmemInit(); diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c index 51d974523d4..e60dfb4272c 100644 --- a/src/backend/storage/ipc/shmem.c +++ b/src/backend/storage/ipc/shmem.c @@ -130,6 +130,7 @@ #include +#include "access/slru.h" #include "common/int.h" #include "fmgr.h" #include "funcapi.h" @@ -303,8 +304,6 @@ Datum pg_numa_available(PG_FUNCTION_ARGS); * cases, we ensure that all the shmem areas are registered the same way in * all processes. * - * 'desc' is a backend-private handle for the shared memory area. - * * 'options' defines the name and size of the area, and any other optional * features. Leave unused options as zeros. 
The options are copied to * longer-lived memory, so it doesn't need to live after the @@ -546,6 +545,9 @@ InitShmemIndexEntry(ShmemRequest *request) case SHMEM_KIND_HASH: shmem_hash_init(structPtr, request->options); break; + case SHMEM_KIND_SLRU: + shmem_slru_init(structPtr, request->options); + break; } } @@ -599,6 +601,9 @@ AttachShmemIndexEntry(ShmemRequest *request, bool missing_ok) case SHMEM_KIND_HASH: shmem_hash_attach(index_entry->location, request->options); break; + case SHMEM_KIND_SLRU: + shmem_slru_attach(index_entry->location, request->options); + break; } return true; diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index b509fbb2759..899a4ef06e4 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -152,10 +152,6 @@ /* * INTERFACE ROUTINES * - * housekeeping for setting up shared memory predicate lock structures - * PredicateLockShmemInit(void) - * PredicateLockShmemSize(void) - * * predicate lock reporting * GetPredicateLockStatusData(void) * PageIsPredicateLocked(Relation relation, BlockNumber blkno) @@ -211,6 +207,8 @@ #include "storage/predicate_internals.h" #include "storage/proc.h" #include "storage/procarray.h" +#include "storage/shmem.h" +#include "storage/subsystems.h" #include "utils/guc_hooks.h" #include "utils/rel.h" #include "utils/snapmgr.h" @@ -322,9 +320,12 @@ /* * The SLRU buffer area through which we access the old xids. 
*/ -static SlruCtlData SerialSlruCtlData; +static bool SerialPagePrecedesLogically(int64 page1, int64 page2); +static int serial_errdetail_for_io_error(const void *opaque_data); -#define SerialSlruCtl (&SerialSlruCtlData) +static SlruDesc SerialSlruDesc; + +#define SerialSlruCtl (&SerialSlruDesc) #define SERIAL_PAGESIZE BLCKSZ #define SERIAL_ENTRYSIZE sizeof(SerCommitSeqNo) @@ -384,6 +385,17 @@ int max_predicate_locks_per_page; /* in guc_tables.c */ */ static PredXactList PredXact; +static void PredicateLockShmemRequest(void *arg); +static void PredicateLockShmemInit(void *arg); +static void PredicateLockShmemAttach(void *arg); + +const ShmemCallbacks PredicateLockShmemCallbacks = { + .request_fn = PredicateLockShmemRequest, + .init_fn = PredicateLockShmemInit, + .attach_fn = PredicateLockShmemAttach, +}; + + /* * This provides a pool of RWConflict data elements to use in conflict lists * between transactions. @@ -431,6 +443,8 @@ static bool MyXactDidWrite = false; */ static SERIALIZABLEXACT *SavedSerializableXact = InvalidSerializableXact; +static int64 max_serializable_xacts; + /* local functions */ static SERIALIZABLEXACT *CreatePredXact(void); @@ -442,13 +456,12 @@ static void SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact, SERIALIZABLEXACT static void ReleaseRWConflict(RWConflict conflict); static void FlagSxactUnsafe(SERIALIZABLEXACT *sxact); -static bool SerialPagePrecedesLogically(int64 page1, int64 page2); -static int serial_errdetail_for_io_error(const void *opaque_data); static void SerialAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo); static SerCommitSeqNo SerialGetMinConflictCommitSeqNo(TransactionId xid); static void SerialSetActiveSerXmin(TransactionId xid); static uint32 predicatelock_hash(const void *key, Size keysize); + static void SummarizeOldestCommittedSxact(void); static Snapshot GetSafeSnapshot(Snapshot origSnapshot); static Snapshot GetSerializableTransactionSnapshotInt(Snapshot snapshot, @@ -1100,71 +1113,47 @@ 
CheckPointPredicate(void) /*------------------------------------------------------------------------*/ /* - * PredicateLockShmemInit -- Initialize the predicate locking data structures. - * - * This is called from CreateSharedMemoryAndSemaphores(), which see for - * more comments. In the normal postmaster case, the shared hash tables - * are created here. Backends inherit the pointers - * to the shared tables via fork(). In the EXEC_BACKEND case, each - * backend re-executes this code to obtain pointers to the already existing - * shared hash tables. + * PredicateLockShmemRequest -- Register the predicate locking data structures. */ -void -PredicateLockShmemInit(void) +static void +PredicateLockShmemRequest(void *arg) { - HASHCTL info; int64 max_predicate_lock_targets; int64 max_predicate_locks; - int64 max_serializable_xacts; int64 max_rw_conflicts; - Size requestSize; - bool found; - -#ifndef EXEC_BACKEND - Assert(!IsUnderPostmaster); -#endif - - /* - * Compute size of predicate lock target hashtable. Note these - * calculations must agree with PredicateLockShmemSize! - */ - max_predicate_lock_targets = NPREDICATELOCKTARGETENTS(); /* - * Allocate hash table for PREDICATELOCKTARGET structs. This stores + * Register hash table for PREDICATELOCKTARGET structs. This stores * per-predicate-lock-target information. 
*/ - info.keysize = sizeof(PREDICATELOCKTARGETTAG); - info.entrysize = sizeof(PREDICATELOCKTARGET); - info.num_partitions = NUM_PREDICATELOCK_PARTITIONS; - - PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash", - max_predicate_lock_targets, - &info, - HASH_ELEM | HASH_BLOBS | - HASH_PARTITION | HASH_FIXED_SIZE); + max_predicate_lock_targets = NPREDICATELOCKTARGETENTS(); - /* Pre-calculate the hash and partition lock of the scratch entry */ - ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag); - ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash); + ShmemRequestHash(.name = "PREDICATELOCKTARGET hash", + .nelems = max_predicate_lock_targets, + .ptr = &PredicateLockTargetHash, + .hash_info.keysize = sizeof(PREDICATELOCKTARGETTAG), + .hash_info.entrysize = sizeof(PREDICATELOCKTARGET), + .hash_info.num_partitions = NUM_PREDICATELOCK_PARTITIONS, + .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION | HASH_FIXED_SIZE, + ); /* * Allocate hash table for PREDICATELOCK structs. This stores per * xact-lock-of-a-target information. + * + * Assume an average of 2 xacts per target. 
*/ - info.keysize = sizeof(PREDICATELOCKTAG); - info.entrysize = sizeof(PREDICATELOCK); - info.hash = predicatelock_hash; - info.num_partitions = NUM_PREDICATELOCK_PARTITIONS; - - /* Assume an average of 2 xacts per target */ max_predicate_locks = max_predicate_lock_targets * 2; - PredicateLockHash = ShmemInitHash("PREDICATELOCK hash", - max_predicate_locks, - &info, - HASH_ELEM | HASH_FUNCTION | - HASH_PARTITION | HASH_FIXED_SIZE); + ShmemRequestHash(.name = "PREDICATELOCK hash", + .nelems = max_predicate_locks, + .ptr = &PredicateLockHash, + .hash_info.keysize = sizeof(PREDICATELOCKTAG), + .hash_info.entrysize = sizeof(PREDICATELOCK), + .hash_info.hash = predicatelock_hash, + .hash_info.num_partitions = NUM_PREDICATELOCK_PARTITIONS, + .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION | HASH_FIXED_SIZE, + ); /* * Compute size for serializable transaction hashtable. Note these @@ -1177,29 +1166,27 @@ PredicateLockShmemInit(void) max_serializable_xacts = (MaxBackends + max_prepared_xacts) * 10; /* - * Allocate a list to hold information on transactions participating in + * Register a list to hold information on transactions participating in * predicate locking. */ - requestSize = add_size(PredXactListDataSize, - (mul_size((Size) max_serializable_xacts, - sizeof(SERIALIZABLEXACT)))); - PredXact = ShmemInitStruct("PredXactList", - requestSize, - &found); - Assert(found == IsUnderPostmaster); + ShmemRequestStruct(.name = "PredXactList", + .size = add_size(PredXactListDataSize, + (mul_size((Size) max_serializable_xacts, + sizeof(SERIALIZABLEXACT)))), + .ptr = (void **) &PredXact, + ); /* - * Allocate hash table for SERIALIZABLEXID structs. This stores per-xid + * Register hash table for SERIALIZABLEXID structs. This stores per-xid * information for serializable transactions which have accessed data. 
*/ - info.keysize = sizeof(SERIALIZABLEXIDTAG); - info.entrysize = sizeof(SERIALIZABLEXID); - - SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash", - max_serializable_xacts, - &info, - HASH_ELEM | HASH_BLOBS | - HASH_FIXED_SIZE); + ShmemRequestHash(.name = "SERIALIZABLEXID hash", + .nelems = max_serializable_xacts, + .ptr = &SerializableXidHash, + .hash_info.keysize = sizeof(SERIALIZABLEXIDTAG), + .hash_info.entrysize = sizeof(SERIALIZABLEXID), + .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_FIXED_SIZE, + ); /* * Allocate space for tracking rw-conflicts in lists attached to the @@ -1214,58 +1201,50 @@ PredicateLockShmemInit(void) */ max_rw_conflicts = max_serializable_xacts * 5; - requestSize = RWConflictPoolHeaderDataSize + - mul_size((Size) max_rw_conflicts, - RWConflictDataSize); + ShmemRequestStruct(.name = "RWConflictPool", + .size = RWConflictPoolHeaderDataSize + mul_size((Size) max_rw_conflicts, + RWConflictDataSize), + .ptr = (void **) &RWConflictPool, + ); - RWConflictPool = ShmemInitStruct("RWConflictPool", - requestSize, - &found); - Assert(found == IsUnderPostmaster); - - /* - * Create or attach to the header for the list of finished serializable - * transactions. - */ - FinishedSerializableTransactions = (dlist_head *) - ShmemInitStruct("FinishedSerializableTransactions", - sizeof(dlist_head), - &found); - Assert(found == IsUnderPostmaster); + ShmemRequestStruct(.name = "FinishedSerializableTransactions", + .size = sizeof(dlist_head), + .ptr = (void **) &FinishedSerializableTransactions, + ); /* * Initialize the SLRU storage for old committed serializable * transactions. 
*/ - SerialSlruCtl->PagePrecedes = SerialPagePrecedesLogically; - SerialSlruCtl->errdetail_for_io_error = serial_errdetail_for_io_error; - SimpleLruInit(SerialSlruCtl, "serializable", - serializable_buffers, 0, "pg_serial", - LWTRANCHE_SERIAL_BUFFER, LWTRANCHE_SERIAL_SLRU, - SYNC_HANDLER_NONE, false); + SimpleLruRequest(.desc = &SerialSlruDesc, + .name = "serializable", + .Dir = "pg_serial", + .long_segment_names = false, + + .nslots = serializable_buffers, + + .sync_handler = SYNC_HANDLER_NONE, + .PagePrecedes = SerialPagePrecedesLogically, + .errdetail_for_io_error = serial_errdetail_for_io_error, + + .buffer_tranche_id = LWTRANCHE_SERIAL_BUFFER, + .bank_tranche_id = LWTRANCHE_SERIAL_SLRU, + ); #ifdef USE_ASSERT_CHECKING SerialPagePrecedesLogicallyUnitTests(); #endif - SlruPagePrecedesUnitTests(SerialSlruCtl, SERIAL_ENTRIESPERPAGE); - /* - * Create or attach to the SerialControl structure. - */ - serialControl = (SerialControl) - ShmemInitStruct("SerialControlData", sizeof(SerialControlData), &found); - Assert(found == IsUnderPostmaster); + ShmemRequestStruct(.name = "SerialControlData", + .size = sizeof(SerialControlData), + .ptr = (void **) &serialControl, + ); +} - /* - * If we just attached to existing shared memory (EXEC_BACKEND), we're all - * done. Otherwise, during postmaster startup, proceed to initialize all - * the shared memory areas that we allocated. - */ - if (IsUnderPostmaster) - { - /* This never changes, so let's keep a local copy. 
*/ - OldCommittedSxact = PredXact->OldCommittedSxact; - return; - } +static void +PredicateLockShmemInit(void *arg) +{ + int64 max_rw_conflicts; + bool found; /* * Reserve a dummy entry in the hash table; we use it to make sure there's @@ -1277,7 +1256,6 @@ PredicateLockShmemInit(void) HASH_ENTER, &found); Assert(!found); - /* Initialize PredXact list */ dlist_init(&PredXact->availableList); dlist_init(&PredXact->activeList); PredXact->SxactGlobalXmin = InvalidTransactionId; @@ -1312,13 +1290,13 @@ PredicateLockShmemInit(void) PredXact->OldCommittedSxact->pid = 0; PredXact->OldCommittedSxact->pgprocno = INVALID_PROC_NUMBER; - /* This never changes, so let's keep a local copy. */ - OldCommittedSxact = PredXact->OldCommittedSxact; - /* Initialize the rw-conflict pool */ dlist_init(&RWConflictPool->availableList); RWConflictPool->element = (RWConflict) ((char *) RWConflictPool + RWConflictPoolHeaderDataSize); + + max_rw_conflicts = max_serializable_xacts * 5; + /* Add all elements to available list, clean. 
*/ for (int i = 0; i < max_rw_conflicts; i++) { @@ -1335,57 +1313,28 @@ PredicateLockShmemInit(void) serialControl->headXid = InvalidTransactionId; serialControl->tailXid = InvalidTransactionId; LWLockRelease(SerialControlLock); -} - -/* - * Estimate shared-memory space used for predicate lock table - */ -Size -PredicateLockShmemSize(void) -{ - Size size = 0; - int64 max_predicate_lock_targets; - int64 max_predicate_locks; - int64 max_serializable_xacts; - int64 max_rw_conflicts; - - /* predicate lock target hash table */ - max_predicate_lock_targets = NPREDICATELOCKTARGETENTS(); - size = add_size(size, hash_estimate_size(max_predicate_lock_targets, - sizeof(PREDICATELOCKTARGET))); - - /* predicate lock hash table */ - max_predicate_locks = max_predicate_lock_targets * 2; - size = add_size(size, hash_estimate_size(max_predicate_locks, - sizeof(PREDICATELOCK))); - /* transaction list */ - max_serializable_xacts = (MaxBackends + max_prepared_xacts) * 10; - size = add_size(size, PredXactListDataSize); - size = add_size(size, mul_size((Size) max_serializable_xacts, - sizeof(SERIALIZABLEXACT))); - - /* transaction xid table */ - size = add_size(size, hash_estimate_size(max_serializable_xacts, - sizeof(SERIALIZABLEXID))); + SlruPagePrecedesUnitTests(SerialSlruCtl, SERIAL_ENTRIESPERPAGE); - /* rw-conflict pool */ - max_rw_conflicts = max_serializable_xacts * 5; - size = add_size(size, RWConflictPoolHeaderDataSize); - size = add_size(size, mul_size((Size) max_rw_conflicts, - RWConflictDataSize)); + /* This never changes, so let's keep a local copy. */ + OldCommittedSxact = PredXact->OldCommittedSxact; - /* Head for list of finished serializable transactions. 
*/ - size = add_size(size, sizeof(dlist_head)); + /* Pre-calculate the hash and partition lock of the scratch entry */ + ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag); + ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash); +} - /* Shared memory structures for SLRU tracking of old committed xids. */ - size = add_size(size, sizeof(SerialControlData)); - size = add_size(size, SimpleLruShmemSize(serializable_buffers, 0)); +static void +PredicateLockShmemAttach(void *arg) +{ + /* This never changes, so let's keep a local copy. */ + OldCommittedSxact = PredXact->OldCommittedSxact; - return size; + /* Pre-calculate the hash and partition lock of the scratch entry */ + ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag); + ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash); } - /* * Compute the hash code associated with a PREDICATELOCKTAG. * diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c index 2190f388eae..f4dfe8697d7 100644 --- a/src/backend/utils/activity/pgstat_slru.c +++ b/src/backend/utils/activity/pgstat_slru.c @@ -119,6 +119,7 @@ pgstat_get_slru_index(const char *name) { int i; + Assert(name); for (i = 0; i < SLRU_NUM_ELEMENTS; i++) { if (strcmp(slru_names[i], name) == 0) diff --git a/src/include/access/clog.h b/src/include/access/clog.h index a1cfed5f43c..7894998c763 100644 --- a/src/include/access/clog.h +++ b/src/include/access/clog.h @@ -40,8 +40,6 @@ extern void TransactionIdSetTreeStatus(TransactionId xid, int nsubxids, TransactionId *subxids, XidStatus status, XLogRecPtr lsn); extern XidStatus TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn); -extern Size CLOGShmemSize(void); -extern void CLOGShmemInit(void); extern void BootStrapCLOG(void); extern void StartupCLOG(void); extern void TrimCLOG(void); diff --git a/src/include/access/commit_ts.h b/src/include/access/commit_ts.h index 49ee21cd5d2..825ccda90ed 
100644 --- a/src/include/access/commit_ts.h +++ b/src/include/access/commit_ts.h @@ -27,8 +27,6 @@ extern bool TransactionIdGetCommitTsData(TransactionId xid, extern TransactionId GetLatestCommitTsData(TimestampTz *ts, ReplOriginId *nodeid); -extern Size CommitTsShmemSize(void); -extern void CommitTsShmemInit(void); extern void BootStrapCommitTs(void); extern void StartupCommitTs(void); extern void CommitTsParameterChange(bool newvalue, bool oldvalue); diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index 2ae8b571dcc..6be5299ab68 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -121,8 +121,6 @@ extern void AtEOXact_MultiXact(void); extern void AtPrepare_MultiXact(void); extern void PostPrepare_MultiXact(FullTransactionId fxid); -extern Size MultiXactShmemSize(void); -extern void MultiXactShmemInit(void); extern void BootStrapMultiXact(void); extern void StartupMultiXact(void); extern void TrimMultiXact(void); diff --git a/src/include/access/slru.h b/src/include/access/slru.h index f966d0d9fe7..b4adb1789c7 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -16,6 +16,7 @@ #include "access/transam.h" #include "access/xlogdefs.h" #include "storage/lwlock.h" +#include "storage/shmem.h" #include "storage/sync.h" /* @@ -106,23 +107,32 @@ typedef struct SlruSharedData typedef SlruSharedData *SlruShared; +typedef struct SlruDesc SlruDesc; + /* - * SlruCtlData is an unshared structure that points to the active information - * in shared memory. + * Options for SimpleLruRequest() */ -typedef struct SlruCtlData +typedef struct SlruOpts { - SlruShared shared; + /* Options for allocating the underlying shmem area; do not touch directly */ + ShmemStructOpts base; - /* Number of banks in this SLRU. */ - uint16 nbanks; + /* + * name of SLRU. (This is user-visible, pick with care!) + */ + const char *name; /* - * If true, use long segment file names. Otherwise, use short file names. 
- * - * For details about the file name format, see SlruFileName(). + * Pointer to a backend-private handle for the SLRU. It is initialized + * when the SLRU is initialized or attached to. */ - bool long_segment_names; + SlruDesc *desc; + + /* number of page slots to use. */ + int nslots; + + /* number of LSN groups per page (set to zero if not relevant). */ + int nlsns; /* * Which sync handler function to use when handing sync requests over to @@ -130,6 +140,19 @@ typedef struct SlruCtlData */ SyncRequestHandler sync_handler; + /* + * PGDATA-relative subdirectory that will contain the files. + */ + const char *Dir; + + /* + * If true, use long segment file names. Otherwise, use short file names. + * + * For details about the file name format, see SlruFileName(). + */ + bool long_segment_names; + + /* * Decide whether a page is "older" for truncation and as a hint for * evicting pages in LRU order. Return true if every entry of the first @@ -153,63 +176,80 @@ typedef struct SlruCtlData int (*errdetail_for_io_error) (const void *opaque_data); /* - * Dir is set during SimpleLruInit and does not change thereafter. Since - * it's always the same, it doesn't need to be in shared memory. + * Tranche IDs to use for the SLRU's per-buffer and per-bank LWLocks. If + * these are left as zeros, new tranches will be assigned dynamically. */ - char Dir[64]; -} SlruCtlData; + int buffer_tranche_id; + int bank_tranche_id; +} SlruOpts; + +/* + * SlruDesc is an unshared structure that points to the active information + * in shared memory. + */ +typedef struct SlruDesc +{ + SlruOpts options; -typedef SlruCtlData *SlruCtl; + SlruShared shared; + + /* Number of banks in this SLRU. */ + uint16 nbanks; +} SlruDesc; /* - * Get the SLRU bank lock for given SlruCtl and the pageno. + * Get the SLRU bank lock for the given pageno. * * This lock needs to be acquired to access the slru buffer slots in the * respective bank. 
*/ static inline LWLock * -SimpleLruGetBankLock(SlruCtl ctl, int64 pageno) +SimpleLruGetBankLock(SlruDesc *ctl, int64 pageno) { int bankno; + Assert(ctl->nbanks != 0); bankno = pageno % ctl->nbanks; return &(ctl->shared->bank_locks[bankno].lock); } -extern Size SimpleLruShmemSize(int nslots, int nlsns); +extern void SimpleLruRequestWithOpts(const SlruOpts *options); + +#define SimpleLruRequest(...) \ + SimpleLruRequestWithOpts(&(SlruOpts){__VA_ARGS__}) + extern int SimpleLruAutotuneBuffers(int divisor, int max); -extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, - const char *subdir, int buffer_tranche_id, - int bank_tranche_id, SyncRequestHandler sync_handler, - bool long_segment_names); -extern int SimpleLruZeroPage(SlruCtl ctl, int64 pageno); -extern void SimpleLruZeroAndWritePage(SlruCtl ctl, int64 pageno); -extern int SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok, +extern int SimpleLruZeroPage(SlruDesc *ctl, int64 pageno); +extern void SimpleLruZeroAndWritePage(SlruDesc *ctl, int64 pageno); +extern int SimpleLruReadPage(SlruDesc *ctl, int64 pageno, bool write_ok, const void *opaque_data); -extern int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int64 pageno, +extern int SimpleLruReadPage_ReadOnly(SlruDesc *ctl, int64 pageno, const void *opaque_data); -extern void SimpleLruWritePage(SlruCtl ctl, int slotno); -extern void SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied); +extern void SimpleLruWritePage(SlruDesc *ctl, int slotno); +extern void SimpleLruWriteAll(SlruDesc *ctl, bool allow_redirtied); #ifdef USE_ASSERT_CHECKING -extern void SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page); +extern void SlruPagePrecedesUnitTests(SlruDesc *ctl, int per_page); #else #define SlruPagePrecedesUnitTests(ctl, per_page) do {} while (0) #endif -extern void SimpleLruTruncate(SlruCtl ctl, int64 cutoffPage); -extern bool SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int64 pageno); +extern void SimpleLruTruncate(SlruDesc *ctl, int64 
cutoffPage); +extern bool SimpleLruDoesPhysicalPageExist(SlruDesc *ctl, int64 pageno); -typedef bool (*SlruScanCallback) (SlruCtl ctl, char *filename, int64 segpage, +typedef bool (*SlruScanCallback) (SlruDesc *ctl, char *filename, int64 segpage, void *data); -extern bool SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data); -extern void SlruDeleteSegment(SlruCtl ctl, int64 segno); +extern bool SlruScanDirectory(SlruDesc *ctl, SlruScanCallback callback, void *data); +extern void SlruDeleteSegment(SlruDesc *ctl, int64 segno); -extern int SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path); +extern int SlruSyncFileTag(SlruDesc *ctl, const FileTag *ftag, char *path); /* SlruScanDirectory public callbacks */ -extern bool SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, +extern bool SlruScanDirCbReportPresence(SlruDesc *ctl, char *filename, int64 segpage, void *data); -extern bool SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int64 segpage, +extern bool SlruScanDirCbDeleteAll(SlruDesc *ctl, char *filename, int64 segpage, void *data); extern bool check_slru_buffers(const char *name, int *newval); +extern void shmem_slru_init(void *location, ShmemStructOpts *base_options); +extern void shmem_slru_attach(void *location, ShmemStructOpts *base_options); + #endif /* SLRU_H */ diff --git a/src/include/access/subtrans.h b/src/include/access/subtrans.h index 11b7355dbdf..d986cd9e802 100644 --- a/src/include/access/subtrans.h +++ b/src/include/access/subtrans.h @@ -15,8 +15,6 @@ extern void SubTransSetParent(TransactionId xid, TransactionId parent); extern TransactionId SubTransGetParent(TransactionId xid); extern TransactionId SubTransGetTopmostTransaction(TransactionId xid); -extern Size SUBTRANSShmemSize(void); -extern void SUBTRANSShmemInit(void); extern void BootStrapSUBTRANS(void); extern void StartupSUBTRANS(TransactionId oldestActiveXID); extern void CheckPointSUBTRANS(void); diff --git a/src/include/commands/async.h 
b/src/include/commands/async.h index 3baae7cb8dc..202e4aa5e74 100644 --- a/src/include/commands/async.h +++ b/src/include/commands/async.h @@ -19,9 +19,6 @@ extern PGDLLIMPORT bool Trace_notify; extern PGDLLIMPORT int max_notify_queue_pages; extern PGDLLIMPORT volatile sig_atomic_t notifyInterruptPending; -extern Size AsyncShmemSize(void); -extern void AsyncShmemInit(void); - extern void NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid); diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h index a5ac55b8f7e..443bffb58fd 100644 --- a/src/include/storage/predicate.h +++ b/src/include/storage/predicate.h @@ -41,11 +41,6 @@ typedef void *SerializableXactHandle; /* * function prototypes */ - -/* housekeeping for shared memory predicate lock structures */ -extern void PredicateLockShmemInit(void); -extern Size PredicateLockShmemSize(void); - extern void CheckPointPredicate(void); /* predicate lock reporting */ diff --git a/src/include/storage/shmem_internal.h b/src/include/storage/shmem_internal.h index 9064b86b9a1..8746b614fa3 100644 --- a/src/include/storage/shmem_internal.h +++ b/src/include/storage/shmem_internal.h @@ -21,6 +21,7 @@ typedef enum { SHMEM_KIND_STRUCT = 0, /* plain, contiguous area of memory */ SHMEM_KIND_HASH, /* a hash table */ + SHMEM_KIND_SLRU, /* SLRU buffers and control structures */ } ShmemRequestKind; /* shmem.c */ diff --git a/src/include/storage/subsystemlist.h b/src/include/storage/subsystemlist.h index d62c29f1361..c199f18a27a 100644 --- a/src/include/storage/subsystemlist.h +++ b/src/include/storage/subsystemlist.h @@ -32,6 +32,13 @@ PG_SHMEM_SUBSYSTEM(DSMRegistryShmemCallbacks) /* xlog, clog, and buffers */ PG_SHMEM_SUBSYSTEM(VarsupShmemCallbacks) +PG_SHMEM_SUBSYSTEM(CLOGShmemCallbacks) +PG_SHMEM_SUBSYSTEM(CommitTsShmemCallbacks) +PG_SHMEM_SUBSYSTEM(SUBTRANSShmemCallbacks) +PG_SHMEM_SUBSYSTEM(MultiXactShmemCallbacks) + +/* predicate lock manager */ 
+PG_SHMEM_SUBSYSTEM(PredicateLockShmemCallbacks) /* process table */ PG_SHMEM_SUBSYSTEM(ProcGlobalShmemCallbacks) @@ -43,3 +50,6 @@ PG_SHMEM_SUBSYSTEM(SharedInvalShmemCallbacks) /* interprocess signaling mechanisms */ PG_SHMEM_SUBSYSTEM(PMSignalShmemCallbacks) PG_SHMEM_SUBSYSTEM(ProcSignalShmemCallbacks) + +/* other modules that need some shared memory space */ +PG_SHMEM_SUBSYSTEM(AsyncShmemCallbacks) diff --git a/src/test/modules/test_slru/test_slru.c b/src/test/modules/test_slru/test_slru.c index e4bd2af0bf5..40efffdbf62 100644 --- a/src/test/modules/test_slru/test_slru.c +++ b/src/test/modules/test_slru/test_slru.c @@ -40,14 +40,22 @@ PG_FUNCTION_INFO_V1(test_slru_delete_all); /* Number of SLRU page slots */ #define NUM_TEST_BUFFERS 16 -static SlruCtlData TestSlruCtlData; -#define TestSlruCtl (&TestSlruCtlData) +static void test_slru_shmem_request(void *arg); +static bool test_slru_page_precedes_logically(int64 page1, int64 page2); +static int test_slru_errdetail_for_io_error(const void *opaque_data); -static shmem_request_hook_type prev_shmem_request_hook = NULL; -static shmem_startup_hook_type prev_shmem_startup_hook = NULL; +static const char *TestSlruDir = "pg_test_slru"; + +static SlruDesc TestSlruDesc; + +static const ShmemCallbacks test_slru_shmem_callbacks = { + .request_fn = test_slru_shmem_request +}; + +#define TestSlruCtl (&TestSlruDesc) static bool -test_slru_scan_cb(SlruCtl ctl, char *filename, int64 segpage, void *data) +test_slru_scan_cb(SlruDesc *ctl, char *filename, int64 segpage, void *data) { elog(NOTICE, "Calling test_slru_scan_cb()"); return SlruScanDirCbDeleteAll(ctl, filename, segpage, data); @@ -190,20 +198,6 @@ test_slru_delete_all(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* - * Module load callbacks and initialization. 
- */ - -static void -test_slru_shmem_request(void) -{ - if (prev_shmem_request_hook) - prev_shmem_request_hook(); - - /* reserve shared memory for the test SLRU */ - RequestAddinShmemSpace(SimpleLruShmemSize(NUM_TEST_BUFFERS, 0)); -} - static bool test_slru_page_precedes_logically(int64 page1, int64 page2) { @@ -218,60 +212,46 @@ test_slru_errdetail_for_io_error(const void *opaque_data) return errdetail("Could not access test_slru entry %u.", xid); } -static void -test_slru_shmem_startup(void) +void +_PG_init(void) { - /* - * Short segments names are well tested elsewhere so in this test we are - * focusing on long names. - */ - const bool long_segment_names = true; - const char slru_dir_name[] = "pg_test_slru"; - int test_tranche_id = -1; - int test_buffer_tranche_id = -1; - - if (prev_shmem_startup_hook) - prev_shmem_startup_hook(); + if (!process_shared_preload_libraries_in_progress) + ereport(ERROR, + (errmsg("cannot load \"%s\" after startup", "test_slru"), + errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".", + "test_slru"))); /* * Create the SLRU directory if it does not exist yet, from the root of * the data directory. */ - (void) MakePGDirectory(slru_dir_name); + (void) MakePGDirectory(TestSlruDir); - /* - * Initialize the SLRU facility. In EXEC_BACKEND builds, the - * shmem_startup_hook is called in the postmaster and in each backend, but - * we only need to generate the LWLock tranches once. Note that these - * tranche ID variables are not used by SimpleLruInit() when - * IsUnderPostmaster is true. 
- */ - if (!IsUnderPostmaster) - { - test_tranche_id = LWLockNewTrancheId("test_slru_tranche"); - test_buffer_tranche_id = LWLockNewTrancheId("test_buffer_tranche"); - } - - TestSlruCtl->PagePrecedes = test_slru_page_precedes_logically; - TestSlruCtl->errdetail_for_io_error = test_slru_errdetail_for_io_error; - SimpleLruInit(TestSlruCtl, "TestSLRU", - NUM_TEST_BUFFERS, 0, slru_dir_name, - test_buffer_tranche_id, test_tranche_id, SYNC_HANDLER_NONE, - long_segment_names); + RegisterShmemCallbacks(&test_slru_shmem_callbacks); } -void -_PG_init(void) +static void +test_slru_shmem_request(void *arg) { - if (!process_shared_preload_libraries_in_progress) - ereport(ERROR, - (errmsg("cannot load \"%s\" after startup", "test_slru"), - errdetail("\"%s\" must be loaded with \"shared_preload_libraries\".", - "test_slru"))); + SimpleLruRequest(.desc = &TestSlruDesc, + .name = "TestSLRU", + .Dir = TestSlruDir, + + /* + * Short segment names are well tested elsewhere so in this test we are + * focusing on long names. + */ + .long_segment_names = true, + + .nslots = NUM_TEST_BUFFERS, + .nlsns = 0, - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = test_slru_shmem_request; + .sync_handler = SYNC_HANDLER_NONE, + .PagePrecedes = test_slru_page_precedes_logically, + .errdetail_for_io_error = test_slru_errdetail_for_io_error, - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = test_slru_shmem_startup; + /* let slru.c assign these */ + .buffer_tranche_id = 0, + .bank_tranche_id = 0, + ); } diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 298d6ac1639..35acda59851 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2902,9 +2902,9 @@ SlotInvalidationCauseMap SlotNumber SlotSyncCtxStruct SlotSyncSkipReason -SlruCtl -SlruCtlData +SlruDesc SlruErrorCause +SlruOpts SlruPageStatus SlruScanCallback SlruSegState