NODE *tmp;
int32 pos = 0;
-#ifdef BS_DEBUG
- char pbuf[16384],
- *cur;
-#endif
-
/* init state */
state.buf = buf;
state.state = WAITOPERAND;
</para>
<para>
This option is effective only when
- <literal>retain_conflict_info</literal> is enabled and the apply
+ <literal>retain_dead_tuples</literal> is enabled and the apply
worker associated with the subscription is active.
</para>
<warning>
uint16 *result_infomask2);
static TM_Result heap_lock_updated_tuple(Relation rel,
uint16 prior_infomask,
- TransactionId prior_rawxmax,
+ TransactionId prior_raw_xmax,
const ItemPointerData *prior_ctid,
TransactionId xid,
LockTupleMode mode);
* unlogged and maintained heuristically, it often becomes stale on
* standbys. If such a standby is later promoted and runs VACUUM, it will
* skip recalculating free space for pages that were marked
- * all-visible/all-forzen. FreeSpaceMapVacuum() can then propagate overly
+ * all-visible/all-frozen. FreeSpaceMapVacuum() can then propagate overly
* optimistic free space values upward, causing future insertions to
* select pages that turn out to be unusable. In bulk, this can lead to
* long stalls.
appendStringInfo(buf, " prevTail: %u",
xlrec->prevTail);
if (xlrec->newRightlink != InvalidBlockNumber)
- appendStringInfo(buf, " newRightLink: %u",
+ appendStringInfo(buf, " newRightlink: %u",
xlrec->newRightlink);
}
break;
* retreat in the calculated xmin, necessitating additional handling.
*
* XXX To address the above race conditions, we can define
- * oldest_nonremovable_xid as FullTransactionID and adds the check to
+ * oldest_nonremovable_xid as FullTransactionId and add the check to
* disallow retreating the conflict slot's xmin. For now, we kept the
* implementation simple by disallowing change to the retain_dead_tuples,
* but in the future we can change this after some more analysis.
JUMBLE_FIELD(paramkind);
JUMBLE_FIELD(paramid);
JUMBLE_FIELD(paramtype);
- /* paramtypmode and paramcollid are ignored */
+ /* paramtypmod and paramcollid are ignored */
if (expr->paramkind == PARAM_EXTERN)
{
continue;
/*
- * Remove rrel ReloptInfo from the planner structures and the
+ * Remove rrel RelOptInfo from the planner structures and the
* corresponding row mark.
*/
remove_self_join_rel(root, kmark, rmark, krel, rrel, restrictlist);
* However, there are several cases when this optimization is not safe. If
* the rel isn't partitioned, then none of the paths will be Append or
* MergeAppend paths, so we should definitely not do this. If it is
- * parititoned but is a joinrel, it may have Append and MergeAppend paths,
- * but it can also have join paths that we can't afford to discard.
+ * partitioned but is a joinrel, it may have Append and MergeAppend
+ * paths, but it can also have join paths that we can't afford to discard.
*
* Some care is needed, because we have to allow
* generate_useful_gather_paths to see the old partial paths in the next
Assert(idxForm->indisready);
/*
- * Set up inferElems and inferPredExprs to match the
+ * Set up inferElems and inferIndexExprs to match the
* constraint index, so that we can match them in the loop
* below.
*/
* second_name: name of the second partition
* second_bound: bound of the second partition
* defaultPart: true if one of the new partitions is DEFAULT
- * is_merge: true ndicates the operation is MERGE PARTITIONS;
+ * is_merge: true indicates the operation is MERGE PARTITIONS;
* false indicates the operation is SPLIT PARTITION.
* pstate: pointer to ParseState struct for determining error position
*/
slot = MyReplicationSlot;
/*
- * Update the slot sync related stats in pg_stat_replication_slot when a
+ * Update the slot sync related stats in pg_stat_replication_slots when a
* slot sync is skipped
*/
if (skip_reason != SS_SKIP_NONE)
backend = INVALID_PROC_NUMBER;
}
- /* theoretically we should lock the bufhdr here */
+ /* theoretically we should lock the bufHdr here */
buf_state = pg_atomic_read_u32(&buf->state);
result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
* two-phase commit support
* AtPrepare_PredicateLocks(void);
* PostPrepare_PredicateLocks(TransactionId xid);
- * PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
+ * PredicateLockTwoPhaseFinish(FullTransactionId fxid, bool isCommit);
* predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
* void *recdata, uint32 len);
*/
ctype = TextDatumGetCString(datum);
/*
- * Historcally, we set LC_COLLATE from datcollate, as well. That's no
+ * Historically, we set LC_COLLATE from datcollate, as well. That's no
* longer necessary because all collation behavior is handled through
* pg_locale_t.
*/
short_desc => 'Set this to force all parse and plan trees to be passed through outfuncs.c/readfuncs.c, to facilitate catching errors and omissions in those modules.',
flags => 'GUC_NOT_IN_SAMPLE',
variable => 'Debug_write_read_parse_plan_trees',
- boot_val => 'DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES',
+ boot_val => 'DEFAULT_DEBUG_WRITE_READ_PARSE_PLAN_TREES',
ifdef => 'DEBUG_NODE_TESTS_ENABLED',
},
# Reset a cluster's next multixid and mxoffset to given values.
#
-# Note: This is used on the old insallation, so the command arguments
+# Note: This is used on the old installation, so the command arguments
# and the output parsing used here must work with all pre-v19
# PostgreSQL versions supported by the test.
sub reset_mxid_mxoffset_pre_v19
* is only valid if we froze some tuples (nfrozen > 0), and all_frozen is
* true.
*
- * These are only set if the HEAP_PRUNE_FREEZE option is set.
+ * These are only set if the HEAP_PAGE_PRUNE_FREEZE option is set.
*/
bool all_visible;
bool all_frozen;
*/
#if !HAVE_DECL_FDATASYNC
-extern int fdatasync(int fildes);
+extern int fdatasync(int fd);
#endif
/*
extern void SequenceChangePersistence(Oid relid, char newrelpersistence);
extern void DeleteSequenceTuple(Oid relid);
extern void ResetSequence(Oid seq_relid);
-extern void SetSequence(Oid relid, int64 next, bool is_called);
+extern void SetSequence(Oid relid, int64 next, bool iscalled);
extern void ResetSequenceCaches(void);
#endif /* SEQUENCE_H */
n_lo = i128->lo;
}
- /* denomimator: absolute value of v */
+ /* denominator: absolute value of v */
d = abs(v);
/* quotient and remainder of high 64 bits */
extern LLVMTypeRef llvm_pg_var_type(const char *varname);
extern LLVMTypeRef llvm_pg_var_func_type(const char *varname);
extern LLVMValueRef llvm_pg_func(LLVMModuleRef mod, const char *funcname);
-extern void llvm_copy_attributes(LLVMValueRef from, LLVMValueRef to);
+extern void llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to);
extern LLVMValueRef llvm_function_reference(LLVMJitContext *context,
LLVMBuilderRef builder,
LLVMModuleRef mod,
/* list of AggClauseInfos */
List *agg_clause_list;
- /* list of GroupExprInfos */
+ /* list of GroupingExprInfos */
List *group_expr_list;
/* list of plain Vars contained in targetlist and havingQual */
int num_gene, City * city_table);
/* order crossover [OX1] according to Davis */
-extern void ox1(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
+extern void ox1(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
int num_gene, City * city_table);
/* order crossover [OX2] according to Syswerda */
-extern void ox2(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
+extern void ox2(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
int num_gene, City * city_table);
#endif /* GEQO_RECOMBINATION_H */
#endif
#if !HAVE_DECL_TIMINGSAFE_BCMP
-extern int timingsafe_bcmp(const void *b1, const void *b2, size_t len);
+extern int timingsafe_bcmp(const void *b1, const void *b2, size_t n);
#endif
/*
*/
#undef gai_strerror
-extern const char *gai_strerror(int ecode);
+extern const char *gai_strerror(int errcode);
#endif /* WIN32_SYS_SOCKET_H */
extern int pgwin32_bind(SOCKET s, struct sockaddr *addr, int addrlen);
extern int pgwin32_listen(SOCKET s, int backlog);
extern SOCKET pgwin32_accept(SOCKET s, struct sockaddr *addr, int *addrlen);
-extern int pgwin32_connect(SOCKET s, const struct sockaddr *name, int namelen);
+extern int pgwin32_connect(SOCKET s, const struct sockaddr *addr, int addrlen);
extern int pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval *timeout);
extern int pgwin32_recv(SOCKET s, char *buf, int len, int flags);
extern int pgwin32_send(SOCKET s, const void *buf, int len, int flags);
#endif
/* in port/win32pread.c */
-extern ssize_t pg_pread(int fd, void *buf, size_t nbyte, pgoff_t offset);
+extern ssize_t pg_pread(int fd, void *buf, size_t size, pgoff_t offset);
/* in port/win32pwrite.c */
-extern ssize_t pg_pwrite(int fd, const void *buf, size_t nbyte, pgoff_t offset);
+extern ssize_t pg_pwrite(int fd, const void *buf, size_t size, pgoff_t offset);
#endif /* PG_WIN32_PORT_H */
extern Size LogicalDecodingCtlShmemSize(void);
extern void LogicalDecodingCtlShmemInit(void);
-extern void StartupLogicalDecodingStatus(bool status_in_control_file);
+extern void StartupLogicalDecodingStatus(bool last_status);
extern void InitializeProcessXLogLogicalInfo(void);
extern bool ProcessBarrierUpdateXLogLogicalInfo(void);
extern bool IsLogicalDecodingEnabled(void);
extern bool IsXLogLogicalInfoEnabled(void);
-extern bool CheckXLogLogicalInfo(void);
extern void AtEOXact_LogicalCtl(void);
extern void EnsureLogicalDecodingEnabled(void);
extern void EnableLogicalDecoding(void);
Oid relid, TimestampTz *last_start_time);
extern void ProcessSyncingRelations(XLogRecPtr current_lsn);
extern void FetchRelationStates(bool *has_pending_subtables,
- bool *has_pending_sequences, bool *started_tx);
+ bool *has_pending_subsequences, bool *started_tx);
extern void stream_start_internal(TransactionId xid, bool first_segment);
extern void stream_stop_internal(TransactionId xid);
int argnum1, int argnum2);
extern void RangeVarCallbackForStats(const RangeVar *relation,
- Oid relId, Oid oldRelid, void *arg);
+ Oid relId, Oid oldRelId, void *arg);
extern bool stats_fill_fcinfo_from_arg_pairs(FunctionCallInfo pairs_fcinfo,
FunctionCallInfo positional_fcinfo,
/* two-phase commit support */
extern void AtPrepare_PredicateLocks(void);
extern void PostPrepare_PredicateLocks(FullTransactionId fxid);
-extern void PredicateLockTwoPhaseFinish(FullTransactionId xid, bool isCommit);
+extern void PredicateLockTwoPhaseFinish(FullTransactionId fxid, bool isCommit);
extern void predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len);
#define DEFAULT_DEBUG_COPY_PARSE_PLAN_TREES false
#endif
-#ifdef READ_WRITE_PARSE_PLAN_TREES
-#define DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES true
+#ifdef WRITE_READ_PARSE_PLAN_TREES
+#define DEFAULT_DEBUG_WRITE_READ_PARSE_PLAN_TREES true
#else
-#define DEFAULT_DEBUG_READ_WRITE_PARSE_PLAN_TREES false
+#define DEFAULT_DEBUG_WRITE_READ_PARSE_PLAN_TREES false
#endif
#ifdef RAW_EXPRESSION_COVERAGE_TEST
my $count = $node->safe_psql('postgres',
"select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)"
);
-is($count, '1', "initial brin_wi_index index state is correct");
+is($count, '1', "initial brin_wi_idx index state is correct");
$count = $node->safe_psql('postgres',
"select count(*) from brin_page_items(get_raw_page('brin_packdate_idx', 2), 'brin_packdate_idx'::regclass)"
);
# Our test relies on two rounds of index vacuuming for reasons elaborated
# later. To trigger two rounds of index vacuuming, we must fill up the
-# TIDStore with dead items partway through a vacuum of the table. The number
+# TidStore with dead items partway through a vacuum of the table. The number
# of rows is just enough to ensure we exceed maintenance_work_mem on all
# supported platforms, while keeping test runtime as short as we can.
my $nrows = 2000;
# Move the cursor forward to the next 7. We inserted the 7 much later, so
# advancing the cursor should allow vacuum to proceed vacuuming most pages of
-# the relation. Because we set maintanence_work_mem sufficiently low, we
+# the relation. Because we set maintenance_work_mem sufficiently low, we
# expect that a round of index vacuuming has happened and that the vacuum is
# now waiting for the cursor to release its pin on the last page of the
# relation.
}
# NUMA introspection requires touching memory first, and some of it may
-# be marked as noacess (e.g. unpinned buffers). So just ignore that.
+# be marked as noaccess (e.g. unpinned buffers). So just ignore that.
{
pg_numa_touch_mem_if_required
Memcheck:Addr4