} gbtree_vinfo;
/*
- * Free ptr1 in case its a copy of ptr2.
+ * Free ptr1 in case it's a copy of ptr2.
*
* This is adapted from varlena's PG_FREE_IF_COPY, though doesn't require
* fcinfo access.
char *relkind;
SubPlanRTInfo *next_rtinfo;
- /* Advance to next SubRTInfo, if it's time. */
+ /* Advance to next SubPlanRTInfo, if it's time. */
if (lc_subrtinfo != NULL)
{
next_rtinfo = lfirst(lc_subrtinfo);
/*
* We should not see RTE of this kind here since property
* graph RTE gets converted to subquery RTE in
- * RewriteGraphTable(). In case we decide not to do the
- * conversion and leave RTEkind unchanged in future, print
+ * rewriteGraphTable(). In case we decide not to do the
+ * conversion and leave RTE kind unchanged in future, print
* correct name of RTE kind.
*/
kind = "graph_table";
* Returns true if every target or sub-target is matched by at least one
* identifier, and otherwise false.
*
- * Also sets rids_used[i] = true for each idenifier that matches at least one
+ * Also sets rids_used[i] = true for each identifier that matches at least one
* target.
*/
static bool
} pgpa_advice_item;
/*
- * Result of comparing an array of pgpa_relation_identifier objects to a
+ * Result of comparing an array of pgpa_identifier objects to a
* pgpa_advice_target.
*
* PGPA_ITM_EQUAL means all targets are matched by some identifier, and
* RTE_JOIN entries are excluded because they cannot be mentioned by plan
* advice.
*
- * The caller is responsible for making sure that the tkeys array is large
+ * The caller is responsible for making sure that the "rids" array is large
* enough to store the results.
*
* The return value is the number of identifiers computed.
*
* pgpa_plan_walker creates a "top level" join unroller object when it
* encounters a join in a portion of the plan tree in which no join unroller
- * is already active. From there, this function is responsible for determing
+ * is already active. From there, this function is responsible for determining
* to what portion of the plan tree that join unroller applies, and for
* creating any subordinate join unroller objects that are needed as a result
* of non-outer-deep join trees. We do this by returning the join unroller
/*
* Each piece of JOIN_ORDER() advice fully describes the join order for a
- * a single unrolled join. Merging is not permitted, because that would
+ * single unrolled join. Merging is not permitted, because that would
* change the meaning, e.g. SEQ_SCAN(a b c d) means simply that sequential
* scans should be used for all of those relations, and is thus equivalent
* to SEQ_SCAN(a b) SEQ_SCAN(c d), but JOIN_ORDER(a b c d) means that "a"
/*
* Currently, PGS_CONSIDER_INDEXONLY can suppress Bitmap Heap
* Scans, so don't clear it when such a scan is requested. This
- * happens because build_index_scan() thinks that the possibility
- * of an index-only scan is a sufficient reason to consider using
- * an otherwise-useless index, and get_index_paths() thinks that
- * the same paths that are useful for index or index-only scans
- * should also be considered for bitmap scans. Perhaps that logic
- * should be tightened up, but until then we need to include
- * PGS_CONSIDER_INDEXONLY in my_scan_type here.
+ * happens because build_index_scankeys() thinks that the
+ * possibility of an index-only scan is a sufficient reason to
+ * consider using an otherwise-useless index, and
+ * get_index_paths() thinks that the same paths that are useful
+ * for index or index-only scans should also be considered for
+ * bitmap scans. Perhaps that logic should be tightened up, but
+ * until then we need to include PGS_CONSIDER_INDEXONLY in
+ * my_scan_type here.
*/
my_scan_type = PGS_BITMAPSCAN | PGS_CONSIDER_INDEXONLY;
}
/*
* It's not guaranteed that every plan name we saw during planning has
- * a SubPlanInfo, but any that do not certainly don't appear in the
+ * a SubPlanRTInfo, but any that do not certainly don't appear in the
* final range table.
*/
foreach_node(SubPlanRTInfo, rtinfo, pstmt->subrtinfos)
/*
* Get and check query ID.
*
- * queryID 0 means no query ID was computed, so reject that.
+ * Query ID 0 means no query ID was computed, so reject that.
*/
queryId = PG_GETARG_INT64(1);
if (queryId == 0)
/*
* Background worker entry point for pg_stash_advice persistence.
*
- * On startup, if load_from_disk_pending is set, we load previously saved
+ * On startup, if stashes_ready is set, we load previously saved
* stash data from disk. Then we enter a loop, periodically checking whether
* any changes have been made (via the change_count atomic counter) and
* writing them to disk. On shutdown, we perform a final write.
#define ST_DECLARE
#include "lib/sort_template.h"
-/* Sort an array of trigrams, handling signedess correctly */
+/* Sort an array of trigrams, handling signedness correctly */
static void
trigram_qsort(trgm *array, size_t n)
{
remote_relname,
column_list.data);
- /* If any attribute statsare missing, fallback to sampling. */
+	/* If any attribute stats are missing, fall back to sampling. */
if (!match_attrmap(attstats,
local_schemaname, local_relname,
remote_schemaname, remote_relname,
<literal>pg_stash_advice.stash_name</literal> for their session, and this
may reveal the contents of any advice stash with that name. Users should
assume that information embedded in stashed advice strings may become visible
- to nonprivileged users.
+ to non-privileged users.
</para>
<sect2 id="pgstashadvice-functions">
<para>
This form removes vertex or edge tables from the property graph. (Only
the association of the tables with the graph is removed. The tables
- themself are not dropped.)
+ themselves are not dropped.)
</para>
</listitem>
</varlistentry>
This form replaces the expression of a generated column. Existing data
      in a stored generated column is rewritten and all future changes
will apply the new generation expression.
- Replacing the expression of a virtual generated column do not require a
+ Replacing the expression of a virtual generated column does not require a
table rewrite, but if the column is used in a constraint, the table will
be scanned to check that existing rows meet the constraint.
</para>
<para>
The <command>CLUSTER</command> command is equivalent to
- <xref linkend="sql-repack"/> with an <literal>USING INDEX</literal>
+ <xref linkend="sql-repack"/> with a <literal>USING INDEX</literal>
clause. See there for more details.
</para>
</refsect1>
</para>
<para>
-The functions are pg_buffercache_mark_dirty(), pg_buffercache_mark_dirt_relation(), and pg_buffercache_mark_dirty_all().
+The functions are pg_buffercache_mark_dirty(), pg_buffercache_mark_dirty_relation(), and pg_buffercache_mark_dirty_all().
</para>
</listitem>
* nocache_index_getattr
*
* This gets called from index_getattr() macro, and only in cases
- * where we can't use cacheoffset and the value is not null.
+ * where we can't use attcacheoff and the value is not null.
* ----------------
*/
Datum
area = dsa_attach(area_handle);
- /* Find the shared the shared radix tree */
+ /* Find the shared radix tree */
ts->tree.shared = shared_ts_attach(area, handle);
ts->area = area;
}
/*
- * Simple wrapper around SetHintBitExt(), use when operating on a single
+ * Simple wrapper around SetHintBitsExt(), use when operating on a single
* tuple.
*/
static inline void
}
/*
- * Perform HeaptupleSatisfiesMVCC() on each passed in tuple. This is more
+ * Perform HeapTupleSatisfiesMVCC() on each passed in tuple. This is more
* efficient than doing HeapTupleSatisfiesMVCC() one-by-one.
*
 * Tuples to be checked are passed via BatchMVCCState->tuples. Each tuple's
ginxlogSplit *xlrec = (ginxlogSplit *) rec;
appendStringInfo(buf, "isrootsplit: %c",
- (((ginxlogSplit *) rec)->flags & GIN_SPLIT_ROOT) ? 'T' : 'F');
+ (xlrec->flags & GIN_SPLIT_ROOT) ? 'T' : 'F');
appendStringInfo(buf, " isdata: %c isleaf: %c",
(xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F',
(xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F');
/* frozen and bootstrap xids are always committed far in the past */
*ts = 0;
if (nodeid)
- *nodeid = 0;
+ *nodeid = InvalidReplOriginId;
return false;
}
static void SetOldestOffset(void);
static bool find_multixact_start(MultiXactId multi, MultiXactOffset *result);
static void WriteMTruncateXlogRec(Oid oldestMultiDB,
- MultiXactId endTruncOff,
- MultiXactOffset endTruncMemb);
+ MultiXactId oldestMulti,
+ MultiXactOffset oldestOffset);
/*
static bool updateMinRecoveryPoint = true;
/*
- * Local state for Controlfile data_checksum_version. After initialization
+ * Local state for ControlFile data_checksum_version. After initialization
* this is only updated when absorbing a procsignal barrier during interrupt
* processing. The reason for keeping a copy in backend-private memory is to
* avoid locking for interrogating the data checksum state. Possible values
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be. We'll fix it when XLOGShmemRequest is called.
*/
if (XLOGbuffers == -1)
return true;
* for an EXPLAIN extension option, the caller is entitled to assume that
* a suitably constructed DefElem passed to the main option handler will
* not cause an error. To construct this DefElem, the caller should set
- * the DefElem's defname to option_name. If option_values is NULL, arg
+ * the DefElem's defname to option_name. If option_value is NULL, arg
* should be NULL. Otherwise, arg should be of the type given by
* option_type, with option_value as the associated value. The only option
* types that should be passed are T_String, T_Float, and T_Integer; in
/* The relation the changes are applied to. */
Relation cc_rel;
- /* Needed to update indexes of rel_dst. */
+ /* Needed to update indexes of cc_rel. */
ResultRelInfo *cc_rri;
EState *cc_estate;
/*
* DSM detach callback. This is invoked when an autovacuum worker detaches
* from the DSM segment holding PVShared. It ensures to reset the local pointer
- * to the shared state even if paralell vacuum raises an error and doesn't
+ * to the shared state even if parallel vacuum raises an error and doesn't
* call parallel_vacuum_end().
*/
static void
/*
* Initialize the heap tuple pointer to access attributes of the minimal
- * tuple contained in the slot as if its a heap tuple.
+ * tuple contained in the slot as if it's a heap tuple.
*/
mslot->tuple = &mslot->minhdr;
}
* Precalculate the maximum guaranteed attribute that has to exist in
* every tuple which gets deformed into this slot. When the
* TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS flag is enabled, we simply take
- * the precalculated value from the tupleDesc, otherwise the
+ * the pre-calculated value from the tupleDesc, otherwise the
* optimization is disabled, and we set the value to 0.
*/
if ((flags & TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS) != 0)
/*
* If we don't have a ForPortionOfState yet, we must be a partition
* child being hit for the first time. Make a copy from the root, with
- * our own tupleTableSlot. We do this lazily so that we don't pay the
+ * our own TupleTableSlot. We do this lazily so that we don't pay the
* price of unused partitions.
*/
ForPortionOfState *leafState = makeNode(ForPortionOfState);
* starting from the first, reading from most significant to least significant
* bits.
*
- * Example (when considering fist 10 bits of x):
+ * Example (when considering first 10 bits of x):
*
* rho(x = 0b1000000000) returns 1
* rho(x = 0b0010000000) returns 3
*
* The reason for not doing everything in this if-else conditional is that
* we want to use the same processing of postgresql.conf for when ssl_sni
- * is off as well as when it's on but the hostsfile is missing etc. Thus
+ * is off as well as when it's on but the hosts file is missing etc. Thus
* we set res to the state and continue with a new conditional instead of
* duplicating logic and risk it diverging over time.
*/
{
/*
* The GUC check hook should have already blocked this but to be on
- * the safe side we doublecheck here.
+ * the safe side we double-check here.
*/
#ifndef HAVE_SSL_CTX_SET_CLIENT_HELLO_CB
ereport(isServerStart ? FATAL : LOG,
Oid inputcollid; /* OID of the OpClause input collation */
int argindex; /* index of the clause in the list of
* arguments */
- int groupindex; /* value of argindex for the fist clause in
+ int groupindex; /* value of argindex for the first clause in
* the group of similar clauses */
} OrArgIndexMatch;
OnConflictClause *onConflictClause);
static ForPortionOfExpr *transformForPortionOfClause(ParseState *pstate,
int rtindex,
- const ForPortionOfClause *forPortionOfClause,
+ const ForPortionOfClause *forPortionOf,
bool isUpdate);
static int count_rowexpr_columns(ParseState *pstate, Node *expr);
static Query *transformSelectStmt(ParseState *pstate, SelectStmt *stmt,
* A label expression is parsed as either a ColumnRef with a single field or a
* label expression like label disjunction. The single field in the ColumnRef is
* treated as a label name and transformed to a GraphLabelRef node. The label
- * expression is recursively transformed into an expression tree containg
+ * expression is recursively transformed into an expression tree containing
* GraphLabelRef nodes corresponding to the names of the labels appearing in the
* expression. If any label name cannot be resolved to a label in the property
* graph, an error is raised.
*
* One exception to the previous paragraph is for tables nearing wraparound,
* i.e., those that have surpassed the effective failsafe ages. In that case,
- * the relfrozen/relminmxid-based score is scaled aggressively so that the
+ * the relfrozenxid/relminmxid-based score is scaled aggressively so that the
* table has a decent chance of sorting to the front of the list.
*
* To adjust how strongly each component contributes to the score, the
* state will also be set to "off".
*
* Backends transition Bd -> Bi via a procsignalbarrier which is emitted by the
- * DataChecksumsLauncher. When all backends have acknowledged the barrier then
- * Bd will be empty and the next phase can begin: calculating and writing data
- * checksums with DataChecksumsWorkers. When the DataChecksumsWorker processes
- * have finished writing checksums on all pages, data checksums are enabled
- * cluster-wide via another procsignalbarrier. There are four sets of backends
- * where Bd shall be an empty set:
+ * DataChecksumsWorkerLauncherMain. When all backends have acknowledged the
+ * barrier then Bd will be empty and the next phase can begin: calculating and
+ * writing data checksums with DataChecksumsWorkers. When the
+ * DataChecksumsWorker processes have finished writing checksums on all pages,
+ * data checksums are enabled cluster-wide via another procsignalbarrier.
+ * There are four sets of backends where Bd shall be an empty set:
*
* Bg: Backend updating the global state and emitting the procsignalbarrier
* Bd: Backends in "off" state
relns = get_namespace_name(RelationGetNamespace(reln));
- /* Report the current relation to pgstat_activity */
+ /* Report the current relation to pg_stat_activity */
snprintf(activity, sizeof(activity) - 1, "processing: %s.%s (%s, %u blocks)",
(relns ? relns : ""), RelationGetRelationName(reln), forkNames[forkNum], numblocks);
pgstat_report_activity(STATE_RUNNING, activity);
* re-write the page to WAL even if the checksum hasn't changed,
* because if there is a replica it might have a slightly different
* version of the page with an invalid checksum, caused by unlogged
- * changes (e.g. hintbits) on the primary happening while checksums
+ * changes (e.g. hint bits) on the primary happening while checksums
* were off. This can happen if there was a valid checksum on the page
* at one point in the past, so only when checksums are first on, then
* off, and then turned on again. TODO: investigate if this could be
}
/*
- * DataChecksumShmemRequest
+ * DataChecksumsShmemRequest
* Request datachecksumsworker-related shared memory
*/
static void
/*
* Clear the per-transaction replication origin state.
*
- * replorigin_session_origin is also cleared if clear_origin is set.
+ * replorigin_xact_state.origin is also cleared if clear_origin is set.
*/
void
replorigin_xact_clear(bool clear_origin)
/*
 * Retention has been stopped, so double the interval, capped at a
* maximum of 3 minutes. The wal_receiver_status_interval is
- * intentionally not used as a upper bound, since the likelihood of
+ * intentionally not used as an upper bound, since the likelihood of
* retention resuming is lower than that of general activity resuming.
*/
rdt_data->xid_advance_interval = Min(rdt_data->xid_advance_interval * 2,
static Node *replace_property_refs(Oid propgraphid, Node *node, const List *mappings);
static List *build_edge_vertex_link_quals(HeapTuple edgetup, int edgerti, int refrti, Oid refid, AttrNumber catalog_key_attnum, AttrNumber catalog_ref_attnum, AttrNumber catalog_eqop_attnum);
-static List *generate_queries_for_path_pattern(RangeTblEntry *rte, List *element_patterns);
+static List *generate_queries_for_path_pattern(RangeTblEntry *rte, List *path_pattern);
static Query *generate_query_for_graph_path(RangeTblEntry *rte, List *path);
static Node *generate_setop_from_pathqueries(List *pathqueries, List **rtable, List **targetlist);
-static List *generate_queries_for_path_pattern_recurse(RangeTblEntry *rte, List *pathqueries, List *cur_path, List *path_pattern_lists, int elempos);
+static List *generate_queries_for_path_pattern_recurse(RangeTblEntry *rte, List *pathqueries, List *cur_path, List *path_elem_lists, int elempos);
static Query *generate_query_for_empty_path_pattern(RangeTblEntry *rte);
static Query *generate_union_from_pathqueries(List **pathqueries);
static List *get_path_elements_for_path_factor(Oid propgraphid, struct path_factor *pf);
/*
* Either of these statistic types requires that we supply a semi-filled
- * VacAttrStatP array.
+ * VacAttrStatsP array.
*
* It is not possible to use the existing lookup_var_attr_stats() and
* examine_attribute() because these functions will skip attributes where
/*
* The leading stxkeys are attribute numbers up through numattnums.
- * These keys must be in ascending AttNumber order, but we do not rely
- * on that.
+ * These keys must be in ascending AttrNumber order, but we do not
+ * rely on that.
*/
for (int i = 0; i < numattnums; i++)
{
/*
* Generate the expressions array.
*
- * The attytypids, attytypmods, and atttypcolls arrays have all the
+ * The atttypids, atttypmods, and atttypcolls arrays have all the
* regular attributes listed first, so we can pass those arrays with a
* start point after the last regular attribute. There are numexprs
* elements remaining.
* still return a legit tuple datum.
*
* Set pg_statistic_ok to true if all of the values found in the container
- * were imported without issue. pg_statistic_ok is swicthed to "true" once
+ * were imported without issue. pg_statistic_ok is switched to "true" once
* the full pg_statistic tuple has been built and validated.
*/
static Datum
* if they aren't then we need to reject that stakind completely.
* Currently we go a step further and reject the expression array
* completely.
- *
- * Once it is established that the pairs are in NULL/NOT-NULL alignment,
- * we can test either expr_nulls[] value to see if the stakind has
- * value(s) that we can set or not.
*/
if (found[MOST_COMMON_VALS_ELEM])
* it. Crossing the current worker count is a useful signal
* because it's clearly too deep to avoid queuing latency already,
* but still leaves a small window of opportunity to improve the
- * situation before the queue oveflows.
+ * situation before the queue overflows.
*
* 2. The worker pool is keeping up, no latency is being
* introduced and an extra worker would be a waste of resources.
*
* On its own, this is an extremely crude signal. When combined
* with the wakeup propagation test that precedes it (but on its
- * own tends to overshoot) and io_worker_launch_delay, the result
- * is that we gradually test each pool size until we find one that
- * doesn't trigger further expansion, and then hold it for at
- * least io_worker_idle_timeout.
+ * own tends to overshoot) and io_worker_launch_interval, the
+ * result is that we gradually test each pool size until we find
+ * one that doesn't trigger further expansion, and then hold it
+ * for at least io_worker_idle_timeout.
*
* XXX Perhaps ideas from queueing theory or control theory could
* do a better job of this.
* Used to clean up after errors.
*
* Currently, we can expect that resource owner cleanup, via
- * ResOwnerReleaseBufferPin(), took care of releasing buffer content locks per
+ * ResOwnerReleaseBuffer(), took care of releasing buffer content locks per
* se; the only thing we need to deal with here is clearing any PIN_COUNT
* request that was in progress.
*/
pgstat_report_wait_end();
- /* Retrying, allow BufferLockRelease to release waiters again. */
+ /* Retrying, allow BufferLockReleaseSub to release waiters again. */
pg_atomic_fetch_and_u64(&buf_hdr->state, ~BM_LOCK_WAKE_IN_PROGRESS);
}
{
/*
* Call RegisterShmemCallbacks(...) on each subsystem listed in
- * subsystemslist.h
+ * subsystemlist.h
*/
#define PG_SHMEM_SUBSYSTEM(subsystem_callbacks) \
RegisterShmemCallbacks(&(subsystem_callbacks));
int64 max_table_size;
/*
- * Compute sizes for lock hashtables. Note that these calculations must
- * agree with LockManagerShmemSize!
+ * Compute sizes for lock hashtables.
*/
max_table_size = NLOCKENTS();
);
/*
- * Compute size for serializable transaction hashtable. Note these
- * calculations must agree with PredicateLockShmemSize!
+ * Compute size for serializable transaction hashtable.
*
* Assume an average of 10 predicate locking transactions per backend.
* This allows aggressive cleanup while detail is present before data must
* all their subnet bits *must* be zero (1.2.3.0/24).
*
* IPv4 and IPv6 are identical in this makeup, with the difference being that
- * IPv4 addresses have a maximum of 32 bits compared to IPv6's 64 bits, so in
+ * IPv4 addresses have a maximum of 32 bits compared to IPv6's 128 bits, so in
* IPv6 each part may be larger.
*
* inet/cidr types compare using these sorting rules. If inequality is detected
/*
* Try to write it as FROM ... TO ... if we received it that way,
- * otherwise (targetExpr).
+ * otherwise (targetRange).
*/
if (forPortionOf->targetFrom && forPortionOf->targetTo)
{
* GUC check_hook for log_min_messages
*
* This value is parsed as a comma-separated list of zero or more TYPE:LEVEL
- * elements. For each element, TYPE corresponds to a bk_category value (see
+ * elements. For each element, TYPE corresponds to a bkcategory value (see
* postmaster/proctypelist.h); LEVEL is one of server_message_level_options.
*
* In addition, there must be a single LEVEL element (with no TYPE part)
'WAL summary has correct VM fork truncation limit');
# Combine full and incremental backups. Before the fix, this failed because
-# the INCREMENTAL file header contained an incorrect truncation_block value.
+# the INCREMENTAL file header contained an incorrect truncation_block_length
+# value.
my $restored = PostgreSQL::Test::Cluster->new('node2');
$restored->init_from_backup($primary, 'incr', combine_with_prior => ['full']);
$restored->start();
{
/*
* There is no ordering column in pg_stats_ext_exprs. However, we
- * can rely on the unnesting of pg_statistic.ext_data.stxdexpr to
+ * can rely on the unnesting of pg_statistic_ext_data.stxdexpr to
* maintain the desired order of expression elements.
*/
appendPQExpBufferStr(pq,
* We don't have a connection yet but that doesn't matter. The connection
* is initialized to NULL and if we terminate through exit_nicely() while
* it's still NULL, the cleanup function will just be a no-op. If we are
- * restoring multiple databases, then only update AX handle for cleanup as
+ * restoring multiple databases, then only update AH handle for cleanup as
* the previous entry was already in the array and we had closed previous
* connection, so we can use the same array slot.
*/
break;
case 'l':
- if (strspn(optarg, "01234567890ABCDEFabcdef") != XLOG_FNAME_LEN)
+ if (strspn(optarg, "0123456789ABCDEFabcdef") != XLOG_FNAME_LEN)
{
pg_log_error("invalid argument for option %s", "-l");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
conn_template1 = connectToServer(cluster, "template1");
/*
- * Get database, user/role and tablespacenames from cluster. Can't use
+ * Get database, user/role and tablespace names from cluster. Can't use
* pg_authid because only superusers can view it.
*/
res = executeQueryOrDie(conn_template1,
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
pg_fatal("%d: controldata retrieval problem", __LINE__);
- p = strpbrk(p, "01234567890ABCDEF");
+ p = strpbrk(p, "0123456789ABCDEF");
if (p == NULL || strlen(p) <= 1)
pg_fatal("%d: controldata retrieval problem", __LINE__);
* this pair specifically. Byte pair range constraints, in encoding
* originator documentation, always excluded this pair. No core conversion
* could translate it. However, longstanding verifychar implementations
- * accepted any non-NUL byte. big5_to_euc_tw and big5_to_mic even translate
- * pairs not valid per encoding originator documentation. To avoid tightening
- * core or non-core conversions in a security patch, we sought this one pair.
+ * accepted any non-NUL byte. big5_to_euc_tw even translates pairs not
+ * valid per encoding originator documentation. To avoid tightening core
+ * or non-core conversions in a security patch, we sought this one pair.
*
* PQescapeString() historically used spaces for BYTE1; many other values
* could suffice for BYTE1.
* crosscheck - if not InvalidSnapshot, also check old tuple against this
* options - These allow the caller to specify options that may change the
* behavior of the AM. The AM will ignore options that it does not support.
- * TABLE_UPDATE_WAIT -- set if should wait for any conflicting update to
- * commit/abort
* TABLE_UPDATE_NO_LOGICAL -- force-disables the emitting of logical
* decoding information for the tuple.
*
#include "storage/procsignal.h"
-/* Possible operations the Datachecksumsworker can perform */
+/* Possible operations the DataChecksumsWorker can perform */
typedef enum DataChecksumsWorkerOperation
{
ENABLE_DATACHECKSUMS,
/*
* parseInput subroutine to read a BackendKeyData message.
- * Entry: 'v' message type and length have already been consumed.
+ * Entry: 'K' message type and length have already been consumed.
* Exit: returns 0 if successfully consumed message.
* returns EOF if not enough data.
*/
# Ensure that the primary and standby has switched to off
wait_for_checksum_state($node_primary, 'off');
wait_for_checksum_state($node_standby, 'off');
-# Doublecheck reading data without errors
+# Double-check reading data without errors
$result =
$node_primary->safe_psql('postgres', "SELECT count(a) FROM t WHERE a > 1");
is($result, "19998", 'ensure we can safely read all data without checksums');
# Test cluster setup
#
-# Initiate testcluster
+# Initiate test cluster
my $node = PostgreSQL::Test::Cluster->new('injection_node');
$node->init(no_data_checksums => 1);
$node->start;
PG_MODULE_MAGIC;
extern PGDLLEXPORT void dc_delay_barrier(const char *name, const void *private_data, void *arg);
-extern PGDLLEXPORT void dc_modify_db_result(const char *name, const void *private_data, void *arg);
-extern PGDLLEXPORT void dc_fake_temptable(const char *name, const void *private_data, void *arg);
/*
* Test for delaying emission of procsignalbarriers.
# Start node
$node->start;
-# Create an user to test permissions to read extension locations.
+# Create a user to test permissions to read extension locations.
my $user = "user01";
$node->safe_psql('postgres', "CREATE USER $user");
$node->safe_psql('postgres',
qq{SELECT pg_logical_emit_message(true, 'test 026', repeat('xyzxz', 123456))}
);
-#$node->safe_psql('postgres', qq{create table foo ()});
+
my $endfile = $node->safe_psql('postgres',
'SELECT pg_walfile_name(pg_current_wal_insert_lsn())');
ok($initfile ne $endfile, "$initfile differs from $endfile");
DROP TABLE T;
DROP FUNCTION foolme(timestamptz);
--- Simple querie
+-- Simple queries
CREATE TABLE T (pk INT NOT NULL PRIMARY KEY);
SELECT set('t');
set
DROP TABLE T;
DROP FUNCTION foolme(timestamptz);
--- Simple querie
+-- Simple queries
CREATE TABLE T (pk INT NOT NULL PRIMARY KEY);
SELECT set('t');
}
if (ps.com_ind <= 1)
- ps.com_ind = 2; /* dont put normal comments before column 2 */
+ ps.com_ind = 2; /* don't put normal comments before column 2 */
if (block_comment_max_col <= 0)
block_comment_max_col = max_col;
if (ps.local_decl_indent < 0) /* if not specified by user, set this */
if (verbose)
diag2(0, "Line broken");
dump_line();
- ps.want_blank = false; /* dont insert blank at line start */
+ ps.want_blank = false; /* don't insert blank at line start */
force_nl = false;
}
ps.in_stmt = true; /* turn on flag which causes an extra level of
force_nl = true;/* must force newline after if */
ps.last_u_d = true; /* inform lexi that a following
* operator is unary */
- ps.in_stmt = false; /* dont use stmt continuation
+ ps.in_stmt = false; /* don't use stmt continuation
* indentation */
parse(hd_type); /* let parser worry about if, or whatever */
if (sp_sw) { /* this is a check for an if, while, etc. with
* unbalanced parens */
sp_sw = false;
- parse(hd_type); /* dont lose the if, or whatever */
+ parse(hd_type); /* don't lose the if, or whatever */
}
}
*e_code++ = ';';
break;
case lbrace: /* got a '{' */
- ps.in_stmt = false; /* dont indent the {} */
+ ps.in_stmt = false; /* don't indent the {} */
if (!ps.block_init)
force_nl = true;/* force other stuff on same line as '{' onto
* new line */
}
}
if (s_code == e_code)
- ps.ind_stmt = false; /* dont put extra indentation on line
+ ps.ind_stmt = false; /* don't put extra indentation on line
* with '{' */
if (ps.in_decl && ps.in_or_st) { /* this is either a structure
* declaration or an init */
case period: /* treat a period kind of like a binary
* operation */
*e_code++ = '.'; /* move the period into line */
- ps.want_blank = false; /* dont put a blank after a period */
+ ps.want_blank = false; /* don't put a blank after a period */
break;
case comma:
putc('\n', output);
n_real_blanklines = 0;
if (ps.ind_level == 0)
- ps.ind_stmt = 0; /* this is a class A kludge. dont do
+ ps.ind_stmt = 0; /* this is a class A kludge. don't do
* additional statement indentation if we are
* at bracket level 0 */
if ( /* ps.bl_line && */ (s_lab == e_lab) && (s_code == e_code)) {
/* klg: check only if this line is blank */
/*
- * If this (*and previous lines are*) blank, dont put comment way
+ * If this (*and previous lines are*) blank, don't put comment way
* out at left
*/
ps.com_col = (ps.ind_level - ps.unindent_displace) * ps.ind_size + 1;
last_bl = NULL;
CHECK_SIZE_COM(4);
if (ps.box_com || ps.last_nl) { /* if this is a boxed comment,
- * we dont ignore the newline */
+ * we don't ignore the newline */
if (s_com == e_com)
*e_com++ = ' ';
if (!ps.box_com && e_com - s_com > 3) {