</entry>
</row>
+ <row>
+ <entry><structname>pg_stat_lock</structname><indexterm><primary>pg_stat_lock</primary></indexterm></entry>
+ <entry>
+ One row for each lock type, containing cluster-wide lock statistics.
+ See <link linkend="monitoring-pg-stat-lock-view">
+ <structname>pg_stat_lock</structname></link> for details.
+ </entry>
+ </row>
+
<row>
<entry><structname>pg_stat_replication_slots</structname><indexterm><primary>pg_stat_replication_slots</primary></indexterm></entry>
<entry>One row per replication slot, showing statistics about the
</sect2>
+
+ <sect2 id="monitoring-pg-stat-lock-view">
+ <title><structname>pg_stat_lock</structname></title>
+
+ <indexterm>
+ <primary>pg_stat_lock</primary>
+ </indexterm>
+
+ <para>
+ The <structname>pg_stat_lock</structname> view will contain one row for each
+ lock type, showing cluster-wide lock statistics.
+ </para>
+
+ <table id="pg-stat-lock-view" xreflabel="pg_stat_lock">
+ <title><structname>pg_stat_lock</structname> View</title>
+ <tgroup cols="1">
+ <thead>
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ Column Type
+ </para>
+ <para>
+ Description
+ </para>
+ </entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ <structfield>locktype</structfield> <type>text</type>
+ </para>
+ <para>
+ Type of the lockable object. See <link linkend="view-pg-locks">
+ <structname>pg_locks</structname></link> for details.
+ </para>
+ </entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ <structfield>waits</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times a lock of this type had to wait because of a
+ conflicting lock. Only incremented when the lock was successfully
+ acquired after waiting longer than <xref linkend="guc-deadlock-timeout"/>.
+ </para>
+ </entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ <structfield>wait_time</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Total time spent waiting for locks of this type, in milliseconds.
+ Only incremented when the lock was successfully acquired after waiting
+ longer than <xref linkend="guc-deadlock-timeout"/>.
+ </para>
+ </entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ <structfield>fastpath_exceeded</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times a lock of this type could not be acquired via fast path
+ because the fast path slot limit was exceeded. Increasing
+ <xref linkend="guc-max-locks-per-transaction"/> can reduce this number.
+ </para>
+ </entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry">
+ <para role="column_definition">
+ <structfield>stats_reset</structfield> <type>timestamp with time zone</type>
+ </para>
+ <para>
+ Time at which these statistics were last reset.
+ </para>
+ </entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </sect2>
+
<sect2 id="monitoring-pg-stat-bgwriter-view">
<title><structname>pg_stat_bgwriter</structname></title>
<structname>pg_stat_io</structname> view.
</para>
</listitem>
+ <listitem>
+ <para>
+ <literal>lock</literal>: Reset all the counters shown in the
+ <structname>pg_stat_lock</structname> view.
+ </para>
+ </listitem>
<listitem>
<para>
<literal>recovery_prefetch</literal>: Reset all the counters shown in
s.stats_reset
FROM pg_stat_get_slru() s;
+-- Cluster-wide lock statistics, one row per lock type.
+CREATE VIEW pg_stat_lock AS
+ SELECT
+ l.locktype,
+ l.waits,
+ l.wait_time,
+ l.fastpath_exceeded,
+ l.stats_reset
+ FROM pg_stat_get_lock() l;
+
CREATE VIEW pg_stat_wal_receiver AS
SELECT
s.pid,
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
+#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
return LOCKACQUIRE_OK;
}
}
+ else
+ {
+ /*
+ * Increment the lock statistics counter if lock could not be
+ * acquired via the fast-path.
+ */
+ pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
+ }
}
/*
}
/*
- * If awoken after the deadlock check interrupt has run, and
- * log_lock_waits is on, then report about the wait.
+ * If awoken after the deadlock check interrupt has run, increment the
+ * lock statistics counters and if log_lock_waits is on, then report
+ * about the wait.
*/
if (deadlock_state != DS_NOT_YET_CHECKED)
{
msecs = secs * 1000 + usecs / 1000;
usecs = usecs % 1000;
+ /* Increment the lock statistics counters if done waiting. */
+ if (myWaitStatus == PROC_WAIT_STATUS_OK)
+ pgstat_count_lock_waits(locallock->tag.lock.locktag_type, msecs);
+
if (log_lock_waits)
{
StringInfoData buf,
pgstat_database.o \
pgstat_function.o \
pgstat_io.o \
+ pgstat_lock.o \
pgstat_relation.o \
pgstat_replslot.o \
pgstat_shmem.o \
'pgstat_database.c',
'pgstat_function.c',
'pgstat_io.c',
+ 'pgstat_lock.c',
'pgstat_relation.c',
'pgstat_replslot.c',
'pgstat_shmem.c',
* - pgstat_database.c
* - pgstat_function.c
* - pgstat_io.c
+ * - pgstat_lock.c
* - pgstat_relation.c
* - pgstat_replslot.c
* - pgstat_slru.c
.snapshot_cb = pgstat_io_snapshot_cb,
},
+ [PGSTAT_KIND_LOCK] = {
+ .name = "lock",
+
+ .fixed_amount = true,
+ .write_to_file = true,
+
+ .snapshot_ctl_off = offsetof(PgStat_Snapshot, lock),
+ .shared_ctl_off = offsetof(PgStat_ShmemControl, lock),
+ .shared_data_off = offsetof(PgStatShared_Lock, stats),
+ .shared_data_len = sizeof(((PgStatShared_Lock *) 0)->stats),
+
+ .flush_static_cb = pgstat_lock_flush_cb,
+ .init_shmem_cb = pgstat_lock_init_shmem_cb,
+ .reset_all_cb = pgstat_lock_reset_all_cb,
+ .snapshot_cb = pgstat_lock_snapshot_cb,
+ },
+
[PGSTAT_KIND_SLRU] = {
.name = "slru",
--- /dev/null
+/* -------------------------------------------------------------------------
+ *
+ * pgstat_lock.c
+ * Implementation of lock statistics.
+ *
+ * This file contains the implementation of lock statistics. It is kept
+ * separate from pgstat.c to enforce the line between the statistics
+ * access / storage implementation and the details about individual types
+ * of statistics.
+ *
+ * Copyright (c) 2021-2026, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/backend/utils/activity/pgstat_lock.c
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "utils/pgstat_internal.h"
+
+/* Backend-local lock statistics pending a flush to shared memory */
+static PgStat_PendingLock PendingLockStats;
+/* True if PendingLockStats contains any unflushed counts */
+static bool have_lockstats = false;
+
+/*
+ * Return a pointer to this backend's snapshot of the cluster-wide lock
+ * statistics, after refreshing the fixed-numbered stats snapshot via
+ * pgstat_snapshot_fixed().
+ */
+PgStat_Lock *
+pgstat_fetch_stat_lock(void)
+{
+	pgstat_snapshot_fixed(PGSTAT_KIND_LOCK);
+
+	return &pgStatLocal.snapshot.lock;
+}
+
+/*
+ * Simple wrapper around pgstat_lock_flush_cb(), discarding its result.
+ */
+void
+pgstat_lock_flush(bool nowait)
+{
+	(void) pgstat_lock_flush_cb(nowait);
+}
+
+/*
+ * Flush out locally pending lock statistics
+ *
+ * If no stats have been recorded, this function returns false.
+ *
+ * If nowait is true and some of the per-lock-type locks could not be
+ * acquired, the corresponding pending entries are kept for a later flush
+ * attempt and true is returned.  Otherwise, return false.
+ */
+bool
+pgstat_lock_flush_cb(bool nowait)
+{
+	bool		lock_not_acquired = false;
+
+	if (!have_lockstats)
+		return false;
+
+	for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++)
+	{
+		LWLock	   *lcktype_lock = &pgStatLocal.shmem->lock.locks[i];
+		PgStat_LockEntry *lck_shstats = &pgStatLocal.shmem->lock.stats.stats[i];
+		PgStat_LockEntry *pending = &PendingLockStats.stats[i];
+
+		/* Skip lock types with nothing to flush. */
+		if (pending->waits == 0 && pending->wait_time == 0 &&
+			pending->fastpath_exceeded == 0)
+			continue;
+
+		if (!nowait)
+			LWLockAcquire(lcktype_lock, LW_EXCLUSIVE);
+		else if (!LWLockConditionalAcquire(lcktype_lock, LW_EXCLUSIVE))
+		{
+			/*
+			 * Keep this entry's pending data so it is not lost; it will be
+			 * flushed on a later attempt.
+			 */
+			lock_not_acquired = true;
+			continue;
+		}
+
+		lck_shstats->waits += pending->waits;
+		lck_shstats->wait_time += pending->wait_time;
+		lck_shstats->fastpath_exceeded += pending->fastpath_exceeded;
+
+		LWLockRelease(lcktype_lock);
+
+		/* Successfully flushed; clear only this entry's pending data. */
+		memset(pending, 0, sizeof(*pending));
+	}
+
+	/* Remember whether any entry still has unflushed data. */
+	have_lockstats = lock_not_acquired;
+
+	return lock_not_acquired;
+}
+
+
+/*
+ * Initialize the shared-memory area for lock statistics: one LWLock per
+ * lock type, each protecting the stats entry with the same index.
+ */
+void
+pgstat_lock_init_shmem_cb(void *stats)
+{
+	PgStatShared_Lock *stat_shmem = (PgStatShared_Lock *) stats;
+
+	for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++)
+		LWLockInitialize(&stat_shmem->locks[i], LWTRANCHE_PGSTATS_DATA);
+}
+
+/*
+ * Reset the shared counters for all lock types, and record the reset
+ * timestamp.  Each entry is zeroed under its own per-lock-type lock.
+ */
+void
+pgstat_lock_reset_all_cb(TimestampTz ts)
+{
+	for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++)
+	{
+		LWLock	   *lcktype_lock = &pgStatLocal.shmem->lock.locks[i];
+		PgStat_LockEntry *lck_shstats = &pgStatLocal.shmem->lock.stats.stats[i];
+
+		LWLockAcquire(lcktype_lock, LW_EXCLUSIVE);
+
+		/*
+		 * Use the lock in the first lock type PgStat_LockEntry to protect the
+		 * reset timestamp as well.
+		 */
+		if (i == 0)
+			pgStatLocal.shmem->lock.stats.stat_reset_timestamp = ts;
+
+		memset(lck_shstats, 0, sizeof(*lck_shstats));
+		LWLockRelease(lcktype_lock);
+	}
+}
+
+/*
+ * Copy the shared lock statistics into this backend's snapshot, one entry
+ * at a time under the corresponding per-lock-type lock.
+ */
+void
+pgstat_lock_snapshot_cb(void)
+{
+	for (int i = 0; i <= LOCKTAG_LAST_TYPE; i++)
+	{
+		LWLock	   *lcktype_lock = &pgStatLocal.shmem->lock.locks[i];
+		PgStat_LockEntry *lck_shstats = &pgStatLocal.shmem->lock.stats.stats[i];
+		PgStat_LockEntry *lck_snap = &pgStatLocal.snapshot.lock.stats[i];
+
+		LWLockAcquire(lcktype_lock, LW_SHARED);
+
+		/*
+		 * Use the lock in the first lock type PgStat_LockEntry to protect the
+		 * reset timestamp as well.
+		 */
+		if (i == 0)
+			pgStatLocal.snapshot.lock.stat_reset_timestamp =
+				pgStatLocal.shmem->lock.stats.stat_reset_timestamp;
+
+		/* using struct assignment due to better type safety */
+		*lck_snap = *lck_shstats;
+		LWLockRelease(lcktype_lock);
+	}
+}
+
+/*
+ * Increment, for the given lock type, the counter of locks that could not
+ * be acquired via the fast path because the fast-path slot limit was
+ * reached.  Counts accumulate locally in PendingLockStats until flushed
+ * by pgstat_lock_flush_cb().
+ *
+ * Note: This function should not be called in performance-sensitive paths,
+ * like lock acquisitions.
+ */
+void
+pgstat_count_lock_fastpath_exceeded(uint8 locktag_type)
+{
+	Assert(locktag_type <= LOCKTAG_LAST_TYPE);
+	PendingLockStats.stats[locktag_type].fastpath_exceeded++;
+	have_lockstats = true;
+	pgstat_report_fixed = true;
+}
+
+/*
+ * Increment the number of waits and the cumulative wait time (msecs, in
+ * milliseconds) for the given lock type.  Counts accumulate locally in
+ * PendingLockStats until flushed by pgstat_lock_flush_cb().
+ *
+ * Note: This function should not be called in performance-sensitive paths,
+ * like lock acquisitions.
+ */
+void
+pgstat_count_lock_waits(uint8 locktag_type, long msecs)
+{
+	Assert(locktag_type <= LOCKTAG_LAST_TYPE);
+	PendingLockStats.stats[locktag_type].waits++;
+	PendingLockStats.stats[locktag_type].wait_time += (PgStat_Counter) msecs;
+	have_lockstats = true;
+	pgstat_report_fixed = true;
+}
wal_stats->stat_reset_timestamp));
}
+/*
+ * Return cluster-wide lock statistics, one row per lock type, for the
+ * pg_stat_lock system view.
+ */
+Datum
+pg_stat_get_lock(PG_FUNCTION_ARGS)
+{
+#define PG_STAT_LOCK_COLS 5
+	ReturnSetInfo *rsinfo;
+	PgStat_Lock *lock_stats;
+
+	InitMaterializedSRF(fcinfo, 0);
+	rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+
+	lock_stats = pgstat_fetch_stat_lock();
+
+	for (int lcktype = 0; lcktype <= LOCKTAG_LAST_TYPE; lcktype++)
+	{
+		const char *locktypename;
+		Datum		values[PG_STAT_LOCK_COLS] = {0};
+		bool		nulls[PG_STAT_LOCK_COLS] = {0};
+		PgStat_LockEntry *lck_stats = &lock_stats->stats[lcktype];
+		int			i = 0;
+
+		locktypename = LockTagTypeNames[lcktype];
+
+		values[i++] = CStringGetTextDatum(locktypename);
+		values[i++] = Int64GetDatum(lck_stats->waits);
+		values[i++] = Int64GetDatum(lck_stats->wait_time);
+		values[i++] = Int64GetDatum(lck_stats->fastpath_exceeded);
+		/* stats_reset is a single timestamp, shared by every row */
+		values[i] = TimestampTzGetDatum(lock_stats->stat_reset_timestamp);
+
+		Assert(i + 1 == PG_STAT_LOCK_COLS);
+
+		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+	}
+
+	return (Datum) 0;
+}
+
/*
* Returns statistics of SLRU caches.
*/
pgstat_reset_of_kind(PGSTAT_KIND_BGWRITER);
pgstat_reset_of_kind(PGSTAT_KIND_CHECKPOINTER);
pgstat_reset_of_kind(PGSTAT_KIND_IO);
+ pgstat_reset_of_kind(PGSTAT_KIND_LOCK);
XLogPrefetchResetStats();
pgstat_reset_of_kind(PGSTAT_KIND_SLRU);
pgstat_reset_of_kind(PGSTAT_KIND_WAL);
pgstat_reset_of_kind(PGSTAT_KIND_CHECKPOINTER);
else if (strcmp(target, "io") == 0)
pgstat_reset_of_kind(PGSTAT_KIND_IO);
+ else if (strcmp(target, "lock") == 0)
+ pgstat_reset_of_kind(PGSTAT_KIND_LOCK);
else if (strcmp(target, "recovery_prefetch") == 0)
XLogPrefetchResetStats();
else if (strcmp(target, "slru") == 0)
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 202603201
+#define CATALOG_VERSION_NO 202603241
#endif
proargnames => '{backend_type,object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}',
prosrc => 'pg_stat_get_io' },
+{ oid => '9375', descr => 'statistics: per lock type statistics',
+ proname => 'pg_stat_get_lock', prorows => '10', proretset => 't',
+ provolatile => 'v', proparallel => 'r', prorettype => 'record',
+ proargtypes => '', proallargtypes => '{text,int8,int8,int8,timestamptz}',
+ proargmodes => '{o,o,o,o,o}',
+ proargnames => '{locktype,waits,wait_time,fastpath_exceeded,stats_reset}',
+ prosrc => 'pg_stat_get_lock' },
+
{ oid => '6386', descr => 'statistics: backend IO statistics',
proname => 'pg_stat_get_backend_io', prorows => '5', proretset => 't',
provolatile => 'v', proparallel => 'r', prorettype => 'record',
#include "portability/instr_time.h"
#include "postmaster/pgarch.h" /* for MAX_XFN_CHARS */
#include "replication/conflict.h"
+#include "storage/locktag.h"
#include "utils/backend_progress.h" /* for backward compatibility */ /* IWYU pragma: export */
#include "utils/backend_status.h" /* for backward compatibility */ /* IWYU pragma: export */
#include "utils/pgstat_kind.h"
* ------------------------------------------------------------
*/
-#define PGSTAT_FILE_FORMAT_ID 0x01A5BCBB
+#define PGSTAT_FILE_FORMAT_ID 0x01A5BCBC
typedef struct PgStat_ArchiverStats
{
PgStat_BktypeIO stats[BACKEND_NUM_TYPES];
} PgStat_IO;
+/* Accumulated statistics for one lock type */
+typedef struct PgStat_LockEntry
+{
+	PgStat_Counter waits;		/* number of waits longer than deadlock_timeout */
+	PgStat_Counter wait_time;	/* time in milliseconds */
+	PgStat_Counter fastpath_exceeded;	/* fast-path slot limit reached */
+} PgStat_LockEntry;
+
+/* Backend-local pending lock statistics, awaiting flush to shared memory */
+typedef struct PgStat_PendingLock
+{
+	PgStat_LockEntry stats[LOCKTAG_LAST_TYPE + 1];	/* indexed by lock type */
+} PgStat_PendingLock;
+
+/* Cluster-wide lock statistics, as exposed by the pg_stat_lock view */
+typedef struct PgStat_Lock
+{
+	TimestampTz stat_reset_timestamp;	/* time of last reset */
+	PgStat_LockEntry stats[LOCKTAG_LAST_TYPE + 1];	/* indexed by lock type */
+} PgStat_Lock;
+
typedef struct PgStat_StatDBEntry
{
PgStat_Counter xact_commit;
IOContext io_context, IOOp io_op);
+/*
+ * Functions in pgstat_lock.c
+ */
+
+extern void pgstat_lock_flush(bool nowait);
+extern void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type);
+extern void pgstat_count_lock_waits(uint8 locktag_type, long msecs);
+extern PgStat_Lock *pgstat_fetch_stat_lock(void);
+
/*
* Functions in pgstat_database.c
*/
PgStat_IO stats;
} PgStatShared_IO;
+/* Shared-memory state for the lock statistics (fixed-amount stats kind) */
+typedef struct PgStatShared_Lock
+{
+	/*
+	 * locks[i] protects stats.stats[i]. locks[0] also protects
+	 * stats.stat_reset_timestamp.
+	 */
+	LWLock locks[LOCKTAG_LAST_TYPE + 1];
+	PgStat_Lock stats;
+} PgStatShared_Lock;
+
typedef struct PgStatShared_SLRU
{
/* lock protects ->stats */
PgStatShared_BgWriter bgwriter;
PgStatShared_Checkpointer checkpointer;
PgStatShared_IO io;
+ PgStatShared_Lock lock;
PgStatShared_SLRU slru;
PgStatShared_Wal wal;
PgStat_IO io;
+ PgStat_Lock lock;
+
PgStat_SLRUStats slru[SLRU_NUM_ELEMENTS];
PgStat_WalStats wal;
extern void pgstat_io_reset_all_cb(TimestampTz ts);
extern void pgstat_io_snapshot_cb(void);
+/*
+ * Functions in pgstat_lock.c
+ */
+
+extern bool pgstat_lock_flush_cb(bool nowait);
+extern void pgstat_lock_init_shmem_cb(void *stats);
+extern void pgstat_lock_reset_all_cb(TimestampTz ts);
+extern void pgstat_lock_snapshot_cb(void);
/*
* Functions in pgstat_relation.c
#define PGSTAT_KIND_BGWRITER 8
#define PGSTAT_KIND_CHECKPOINTER 9
#define PGSTAT_KIND_IO 10
-#define PGSTAT_KIND_SLRU 11
-#define PGSTAT_KIND_WAL 12
+#define PGSTAT_KIND_LOCK 11
+#define PGSTAT_KIND_SLRU 12
+#define PGSTAT_KIND_WAL 13
#define PGSTAT_KIND_BUILTIN_MIN PGSTAT_KIND_DATABASE
#define PGSTAT_KIND_BUILTIN_MAX PGSTAT_KIND_WAL
--- /dev/null
+Parsed test spec with 2 sessions
+
+starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s2_set_deadlock_timeout s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock');
+pg_stat_reset_shared
+--------------------
+
+(1 row)
+
+step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s1_begin: BEGIN;
+step s1_lock_relation: LOCK TABLE test_stat_tab;
+step s2_begin: BEGIN;
+step s2_ff: SELECT pg_stat_force_next_flush();
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...>
+step s1_sleep: SELECT pg_sleep(0.05);
+pg_sleep
+--------
+
+(1 row)
+
+step s1_commit: COMMIT;
+step s2_lock_relation: <... completed>
+step s2_commit: COMMIT;
+step s2_report_stat_lock_relation:
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'relation';
+
+has_waits|has_wait_time
+---------+-------------
+t |t
+(1 row)
+
+
+starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s2_set_deadlock_timeout s2_set_log_lock_waits s1_table_insert s1_begin s1_table_update_k1 s2_begin s2_ff s2_table_update_k1 s1_sleep s1_commit s2_commit s2_report_stat_lock_transactionid
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock');
+pg_stat_reset_shared
+--------------------
+
+(1 row)
+
+step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s2_set_log_lock_waits: SET log_lock_waits = on;
+step s1_table_insert: INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1);
+step s1_begin: BEGIN;
+step s1_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';
+step s2_begin: BEGIN;
+step s2_ff: SELECT pg_stat_force_next_flush();
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s2_table_update_k1: UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1'; <waiting ...>
+step s1_sleep: SELECT pg_sleep(0.05);
+pg_sleep
+--------
+
+(1 row)
+
+step s1_commit: COMMIT;
+step s2_table_update_k1: <... completed>
+step s2_commit: COMMIT;
+step s2_report_stat_lock_transactionid:
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'transactionid';
+
+has_waits|has_wait_time
+---------+-------------
+t |t
+(1 row)
+
+
+starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s2_set_deadlock_timeout s2_set_log_lock_waits s1_lock_advisory_lock s2_begin s2_ff s2_lock_advisory_lock s1_sleep s1_lock_advisory_unlock s2_lock_advisory_unlock s2_commit s2_report_stat_lock_advisory
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock');
+pg_stat_reset_shared
+--------------------
+
+(1 row)
+
+step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s2_set_log_lock_waits: SET log_lock_waits = on;
+step s1_lock_advisory_lock: SELECT pg_advisory_lock(1);
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s2_begin: BEGIN;
+step s2_ff: SELECT pg_stat_force_next_flush();
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s2_lock_advisory_lock: SELECT pg_advisory_lock(1); <waiting ...>
+step s1_sleep: SELECT pg_sleep(0.05);
+pg_sleep
+--------
+
+(1 row)
+
+step s1_lock_advisory_unlock: SELECT pg_advisory_unlock(1);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s2_lock_advisory_lock: <... completed>
+pg_advisory_lock
+----------------
+
+(1 row)
+
+step s2_lock_advisory_unlock: SELECT pg_advisory_unlock(1);
+pg_advisory_unlock
+------------------
+t
+(1 row)
+
+step s2_commit: COMMIT;
+step s2_report_stat_lock_advisory:
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'advisory';
+
+has_waits|has_wait_time
+---------+-------------
+t |t
+(1 row)
+
+
+starting permutation: s1_set_deadlock_timeout s1_reset_stat_lock s2_set_deadlock_timeout s2_unset_log_lock_waits s1_begin s1_lock_relation s2_begin s2_ff s2_lock_relation s1_sleep s1_commit s2_commit s2_report_stat_lock_relation
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s1_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s1_reset_stat_lock: SELECT pg_stat_reset_shared('lock');
+pg_stat_reset_shared
+--------------------
+
+(1 row)
+
+step s2_set_deadlock_timeout: SET deadlock_timeout = '10ms';
+step s2_unset_log_lock_waits: SET log_lock_waits = off;
+step s1_begin: BEGIN;
+step s1_lock_relation: LOCK TABLE test_stat_tab;
+step s2_begin: BEGIN;
+step s2_ff: SELECT pg_stat_force_next_flush();
+pg_stat_force_next_flush
+------------------------
+
+(1 row)
+
+step s2_lock_relation: LOCK TABLE test_stat_tab; <waiting ...>
+step s1_sleep: SELECT pg_sleep(0.05);
+pg_sleep
+--------
+
+(1 row)
+
+step s1_commit: COMMIT;
+step s2_lock_relation: <... completed>
+step s2_commit: COMMIT;
+step s2_report_stat_lock_relation:
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'relation';
+
+has_waits|has_wait_time
+---------+-------------
+t |t
+(1 row)
+
test: inplace-inval
test: intra-grant-inplace
test: intra-grant-inplace-db
+test: lock-stats
test: lock-update-delete
test: lock-update-traversal
test: inherit-temp
--- /dev/null
+# Test for the lock statistics
+#
+# This test creates multiple locking situations when a session (s2) has to
+# wait on a lock for longer than deadlock_timeout. The first permutations
+# test various lock tags. The last permutation checks that log_lock_waits
+# has no impact on the statistics counters.
+
+setup
+{
+ CREATE TABLE test_stat_tab(key text not null, value int);
+ INSERT INTO test_stat_tab(key, value) VALUES('k0', 1);
+ SELECT pg_stat_force_next_flush();
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS test_stat_tab;
+}
+
+session s1
+setup { SET stats_fetch_consistency = 'none'; }
+step s1_begin { BEGIN; }
+step s1_commit { COMMIT; }
+step s1_table_insert { INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1);}
+step s1_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';}
+step s1_set_deadlock_timeout { SET deadlock_timeout = '10ms'; }
+step s1_reset_stat_lock { SELECT pg_stat_reset_shared('lock'); }
+step s1_sleep { SELECT pg_sleep(0.05); }
+step s1_lock_relation { LOCK TABLE test_stat_tab; }
+step s1_lock_advisory_lock { SELECT pg_advisory_lock(1); }
+step s1_lock_advisory_unlock { SELECT pg_advisory_unlock(1); }
+
+session s2
+setup { SET stats_fetch_consistency = 'none'; }
+step s2_begin { BEGIN; }
+step s2_commit { COMMIT; }
+step s2_ff { SELECT pg_stat_force_next_flush(); }
+step s2_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';}
+step s2_set_deadlock_timeout { SET deadlock_timeout = '10ms'; }
+step s2_set_log_lock_waits { SET log_lock_waits = on; }
+step s2_unset_log_lock_waits { SET log_lock_waits = off; }
+step s2_report_stat_lock_relation {
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'relation';
+}
+step s2_report_stat_lock_transactionid {
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'transactionid';
+}
+step s2_report_stat_lock_advisory {
+ SELECT waits > 0 AS has_waits, wait_time > 50 AS has_wait_time
+ FROM pg_stat_lock WHERE locktype = 'advisory';
+}
+step s2_lock_relation { LOCK TABLE test_stat_tab; }
+step s2_lock_advisory_lock { SELECT pg_advisory_lock(1); }
+step s2_lock_advisory_unlock { SELECT pg_advisory_unlock(1); }
+
+######################
+# Lock stats tests
+######################
+
+# relation lock
+
+permutation
+ s1_set_deadlock_timeout
+ s1_reset_stat_lock
+ s2_set_deadlock_timeout
+ s1_begin
+ s1_lock_relation
+ s2_begin
+ s2_ff
+ s2_lock_relation
+ s1_sleep
+ s1_commit
+ s2_commit
+ s2_report_stat_lock_relation
+
+# transaction lock
+
+permutation
+ s1_set_deadlock_timeout
+ s1_reset_stat_lock
+ s2_set_deadlock_timeout
+ s2_set_log_lock_waits
+ s1_table_insert
+ s1_begin
+ s1_table_update_k1
+ s2_begin
+ s2_ff
+ s2_table_update_k1
+ s1_sleep
+ s1_commit
+ s2_commit
+ s2_report_stat_lock_transactionid
+
+# advisory lock
+
+permutation
+ s1_set_deadlock_timeout
+ s1_reset_stat_lock
+ s2_set_deadlock_timeout
+ s2_set_log_lock_waits
+ s1_lock_advisory_lock
+ s2_begin
+ s2_ff
+ s2_lock_advisory_lock
+ s1_sleep
+ s1_lock_advisory_unlock
+ s2_lock_advisory_unlock
+ s2_commit
+ s2_report_stat_lock_advisory
+
+# Ensure log_lock_waits has no impact
+
+permutation
+ s1_set_deadlock_timeout
+ s1_reset_stat_lock
+ s2_set_deadlock_timeout
+ s2_unset_log_lock_waits
+ s1_begin
+ s1_lock_relation
+ s2_begin
+ s2_ff
+ s2_lock_relation
+ s1_sleep
+ s1_commit
+ s2_commit
+ s2_report_stat_lock_relation
fsync_time,
stats_reset
FROM pg_stat_get_io() b(backend_type, object, context, reads, read_bytes, read_time, writes, write_bytes, write_time, writebacks, writeback_time, extends, extend_bytes, extend_time, hits, evictions, reuses, fsyncs, fsync_time, stats_reset);
+pg_stat_lock| SELECT locktype,
+ waits,
+ wait_time,
+ fastpath_exceeded,
+ stats_reset
+ FROM pg_stat_get_lock() l(locktype, waits, wait_time, fastpath_exceeded, stats_reset);
pg_stat_progress_analyze| SELECT s.pid,
s.datid,
d.datname,
(1 row)
DROP TABLE table_fillfactor;
+-- Test fastpath_exceeded stat
+CREATE TABLE part_test (id int) PARTITION BY RANGE (id);
+SELECT pg_stat_reset_shared('lock');
+ pg_stat_reset_shared
+----------------------
+
+(1 row)
+
+-- Create partitions (exceeds number of slots)
+DO $$
+DECLARE
+ max_locks int;
+BEGIN
+ SELECT setting::int INTO max_locks
+ FROM pg_settings
+ WHERE name = 'max_locks_per_transaction';
+
+ FOR i IN 1..(max_locks + 10) LOOP
+ EXECUTE format(
+ 'CREATE TABLE part_test_%s PARTITION OF part_test
+ FOR VALUES FROM (%s) TO (%s)',
+ i, (i-1)*1000, i*1000
+ );
+ END LOOP;
+END;
+$$;
+SELECT fastpath_exceeded AS fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation' \gset
+-- Needs a lock on each partition
+SELECT count(*) FROM part_test;
+ count
+-------
+ 0
+(1 row)
+
+-- Ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
+ pg_stat_force_next_flush
+--------------------------
+
+(1 row)
+
+SELECT fastpath_exceeded > :fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation';
+ ?column?
+----------
+ t
+(1 row)
+
+DROP TABLE part_test;
-- End of Stats Test
DROP TABLE table_fillfactor;
+-- Test fastpath_exceeded stat
+CREATE TABLE part_test (id int) PARTITION BY RANGE (id);
+
+SELECT pg_stat_reset_shared('lock');
+
+-- Create partitions (exceeds number of slots)
+DO $$
+DECLARE
+ max_locks int;
+BEGIN
+ SELECT setting::int INTO max_locks
+ FROM pg_settings
+ WHERE name = 'max_locks_per_transaction';
+
+ FOR i IN 1..(max_locks + 10) LOOP
+ EXECUTE format(
+ 'CREATE TABLE part_test_%s PARTITION OF part_test
+ FOR VALUES FROM (%s) TO (%s)',
+ i, (i-1)*1000, i*1000
+ );
+ END LOOP;
+END;
+$$;
+
+SELECT fastpath_exceeded AS fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation' \gset
+
+-- Needs a lock on each partition
+SELECT count(*) FROM part_test;
+
+-- Ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
+
+SELECT fastpath_exceeded > :fastpath_exceeded_before FROM pg_stat_lock WHERE locktype = 'relation';
+
+DROP TABLE part_test;
+
-- End of Stats Test
PgStatShared_Function
PgStatShared_HashEntry
PgStatShared_IO
+PgStatShared_Lock
PgStatShared_Relation
PgStatShared_ReplSlot
PgStatShared_SLRU
PgStat_IO
PgStat_KindInfo
PgStat_LocalState
+PgStat_Lock
+PgStat_LockEntry
PgStat_PendingDroppedStatsItem
PgStat_PendingIO
+PgStat_PendingLock
PgStat_SLRUStats
PgStat_ShmemControl
PgStat_Snapshot