Unpin buffer before inplace update waits for an XID to end.
author    Noah Misch <noah@leadboat.com>
          Tue, 29 Oct 2024 16:39:55 +0000 (09:39 -0700)
committer Noah Misch <noah@leadboat.com>
          Tue, 29 Oct 2024 16:40:00 +0000 (09:40 -0700)
Commit a07e03fd8fa7daf4d1356f7cb501ffe784ea6257 changed inplace updates
to wait for heap_update() commands like GRANT TABLE and GRANT DATABASE.
By keeping the pin during that wait, a sequence of autovacuum workers
and an uncommitted GRANT starved one foreground LockBufferForCleanup()
for six minutes, on buildfarm member sarus.  Prevent the starvation, at
the cost of a bit of complexity.  Back-patch to v12, like the earlier
commit.  That commit and heap_inplace_lock() have not yet appeared in
any release.

Discussion: https://postgr.es/m/20241026184936.ae.nmisch@google.com
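
For context: LockBufferForCleanup() succeeds only once its caller holds the
sole pin on the page, so a backend that keeps its pin while it sleeps on an
XID can hold off cleanup indefinitely.  A minimal standalone sketch of that
interaction, using hypothetical names (ToyBuffer, cleanup_lock_available)
rather than PostgreSQL code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a shared buffer's pin count. */
typedef struct ToyBuffer
{
	int			refcount;		/* number of backends pinning the page */
} ToyBuffer;

/*
 * LockBufferForCleanup() waits until the caller's pin is the only one;
 * this models just that availability test.
 */
static bool
cleanup_lock_available(const ToyBuffer *buf)
{
	return buf->refcount == 1;
}

int
main(void)
{
	ToyBuffer	buf = {.refcount = 1};	/* the cleanup waiter's own pin */

	buf.refcount++;				/* inplace updater pins the page */
	printf("pin kept across XID wait: cleanup %s\n",
		   cleanup_lock_available(&buf) ? "proceeds" : "starves");

	buf.refcount--;				/* patched ordering: unpin before waiting */
	printf("pin dropped before XID wait: cleanup %s\n",
		   cleanup_lock_available(&buf) ? "proceeds" : "starves");
	return 0;
}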

src/backend/access/heap/heapam.c
src/backend/access/index/genam.c
src/include/access/heapam.h

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c0ebfb8ec0d851749cb8aadd12db0c225dcfaf4f..236f5e78fef4e8aa80837c12bbe1dc9de7745c77 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -5974,8 +5974,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  * transaction.  If compatible, return true with the buffer exclusive-locked,
  * and the caller must release that by calling
  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
- * an error.  Otherwise, return false after blocking transactions, if any,
- * have ended.
+ * an error.  Otherwise, call release_callback(arg), wait for blocking
+ * transactions to end, and return false.
  *
  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
  * DDL, this doesn't guarantee any particular predicate locking.
@@ -6009,7 +6009,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  */
 bool
 heap_inplace_lock(Relation relation,
-                                 HeapTuple oldtup_ptr, Buffer buffer)
+                                 HeapTuple oldtup_ptr, Buffer buffer,
+                                 void (*release_callback) (void *), void *arg)
 {
        HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
        TM_Result       result;
@@ -6074,6 +6075,7 @@ heap_inplace_lock(Relation relation,
                                                                                lockmode, NULL))
                        {
                                LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+                               release_callback(arg);
                                ret = false;
                                MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
                                                                relation, &oldtup.t_self, XLTW_Update,
@@ -6089,6 +6091,7 @@ heap_inplace_lock(Relation relation,
                else
                {
                        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+                       release_callback(arg);
                        ret = false;
                        XactLockTableWait(xwait, relation, &oldtup.t_self,
                                                          XLTW_Update);
@@ -6100,6 +6103,7 @@ heap_inplace_lock(Relation relation,
                if (!ret)
                {
                        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+                       release_callback(arg);
                }
        }
 
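The hunks above put release_callback(arg) on each path that is about to
block.  A compilable toy of the resulting contract, with hypothetical names
(Resource, try_lock, wait_for_blocker); only the shape of
heap_inplace_lock() is taken from the patch: on conflict, release the
caller's resources before waiting, then return false so the caller retries
from scratch.

#include <stdbool.h>
#include <stdio.h>

typedef struct Resource
{
	const char *name;
} Resource;

static void
wait_for_blocker(void)
{
	/* stands in for XactLockTableWait()/MultiXactIdWait() */
	puts("waiting for the blocking transaction to end...");
}

/*
 * On success, return true with the lock held.  On conflict, release the
 * caller's resources via the callback *before* blocking, then return
 * false; the caller must reacquire everything and retry.
 */
static bool
try_lock(bool compatible, void (*release_callback) (void *), void *arg)
{
	if (compatible)
		return true;
	release_callback(arg);		/* e.g. unpin the buffer */
	wait_for_blocker();
	return false;
}

static void
release_resource(void *arg)
{
	printf("released %s before waiting\n", ((Resource *) arg)->name);
}

int
main(void)
{
	Resource	res = {"buffer pin"};
	int			attempt = 0;

	/* Retry loop in the style of systable_inplace_update_begin(). */
	while (!try_lock(attempt++ > 0, release_resource, &res))
		;						/* real code reacquires res here */
	puts("locked; update in place, then unlock");
	return 0;
}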
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index dcd1d703192a7fb260a8dfcbbf8d2afb2debd3ec..8051342eaa5627cb815076e03f55abf67fe8ae66 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -704,6 +704,7 @@ systable_inplace_update_begin(Relation relation,
        int                     retries = 0;
        SysScanDesc scan;
        HeapTuple       oldtup;
+       BufferHeapTupleTableSlot *bslot;
 
        /*
         * For now, we don't allow parallel updates.  Unlike a regular update,
@@ -725,10 +726,9 @@ systable_inplace_update_begin(Relation relation,
        Assert(IsInplaceUpdateRelation(relation) || !IsSystemRelation(relation));
 
        /* Loop for an exclusive-locked buffer of a non-updated tuple. */
-       for (;;)
+       do
        {
                TupleTableSlot *slot;
-               BufferHeapTupleTableSlot *bslot;
 
                CHECK_FOR_INTERRUPTS();
 
@@ -754,11 +754,9 @@ systable_inplace_update_begin(Relation relation,
                slot = scan->slot;
                Assert(TTS_IS_BUFFERTUPLE(slot));
                bslot = (BufferHeapTupleTableSlot *) slot;
-               if (heap_inplace_lock(scan->heap_rel,
-                                                         bslot->base.tuple, bslot->buffer))
-                       break;
-               systable_endscan(scan);
-       };
+       } while (!heap_inplace_lock(scan->heap_rel,
+                                                               bslot->base.tuple, bslot->buffer,
+                                                               (void (*) (void *)) systable_endscan, scan));
 
        *oldtupcopy = heap_copytuple(oldtup);
        *state = scan;
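
The new do-while passes systable_endscan through a (void (*) (void *))
cast.  Calling through that pointer type assumes SysScanDesc (itself a
pointer) and void * are interchangeable at the ABI level, which holds on
the platforms PostgreSQL supports; the strictly conforming spelling is a
thin wrapper with the exact callback type, sketched here with hypothetical
names (ToyScan, end_scan_callback):

#include <stdio.h>

typedef struct ToyScan
{
	int			id;
} ToyScan;

static void
end_scan(ToyScan *scan)
{
	printf("ending scan %d\n", scan->id);
}

/* Strictly conforming spelling: a wrapper with the exact callback type. */
static void
end_scan_callback(void *arg)
{
	end_scan((ToyScan *) arg);
}

static void
run_with_callback(void (*cb) (void *), void *arg)
{
	cb(arg);
}

int
main(void)
{
	ToyScan		scan = {42};

	run_with_callback(end_scan_callback, &scan);
	return 0;
}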
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index dc093a347190d052d06682e7a6b13cad369076e9..a33f28dd7c4129bb39d277b7ad717223ff1ebd9b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -157,7 +157,8 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
                                                                 Buffer *buffer, struct TM_FailureData *tmfd);
 
 extern bool heap_inplace_lock(Relation relation,
-                                                         HeapTuple oldtup_ptr, Buffer buffer);
+                                                         HeapTuple oldtup_ptr, Buffer buffer,
+                                                         void (*release_callback) (void *), void *arg);
 extern void heap_inplace_update_and_unlock(Relation relation,
                                                                                   HeapTuple oldtup, HeapTuple tuple,
                                                                                   Buffer buffer);