stack->next = ptr;
}
}
- LockBuffer(buffer, GIN_UNLOCK);
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
/* Step to next item in the queue */
stack_next = stack->next;
prev_attnum = current_attnum;
}
- LockBuffer(buffer, GIN_UNLOCK);
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
/* Step to next item in the queue */
stack_next = stack->next;
memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
relation_close(rel, AccessShareLock);
offnum = ItemPointerGetOffsetNumber(tid);
if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
{
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
*userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;
*/
if (!ItemIdIsNormal(lp))
{
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
*userbuf = InvalidBuffer;
tuple->t_data = NULL;
return false;
* unlock the two buffers in, so this can be slightly simpler than the
* code above.
*/
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (otherBuffer == InvalidBuffer)
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
else if (otherBlock != targetBlock)
{
+ UnlockReleaseBuffer(buffer);
LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buffer);
}
+ else
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* Is there an ongoing bulk extension? */
if (bistate && bistate->next_free != InvalidBlockNumber)
Assert(BlockNumberIsValid(blkno));
if (BufferIsValid(obuf))
- _bt_unlockbuf(rel, obuf);
- buf = ReleaseAndReadBuffer(obuf, rel, blkno);
- _bt_lockbuf(rel, buf, access);
+ {
+ if (BufferGetBlockNumber(obuf) == blkno)
+ {
+ /* trade in old lock mode for new lock */
+ _bt_unlockbuf(rel, obuf);
+ buf = obuf;
+ }
+ else
+ {
+ /* release lock and pin at once, that's a bit more efficient */
+ _bt_relbuf(rel, obuf);
+ buf = ReadBuffer(rel, blkno);
+ }
+ }
+ else
+ buf = ReadBuffer(rel, blkno);
+ _bt_lockbuf(rel, buf, access);
_bt_checkpage(rel, buf);
+
return buf;
}
/*
* _bt_relbuf() -- release a locked buffer.
*
- * Lock and pin (refcount) are both dropped.
+ * Lock and pin (refcount) are both dropped. This is a bit more efficient than
+ * doing the two operations separately.
*/
void
_bt_relbuf(Relation rel, Buffer buf)
{
- _bt_unlockbuf(rel, buf);
- ReleaseBuffer(buf);
+ /*
+ * Buffer is pinned and locked, which means that it is expected to be
+ * defined and addressable. Check that proactively.
+ */
+ VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);
+ if (!RelationUsesLocalBuffers(rel))
+ VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
+
+ UnlockReleaseBuffer(buf);
}
/*
XLogNeedsFlush(BufferGetLSN(buf_hdr)) &&
StrategyRejectBuffer(strategy, buf_hdr, from_ring))
{
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- UnpinBuffer(buf_hdr);
+ UnlockReleaseBuffer(buf);
goto again;
}
((FSMPage) PageGetContents(page))->fp_next_slot = 0;
BufferFinishSetHintBits(buf, false, false);
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
return max_avail;
}
*/
memcpy(page, BufferGetPage(buf), BLCKSZ);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
/*
* Don't want to have a buffer in-memory that's marked valid where the
else
FlushOneBuffer(buf);
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
if (BufferIsLocal(buf))
InvalidateLocalBuffer(GetLocalBufferDescriptor(-buf - 1), true);