author     Robert Haas  2022-08-24 19:50:48 +0000
committer  Robert Haas  2022-08-24 19:50:48 +0000
commit     82ac34db2036ec5b3cb32c9180f40549aa790dc2
tree       19385f9f12f528216ece87e22c29a487a0d0dc69
parent     396d348b046c6b7e5dc83158c4c1df1377a1d2ef
Include RelFileLocator fields individually in BufferTag.
This is preparatory work for a project to increase the number of bits
in a RelFileNumber from 32 to 56.

Along the way, introduce static inline accessor functions for a couple
of BufferTag fields.

Dilip Kumar, reviewed by me. The overall patch series has also had
review at various times from Andres Freund, Ashutosh Sharma, Hannu
Krosing, Vignesh C, Álvaro Herrera, and Tom Lane.

Discussion: http://postgr.es/m/CAFiTN-trubju5YbWAq-BSpZ90-Z6xCVBQE8BVqXqANOZAF1Znw@mail.gmail.com
-rw-r--r--   contrib/pg_buffercache/pg_buffercache_pages.c     8
-rw-r--r--   contrib/pg_prewarm/autoprewarm.c                  10
-rw-r--r--   src/backend/storage/buffer/bufmgr.c              115
-rw-r--r--   src/backend/storage/buffer/localbuf.c             21
-rw-r--r--   src/include/storage/buf_internals.h               64
5 files changed, 145 insertions, 73 deletions
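
To make the mechanical change easier to follow, here is a condensed
before/after of one call site (ReleaseAndReadBuffer, taken from the
bufmgr.c hunks below). Code that previously reached into the tag's
embedded RelFileLocator now reads the same fields through the new
static inline accessors, so call sites should not need to change again
when the relNumber representation is widened:

    /* Before: the tag embeds a whole RelFileLocator plus a fork number. */
    if (bufHdr->tag.blockNum == blockNum &&
        RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
        bufHdr->tag.forkNum == forkNum)
        return buffer;

    /* After: the locator fields are stored individually in the tag and
     * are read through accessors, hiding the layout from callers. */
    if (bufHdr->tag.blockNum == blockNum &&
        BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
        BufTagGetForkNum(&bufHdr->tag) == forkNum)
        return buffer;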
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index 131bd629b9..c5754ea9fa 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -153,10 +153,10 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
buf_state = LockBufHdr(bufHdr);
fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
- fctx->record[i].relfilenumber = bufHdr->tag.rlocator.relNumber;
- fctx->record[i].reltablespace = bufHdr->tag.rlocator.spcOid;
- fctx->record[i].reldatabase = bufHdr->tag.rlocator.dbOid;
- fctx->record[i].forknum = bufHdr->tag.forkNum;
+ fctx->record[i].relfilenumber = BufTagGetRelNumber(&bufHdr->tag);
+ fctx->record[i].reltablespace = bufHdr->tag.spcOid;
+ fctx->record[i].reldatabase = bufHdr->tag.dbOid;
+ fctx->record[i].forknum = BufTagGetForkNum(&bufHdr->tag);
fctx->record[i].blocknum = bufHdr->tag.blockNum;
fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);
fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state);
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index d9ab39dd90..c8d673a20e 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -630,10 +630,12 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
if (buf_state & BM_TAG_VALID &&
((buf_state & BM_PERMANENT) || dump_unlogged))
{
- block_info_array[num_blocks].database = bufHdr->tag.rlocator.dbOid;
- block_info_array[num_blocks].tablespace = bufHdr->tag.rlocator.spcOid;
- block_info_array[num_blocks].filenumber = bufHdr->tag.rlocator.relNumber;
- block_info_array[num_blocks].forknum = bufHdr->tag.forkNum;
+ block_info_array[num_blocks].database = bufHdr->tag.dbOid;
+ block_info_array[num_blocks].tablespace = bufHdr->tag.spcOid;
+ block_info_array[num_blocks].filenumber =
+ BufTagGetRelNumber(&bufHdr->tag);
+ block_info_array[num_blocks].forknum =
+ BufTagGetForkNum(&bufHdr->tag);
block_info_array[num_blocks].blocknum = bufHdr->tag.blockNum;
++num_blocks;
}
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 49d3b8c9dd..e898ffad7b 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1657,8 +1657,8 @@ ReleaseAndReadBuffer(Buffer buffer,
{
bufHdr = GetLocalBufferDescriptor(-buffer - 1);
if (bufHdr->tag.blockNum == blockNum &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
- bufHdr->tag.forkNum == forkNum)
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum)
return buffer;
ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
LocalRefCount[-buffer - 1]--;
@@ -1668,8 +1668,8 @@ ReleaseAndReadBuffer(Buffer buffer,
bufHdr = GetBufferDescriptor(buffer - 1);
/* we have pin, so it's ok to examine tag without spinlock */
if (bufHdr->tag.blockNum == blockNum &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
- bufHdr->tag.forkNum == forkNum)
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum)
return buffer;
UnpinBuffer(bufHdr, true);
}
@@ -2010,9 +2010,9 @@ BufferSync(int flags)
item = &CkptBufferIds[num_to_scan++];
item->buf_id = buf_id;
- item->tsId = bufHdr->tag.rlocator.spcOid;
- item->relNumber = bufHdr->tag.rlocator.relNumber;
- item->forkNum = bufHdr->tag.forkNum;
+ item->tsId = bufHdr->tag.spcOid;
+ item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
+ item->forkNum = BufTagGetForkNum(&bufHdr->tag);
item->blockNum = bufHdr->tag.blockNum;
}
@@ -2718,7 +2718,8 @@ PrintBufferLeakWarning(Buffer buffer)
}
/* theoretically we should lock the bufhdr here */
- path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
+ path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
+ BufTagGetForkNum(&buf->tag));
buf_state = pg_atomic_read_u32(&buf->state);
elog(WARNING,
"buffer refcount leak: [%03d] "
@@ -2797,8 +2798,8 @@ BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
bufHdr = GetBufferDescriptor(buffer - 1);
/* pinned, so OK to read tag without spinlock */
- *rlocator = bufHdr->tag.rlocator;
- *forknum = bufHdr->tag.forkNum;
+ *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+ *forknum = BufTagGetForkNum(&bufHdr->tag);
*blknum = bufHdr->tag.blockNum;
}
@@ -2848,9 +2849,9 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
/* Find smgr relation for buffer */
if (reln == NULL)
- reln = smgropen(buf->tag.rlocator, InvalidBackendId);
+ reln = smgropen(BufTagGetRelFileLocator(&buf->tag), InvalidBackendId);
- TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
+ TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
@@ -2909,7 +2910,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
* bufToWrite is either the shared buffer or a copy, as appropriate.
*/
smgrwrite(reln,
- buf->tag.forkNum,
+ BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
bufToWrite,
false);
@@ -2930,7 +2931,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
*/
TerminateBufferIO(buf, true, 0);
- TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
+ TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
buf->tag.blockNum,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
@@ -3151,15 +3152,15 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
* We could check forkNum and blockNum as well as the rlocator, but
* the incremental win from doing so seems small.
*/
- if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
+ if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
continue;
buf_state = LockBufHdr(bufHdr);
for (j = 0; j < nforks; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
- bufHdr->tag.forkNum == forkNum[j] &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
InvalidateBuffer(bufHdr); /* releases spinlock */
@@ -3309,7 +3310,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
for (j = 0; j < n; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
{
rlocator = &locators[j];
break;
@@ -3318,7 +3319,10 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
}
else
{
- rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
+ RelFileLocator locator;
+
+ locator = BufTagGetRelFileLocator(&bufHdr->tag);
+ rlocator = bsearch((const void *) &(locator),
locators, n, sizeof(RelFileLocator),
rlocator_comparator);
}
@@ -3328,7 +3332,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
continue;
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
@@ -3388,8 +3392,8 @@ FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
*/
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
- bufHdr->tag.forkNum == forkNum &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
@@ -3427,11 +3431,11 @@ DropDatabaseBuffers(Oid dbid)
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (bufHdr->tag.rlocator.dbOid != dbid)
+ if (bufHdr->tag.dbOid != dbid)
continue;
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rlocator.dbOid == dbid)
+ if (bufHdr->tag.dbOid == dbid)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
@@ -3461,7 +3465,8 @@ PrintBufferDescs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
+ relpathbackend(BufTagGetRelFileLocator(&buf->tag),
+ InvalidBackendId, BufTagGetForkNum(&buf->tag)),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@@ -3486,7 +3491,8 @@ PrintPinnedBufs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathperm(buf->tag.rlocator, buf->tag.forkNum),
+ relpathperm(BufTagGetRelFileLocator(&buf->tag),
+ BufTagGetForkNum(&buf->tag)),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@@ -3525,7 +3531,7 @@ FlushRelationBuffers(Relation rel)
uint32 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
@@ -3543,7 +3549,7 @@ FlushRelationBuffers(Relation rel)
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
smgrwrite(RelationGetSmgr(rel),
- bufHdr->tag.forkNum,
+ BufTagGetForkNum(&bufHdr->tag),
bufHdr->tag.blockNum,
localpage,
false);
@@ -3572,13 +3578,13 @@ FlushRelationBuffers(Relation rel)
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
+ if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -3652,7 +3658,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
for (j = 0; j < nrels; j++)
{
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
{
srelent = &srels[j];
break;
@@ -3661,7 +3667,10 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
}
else
{
- srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
+ RelFileLocator rlocator;
+
+ rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+ srelent = bsearch((const void *) &(rlocator),
srels, nrels, sizeof(SMgrSortArray),
rlocator_comparator);
}
@@ -3673,7 +3682,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
+ if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -3877,13 +3886,13 @@ FlushDatabaseBuffers(Oid dbid)
* As in DropRelationBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
- if (bufHdr->tag.rlocator.dbOid != dbid)
+ if (bufHdr->tag.dbOid != dbid)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rlocator.dbOid == dbid &&
+ if (bufHdr->tag.dbOid == dbid &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -4052,7 +4061,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
* See src/backend/storage/page/README for longer discussion.
*/
if (RecoveryInProgress() ||
- RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
+ RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
return;
/*
@@ -4661,7 +4670,8 @@ AbortBufferIO(void)
/* Buffer is pinned, so we can read tag without spinlock */
char *path;
- path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
+ path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
+ BufTagGetForkNum(&buf->tag));
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
@@ -4685,7 +4695,8 @@ shared_buffer_write_error_callback(void *arg)
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
- char *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
+ char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
+ BufTagGetForkNum(&bufHdr->tag));
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
@@ -4703,8 +4714,9 @@ local_buffer_write_error_callback(void *arg)
if (bufHdr != NULL)
{
- char *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
- bufHdr->tag.forkNum);
+ char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+ MyBackendId,
+ BufTagGetForkNum(&bufHdr->tag));
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
@@ -4798,15 +4810,20 @@ static inline int
buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
{
int ret;
+ RelFileLocator rlocatora;
+ RelFileLocator rlocatorb;
- ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
+ rlocatora = BufTagGetRelFileLocator(ba);
+ rlocatorb = BufTagGetRelFileLocator(bb);
+
+ ret = rlocator_comparator(&rlocatora, &rlocatorb);
if (ret != 0)
return ret;
- if (ba->forkNum < bb->forkNum)
+ if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
return -1;
- if (ba->forkNum > bb->forkNum)
+ if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
return 1;
if (ba->blockNum < bb->blockNum)
@@ -4956,10 +4973,12 @@ IssuePendingWritebacks(WritebackContext *context)
SMgrRelation reln;
int ahead;
BufferTag tag;
+ RelFileLocator currlocator;
Size nblocks = 1;
cur = &context->pending_writebacks[i];
tag = cur->tag;
+ currlocator = BufTagGetRelFileLocator(&tag);
/*
* Peek ahead, into following writeback requests, to see if they can
@@ -4967,11 +4986,13 @@ IssuePendingWritebacks(WritebackContext *context)
*/
for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
{
+
next = &context->pending_writebacks[i + ahead + 1];
/* different file, stop */
- if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
- cur->tag.forkNum != next->tag.forkNum)
+ if (!RelFileLocatorEquals(currlocator,
+ BufTagGetRelFileLocator(&next->tag)) ||
+ BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
break;
/* ok, block queued twice, skip */
@@ -4989,8 +5010,8 @@ IssuePendingWritebacks(WritebackContext *context)
i += ahead;
/* and finally tell the kernel to write the data to storage */
- reln = smgropen(tag.rlocator, InvalidBackendId);
- smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
+ reln = smgropen(currlocator, InvalidBackendId);
+ smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
}
context->nr_pending = 0;
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 014f644bf9..98530078a6 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -215,13 +215,13 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
/* Find smgr relation for buffer */
- oreln = smgropen(bufHdr->tag.rlocator, MyBackendId);
+ oreln = smgropen(BufTagGetRelFileLocator(&bufHdr->tag), MyBackendId);
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
/* And write... */
smgrwrite(oreln,
- bufHdr->tag.forkNum,
+ BufTagGetForkNum(&bufHdr->tag),
bufHdr->tag.blockNum,
localpage,
false);
@@ -337,16 +337,18 @@ DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
- bufHdr->tag.forkNum == forkNum &&
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
+ BufTagGetForkNum(&bufHdr->tag) == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
- relpathbackend(bufHdr->tag.rlocator, MyBackendId,
- bufHdr->tag.forkNum),
+ relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+ MyBackendId,
+ BufTagGetForkNum(&bufHdr->tag)),
LocalRefCount[i]);
+
/* Remove entry from hashtable */
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &bufHdr->tag,
@@ -383,13 +385,14 @@ DropRelationAllLocalBuffers(RelFileLocator rlocator)
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
- RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator))
+ BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
- relpathbackend(bufHdr->tag.rlocator, MyBackendId,
- bufHdr->tag.forkNum),
+ relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+ MyBackendId,
+ BufTagGetForkNum(&bufHdr->tag)),
LocalRefCount[i]);
/* Remove entry from hashtable */
hresult = (LocalBufferLookupEnt *)
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 72466551d7..406db6be78 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -90,18 +90,51 @@
*/
typedef struct buftag
{
- RelFileLocator rlocator; /* physical relation identifier */
- ForkNumber forkNum;
+ Oid spcOid; /* tablespace oid */
+ Oid dbOid; /* database oid */
+ RelFileNumber relNumber; /* relation file number */
+ ForkNumber forkNum; /* fork number */
BlockNumber blockNum; /* blknum relative to begin of reln */
} BufferTag;
+static inline RelFileNumber
+BufTagGetRelNumber(const BufferTag *tag)
+{
+ return tag->relNumber;
+}
+
+static inline ForkNumber
+BufTagGetForkNum(const BufferTag *tag)
+{
+ return tag->forkNum;
+}
+
+static inline void
+BufTagSetRelForkDetails(BufferTag *tag, RelFileNumber relnumber,
+ ForkNumber forknum)
+{
+ tag->relNumber = relnumber;
+ tag->forkNum = forknum;
+}
+
+static inline RelFileLocator
+BufTagGetRelFileLocator(const BufferTag *tag)
+{
+ RelFileLocator rlocator;
+
+ rlocator.spcOid = tag->spcOid;
+ rlocator.dbOid = tag->dbOid;
+ rlocator.relNumber = BufTagGetRelNumber(tag);
+
+ return rlocator;
+}
+
static inline void
ClearBufferTag(BufferTag *tag)
{
- tag->rlocator.spcOid = InvalidOid;
- tag->rlocator.dbOid = InvalidOid;
- tag->rlocator.relNumber = InvalidRelFileNumber;
- tag->forkNum = InvalidForkNumber;
+ tag->spcOid = InvalidOid;
+ tag->dbOid = InvalidOid;
+ BufTagSetRelForkDetails(tag, InvalidRelFileNumber, InvalidForkNumber);
tag->blockNum = InvalidBlockNumber;
}
@@ -109,19 +142,32 @@ static inline void
InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator,
ForkNumber forkNum, BlockNumber blockNum)
{
- tag->rlocator = *rlocator;
- tag->forkNum = forkNum;
+ tag->spcOid = rlocator->spcOid;
+ tag->dbOid = rlocator->dbOid;
+ BufTagSetRelForkDetails(tag, rlocator->relNumber, forkNum);
tag->blockNum = blockNum;
}
static inline bool
BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
{
- return RelFileLocatorEquals(tag1->rlocator, tag2->rlocator) &&
+ return (tag1->spcOid == tag2->spcOid) &&
+ (tag1->dbOid == tag2->dbOid) &&
+ (tag1->relNumber == tag2->relNumber) &&
(tag1->blockNum == tag2->blockNum) &&
(tag1->forkNum == tag2->forkNum);
}
+static inline bool
+BufTagMatchesRelFileLocator(const BufferTag *tag,
+ const RelFileLocator *rlocator)
+{
+ return (tag->spcOid == rlocator->spcOid) &&
+ (tag->dbOid == rlocator->dbOid) &&
+ (BufTagGetRelNumber(tag) == rlocator->relNumber);
+}
+
+
/*
* The shared buffer mapping table is partitioned to reduce contention.
* To determine which partition lock a given tag requires, compute the tag's