author     Heikki Linnakangas    2008-10-31 15:05:00 +0000
committer  Heikki Linnakangas    2008-10-31 15:05:00 +0000
commit     2597b2f8e4bf7fc737ca1eeb2fc58482daceddea (patch)
tree       f8edb955e3978e22a6cc63f54b91befc5321eafb
parent     886406c2301efba1dd894aab7c01c132b4ee9eac (diff)
Unite ReadBufferWithFork, ReadBufferWithStrategy, and ReadOrZeroBuffer
functions into one ReadBufferExtended function, which takes the strategy and mode as arguments. There are three modes: RBM_NORMAL, which is the default used by plain ReadBuffer(); RBM_ZERO, which replaces ReadOrZeroBuffer; and a new mode, RBM_ZERO_ON_ERROR, which allows callers to read corrupt pages without throwing an error. The FSM needs the new mode to recover from corrupt pages, which could happen if we crash after extending an FSM file and the new page is "torn". Also add the fork number to some error messages in bufmgr.c that still lacked it.
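
For reference, every call-site change in this patch follows the same mechanical mapping. The sketch below is not part of the patch; the variables (rel, forknum, blkno, strategy, buf) are placeholders used only for illustration:

/* ReadBuffer(rel, blkno) remains as a shorthand for: */
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);

/* ReadBufferWithFork(rel, forknum, blkno) becomes: */
buf = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL);

/* ReadBufferWithStrategy(rel, blkno, strategy) becomes: */
buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy);

/* ReadOrZeroBuffer(rel, forknum, blkno) becomes: */
buf = ReadBufferExtended(rel, forknum, blkno, RBM_ZERO, NULL);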
-rw-r--r--  contrib/pageinspect/rawpage.c               2
-rw-r--r--  src/backend/access/gin/ginvacuum.c         42
-rw-r--r--  src/backend/access/gist/gistvacuum.c       13
-rw-r--r--  src/backend/access/hash/hashpage.c          6
-rw-r--r--  src/backend/access/heap/heapam.c            5
-rw-r--r--  src/backend/access/nbtree/nbtree.c          3
-rw-r--r--  src/backend/access/transam/xlog.c           4
-rw-r--r--  src/backend/access/transam/xlogutils.c     60
-rw-r--r--  src/backend/commands/analyze.c              3
-rw-r--r--  src/backend/commands/vacuum.c              46
-rw-r--r--  src/backend/commands/vacuumlazy.c           9
-rw-r--r--  src/backend/storage/buffer/bufmgr.c       167
-rw-r--r--  src/backend/storage/freespace/freespace.c  33
-rw-r--r--  src/include/access/xlogutils.h              5
-rw-r--r--  src/include/storage/bufmgr.h               19
15 files changed, 224 insertions, 193 deletions
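
The new RBM_ZERO_ON_ERROR mode is what lets the FSM survive a torn page after a crash. The following is a condensed sketch of the fsm_readbuf() hunk in freespace.c further down, with the relation-extension path and surrounding code omitted:

/* A corrupt page is returned zeroed instead of raising an error;
 * reinitialize it (or a genuinely new page) before use. */
buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
if (PageIsNew(BufferGetPage(buf)))
    PageInit(BufferGetPage(buf), BLCKSZ, 0);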
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index feed0af905..a10780d666 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -85,7 +85,7 @@ get_raw_page(PG_FUNCTION_ARGS)
/* Take a verbatim copy of the page */
- buf = ReadBufferWithFork(rel, forknum, blkno);
+ buf = ReadBufferExtended(rel, forknum, blkno, RBM_NORMAL, NULL);
LockBuffer(buf, BUFFER_LOCK_SHARE);
memcpy(raw_page_data, BufferGetPage(buf), BLCKSZ);
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index a0fa4cafe7..4aecf9f756 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -155,10 +155,14 @@ xlogVacuumPage(Relation index, Buffer buffer)
static bool
ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer)
{
- Buffer buffer = ReadBufferWithStrategy(gvs->index, blkno, gvs->strategy);
- Page page = BufferGetPage(buffer);
+ Buffer buffer;
+ Page page;
bool hasVoidPage = FALSE;
+ buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, gvs->strategy);
+ page = BufferGetPage(buffer);
+
/*
* We should be sure that we don't concurrent with inserts, insert process
* never release root page until end (but it can unlock it and lock
@@ -241,13 +245,24 @@ static void
ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
- Buffer dBuffer = ReadBufferWithStrategy(gvs->index, deleteBlkno, gvs->strategy);
- Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
- InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
- Buffer pBuffer = ReadBufferWithStrategy(gvs->index, parentBlkno, gvs->strategy);
+ Buffer dBuffer;
+ Buffer lBuffer;
+ Buffer pBuffer;
Page page,
parentPage;
+ dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
+ RBM_NORMAL, gvs->strategy);
+
+ if (leftBlkno != InvalidBlockNumber)
+ lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
+ RBM_NORMAL, gvs->strategy);
+ else
+ lBuffer = InvalidBuffer;
+
+ pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
+ RBM_NORMAL, gvs->strategy);
+
LockBuffer(dBuffer, GIN_EXCLUSIVE);
if (!isParentRoot) /* parent is already locked by
* LockBufferForCleanup() */
@@ -401,7 +416,8 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDel
me = parent->child;
}
- buffer = ReadBufferWithStrategy(gvs->index, blkno, gvs->strategy);
+ buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, gvs->strategy);
page = BufferGetPage(buffer);
Assert(GinPageIsData(page));
@@ -589,7 +605,8 @@ ginbulkdelete(PG_FUNCTION_ARGS)
gvs.strategy = info->strategy;
initGinState(&gvs.ginstate, index);
- buffer = ReadBufferWithStrategy(index, blkno, info->strategy);
+ buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, info->strategy);
/* find leaf page */
for (;;)
@@ -621,7 +638,8 @@ ginbulkdelete(PG_FUNCTION_ARGS)
Assert(blkno != InvalidBlockNumber);
UnlockReleaseBuffer(buffer);
- buffer = ReadBufferWithStrategy(index, blkno, info->strategy);
+ buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, info->strategy);
}
/* right now we found leftmost page in entry's BTree */
@@ -663,7 +681,8 @@ ginbulkdelete(PG_FUNCTION_ARGS)
if (blkno == InvalidBlockNumber) /* rightmost page */
break;
- buffer = ReadBufferWithStrategy(index, blkno, info->strategy);
+ buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, info->strategy);
LockBuffer(buffer, GIN_EXCLUSIVE);
}
@@ -718,7 +737,8 @@ ginvacuumcleanup(PG_FUNCTION_ARGS)
vacuum_delay_point();
- buffer = ReadBufferWithStrategy(index, blkno, info->strategy);
+ buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, info->strategy);
LockBuffer(buffer, GIN_SHARE);
page = (Page) BufferGetPage(buffer);
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index f83f2a40cb..4eb7663df0 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -86,7 +86,8 @@ gistDeleteSubtree(GistVacuum *gv, BlockNumber blkno)
Buffer buffer;
Page page;
- buffer = ReadBufferWithStrategy(gv->index, blkno, gv->strategy);
+ buffer = ReadBufferExtended(gv->index, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ gv->strategy);
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
@@ -306,7 +307,8 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
vacuum_delay_point();
- buffer = ReadBufferWithStrategy(gv->index, blkno, gv->strategy);
+ buffer = ReadBufferExtended(gv->index, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ gv->strategy);
LockBuffer(buffer, GIST_EXCLUSIVE);
gistcheckpage(gv->index, buffer);
page = (Page) BufferGetPage(buffer);
@@ -595,7 +597,8 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
vacuum_delay_point();
- buffer = ReadBufferWithStrategy(rel, blkno, info->strategy);
+ buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ info->strategy);
LockBuffer(buffer, GIST_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -691,13 +694,15 @@ gistbulkdelete(PG_FUNCTION_ARGS)
while (stack)
{
- Buffer buffer = ReadBufferWithStrategy(rel, stack->blkno, info->strategy);
+ Buffer buffer;
Page page;
OffsetNumber i,
maxoff;
IndexTuple idxtuple;
ItemId iid;
+ buffer = ReadBufferExtended(rel, MAIN_FORKNUM, stack->blkno,
+ RBM_NORMAL, info->strategy);
LockBuffer(buffer, GIST_SHARE);
gistcheckpage(rel, buffer);
page = (Page) BufferGetPage(buffer);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 431bb2e204..7b96e35c78 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -158,7 +158,7 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
if (blkno == P_NEW)
elog(ERROR, "hash AM does not use P_NEW");
- buf = ReadOrZeroBuffer(rel, MAIN_FORKNUM, blkno);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
LockBuffer(buf, HASH_WRITE);
@@ -203,7 +203,7 @@ _hash_getnewbuf(Relation rel, BlockNumber blkno)
BufferGetBlockNumber(buf), blkno);
}
else
- buf = ReadOrZeroBuffer(rel, MAIN_FORKNUM, blkno);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO, NULL);
LockBuffer(buf, HASH_WRITE);
@@ -231,7 +231,7 @@ _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
if (blkno == P_NEW)
elog(ERROR, "hash AM does not use P_NEW");
- buf = ReadBufferWithStrategy(rel, blkno, bstrategy);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
if (access != HASH_NOLOCK)
LockBuffer(buf, access);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c528c7cf80..f6584e9b53 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -205,9 +205,8 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/* read page using selected strategy */
- scan->rs_cbuf = ReadBufferWithStrategy(scan->rs_rd,
- page,
- scan->rs_strategy);
+ scan->rs_cbuf = ReadBufferExtended(scan->rs_rd, MAIN_FORKNUM, page,
+ RBM_NORMAL, scan->rs_strategy);
scan->rs_cblock = page;
if (!scan->rs_pageatatime)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index a7156823d3..366da36517 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -750,7 +750,8 @@ restart:
* recycle all-zero pages, not fail. Also, we want to use a nondefault
* buffer access strategy.
*/
- buf = ReadBufferWithStrategy(rel, blkno, info->strategy);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ info->strategy);
LockBuffer(buf, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b1bb5ae911..003098f3b3 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -2897,8 +2897,8 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
memcpy(&bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock);
- buffer = XLogReadBufferWithFork(bkpb.node, bkpb.fork, bkpb.block,
- true);
+ buffer = XLogReadBufferExtended(bkpb.node, bkpb.fork, bkpb.block,
+ RBM_ZERO);
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index fdc03041e7..38b819da1f 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -200,6 +200,20 @@ XLogCheckInvalidPages(void)
invalid_page_tab = NULL;
}
+/*
+ * XLogReadBuffer
+ * A shorthand of XLogReadBufferExtended(), for reading from the main
+ * fork.
+ *
+ * For historical reasons, instead of a ReadBufferMode argument, this only
+ * supports RBM_ZERO (init == true) and RBM_NORMAL (init == false) modes.
+ */
+Buffer
+XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init)
+{
+ return XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno,
+ init ? RBM_ZERO : RBM_NORMAL);
+}
/*
* XLogReadBuffer
@@ -211,34 +225,21 @@ XLogCheckInvalidPages(void)
* expect that this is only used during single-process XLOG replay, but
* some subroutines such as MarkBufferDirty will complain if we don't.)
*
- * If "init" is true then the caller intends to rewrite the page fully
- * using the info in the XLOG record. In this case we will extend the
- * relation if needed to make the page exist, and we will not complain about
- * the page being "new" (all zeroes); in fact, we usually will supply a
- * zeroed buffer without reading the page at all, so as to avoid unnecessary
- * failure if the page is present on disk but has corrupt headers.
+ * There are some differences in the behavior wrt. the "mode" argument,
+ * compared to ReadBufferExtended:
*
- * If "init" is false then the caller needs the page to be valid already.
- * If the page doesn't exist or contains zeroes, we return InvalidBuffer.
- * In this case the caller should silently skip the update on this page.
- * (In this situation, we expect that the page was later dropped or truncated.
- * If we don't see evidence of that later in the WAL sequence, we'll complain
- * at the end of WAL replay.)
- */
-Buffer
-XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init)
-{
- return XLogReadBufferWithFork(rnode, MAIN_FORKNUM, blkno, init);
-}
-
-/*
- * XLogReadBufferWithFork
- * Like XLogReadBuffer, but for reading other relation forks than
- * the main one.
+ * In RBM_NORMAL mode, if the page doesn't exist, or contains all-zeroes, we
+ * return InvalidBuffer. In this case the caller should silently skip the
+ * update on this page. (In this situation, we expect that the page was later
+ * dropped or truncated. If we don't see evidence of that later in the WAL
+ * sequence, we'll complain at the end of WAL replay.)
+ *
+ * In RBM_ZERO and RBM_ZERO_ON_ERROR modes, if the page doesn't exist, the
+ * relation is extended with all-zeroes pages up to the given block number.
*/
Buffer
-XLogReadBufferWithFork(RelFileNode rnode, ForkNumber forknum,
- BlockNumber blkno, bool init)
+XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
+ BlockNumber blkno, ReadBufferMode mode)
{
BlockNumber lastblock;
Buffer buffer;
@@ -264,12 +265,13 @@ XLogReadBufferWithFork(RelFileNode rnode, ForkNumber forknum,
if (blkno < lastblock)
{
/* page exists in file */
- buffer = ReadBufferWithoutRelcache(rnode, false, forknum, blkno, init);
+ buffer = ReadBufferWithoutRelcache(rnode, false, forknum, blkno,
+ mode, NULL);
}
else
{
/* hm, page doesn't exist in file */
- if (!init)
+ if (mode == RBM_NORMAL)
{
log_invalid_page(rnode, forknum, blkno, false);
return InvalidBuffer;
@@ -283,7 +285,7 @@ XLogReadBufferWithFork(RelFileNode rnode, ForkNumber forknum,
if (buffer != InvalidBuffer)
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, false, forknum,
- P_NEW, false);
+ P_NEW, mode, NULL);
lastblock++;
}
Assert(BufferGetBlockNumber(buffer) == blkno);
@@ -291,7 +293,7 @@ XLogReadBufferWithFork(RelFileNode rnode, ForkNumber forknum,
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- if (!init)
+ if (mode == RBM_NORMAL)
{
/* check that page has been initialized */
Page page = (Page) BufferGetPage(buffer);
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index d96b7666f0..0e78b42d07 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -911,7 +911,8 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* each tuple, but since we aren't doing much work per tuple, the
* extra lock traffic is probably better avoided.
*/
- targbuffer = ReadBufferWithStrategy(onerel, targblock, vac_strategy);
+ targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
+ RBM_NORMAL, vac_strategy);
LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
targpage = BufferGetPage(targbuffer);
maxoffset = PageGetMaxOffsetNumber(targpage);
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index c18e6bcd52..955e7bf4de 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1348,7 +1348,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
vacuum_delay_point();
- buf = ReadBufferWithStrategy(onerel, blkno, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ vac_strategy);
page = BufferGetPage(buf);
/*
@@ -1919,7 +1920,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Process this page of relation.
*/
- buf = ReadBufferWithStrategy(onerel, blkno, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+ vac_strategy);
page = BufferGetPage(buf);
vacpage->offsets_free = 0;
@@ -2173,9 +2175,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
nextTid = tp.t_data->t_ctid;
priorXmax = HeapTupleHeaderGetXmax(tp.t_data);
/* assume block# is OK (see heap_fetch comments) */
- nextBuf = ReadBufferWithStrategy(onerel,
+ nextBuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
ItemPointerGetBlockNumber(&nextTid),
- vac_strategy);
+ RBM_NORMAL, vac_strategy);
nextPage = BufferGetPage(nextBuf);
/* If bogus or unused slot, assume tp is end of chain */
nextOffnum = ItemPointerGetOffsetNumber(&nextTid);
@@ -2318,9 +2320,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of check-all-items loop */
}
tp.t_self = vtlp->this_tid;
- Pbuf = ReadBufferWithStrategy(onerel,
+ Pbuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
ItemPointerGetBlockNumber(&(tp.t_self)),
- vac_strategy);
+ RBM_NORMAL, vac_strategy);
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
@@ -2402,14 +2404,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
- Cbuf = ReadBufferWithStrategy(onerel,
+ Cbuf = ReadBufferExtended(onerel, MAIN_FORKNUM,
ItemPointerGetBlockNumber(&(tuple.t_self)),
- vac_strategy);
+ RBM_NORMAL, vac_strategy);
/* Get page to move to */
- dst_buffer = ReadBufferWithStrategy(onerel,
- destvacpage->blkno,
- vac_strategy);
+ dst_buffer = ReadBufferExtended(onerel, MAIN_FORKNUM,
+ destvacpage->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(dst_buffer, BUFFER_LOCK_EXCLUSIVE);
if (dst_buffer != Cbuf)
@@ -2502,9 +2504,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (i == num_fraged_pages)
break; /* can't move item anywhere */
dst_vacpage = fraged_pages->pagedesc[i];
- dst_buffer = ReadBufferWithStrategy(onerel,
- dst_vacpage->blkno,
- vac_strategy);
+ dst_buffer = ReadBufferExtended(onerel, MAIN_FORKNUM,
+ dst_vacpage->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(dst_buffer, BUFFER_LOCK_EXCLUSIVE);
dst_page = BufferGetPage(dst_buffer);
/* if this page was not used before - clean it */
@@ -2681,9 +2683,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
Page page;
/* this page was not used as a move target, so must clean it */
- buf = ReadBufferWithStrategy(onerel,
- (*curpage)->blkno,
- vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, (*curpage)->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);
if (!PageIsEmpty(page))
@@ -2770,7 +2771,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
int uncnt = 0;
int num_tuples = 0;
- buf = ReadBufferWithStrategy(onerel, vacpage->blkno, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, vacpage->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);
maxoff = PageGetMaxOffsetNumber(page);
@@ -3150,7 +3152,8 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
break; /* no need to scan any further */
if ((*curpage)->offsets_used == 0)
continue; /* this page was never used as a move dest */
- buf = ReadBufferWithStrategy(rel, (*curpage)->blkno, vac_strategy);
+ buf = ReadBufferExtended(rel, MAIN_FORKNUM, (*curpage)->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);
max_offset = PageGetMaxOffsetNumber(page);
@@ -3219,9 +3222,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
if ((*vacpage)->offsets_free > 0)
{
- buf = ReadBufferWithStrategy(onerel,
- (*vacpage)->blkno,
- vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, (*vacpage)->blkno,
+ RBM_NORMAL, vac_strategy);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
vacuum_page(onerel, buf, *vacpage);
UnlockReleaseBuffer(buf);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 77754be1b4..ca0dcc131c 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -301,7 +301,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->num_index_scans++;
}
- buf = ReadBufferWithStrategy(onerel, blkno, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, vac_strategy);
/* We need buffer cleanup lock so that we can prune HOT chains. */
LockBufferForCleanup(buf);
@@ -618,7 +619,8 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
vacuum_delay_point();
tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
- buf = ReadBufferWithStrategy(onerel, tblk, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
+ vac_strategy);
LockBufferForCleanup(buf);
tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
@@ -880,7 +882,8 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
blkno--;
- buf = ReadBufferWithStrategy(onerel, blkno, vac_strategy);
+ buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
+ RBM_NORMAL, vac_strategy);
/* In this phase we only need shared access to the buffer */
LockBuffer(buf, BUFFER_LOCK_SHARE);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 560293d372..3cd3ce5b65 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -72,11 +72,10 @@ static bool IsForInput;
static volatile BufferDesc *PinCountWaitBuf = NULL;
-static Buffer ReadBuffer_relcache(Relation reln, ForkNumber forkNum,
- BlockNumber blockNum, bool zeroPage, BufferAccessStrategy strategy);
static Buffer ReadBuffer_common(SMgrRelation reln, bool isLocalBuf,
- ForkNumber forkNum, BlockNumber blockNum,
- bool zeroPage, BufferAccessStrategy strategy, bool *hit);
+ ForkNumber forkNum, BlockNumber blockNum,
+ ReadBufferMode mode, BufferAccessStrategy strategy,
+ bool *hit);
static bool PinBuffer(volatile BufferDesc *buf, BufferAccessStrategy strategy);
static void PinBuffer_Locked(volatile BufferDesc *buf);
static void UnpinBuffer(volatile BufferDesc *buf, bool fixOwner);
@@ -96,7 +95,17 @@ static void AtProcExit_Buffers(int code, Datum arg);
/*
- * ReadBuffer -- returns a buffer containing the requested
+ * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
+ * fork with RBM_NORMAL mode and default strategy.
+ */
+Buffer
+ReadBuffer(Relation reln, BlockNumber blockNum)
+{
+ return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
+}
+
+/*
+ * ReadBufferExtended -- returns a buffer containing the requested
* block of the requested relation. If the blknum
* requested is P_NEW, extend the relation file and
* allocate a new block. (Caller is responsible for
@@ -107,75 +116,29 @@ static void AtProcExit_Buffers(int code, Datum arg);
* the block read. The returned buffer has been pinned.
* Does not return on error --- elog's instead.
*
- * Assume when this function is called, that reln has been
- * opened already.
- */
-Buffer
-ReadBuffer(Relation reln, BlockNumber blockNum)
-{
- return ReadBuffer_relcache(reln, MAIN_FORKNUM, blockNum, false, NULL);
-}
-
-/*
- * ReadBufferWithFork -- same as ReadBuffer, but for accessing relation
- * forks other than MAIN_FORKNUM.
- */
-Buffer
-ReadBufferWithFork(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
-{
- return ReadBuffer_relcache(reln, forkNum, blockNum, false, NULL);
-}
-
-/*
- * ReadBufferWithStrategy -- same as ReadBuffer, except caller can specify
- * a nondefault buffer access strategy. See buffer/README for details.
- */
-Buffer
-ReadBufferWithStrategy(Relation reln, BlockNumber blockNum,
- BufferAccessStrategy strategy)
-{
- return ReadBuffer_relcache(reln, MAIN_FORKNUM, blockNum, false, strategy);
-}
-
-/*
- * ReadOrZeroBuffer -- like ReadBuffer, but if the page isn't in buffer
- * cache already, it's filled with zeros instead of reading it from
- * disk. Useful when the caller intends to fill the page from scratch,
- * since this saves I/O and avoids unnecessary failure if the
- * page-on-disk has corrupt page headers.
- *
- * Caution: do not use this to read a page that is beyond the relation's
- * current physical EOF; that is likely to cause problems in md.c when
- * the page is modified and written out. P_NEW is OK, though.
- */
-Buffer
-ReadOrZeroBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
-{
- return ReadBuffer_relcache(reln, forkNum, blockNum, true, NULL);
-}
-
-/*
- * ReadBufferWithoutRelcache -- like ReadBuffer, but doesn't require a
- * relcache entry for the relation. If zeroPage is true, this behaves
- * like ReadOrZeroBuffer rather than ReadBuffer.
+ * Assume when this function is called, that reln has been opened already.
+ *
+ * In RBM_NORMAL mode, the page is read from disk, and the page header is
+ * validated. An error is thrown if the page header is not valid.
+ *
+ * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
+ * valid, the page is zeroed instead of throwing an error. This is intended
+ * for non-critical data, where the caller is prepared to repair errors.
+ *
+ * In RBM_ZERO mode, if the page isn't in buffer cache already, it's filled
+ * with zeros instead of reading it from disk. Useful when the caller is
+ * going to fill the page from scratch, since this saves I/O and avoids
+ * unnecessary failure if the page-on-disk has corrupt page headers.
+ * Caution: do not use this mode to read a page that is beyond the relation's
+ * current physical EOF; that is likely to cause problems in md.c when
+ * the page is modified and written out. P_NEW is OK, though.
+ *
+ * If strategy is not NULL, a nondefault buffer access strategy is used.
+ * See buffer/README for details.
*/
Buffer
-ReadBufferWithoutRelcache(RelFileNode rnode, bool isTemp,
- ForkNumber forkNum, BlockNumber blockNum, bool zeroPage)
-{
- bool hit;
-
- SMgrRelation smgr = smgropen(rnode);
- return ReadBuffer_common(smgr, isTemp, forkNum, blockNum, zeroPage, NULL, &hit);
-}
-
-/*
- * ReadBuffer_relcache -- common logic for ReadBuffer-variants that
- * operate on a Relation.
- */
-static Buffer
-ReadBuffer_relcache(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
- bool zeroPage, BufferAccessStrategy strategy)
+ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
+ ReadBufferMode mode, BufferAccessStrategy strategy)
{
bool hit;
Buffer buf;
@@ -189,12 +152,30 @@ ReadBuffer_relcache(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
*/
pgstat_count_buffer_read(reln);
buf = ReadBuffer_common(reln->rd_smgr, reln->rd_istemp, forkNum, blockNum,
- zeroPage, strategy, &hit);
+ mode, strategy, &hit);
if (hit)
pgstat_count_buffer_hit(reln);
return buf;
}
+
+/*
+ * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
+ * a relcache entry for the relation.
+ */
+Buffer
+ReadBufferWithoutRelcache(RelFileNode rnode, bool isTemp,
+ ForkNumber forkNum, BlockNumber blockNum,
+ ReadBufferMode mode, BufferAccessStrategy strategy)
+{
+ bool hit;
+
+ SMgrRelation smgr = smgropen(rnode);
+ return ReadBuffer_common(smgr, isTemp, forkNum, blockNum, mode, strategy,
+ &hit);
+}
+
+
/*
* ReadBuffer_common -- common logic for all ReadBuffer variants
*
@@ -202,7 +183,7 @@ ReadBuffer_relcache(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
*/
static Buffer
ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
- BlockNumber blockNum, bool zeroPage,
+ BlockNumber blockNum, ReadBufferMode mode,
BufferAccessStrategy strategy, bool *hit)
{
volatile BufferDesc *bufHdr;
@@ -295,8 +276,8 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
- (errmsg("unexpected data beyond EOF in block %u of relation %u/%u/%u",
- blockNum, smgr->smgr_rnode.spcNode, smgr->smgr_rnode.dbNode, smgr->smgr_rnode.relNode),
+ (errmsg("unexpected data beyond EOF in block %u of relation %u/%u/%u/%u",
+ blockNum, smgr->smgr_rnode.spcNode, smgr->smgr_rnode.dbNode, smgr->smgr_rnode.relNode, forkNum),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
@@ -356,7 +337,7 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
* Read in the page, unless the caller intends to overwrite it and
* just wants us to allocate a buffer.
*/
- if (zeroPage)
+ if (mode == RBM_ZERO)
MemSet((char *) bufBlock, 0, BLCKSZ);
else
{
@@ -365,24 +346,25 @@ ReadBuffer_common(SMgrRelation smgr, bool isLocalBuf, ForkNumber forkNum,
/* check for garbage data */
if (!PageHeaderIsValid((PageHeader) bufBlock))
{
- if (zero_damaged_pages)
+ if (mode == RBM_ZERO_ON_ERROR || zero_damaged_pages)
{
ereport(WARNING,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation %u/%u/%u; zeroing out page",
+ errmsg("invalid page header in block %u of relation %u/%u/%u/%u; zeroing out page",
blockNum,
smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.dbNode,
- smgr->smgr_rnode.relNode)));
+ smgr->smgr_rnode.relNode,
+ forkNum)));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation %u/%u/%u",
+ errmsg("invalid page header in block %u of relation %u/%u/%u/%u",
blockNum, smgr->smgr_rnode.spcNode,
smgr->smgr_rnode.dbNode,
- smgr->smgr_rnode.relNode)));
+ smgr->smgr_rnode.relNode, forkNum)));
}
}
}
@@ -1679,10 +1661,10 @@ PrintBufferLeakWarning(Buffer buffer)
/* theoretically we should lock the bufhdr here */
elog(WARNING,
"buffer refcount leak: [%03d] "
- "(rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
+ "(rel=%u/%u/%u, forkNum=%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
buffer,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode,
+ buf->tag.rnode.relNode, buf->tag.forkNum,
buf->tag.blockNum, buf->flags,
buf->refcount, loccount);
}
@@ -1991,11 +1973,11 @@ PrintBufferDescs(void)
{
/* theoretically we should lock the bufhdr here */
elog(LOG,
- "[%02d] (freeNext=%d, rel=%u/%u/%u, "
+ "[%02d] (freeNext=%d, rel=%u/%u/%u, forkNum=%u, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode,
+ buf->tag.rnode.relNode, buf->tag.forkNum,
buf->tag.blockNum, buf->flags,
buf->refcount, PrivateRefCount[i]);
}
@@ -2015,11 +1997,11 @@ PrintPinnedBufs(void)
{
/* theoretically we should lock the bufhdr here */
elog(LOG,
- "[%02d] (freeNext=%d, rel=%u/%u/%u, "
+ "[%02d] (freeNext=%d, rel=%u/%u/%u, forkNum=%u, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode,
+ buf->tag.rnode.relNode, buf->tag.forkNum,
buf->tag.blockNum, buf->flags,
buf->refcount, PrivateRefCount[i]);
}
@@ -2654,11 +2636,11 @@ AbortBufferIO(void)
/* Buffer is pinned, so we can read tag without spinlock */
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
- errmsg("could not write block %u of %u/%u/%u",
+ errmsg("could not write block %u of %u/%u/%u/%u",
buf->tag.blockNum,
buf->tag.rnode.spcNode,
buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode),
+ buf->tag.rnode.relNode, buf->tag.forkNum),
errdetail("Multiple failures --- write error might be permanent.")));
}
}
@@ -2676,9 +2658,10 @@ buffer_write_error_callback(void *arg)
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
- errcontext("writing block %u of relation %u/%u/%u",
+ errcontext("writing block %u of relation %u/%u/%u/%u",
bufHdr->tag.blockNum,
bufHdr->tag.rnode.spcNode,
bufHdr->tag.rnode.dbNode,
- bufHdr->tag.rnode.relNode);
+ bufHdr->tag.rnode.relNode,
+ bufHdr->tag.forkNum);
}
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 17f733fe1f..171fe63af5 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -504,6 +504,7 @@ static Buffer
fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
{
BlockNumber blkno = fsm_logical_to_physical(addr);
+ Buffer buf;
RelationOpenSmgr(rel);
@@ -518,7 +519,18 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
else
return InvalidBuffer;
}
- return ReadBufferWithFork(rel, FSM_FORKNUM, blkno);
+
+ /*
+ * Use ZERO_ON_ERROR mode, and initialize the page if necessary. The FSM
+ * information is not accurate anyway, so it's better to clear corrupt
+ * pages than error out. Since the FSM changes are not WAL-logged, the
+ * so-called torn page problem on crash can lead to pages with corrupt
+ * headers, for example.
+ */
+ buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
+ if (PageIsNew(BufferGetPage(buf)))
+ PageInit(BufferGetPage(buf), BLCKSZ, 0);
+ return buf;
}
/*
@@ -779,23 +791,18 @@ fsm_redo_truncate(xl_fsm_truncate *xlrec)
* replay of the smgr truncation record to remove completely unused
* pages.
*/
- buf = XLogReadBufferWithFork(xlrec->node, FSM_FORKNUM, fsmblk, false);
+ buf = XLogReadBufferExtended(xlrec->node, FSM_FORKNUM, fsmblk,
+ RBM_ZERO_ON_ERROR);
if (BufferIsValid(buf))
{
- fsm_truncate_avail(BufferGetPage(buf), first_removed_slot);
+ Page page = BufferGetPage(buf);
+
+ if (PageIsNew(page))
+ PageInit(page, BLCKSZ, 0);
+ fsm_truncate_avail(page, first_removed_slot);
MarkBufferDirty(buf);
UnlockReleaseBuffer(buf);
}
- else
- {
- /*
- * The page doesn't exist. Because FSM extensions are not WAL-logged,
- * it's normal to have a truncation record for a page that doesn't
- * exist. Tell xlogutils.c not to PANIC at the end of recovery
- * because of the missing page
- */
- XLogTruncateRelation(xlrec->node, FSM_FORKNUM, fsmblk);
- }
}
void
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index 29327f5c34..0164f83e4e 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -12,6 +12,7 @@
#define XLOG_UTILS_H
#include "storage/buf.h"
+#include "storage/bufmgr.h"
#include "storage/relfilenode.h"
#include "storage/block.h"
#include "utils/relcache.h"
@@ -25,8 +26,8 @@ extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
BlockNumber nblocks);
extern Buffer XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init);
-extern Buffer XLogReadBufferWithFork(RelFileNode rnode, ForkNumber forknum,
- BlockNumber blkno, bool init);
+extern Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
+ BlockNumber blkno, ReadBufferMode mode);
extern Relation CreateFakeRelcacheEntry(RelFileNode rnode);
extern void FreeFakeRelcacheEntry(Relation fakerel);
diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h
index a94a36d9a5..c9cb8e8576 100644
--- a/src/include/storage/bufmgr.h
+++ b/src/include/storage/bufmgr.h
@@ -31,6 +31,14 @@ typedef enum BufferAccessStrategyType
BAS_VACUUM /* VACUUM */
} BufferAccessStrategyType;
+/* Possible modes for ReadBufferExtended() */
+typedef enum
+{
+ RBM_NORMAL, /* Normal read */
+ RBM_ZERO, /* Don't read from disk, caller will initialize */
+ RBM_ZERO_ON_ERROR /* Read, but return an all-zeros page on error */
+} ReadBufferMode;
+
/* in globals.c ... this duplicates miscadmin.h */
extern PGDLLIMPORT int NBuffers;
@@ -144,13 +152,12 @@ extern PGDLLIMPORT int32 *LocalRefCount;
* prototypes for functions in bufmgr.c
*/
extern Buffer ReadBuffer(Relation reln, BlockNumber blockNum);
-extern Buffer ReadBufferWithFork(Relation reln, ForkNumber forkNum, BlockNumber blockNum);
-extern Buffer ReadBufferWithStrategy(Relation reln, BlockNumber blockNum,
- BufferAccessStrategy strategy);
-extern Buffer ReadOrZeroBuffer(Relation reln, ForkNumber forkNum,
- BlockNumber blockNum);
+extern Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum,
+ BlockNumber blockNum, ReadBufferMode mode,
+ BufferAccessStrategy strategy);
extern Buffer ReadBufferWithoutRelcache(RelFileNode rnode, bool isTemp,
- ForkNumber forkNum, BlockNumber blockNum, bool zeroPage);
+ ForkNumber forkNum, BlockNumber blockNum,
+ ReadBufferMode mode, BufferAccessStrategy strategy);
extern void ReleaseBuffer(Buffer buffer);
extern void UnlockReleaseBuffer(Buffer buffer);
extern void MarkBufferDirty(Buffer buffer);