author     David Rowley    2024-04-04 03:41:13 +0000
committer  David Rowley    2024-04-04 03:41:13 +0000
commit     44086b097537a8157ee1dd98d6635b3503f2f534 (patch)
tree       4f0ce528bb5a901bb8295730f2848bfe44651cf6 /src/backend/access/heap/heapam_handler.c
parent     85230a247c74b92d9676abdf6693ac9d56c373cf (diff)
Preliminary refactor of heap scanning functions

To allow the use of the read stream API added in b5a9b18cd for sequential
scans on heap tables, here we make some adjustments to make that change less
invasive and perhaps make the code easier to follow in the process.

Here, heapgetpage() gets broken into two functions:

1) The part which reads the block has now been moved into a function named
   heapfetchbuf().

2) The part which performed pruning and populated the scan's rs_vistuples[]
   array is now moved into a new function named heap_prepare_pagescan().

The functionality provided by heap_prepare_pagescan() was only ever required
by SO_ALLOW_PAGEMODE scans, so the branching that was previously done in
heapgetpage() is no longer needed, as we simply don't call
heap_prepare_pagescan() from heapgettup() in the refactored code.

Author: Melanie Plageman
Discussion: https://postgr.es/m/CAAKRu_YtXJiYKQvb5JsA2SkwrsizYLugs4sSOZh3EAjKUg=gEQ@mail.gmail.com
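In terms of call shape, the split means a scan now fetches the block first and
only then, for SO_ALLOW_PAGEMODE scans, runs the pruning/visibility pass.
Roughly (a condensed sketch based on the sample-scan hunk in this file, not a
verbatim excerpt; per the message above, heapgettup() and heapgettup_pagemode()
follow the same split, with the block read done by heapfetchbuf()):

	/* read the chosen block using the scan's buffer access strategy */
	hscan->rs_cbuf = ReadBufferExtended(hscan->rs_base.rs_rd, MAIN_FORKNUM,
										blockno, RBM_NORMAL, hscan->rs_strategy);

	/* only page-at-a-time scans need pruning and visible-offset collection */
	if (hscan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
		heap_prepare_pagescan(scan);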
Diffstat (limited to 'src/backend/access/heap/heapam_handler.c')
-rw-r--r--  src/backend/access/heap/heapam_handler.c | 40
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 0952d4a98eb..db99d2fcfa7 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -2352,11 +2352,15 @@ heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate)
 	if (hscan->rs_nblocks == 0)
 		return false;
 
-	if (tsm->NextSampleBlock)
+	/* release previous scan buffer, if any */
+	if (BufferIsValid(hscan->rs_cbuf))
 	{
-		blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
-		hscan->rs_cblock = blockno;
+		ReleaseBuffer(hscan->rs_cbuf);
+		hscan->rs_cbuf = InvalidBuffer;
 	}
+
+	if (tsm->NextSampleBlock)
+		blockno = tsm->NextSampleBlock(scanstate, hscan->rs_nblocks);
 	else
 	{
 		/* scanning table sequentially */
@@ -2398,20 +2402,32 @@ heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate)
 		}
 	}
 
+	hscan->rs_cblock = blockno;
+
 	if (!BlockNumberIsValid(blockno))
 	{
-		if (BufferIsValid(hscan->rs_cbuf))
-			ReleaseBuffer(hscan->rs_cbuf);
-		hscan->rs_cbuf = InvalidBuffer;
-		hscan->rs_cblock = InvalidBlockNumber;
 		hscan->rs_inited = false;
-
 		return false;
 	}
 
-	heapgetpage(scan, blockno);
-	hscan->rs_inited = true;
+	Assert(hscan->rs_cblock < hscan->rs_nblocks);
+
+	/*
+	 * Be sure to check for interrupts at least once per page.  Checks at
+	 * higher code levels won't be able to stop a sample scan that encounters
+	 * many pages' worth of consecutive dead tuples.
+	 */
+	CHECK_FOR_INTERRUPTS();
+
+	/* Read page using selected strategy */
+	hscan->rs_cbuf = ReadBufferExtended(hscan->rs_base.rs_rd, MAIN_FORKNUM,
+										blockno, RBM_NORMAL, hscan->rs_strategy);
+	/* in pagemode, prune the page and determine visible tuple offsets */
+	if (hscan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
+		heap_prepare_pagescan(scan);
+
+	hscan->rs_inited = true;
 
 	return true;
 }
@@ -2572,8 +2588,8 @@ SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer,
 	if (scan->rs_flags & SO_ALLOW_PAGEMODE)
 	{
 		/*
-		 * In pageatatime mode, heapgetpage() already did visibility checks,
-		 * so just look at the info it left in rs_vistuples[].
+		 * In pageatatime mode, heap_prepare_pagescan() already did visibility
+		 * checks, so just look at the info it left in rs_vistuples[].
 		 *
 		 * We use a binary search over the known-sorted array.  Note: we could
 		 * save some effort if we insisted that NextSampleTuple select tuples