author      David Rowley  2021-03-29 21:17:09 +0000
committer   David Rowley  2021-03-29 21:17:09 +0000
commit      af527705edc3fd0b335264d17e0521c05edc5cca (patch)
tree        11f0d9972f7f2602394cc9b0ead1194a9515de2f
parent      6d7a6feac48b1970c4cd127ee65d4c487acbb5e9 (diff)
Adjust design of per-worker parallel seqscan data struct
The design of the data structures which allow storage of the per-worker memory during parallel seq scans was not ideal. The work done in 56788d215 required an additional data structure to allow workers to remember the range of pages that had been allocated to them for processing during a parallel seqscan. That commit added a void pointer field to TableScanDescData to allow heapam to store the per-worker allocation information. However, putting the field there made very little sense given that we have AM-specific structs for that, e.g. HeapScanDescData. Here we remove the void pointer field from TableScanDescData and add a dedicated field for this purpose to HeapScanDescData.

Previously we also allocated memory for this parallel per-worker data for all scans, regardless of whether the scan was parallel or not. This was just a wasted allocation for non-parallel scans, so here we make the allocation conditional on the scan being parallel.

Also, add the previously missing pfree() to free the per-worker data in heap_endscan().

Reported-by: Andres Freund
Reviewed-by: Andres Freund
Discussion: https://postgr.es/m/[email protected]
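The shape of the fix is a common one: allocate optional state only when the mode that needs it is active, and free it symmetrically in the matching end routine. A minimal standalone C sketch of that pattern follows; WorkerState, ScanDesc, begin_scan, and end_scan are simplified stand-ins for illustration, not the actual PostgreSQL definitions.

    #include <stdlib.h>

    /* Simplified stand-in for the per-worker page-range state. */
    typedef struct WorkerState
    {
        int     cur_page;
        int     last_page;
    } WorkerState;

    /* Simplified stand-in for an AM-specific scan descriptor. */
    typedef struct ScanDesc
    {
        void        *parallel;      /* non-NULL only for parallel scans */
        WorkerState *workerdata;    /* per-worker state, NULL otherwise */
    } ScanDesc;

    static ScanDesc *
    begin_scan(void *parallel)
    {
        ScanDesc   *scan = malloc(sizeof(ScanDesc));

        scan->parallel = parallel;

        /* Allocate per-worker state only when the scan is parallel. */
        if (parallel != NULL)
            scan->workerdata = malloc(sizeof(WorkerState));
        else
            scan->workerdata = NULL;

        return scan;
    }

    static void
    end_scan(ScanDesc *scan)
    {
        /* Free the per-worker state symmetrically with begin_scan(). */
        if (scan->workerdata != NULL)
            free(scan->workerdata);
        free(scan);
    }

Moving the field into the AM-specific descriptor also means only table AMs that actually use block-oriented parallel scans carry it, rather than every TableScanDescData paying for an opaque pointer.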
-rw-r--r--  src/backend/access/heap/heapam.c  22
-rw-r--r--  src/include/access/heapam.h        6
-rw-r--r--  src/include/access/relscan.h       1
3 files changed, 22 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 90711b2fcd..595310ba1b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -540,7 +540,7 @@ heapgettup(HeapScanDesc scan,
ParallelBlockTableScanDesc pbscan =
(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
ParallelBlockTableScanWorker pbscanwork =
- (ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+ scan->rs_parallelworkerdata;
table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
pbscanwork, pbscan);
@@ -748,7 +748,7 @@ heapgettup(HeapScanDesc scan,
ParallelBlockTableScanDesc pbscan =
(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
ParallelBlockTableScanWorker pbscanwork =
- (ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+ scan->rs_parallelworkerdata;
page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
pbscanwork, pbscan);
@@ -864,7 +864,7 @@ heapgettup_pagemode(HeapScanDesc scan,
ParallelBlockTableScanDesc pbscan =
(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
ParallelBlockTableScanWorker pbscanwork =
- (ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+ scan->rs_parallelworkerdata;
table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
pbscanwork, pbscan);
@@ -1057,7 +1057,7 @@ heapgettup_pagemode(HeapScanDesc scan,
ParallelBlockTableScanDesc pbscan =
(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
ParallelBlockTableScanWorker pbscanwork =
- (ParallelBlockTableScanWorker) scan->rs_base.rs_private;
+ scan->rs_parallelworkerdata;
page = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
pbscanwork, pbscan);
@@ -1194,8 +1194,6 @@ heap_beginscan(Relation relation, Snapshot snapshot,
scan->rs_base.rs_nkeys = nkeys;
scan->rs_base.rs_flags = flags;
scan->rs_base.rs_parallel = parallel_scan;
- scan->rs_base.rs_private =
- palloc(sizeof(ParallelBlockTableScanWorkerData));
scan->rs_strategy = NULL; /* set in initscan */
/*
@@ -1232,6 +1230,15 @@ heap_beginscan(Relation relation, Snapshot snapshot,
scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
/*
+ * Allocate memory to keep track of page allocation for parallel workers
+ * when doing a parallel scan.
+ */
+ if (parallel_scan != NULL)
+ scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
+ else
+ scan->rs_parallelworkerdata = NULL;
+
+ /*
* we do this here instead of in initscan() because heap_rescan also calls
* initscan() and we don't want to allocate memory again
*/
@@ -1306,6 +1313,9 @@ heap_endscan(TableScanDesc sscan)
if (scan->rs_strategy != NULL)
FreeAccessStrategy(scan->rs_strategy);
+ if (scan->rs_parallelworkerdata != NULL)
+ pfree(scan->rs_parallelworkerdata);
+
if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
UnregisterSnapshot(scan->rs_base.rs_snapshot);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index bc0936bc2d..d803f27787 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -65,6 +65,12 @@ typedef struct HeapScanDescData
HeapTupleData rs_ctup; /* current tuple in scan, if any */
+ /*
+ * For parallel scans to store page allocation data. NULL when not
+ * performing a parallel scan.
+ */
+ ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
+
/* these fields only used in page-at-a-time mode and for bitmap scans */
int rs_cindex; /* current tuple's index in vistuples */
int rs_ntuples; /* number of visible tuples on page */
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 0ef6d8edf7..17a161c69a 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -46,7 +46,6 @@ typedef struct TableScanDescData
*/
uint32 rs_flags;
- void *rs_private; /* per-worker private memory for AM to use */
struct ParallelTableScanDescData *rs_parallel; /* parallel scan
* information */
} TableScanDescData;