author     Alexander Korotkov  2024-01-07 07:03:55 +0000
committer  Alexander Korotkov  2024-01-07 07:10:19 +0000
commit     2a67b5a60ee68892bb028587ddc6de7650822480 (patch)
tree       618c227dd8bd441867117d149acba26a4cdcf210
parent     5ef34a8fc3899a306fbc907a762fee0ba3782462 (diff)
Fix oversized memory allocation in Parallel Hash Join
During the calculation of the maximum number of buckets, take into
account that we later round that number up to the next power of 2.

Reported-by: Karen Talarico
Bug: #16925
Discussion: https://postgr.es/m/16925-ec96d83529d0d629%40postgresql.org
Author: Thomas Munro, Andrei Lepikhov, Alexander Korotkov
Reviewed-by: Alena Rybakina
Backpatch-through: 12
-rw-r--r--  src/backend/executor/nodeHash.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 42099b6d751..6e5297a159b 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1155,6 +1155,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
                     double      dtuples;
                     double      dbuckets;
                     int         new_nbuckets;
+                    uint32      max_buckets;
 
                     /*
                      * We probably also need a smaller bucket array. How many
@@ -1167,9 +1168,16 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
                      * array.
                      */
                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
+                    /*
+                     * We need to calculate the maximum number of buckets to
+                     * stay within the MaxAllocSize boundary. Round the
+                     * maximum number to the previous power of 2 given that
+                     * later we round the number to the next power of 2.
+                     */
+                    max_buckets = pg_prevpower2_32((uint32)
+                                                   (MaxAllocSize / sizeof(dsa_pointer_atomic)));
                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
-                    dbuckets = Min(dbuckets,
-                                   MaxAllocSize / sizeof(dsa_pointer_atomic));
+                    dbuckets = Min(dbuckets, max_buckets);
                     new_nbuckets = (int) dbuckets;
                     new_nbuckets = Max(new_nbuckets, 1024);
                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
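
Why the pre-rounding clamp matters: before this patch, dbuckets could be
clamped to MaxAllocSize / sizeof(dsa_pointer_atomic), which is not itself a
power of 2, so the later pg_nextpower2_32() call could round the bucket count
up past the allocation limit. The standalone sketch below reproduces the
arithmetic; it is not PostgreSQL source. MaxAllocSize matches the value in
memutils.h, next_pow2/prev_pow2 are simplified loop-based stand-ins for
pg_nextpower2_32/pg_prevpower2_32, and an 8-byte dsa_pointer_atomic is
assumed, as on a typical 64-bit build with atomics.

    /* Sketch of the overflow fixed above; assumptions noted in comments. */
    #include <stdint.h>
    #include <stdio.h>

    #define MaxAllocSize ((size_t) 0x3fffffff)  /* 1 GiB - 1, as in memutils.h */

    /* Smallest power of 2 >= num; stand-in for pg_nextpower2_32. */
    static uint32_t
    next_pow2(uint32_t num)
    {
        uint32_t result = 1;

        while (result < num)
            result <<= 1;
        return result;
    }

    /* Largest power of 2 <= num; stand-in for pg_prevpower2_32. */
    static uint32_t
    prev_pow2(uint32_t num)
    {
        uint32_t next = next_pow2(num);

        return (next == num) ? next : next >> 1;
    }

    int
    main(void)
    {
        size_t   ptrsize = 8;   /* assumed sizeof(dsa_pointer_atomic) */
        uint32_t cap = (uint32_t) (MaxAllocSize / ptrsize); /* 134217727 */

        /* Old code: clamp to cap, then round up -> 2^27 buckets, 1 GiB. */
        uint32_t old_nbuckets = next_pow2(cap);

        printf("old: %u buckets, %zu bytes (limit %zu)\n",
               old_nbuckets, (size_t) old_nbuckets * ptrsize,
               (size_t) MaxAllocSize);

        /* Fixed code: clamp to the previous power of 2 first. */
        uint32_t max_buckets = prev_pow2(cap);  /* 2^26 */
        uint32_t new_nbuckets = next_pow2(max_buckets);

        printf("new: %u buckets, %zu bytes\n",
               new_nbuckets, (size_t) new_nbuckets * ptrsize);
        return 0;
    }

On such a build the old clamp yields 134217727, which pg_nextpower2_32()
rounds up to 2^27 buckets, a 1 GiB request just over MaxAllocSize. Clamping
to the previous power of 2 (2^26) first means the later round-up is a no-op,
so the bucket array allocation stays within bounds, which is exactly what
the patch does.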