author	Andres Freund	2019-02-09 08:35:57 +0000
committer	Andres Freund	2019-02-09 09:05:49 +0000
commit	356687bd825e5ca7230d43c1bffe7a59ad2e77bd
tree	be80a6148a4164edae691341e586a604b39775d9
parent	317ffdfeaac12e434b2befa24993dc1b52a140fd
Reset, not recreate, execGrouping.c style hashtables.
This uses the facility added in the preceding commit to fix performance issues caused by rebuilding the hashtable (with its comparator expression being the most expensive bit) after every reset. That's especially important when the comparator is JIT compiled.

Bug: #15592 #15486
Reported-By: Jakub Janeček, Dmitry Marakasov
Author: Andres Freund
Discussion: https://postgr.es/m/[email protected]
            https://postgr.es/m/[email protected]
Backpatch: 11, where I broke this in bf6c614a2f2c5
-rw-r--r--	src/backend/executor/nodeAgg.c	34
-rw-r--r--	src/backend/executor/nodeRecursiveunion.c	27
-rw-r--r--	src/backend/executor/nodeSetOp.c	25
-rw-r--r--	src/backend/executor/nodeSubplan.c	57
4 files changed, 79 insertions(+), 64 deletions(-)
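For context, the shape of the change is "build the hashtable once, then reset it on rescan" instead of "destroy and recreate it on every rescan", so the expensive one-time setup (the comparator expression, possibly JIT compiled) is paid only once, while the per-group contents still live in a context that is cheap to clear. Below is a minimal standalone sketch of that pattern; it is not PostgreSQL code, and names such as SimpleHashTable, simple_hash_create, and simple_hash_reset are invented for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical hash table whose creation includes an expensive one-time
 * setup step, standing in for building (and possibly JIT compiling) the
 * comparator expression.  None of these names are PostgreSQL APIs.
 */
typedef struct SimpleHashTable
{
	int		   *buckets;		/* per-group state, cheap to clear */
	int			nbuckets;
	void	   *comparator;		/* expensive to (re)build */
} SimpleHashTable;

static void *
build_comparator(void)
{
	/* stands in for the costly comparator setup */
	printf("building comparator (expensive)\n");
	return malloc(1);
}

static SimpleHashTable *
simple_hash_create(int nbuckets)
{
	SimpleHashTable *ht = malloc(sizeof(SimpleHashTable));

	ht->nbuckets = nbuckets;
	ht->buckets = calloc(nbuckets, sizeof(int));
	ht->comparator = build_comparator();
	return ht;
}

static void
simple_hash_reset(SimpleHashTable *ht)
{
	/* drop all entries but keep the already-built comparator */
	memset(ht->buckets, 0, sizeof(int) * ht->nbuckets);
}

int
main(void)
{
	SimpleHashTable *ht = NULL;
	int			rescan;

	/* same shape as the patched build_hash_table() / buildSubPlanHash() */
	for (rescan = 0; rescan < 3; rescan++)
	{
		if (ht)
			simple_hash_reset(ht);		/* cheap: comparator survives */
		else
			ht = simple_hash_create(64);	/* expensive: done once */
	}

	free(ht->comparator);
	free(ht->buckets);
	free(ht);
	return 0;
}

Across the three simulated rescans the expensive setup runs only once; that is the saving the commit restores, which matters most when the comparator is JIT compiled.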
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 263a0f8127..bae7989a42 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1246,7 +1246,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
}
/*
- * Initialize the hash table(s) to empty.
+ * (Re-)initialize the hash table(s) to empty.
*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
@@ -1257,9 +1257,9 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
* We have a separate hashtable and associated perhash data structure for each
* grouping set for which we're doing hashing.
*
- * The hash tables always live in the hashcontext's per-tuple memory context
- * (there is only one of these for all tables together, since they are all
- * reset at the same time).
+ * The contents of the hash tables always live in the hashcontext's per-tuple
+ * memory context (there is only one of these for all tables together, since
+ * they are all reset at the same time).
*/
static void
build_hash_table(AggState *aggstate)
@@ -1278,17 +1278,21 @@ build_hash_table(AggState *aggstate)
Assert(perhash->aggnode->numGroups > 0);
- perhash->hashtable = BuildTupleHashTable(&aggstate->ss.ps,
- perhash->hashslot->tts_tupleDescriptor,
- perhash->numCols,
- perhash->hashGrpColIdxHash,
- perhash->eqfuncoids,
- perhash->hashfunctions,
- perhash->aggnode->numGroups,
- additionalsize,
- aggstate->hashcontext->ecxt_per_tuple_memory,
- tmpmem,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+ if (perhash->hashtable)
+ ResetTupleHashTable(perhash->hashtable);
+ else
+ perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
+ perhash->hashslot->tts_tupleDescriptor,
+ perhash->numCols,
+ perhash->hashGrpColIdxHash,
+ perhash->eqfuncoids,
+ perhash->hashfunctions,
+ perhash->aggnode->numGroups,
+ additionalsize,
+ aggstate->ss.ps.state->es_query_cxt,
+ aggstate->hashcontext->ecxt_per_tuple_memory,
+ tmpmem,
+ DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
}
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index d2488ad988..9b74ed3208 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -37,17 +37,18 @@ build_hash_table(RecursiveUnionState *rustate)
Assert(node->numCols > 0);
Assert(node->numGroups > 0);
- rustate->hashtable = BuildTupleHashTable(&rustate->ps,
- desc,
- node->numCols,
- node->dupColIdx,
- rustate->eqfuncoids,
- rustate->hashfunctions,
- node->numGroups,
- 0,
- rustate->tableContext,
- rustate->tempContext,
- false);
+ rustate->hashtable = BuildTupleHashTableExt(&rustate->ps,
+ desc,
+ node->numCols,
+ node->dupColIdx,
+ rustate->eqfuncoids,
+ rustate->hashfunctions,
+ node->numGroups,
+ 0,
+ rustate->ps.state->es_query_cxt,
+ rustate->tableContext,
+ rustate->tempContext,
+ false);
}
@@ -317,9 +318,9 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node)
if (node->tableContext)
MemoryContextResetAndDeleteChildren(node->tableContext);
- /* And rebuild empty hashtable if needed */
+ /* Empty hashtable if needed */
if (plan->numCols > 0)
- build_hash_table(node);
+ ResetTupleHashTable(node->hashtable);
/* reset processing state */
node->recursing = false;
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 5d8c8b8b02..26aeaee083 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -126,17 +126,18 @@ build_hash_table(SetOpState *setopstate)
Assert(node->strategy == SETOP_HASHED);
Assert(node->numGroups > 0);
- setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
- desc,
- node->numCols,
- node->dupColIdx,
- setopstate->eqfuncoids,
- setopstate->hashfunctions,
- node->numGroups,
- 0,
- setopstate->tableContext,
- econtext->ecxt_per_tuple_memory,
- false);
+ setopstate->hashtable = BuildTupleHashTableExt(&setopstate->ps,
+ desc,
+ node->numCols,
+ node->dupColIdx,
+ setopstate->eqfuncoids,
+ setopstate->hashfunctions,
+ node->numGroups,
+ 0,
+ setopstate->ps.state->es_query_cxt,
+ setopstate->tableContext,
+ econtext->ecxt_per_tuple_memory,
+ false);
}
/*
@@ -635,7 +636,7 @@ ExecReScanSetOp(SetOpState *node)
/* And rebuild empty hashtable if needed */
if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
{
- build_hash_table(node);
+ ResetTupleHashTable(node->hashtable);
node->table_filled = false;
}
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 152696035c..d7d076758c 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -481,8 +481,8 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
Assert(subplan->subLinkType == ANY_SUBLINK);
/*
- * If we already had any hash tables, destroy 'em; then create empty hash
- * table(s).
+ * If we already had any hash tables, reset 'em; otherwise create empty
+ * hash table(s).
*
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
* NULL) results of the IN operation, then we have to store subplan output
@@ -505,17 +505,21 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
if (nbuckets < 1)
nbuckets = 1;
- node->hashtable = BuildTupleHashTable(node->parent,
- node->descRight,
- ncols,
- node->keyColIdx,
- node->tab_eq_funcoids,
- node->tab_hash_funcs,
- nbuckets,
- 0,
- node->hashtablecxt,
- node->hashtempcxt,
- false);
+ if (node->hashtable)
+ ResetTupleHashTable(node->hashtable);
+ else
+ node->hashtable = BuildTupleHashTableExt(node->parent,
+ node->descRight,
+ ncols,
+ node->keyColIdx,
+ node->tab_eq_funcoids,
+ node->tab_hash_funcs,
+ nbuckets,
+ 0,
+ node->planstate->state->es_query_cxt,
+ node->hashtablecxt,
+ node->hashtempcxt,
+ false);
if (!subplan->unknownEqFalse)
{
@@ -527,17 +531,22 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
if (nbuckets < 1)
nbuckets = 1;
}
- node->hashnulls = BuildTupleHashTable(node->parent,
- node->descRight,
- ncols,
- node->keyColIdx,
- node->tab_eq_funcoids,
- node->tab_hash_funcs,
- nbuckets,
- 0,
- node->hashtablecxt,
- node->hashtempcxt,
- false);
+
+ if (node->hashnulls)
+ ResetTupleHashTable(node->hashnulls);
+ else
+ node->hashnulls = BuildTupleHashTableExt(node->parent,
+ node->descRight,
+ ncols,
+ node->keyColIdx,
+ node->tab_eq_funcoids,
+ node->tab_hash_funcs,
+ nbuckets,
+ 0,
+ node->planstate->state->es_query_cxt,
+ node->hashtablecxt,
+ node->hashtempcxt,
+ false);
}
/*