path: root/src/backend/executor
author     Pavan Deolasee  2017-06-15 07:41:07 +0000
committer  Pavan Deolasee  2017-06-15 07:41:07 +0000
commit     0ffa504a17f58f2bc045b0039f40e4917ee50d20 (patch)
tree       c629c449bcfcc45de1d03b2586e89932d546e8ba /src/backend/executor
parent     36ccc8d64e61fe9d77bb7ac62267945f7c146baa (diff)
parent     e800656d9a9b40b2f55afabe76354ab6d93353b3 (diff)
Merge 'remotes/PGSQL/master' into xl10devel
Merge upstream master branch up to e800656d9a9b40b2f55afabe76354ab6d93353b3. Code compiles and regression works ok (with lots and lots of failures though).
Diffstat (limited to 'src/backend/executor')
 src/backend/executor/execMain.c            | 138
 src/backend/executor/execParallel.c        |  16
 src/backend/executor/functions.c           |   1
 src/backend/executor/nodeBitmapHeapscan.c  |   2
 src/backend/executor/nodeCustom.c          |   2
 src/backend/executor/nodeForeignscan.c     |   2
 src/backend/executor/nodeIndexonlyscan.c   |   2
 src/backend/executor/nodeIndexscan.c       |   2
 src/backend/executor/nodeModifyTable.c     |  42
 src/backend/executor/nodeSeqscan.c         |   2
 10 files changed, 118 insertions(+), 91 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7232b0911f..34cca85563 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -112,6 +112,8 @@ static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
Plan *planTree);
+static void ExecPartitionCheck(ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot, EState *estate);
/*
* Note that GetUpdatedColumns() also exists in commands/trigger.c. There does
@@ -1404,34 +1406,19 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_projectReturning = NULL;
/*
- * If partition_root has been specified, that means we are building the
- * ResultRelInfo for one of its leaf partitions. In that case, we need
- * *not* initialize the leaf partition's constraint, but rather the
- * partition_root's (if any). We must do that explicitly like this,
- * because implicit partition constraints are not inherited like user-
- * defined constraints and would fail to be enforced by ExecConstraints()
- * after a tuple is routed to a leaf partition.
+ * Partition constraint, which also includes the partition constraint of
+ * all the ancestors that are partitions. Note that it will be checked
+ * even in the case of tuple-routing where this table is the target leaf
+ * partition, if there are any BR triggers defined on the table. Although
+ * tuple-routing implicitly preserves the partition constraint of the
+ * target partition for a given row, the BR triggers may change the row
+ * such that the constraint is no longer satisfied, so we must check it
+ * explicitly and fail if it is violated.
+ *
+ * If this is a partitioned table, the partition constraint (if any) of a
+ * given row will be checked just before performing tuple-routing.
*/
- if (partition_root)
- {
- /*
- * Root table itself may or may not be a partition; partition_check
- * would be NIL in the latter case.
- */
- partition_check = RelationGetPartitionQual(partition_root);
-
- /*
- * This is not our own partition constraint, but rather an ancestor's.
- * So any Vars in it bear the ancestor's attribute numbers. We must
- * switch them to our own. (dummy varno = 1)
- */
- if (partition_check != NIL)
- partition_check = map_partition_varattnos(partition_check, 1,
- resultRelationDesc,
- partition_root);
- }
- else
- partition_check = RelationGetPartitionQual(resultRelationDesc);
+ partition_check = RelationGetPartitionQual(resultRelationDesc);
resultRelInfo->ri_PartitionCheck = partition_check;
resultRelInfo->ri_PartitionRoot = partition_root;
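The hunk above works because RelationGetPartitionQual() on the leaf already includes the partition constraints of all partitioned ancestors (as the new comment notes), so the old map_partition_varattnos() translation against partition_root is no longer needed. A minimal sketch of the resulting initialization, assuming execMain.c's usual includes and a hypothetical helper name:

	static void
	init_partition_check_sketch(ResultRelInfo *resultRelInfo,
								Relation resultRelationDesc,
								Relation partition_root)
	{
		/*
		 * One call suffices whether or not this relation is a routed leaf:
		 * the returned qual already covers every partitioned ancestor.
		 */
		List	   *partition_check = RelationGetPartitionQual(resultRelationDesc);

		resultRelInfo->ri_PartitionCheck = partition_check;
		resultRelInfo->ri_PartitionRoot = partition_root;	/* NULL unless routed */
	}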
@@ -1900,13 +1887,16 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
/*
* ExecPartitionCheck --- check that tuple meets the partition constraint.
- *
- * Note: This is called *iff* resultRelInfo is the main target table.
*/
-static bool
+static void
ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
EState *estate)
{
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ Bitmapset *modifiedCols;
+ Bitmapset *insertedCols;
+ Bitmapset *updatedCols;
ExprContext *econtext;
/*
@@ -1934,7 +1924,44 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
* As in case of the catalogued constraints, we treat a NULL result as
* success here, not a failure.
*/
- return ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
+ if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext))
+ {
+ char *val_desc;
+ Relation orig_rel = rel;
+
+ /* See the comment above. */
+ if (resultRelInfo->ri_PartitionRoot)
+ {
+ HeapTuple tuple = ExecFetchSlotTuple(slot);
+ TupleDesc old_tupdesc = RelationGetDescr(rel);
+ TupleConversionMap *map;
+
+ rel = resultRelInfo->ri_PartitionRoot;
+ tupdesc = RelationGetDescr(rel);
+ /* a reverse map */
+ map = convert_tuples_by_name(old_tupdesc, tupdesc,
+ gettext_noop("could not convert row type"));
+ if (map != NULL)
+ {
+ tuple = do_convert_tuple(tuple, map);
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ }
+ }
+
+ insertedCols = GetInsertedColumns(resultRelInfo, estate);
+ updatedCols = GetUpdatedColumns(resultRelInfo, estate);
+ modifiedCols = bms_union(insertedCols, updatedCols);
+ val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
+ slot,
+ tupdesc,
+ modifiedCols,
+ 64);
+ ereport(ERROR,
+ (errcode(ERRCODE_CHECK_VIOLATION),
+ errmsg("new row for relation \"%s\" violates partition constraint",
+ RelationGetRelationName(orig_rel)),
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
+ }
}
/*
@@ -2062,47 +2089,11 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
}
}
- if (resultRelInfo->ri_PartitionCheck &&
- !ExecPartitionCheck(resultRelInfo, slot, estate))
- {
- char *val_desc;
- Relation orig_rel = rel;
-
- /* See the comment above. */
- if (resultRelInfo->ri_PartitionRoot)
- {
- HeapTuple tuple = ExecFetchSlotTuple(slot);
- TupleDesc old_tupdesc = RelationGetDescr(rel);
- TupleConversionMap *map;
-
- rel = resultRelInfo->ri_PartitionRoot;
- tupdesc = RelationGetDescr(rel);
- /* a reverse map */
- map = convert_tuples_by_name(old_tupdesc, tupdesc,
- gettext_noop("could not convert row type"));
- if (map != NULL)
- {
- tuple = do_convert_tuple(tuple, map);
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- }
- }
-
- insertedCols = GetInsertedColumns(resultRelInfo, estate);
- updatedCols = GetUpdatedColumns(resultRelInfo, estate);
- modifiedCols = bms_union(insertedCols, updatedCols);
- val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
- slot,
- tupdesc,
- modifiedCols,
- 64);
- ereport(ERROR,
- (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("new row for relation \"%s\" violates partition constraint",
- RelationGetRelationName(orig_rel)),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0));
- }
+ if (resultRelInfo->ri_PartitionCheck)
+ ExecPartitionCheck(resultRelInfo, slot, estate);
}
+
/*
* ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
* of the specified kind.
@@ -3387,6 +3378,13 @@ ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd,
PartitionDispatchData *failed_at;
TupleTableSlot *failed_slot;
+ /*
+ * First check the root table's partition constraint, if any. No point in
+ * routing the tuple if it doesn't belong in the root table itself.
+ */
+ if (resultRelInfo->ri_PartitionCheck)
+ ExecPartitionCheck(resultRelInfo, slot, estate);
+
result = get_partition_for_tuple(pd, slot, estate,
&failed_at, &failed_slot);
if (result < 0)
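Taken together, the execMain.c hunks move the error reporting into ExecPartitionCheck() itself: it now returns void and ereports on violation, which is why both call sites collapse to a guarded call. A rough consolidation of the new flow, mirroring the diff rather than adding behaviour:

	/* In ExecConstraints(), after NOT NULL and CHECK constraints: */
	if (resultRelInfo->ri_PartitionCheck)
		ExecPartitionCheck(resultRelInfo, slot, estate);	/* ereports on failure */

	/* In ExecFindPartition(), before descending the partition tree: */
	if (resultRelInfo->ri_PartitionCheck)
		ExecPartitionCheck(resultRelInfo, slot, estate);	/* root's own constraint */

	result = get_partition_for_tuple(pd, slot, estate, &failed_at, &failed_slot);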
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 0610180016..1c02fa140b 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -341,7 +341,7 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
pcxt->nworkers));
else
- tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
+ tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE, false);
/* Create the queues, and become the receiver for each. */
for (i = 0; i < pcxt->nworkers; ++i)
@@ -684,7 +684,7 @@ ExecParallelGetReceiver(dsm_segment *seg, shm_toc *toc)
char *mqspace;
shm_mq *mq;
- mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE);
+ mqspace = shm_toc_lookup(toc, PARALLEL_KEY_TUPLE_QUEUE, false);
mqspace += ParallelWorkerNumber * PARALLEL_TUPLE_QUEUE_SIZE;
mq = (shm_mq *) mqspace;
shm_mq_set_sender(mq, MyProc);
@@ -705,14 +705,14 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
char *queryString;
/* Get the query string from shared memory */
- queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT);
+ queryString = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false);
/* Reconstruct leader-supplied PlannedStmt. */
- pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT);
+ pstmtspace = shm_toc_lookup(toc, PARALLEL_KEY_PLANNEDSTMT, false);
pstmt = (PlannedStmt *) stringToNode(pstmtspace);
/* Reconstruct ParamListInfo. */
- paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS);
+ paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS, false);
paramLI = RestoreParamList(&paramspace);
/*
@@ -843,7 +843,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
/* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. */
receiver = ExecParallelGetReceiver(seg, toc);
- instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION);
+ instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
if (instrumentation != NULL)
instrument_options = instrumentation->instrument_options;
queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options);
@@ -858,7 +858,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
InstrStartParallelQuery();
/* Attach to the dynamic shared memory area. */
- area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA);
+ area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);
area = dsa_attach_in_place(area_space, seg);
/* Start up the executor */
@@ -875,7 +875,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
ExecutorFinish(queryDesc);
/* Report buffer usage during parallel execution. */
- buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE);
+ buffer_usage = shm_toc_lookup(toc, PARALLEL_KEY_BUFFER_USAGE, false);
InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]);
/* Report instrumentation data if any instrumentation options are set. */
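All of the execParallel.c changes are mechanical: shm_toc_lookup() has gained a third bool argument that decides whether a missing table-of-contents entry is an error or simply returns NULL. Only the instrumentation entry is optional in this file, so it alone passes true. A short sketch of the assumed contract, written as it would appear inside execParallel.c where the PARALLEL_KEY_* constants are defined:

	/* mandatory entry: the lookup raises an ERROR if the key is absent */
	area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false);

	/* optional entry: NULL is returned instead, and the caller checks it */
	instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true);
	if (instrumentation != NULL)
		instrument_options = instrumentation->instrument_options;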
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index f1a71e26c8..bb5c609e54 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -392,6 +392,7 @@ sql_fn_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var)
param = ParseFuncOrColumn(pstate,
list_make1(subfield),
list_make1(param),
+ pstate->p_last_srf,
NULL,
cref->location);
}
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index c453362230..77f65db0ca 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -1005,7 +1005,7 @@ ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, shm_toc *toc)
ParallelBitmapHeapState *pstate;
Snapshot snapshot;
- pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->pstate = pstate;
snapshot = RestoreSnapshot(pstate->phs_snapshot_data);
diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c
index 5d309828ef..69e27047f1 100644
--- a/src/backend/executor/nodeCustom.c
+++ b/src/backend/executor/nodeCustom.c
@@ -194,7 +194,7 @@ ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc)
int plan_node_id = node->ss.ps.plan->plan_node_id;
void *coordinate;
- coordinate = shm_toc_lookup(toc, plan_node_id);
+ coordinate = shm_toc_lookup(toc, plan_node_id, false);
methods->InitializeWorkerCustomScan(node, toc, coordinate);
}
}
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index 707db92178..2bb28a70ff 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -352,7 +352,7 @@ ExecForeignScanInitializeWorker(ForeignScanState *node, shm_toc *toc)
int plan_node_id = node->ss.ps.plan->plan_node_id;
void *coordinate;
- coordinate = shm_toc_lookup(toc, plan_node_id);
+ coordinate = shm_toc_lookup(toc, plan_node_id, false);
fdwroutine->InitializeWorkerForeignScan(node, toc, coordinate);
}
}
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 5550f6c0a4..fb3d3bb121 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -676,7 +676,7 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, shm_toc *toc)
{
ParallelIndexScanDesc piscan;
- piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->ioss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->ioss_RelationDesc,
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 5afd02e09d..0fb3fb5e7e 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -1714,7 +1714,7 @@ ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc)
{
ParallelIndexScanDesc piscan;
- piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->iss_ScanDesc =
index_beginscan_parallel(node->ss.ss_currentRelation,
node->iss_RelationDesc,
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 0ee82e3add..bdff68513b 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -415,6 +415,16 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/*
+ * The partition constraint must be checked whenever the relation has one,
+ * with one exception: if the tuple got here via tuple-routing and the
+ * partition has no BR trigger, routing has already established that the
+ * constraint holds. A BR trigger may modify the tuple so that it no
+ * longer does, hence the trigger test below.
+ */
+ bool check_partition_constr =
+ (resultRelInfo->ri_PartitionCheck != NIL);
+
+ /*
* Constraints might reference the tableoid column, so initialize
* t_tableOid before evaluating them.
*/
@@ -431,9 +441,16 @@ ExecInsert(ModifyTableState *mtstate,
resultRelInfo, slot, estate);
/*
- * Check the constraints of the tuple
+ * The partition constraint check can be skipped if the tuple has been
+ * routed and no BR trigger exists on the partition.
*/
- if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
+ if (saved_resultRelInfo != NULL &&
+ !(resultRelInfo->ri_TrigDesc &&
+ resultRelInfo->ri_TrigDesc->trig_insert_before_row))
+ check_partition_constr = false;
+
+ /* Check the constraints of the tuple */
+ if (resultRelationDesc->rd_att->constr || check_partition_constr)
ExecConstraints(resultRelInfo, slot, estate);
if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
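The declaration of check_partition_constr in the first ExecInsert() hunk and its use in the second are easier to follow side by side. Condensed here with the same logic as the diff, nothing added:

	/* start from "check whenever the relation has a partition constraint" */
	bool		check_partition_constr =
		(resultRelInfo->ri_PartitionCheck != NIL);

	/*
	 * A routed tuple (saved_resultRelInfo != NULL) already satisfies the
	 * constraint, unless a BEFORE ROW INSERT trigger may have modified it,
	 * in which case the check is kept.
	 */
	if (saved_resultRelInfo != NULL &&
		!(resultRelInfo->ri_TrigDesc &&
		  resultRelInfo->ri_TrigDesc->trig_insert_before_row))
		check_partition_constr = false;

	if (resultRelationDesc->rd_att->constr || check_partition_constr)
		ExecConstraints(resultRelInfo, slot, estate);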
@@ -1826,10 +1843,21 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
{
List *wcoList;
+ PlanState *plan;
- Assert(operation == CMD_INSERT);
- resultRelInfo = mtstate->mt_partitions;
+ /*
+ * In case of INSERT on partitioned tables, there is only one plan.
+ * Likewise, there is only one WITH CHECK OPTIONS list, not one per
+ * partition. We make a copy of the WCO qual for each partition; note
+ * that, if there are SubPlans in there, they all end up attached to
+ * the one parent Plan node.
+ */
+ Assert(operation == CMD_INSERT &&
+ list_length(node->withCheckOptionLists) == 1 &&
+ mtstate->mt_nplans == 1);
wcoList = linitial(node->withCheckOptionLists);
+ plan = mtstate->mt_plans[0];
+ resultRelInfo = mtstate->mt_partitions;
for (i = 0; i < mtstate->mt_num_partitions; i++)
{
Relation partrel = resultRelInfo->ri_RelationDesc;
@@ -1843,9 +1871,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
partrel, rel);
foreach(ll, mapped_wcoList)
{
- WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
- ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
- mtstate->mt_plans[i]);
+ WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
+ ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
+ plan);
wcoExprs = lappend(wcoExprs, wcoExpr);
}
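Two details in the ExecInitModifyTable() hunk above deserve a note: an INSERT into a partitioned table has exactly one subplan and one WITH CHECK OPTIONS list (hence the strengthened Assert), so every partition's mapped qual is initialized against that single plan state; and castNode() replaces the bare casts so that assert-enabled builds verify the node tag before the pointer is used. Sketch of the per-partition loop body under those assumptions:

	foreach(ll, mapped_wcoList)
	{
		/* castNode() is Assert(IsA(..., WithCheckOption)) plus the cast */
		WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));

		/* all partitions share mt_plans[0]; any SubPlans attach to that node */
		ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual), plan);

		wcoExprs = lappend(wcoExprs, wcoExpr);
	}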
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 5680464fa2..c0e37dcd83 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -332,7 +332,7 @@ ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
ParallelHeapScanDesc pscan;
- pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
+ pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false);
node->ss.ss_currentScanDesc =
heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);
}