author	Bernd Helmle	2011-06-08 11:25:58 +0000
committer	Bernd Helmle	2011-06-08 11:25:58 +0000
commit	5fd3024a3d606946063ffa2ffce0e9167e3f3ec4 (patch)
tree	79eb48bae43abe49646beb8c6ab58956b85af3b8
parent	820e91132277e492d4a09737ef3530b8a53649dd (diff)
parent	8f9622bbb3c02b06176760c3ca2d33c5b5f629a7 (diff)
Merge branch 'master' of ../bernd_pg into notnull_constraint
-rw-r--r--	doc/src/sgml/ecpg.sgml	13
-rw-r--r--	doc/src/sgml/plpython.sgml	3
-rw-r--r--	src/backend/catalog/heap.c	9
-rw-r--r--	src/backend/catalog/index.c	77
-rw-r--r--	src/backend/commands/cluster.c	9
-rw-r--r--	src/backend/commands/tablecmds.c	19
-rw-r--r--	src/backend/optimizer/util/plancat.c	6
-rw-r--r--	src/backend/parser/analyze.c	3
-rw-r--r--	src/backend/rewrite/rewriteHandler.c	187
-rw-r--r--	src/backend/storage/lmgr/predicate.c	551
-rw-r--r--	src/bin/psql/describe.c	9
-rw-r--r--	src/include/storage/predicate.h	2
-rw-r--r--	src/include/storage/predicate_internals.h	6
-rw-r--r--	src/pl/plperl/plperl.c	2
-rw-r--r--	src/pl/plperl/plperl.h	5
-rw-r--r--	src/test/regress/expected/with.out	40
-rw-r--r--	src/test/regress/input/constraints.source	3
-rw-r--r--	src/test/regress/output/constraints.source	2
-rw-r--r--	src/test/regress/sql/with.sql	23
19 files changed, 781 insertions(+), 188 deletions(-)
diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml
index a8ffde5150..9130b12d69 100644
--- a/doc/src/sgml/ecpg.sgml
+++ b/doc/src/sgml/ecpg.sgml
@@ -3948,8 +3948,9 @@ typedef struct sqlda_struct sqlda_t;
<term><literal>desc_next</></term>
<listitem>
<para>
- If the query returns more than one records, multiple linked SQLDA structures
- are returned, the first record is stored in the SQLDA returned in the
+ If the query returns more than one record, multiple linked
+ SQLDA structures are returned, and <literal>desc_next</> holds
+ a pointer to the next entry in the list.
</para>
</listitem>
</varlistentry>
@@ -8018,7 +8019,7 @@ typedef struct sqlda_compat sqlda_t;
<term><literal>desc_next</></term>
<listitem>
<para>
- Pointer to the next SQLDA structure if the result set contains more than one records.
+ Pointer to the next SQLDA structure if the result set contains more than one record.
</para>
</listitem>
</varlistentry>
@@ -9354,7 +9355,8 @@ risnull(CINTTYPE, (char *) &i);
<term><literal>ECPG_INFORMIX_DATE_CONVERT</></term>
<listitem>
<para>
- Functions return this value if Internally it is defined to -1210 (the
+ Functions return this value if an error occurred during date
+ formatting. Internally it is defined to -1210 (the
<productname>Informix</productname> definition).
</para>
</listitem>
@@ -9364,7 +9366,8 @@ risnull(CINTTYPE, (char *) &i);
<term><literal>ECPG_INFORMIX_OUT_OF_MEMORY</></term>
<listitem>
<para>
- Functions return this value if Internally it is defined to -1211 (the
+ Functions return this value if memory was exhausted during
+ their operation. Internally it is defined to -1211 (the
<productname>Informix</productname> definition).
</para>
</listitem>
diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index ffc1d3ab3d..eda2bbf34c 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -400,7 +400,8 @@ $$ LANGUAGE plpythonu;
If an SQL null value<indexterm><primary>null value</primary><secondary
sortas="PL/Python">in PL/Python</secondary></indexterm> is passed to a
function, the argument value will appear as <symbol>None</symbol> in
- Python. The above function definition will return the wrong answer for null
+ Python. For example, the function definition of <function>pymax</function>
+ shown in <xref linkend="plpython-funcs"> will return the wrong answer for null
inputs. We could add <literal>STRICT</literal> to the function definition
to make <productname>PostgreSQL</productname> do something more reasonable:
if a null value is passed, the function will not be called at all,
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 1e4967e293..281887b586 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -63,6 +63,7 @@
#include "parser/parse_relation.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
+#include "storage/predicate.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
@@ -1658,6 +1659,14 @@ heap_drop_with_catalog(Oid relid)
CheckTableNotInUse(rel, "DROP TABLE");
/*
+ * This effectively deletes all rows in the table, and may be done in a
+ * serializable transaction. In that case we must record a rw-conflict in
+ * to this transaction from each transaction holding a predicate lock on
+ * the table.
+ */
+ CheckTableForSerializableConflictIn(rel);
+
+ /*
* Delete pg_foreign_table tuple first.
*/
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
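As a sketch of what this check buys under SSI (not part of the patch; table ssi_demo is hypothetical, and TRUNCATE reaches the same check via ExecuteTruncate() below): one rw-conflict by itself aborts nothing, but it feeds the dangerous-structure detection and can later surface as a serialization failure.

    -- session 1:
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SELECT count(*) FROM ssi_demo;     -- acquires SIREAD (predicate) locks
    -- session 2:
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SELECT 1;                          -- takes a snapshot overlapping session 1
    -- session 1:
    COMMIT;                            -- SIREAD locks are retained while session 2 runs
    -- session 2:
    DROP TABLE ssi_demo;               -- records a rw-conflict in from session 1
    COMMIT;                            -- with further conflicts, one transaction fails:
                                       -- ERROR:  could not serialize access due to
                                       -- read/write dependencies among transactions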
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 53b4c3c59b..0898cf363e 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -54,6 +54,7 @@
#include "parser/parser.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
+#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/builtins.h"
@@ -115,6 +116,7 @@ static void validate_index_heapscan(Relation heapRelation,
Snapshot snapshot,
v_i_state *state);
static Oid IndexGetRelation(Oid indexId);
+static bool ReindexIsCurrentlyProcessingIndex(Oid indexOid);
static void SetReindexProcessing(Oid heapOid, Oid indexOid);
static void ResetReindexProcessing(void);
static void SetReindexPending(List *indexes);
@@ -1311,6 +1313,12 @@ index_drop(Oid indexId)
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
/*
+ * All predicate locks on the index are about to be made invalid. Promote
+ * them to relation locks on the heap.
+ */
+ TransferPredicateLocksToHeapRelation(userIndexRelation);
+
+ /*
* Schedule physical removal of the files
*/
RelationDropStorage(userIndexRelation);
@@ -1747,8 +1755,8 @@ index_build(Relation heapRelation,
* created it, or truncated twice in a subsequent transaction, the
* relfilenode won't change, and nothing needs to be done here.
*/
- if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED
- && !smgrexists(indexRelation->rd_smgr, INIT_FORKNUM))
+ if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
+ !smgrexists(indexRelation->rd_smgr, INIT_FORKNUM))
{
RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty;
@@ -1758,19 +1766,6 @@ index_build(Relation heapRelation,
}
/*
- * If it's for an exclusion constraint, make a second pass over the heap
- * to verify that the constraint is satisfied.
- */
- if (indexInfo->ii_ExclusionOps != NULL)
- IndexCheckExclusion(heapRelation, indexRelation, indexInfo);
-
- /* Roll back any GUC changes executed by index functions */
- AtEOXact_GUC(false, save_nestlevel);
-
- /* Restore userid and security context */
- SetUserIdAndSecContext(save_userid, save_sec_context);
-
- /*
* If we found any potentially broken HOT chains, mark the index as not
* being usable until the current transaction is below the event horizon.
* See src/backend/access/heap/README.HOT for discussion.
@@ -1824,8 +1819,23 @@ index_build(Relation heapRelation,
InvalidOid,
stats->index_tuples);
- /* Make the updated versions visible */
+ /* Make the updated catalog row versions visible */
CommandCounterIncrement();
+
+ /*
+ * If it's for an exclusion constraint, make a second pass over the heap
+ * to verify that the constraint is satisfied. We must not do this until
+ * the index is fully valid. (Broken HOT chains shouldn't matter, though;
+ * see comments for IndexCheckExclusion.)
+ */
+ if (indexInfo->ii_ExclusionOps != NULL)
+ IndexCheckExclusion(heapRelation, indexRelation, indexInfo);
+
+ /* Roll back any GUC changes executed by index functions */
+ AtEOXact_GUC(false, save_nestlevel);
+
+ /* Restore userid and security context */
+ SetUserIdAndSecContext(save_userid, save_sec_context);
}
@@ -2270,6 +2280,15 @@ IndexCheckExclusion(Relation heapRelation,
ExprContext *econtext;
/*
+ * If we are reindexing the target index, mark it as no longer being
+ * reindexed, to forestall an Assert in index_beginscan when we try to
+ * use the index for probes. This is OK because the index is now
+ * fully valid.
+ */
+ if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
+ ResetReindexProcessing();
+
+ /*
* Need an EState for evaluation of index expressions and partial-index
* predicates. Also a slot to hold the current tuple.
*/
@@ -2787,6 +2806,12 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
*/
CheckTableNotInUse(iRel, "REINDEX INDEX");
+ /*
+ * All predicate locks on the index are about to be made invalid. Promote
+ * them to relation locks on the heap.
+ */
+ TransferPredicateLocksToHeapRelation(iRel);
+
PG_TRY();
{
/* Suppress use of the target index while rebuilding it */
@@ -2989,8 +3014,8 @@ reindex_relation(Oid relid, int flags)
CommandCounterIncrement();
- if (flags & REINDEX_REL_SUPPRESS_INDEX_USE)
- RemoveReindexPending(indexOid);
+ /* Index should no longer be in the pending list */
+ Assert(!ReindexIsProcessingIndex(indexOid));
if (is_pg_class)
doneIndexes = lappend_oid(doneIndexes, indexOid);
@@ -3030,7 +3055,9 @@ reindex_relation(Oid relid, int flags)
* System index reindexing support
*
* When we are busy reindexing a system index, this code provides support
- * for preventing catalog lookups from using that index.
+ * for preventing catalog lookups from using that index. We also make use
+ * of this to catch attempted uses of user indexes during reindexing of
+ * those indexes.
* ----------------------------------------------------------------
*/
@@ -3049,6 +3076,16 @@ ReindexIsProcessingHeap(Oid heapOid)
}
/*
+ * ReindexIsCurrentlyProcessingIndex
+ * True if index specified by OID is currently being reindexed.
+ */
+static bool
+ReindexIsCurrentlyProcessingIndex(Oid indexOid)
+{
+ return indexOid == currentlyReindexedIndex;
+}
+
+/*
* ReindexIsProcessingIndex
* True if index specified by OID is currently being reindexed,
* or should be treated as invalid because it is awaiting reindex.
@@ -3075,6 +3112,8 @@ SetReindexProcessing(Oid heapOid, Oid indexOid)
elog(ERROR, "cannot reindex while reindexing");
currentlyReindexedHeap = heapOid;
currentlyReindexedIndex = indexOid;
+ /* Index is no longer "pending" reindex. */
+ RemoveReindexPending(indexOid);
}
/*
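Moving IndexCheckExclusion() after the CommandCounterIncrement(), plus the new ReindexIsCurrentlyProcessingIndex() escape, is what lets REINDEX work on the index behind an exclusion constraint: the recheck probes the very index being rebuilt, which previously hit the Assert in index_beginscan. A sketch (table and index names hypothetical; the real coverage is the constraints regression test below):

    CREATE TABLE excl_demo (c circle, EXCLUDE USING gist (c WITH &&));
    -- the constraint is backed by an index, here excl_demo_c_excl
    REINDEX INDEX excl_demo_c_excl;    -- rebuild, then re-verify the constraint by
                                       -- probing the freshly built index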
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index dc0f6059b0..0ab3a8bcfa 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -39,6 +39,7 @@
#include "optimizer/planner.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
+#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/acl.h"
@@ -385,6 +386,14 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
if (OidIsValid(indexOid))
check_index_is_clusterable(OldHeap, indexOid, recheck, AccessExclusiveLock);
+ /*
+ * All predicate locks on the tuples or pages are about to be made
+ * invalid, because we move tuples around. Promote them to relation
+ * locks. Predicate locks on indexes will be promoted when they are
+ * reindexed.
+ */
+ TransferPredicateLocksToHeapRelation(OldHeap);
+
/* rebuild_relation does all the dirty work */
rebuild_relation(OldHeap, indexOid, freeze_min_age, freeze_table_age,
verbose);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 17a0384465..2cf7bebdc9 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -70,6 +70,7 @@
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/lock.h"
+#include "storage/predicate.h"
#include "storage/smgr.h"
#include "utils/acl.h"
#include "utils/builtins.h"
@@ -1145,6 +1146,14 @@ ExecuteTruncate(TruncateStmt *stmt)
Oid toast_relid;
/*
+ * This effectively deletes all rows in the table, and may be done
+ * in a serializable transaction. In that case we must record a
+ * rw-conflict in to this transaction from each transaction
+ * holding a predicate lock on the table.
+ */
+ CheckTableForSerializableConflictIn(rel);
+
+ /*
* Need the full transaction-safe pushups.
*
* Create a new empty storage file for the relation, and assign it
@@ -3884,6 +3893,16 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
(errmsg("verifying table \"%s\"",
RelationGetRelationName(oldrel))));
+ if (newrel)
+ {
+ /*
+ * All predicate locks on the tuples or pages are about to be made
+ * invalid, because we move tuples around. Promote them to
+ * relation locks.
+ */
+ TransferPredicateLocksToHeapRelation(oldrel);
+ }
+
econtext = GetPerTupleExprContext(estate);
/*
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index fd8ea45b4a..b28681630b 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -90,6 +90,12 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
*/
relation = heap_open(relationObjectId, NoLock);
+ /* Temporary and unlogged relations are inaccessible during recovery. */
+ if (!RelationNeedsWAL(relation) && RecoveryInProgress())
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot access temporary or unlogged relations during recovery")));
+
rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
rel->max_attr = RelationGetNumberOfAttributes(relation);
rel->reltablespace = RelationGetForm(relation)->reltablespace;
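For reference, the user-visible effect on a hot standby (the error text is taken verbatim from the patch; the table name is hypothetical). Unlogged relations are not WAL-logged, so they have no valid contents during recovery:

    -- on a standby server, while recovery is in progress:
    SELECT * FROM my_unlogged_table;
    ERROR:  cannot access temporary or unlogged relations during recovery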
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index b203287047..4867685c7f 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -999,7 +999,7 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
* transforms a VALUES clause that's being used as a standalone SELECT
*
* We build a Query containing a VALUES RTE, rather as if one had written
- * SELECT * FROM (VALUES ...)
+ * SELECT * FROM (VALUES ...) AS "*VALUES*"
*/
static Query *
transformValuesClause(ParseState *pstate, SelectStmt *stmt)
@@ -1162,6 +1162,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
rtr->rtindex = list_length(pstate->p_rtable);
Assert(rte == rt_fetch(rtr->rtindex, pstate->p_rtable));
pstate->p_joinlist = lappend(pstate->p_joinlist, rtr);
+ pstate->p_relnamespace = lappend(pstate->p_relnamespace, rte);
pstate->p_varnamespace = lappend(pstate->p_varnamespace, rte);
/*
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index bfc8fd7ee0..be9e7a4598 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -455,6 +455,44 @@ rewriteRuleAction(Query *parsetree,
}
/*
+ * If the original query has any CTEs, copy them into the rule action.
+ * But we don't need them for a utility action.
+ */
+ if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
+ {
+ ListCell *lc;
+
+ /*
+ * Annoying implementation restriction: because CTEs are identified
+ * by name within a cteList, we can't merge a CTE from the original
+ * query if it has the same name as any CTE in the rule action.
+ *
+ * This could possibly be fixed by using some sort of internally
+ * generated ID, instead of names, to link CTE RTEs to their CTEs.
+ */
+ foreach(lc, parsetree->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+ ListCell *lc2;
+
+ foreach(lc2, sub_action->cteList)
+ {
+ CommonTableExpr *cte2 = (CommonTableExpr *) lfirst(lc2);
+
+ if (strcmp(cte->ctename, cte2->ctename) == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("WITH query name \"%s\" appears in both a rule action and the query being rewritten",
+ cte->ctename)));
+ }
+ }
+
+ /* OK, it's safe to combine the CTE lists */
+ sub_action->cteList = list_concat(sub_action->cteList,
+ copyObject(parsetree->cteList));
+ }
+
+ /*
* Event Qualification forces copying of parsetree and splitting into two
 * queries, one w/rule_qual, one w/NOT rule_qual. Also add user query qual
* onto rule action
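A sketch of the collision this guards against (tables t and t_log hypothetical); without the check, the outer CTE and the rule action's CTE, both named x, would be merged into one cteList:

    CREATE RULE t_ins AS ON INSERT TO t DO INSTEAD
        WITH x AS (SELECT 1 AS i) INSERT INTO t_log SELECT i FROM x;
    WITH x AS (SELECT 42 AS i)
    INSERT INTO t SELECT i FROM x;
    ERROR:  WITH query name "x" appears in both a rule action and the query being rewritten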
@@ -1806,6 +1844,69 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
ListCell *lc1;
/*
+ * First, recursively process any insert/update/delete statements in WITH
+ * clauses. (We have to do this first because the WITH clauses may get
+ * copied into rule actions below.)
+ */
+ foreach(lc1, parsetree->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc1);
+ Query *ctequery = (Query *) cte->ctequery;
+ List *newstuff;
+
+ Assert(IsA(ctequery, Query));
+
+ if (ctequery->commandType == CMD_SELECT)
+ continue;
+
+ newstuff = RewriteQuery(ctequery, rewrite_events);
+
+ /*
+ * Currently we can only handle unconditional, single-statement DO
+ * INSTEAD rules correctly; we have to get exactly one Query out of
+ * the rewrite operation to stuff back into the CTE node.
+ */
+ if (list_length(newstuff) == 1)
+ {
+ /* Push the single Query back into the CTE node */
+ ctequery = (Query *) linitial(newstuff);
+ Assert(IsA(ctequery, Query));
+ /* WITH queries should never be canSetTag */
+ Assert(!ctequery->canSetTag);
+ cte->ctequery = (Node *) ctequery;
+ }
+ else if (newstuff == NIL)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH")));
+ }
+ else
+ {
+ ListCell *lc2;
+
+ /* examine queries to determine which error message to issue */
+ foreach(lc2, newstuff)
+ {
+ Query *q = (Query *) lfirst(lc2);
+
+ if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("conditional DO INSTEAD rules are not supported for data-modifying statements in WITH")));
+ if (q->querySource == QSRC_NON_INSTEAD_RULE)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("DO ALSO rules are not supported for data-modifying statements in WITH")));
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH")));
+ }
+ }
+
+ /*
* If the statement is an insert, update, or delete, adjust its targetlist
* as needed, and then fire INSERT/UPDATE/DELETE rules on it.
*
@@ -1984,67 +2085,6 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
/*
- * Recursively process any insert/update/delete statements in WITH clauses
- */
- foreach(lc1, parsetree->cteList)
- {
- CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc1);
- Query *ctequery = (Query *) cte->ctequery;
- List *newstuff;
-
- Assert(IsA(ctequery, Query));
-
- if (ctequery->commandType == CMD_SELECT)
- continue;
-
- newstuff = RewriteQuery(ctequery, rewrite_events);
-
- /*
- * Currently we can only handle unconditional, single-statement DO
- * INSTEAD rules correctly; we have to get exactly one Query out of
- * the rewrite operation to stuff back into the CTE node.
- */
- if (list_length(newstuff) == 1)
- {
- /* Push the single Query back into the CTE node */
- ctequery = (Query *) linitial(newstuff);
- Assert(IsA(ctequery, Query));
- /* WITH queries should never be canSetTag */
- Assert(!ctequery->canSetTag);
- cte->ctequery = (Node *) ctequery;
- }
- else if (newstuff == NIL)
- {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH")));
- }
- else
- {
- ListCell *lc2;
-
- /* examine queries to determine which error message to issue */
- foreach(lc2, newstuff)
- {
- Query *q = (Query *) lfirst(lc2);
-
- if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conditional DO INSTEAD rules are not supported for data-modifying statements in WITH")));
- if (q->querySource == QSRC_NON_INSTEAD_RULE)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO ALSO rules are not supported for data-modifying statements in WITH")));
- }
-
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH")));
- }
- }
-
- /*
* For INSERTs, the original query is done first; for UPDATE/DELETE, it is
* done last. This is needed because update and delete rule actions might
* not do anything if they are invoked after the update or delete is
@@ -2074,6 +2114,31 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
}
+ /*
+ * If the original query has a CTE list, and we generated more than one
+ * non-utility result query, we have to fail because we'll have copied
+ * the CTE list into each result query. That would break the expectation
+ * of single evaluation of CTEs. This could possibly be fixed by
+ * restructuring so that a CTE list can be shared across multiple Query
+ * and PlannableStatement nodes.
+ */
+ if (parsetree->cteList != NIL)
+ {
+ int qcount = 0;
+
+ foreach(lc1, rewritten)
+ {
+ Query *q = (Query *) lfirst(lc1);
+
+ if (q->commandType != CMD_UTILITY)
+ qcount++;
+ }
+ if (qcount > 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("WITH cannot be used in a query that is rewritten by rules into multiple queries")));
+ }
+
return rewritten;
}
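Taken together, the RewriteQuery() changes make the cases we cannot yet handle fail cleanly instead of silently misbehaving. Hedged sketches of two of the new errors, assuming a hypothetical table t carrying DO ALSO rules:

    -- with an ON DELETE DO ALSO rule on t, a data-modifying CTE is rejected:
    WITH d AS (DELETE FROM t RETURNING *) SELECT * FROM d;
    ERROR:  DO ALSO rules are not supported for data-modifying statements in WITH

    -- with an ON INSERT DO ALSO rule on t, the query is rewritten into two
    -- result queries, which would evaluate the CTE twice:
    WITH x AS (SELECT 1 AS i) INSERT INTO t SELECT i FROM x;
    ERROR:  WITH cannot be used in a query that is rewritten by rules into multiple queries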
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 6facb533e0..28da729b6d 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -155,6 +155,7 @@
* BlockNumber newblkno);
* PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
* BlockNumber newblkno);
+ * TransferPredicateLocksToHeapRelation(Relation relation)
* ReleasePredicateLocks(bool isCommit)
*
* conflict detection (may also trigger rollback)
@@ -162,6 +163,7 @@
* HeapTupleData *tup, Buffer buffer)
* CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
* Buffer buffer)
+ * CheckTableForSerializableConflictIn(Relation relation)
*
* final rollback checking
* PreCommit_CheckForSerializationFailure(void)
@@ -257,10 +259,10 @@
#define SxactIsMarkedForDeath(sxact) (((sxact)->flags & SXACT_FLAG_MARKED_FOR_DEATH) != 0)
/*
- * When a public interface method is called for a split on an index relation,
- * this is the test to see if we should do a quick return.
+ * Is this relation exempt from predicate locking? We don't do predicate
+ * locking on system or temporary relations.
*/
-#define SkipSplitTracking(relation) \
+#define SkipPredicateLocksForRelation(relation) \
(((relation)->rd_id < FirstBootstrapObjectId) \
|| RelationUsesLocalBuffers(relation))
@@ -273,7 +275,7 @@
((!IsolationIsSerializable()) \
|| ((MySerializableXact == InvalidSerializableXact)) \
|| ReleasePredicateLocksIfROSafe() \
- || SkipSplitTracking(relation))
+ || SkipPredicateLocksForRelation(relation))
/*
@@ -374,11 +376,13 @@ static HTAB *PredicateLockHash;
static SHM_QUEUE *FinishedSerializableTransactions;
/*
- * Tag for a reserved entry in PredicateLockTargetHash; used to ensure
- * there's an element available for scratch space if we need it,
- * e.g. in PredicateLockPageSplit. This is an otherwise-invalid tag.
+ * Tag for a dummy entry in PredicateLockTargetHash. By temporarily removing
+ * this entry, you can ensure that there's enough scratch space available for
+ * inserting one entry in the hash table. This is an otherwise-invalid tag.
*/
-static const PREDICATELOCKTARGETTAG ReservedTargetTag = {0, 0, 0, 0, 0};
+static const PREDICATELOCKTARGETTAG ScratchTargetTag = {0, 0, 0, 0, 0};
+static uint32 ScratchTargetTagHash;
+static int ScratchPartitionLock;
/*
* The local hash table used to determine when to combine multiple fine-
@@ -420,6 +424,8 @@ static bool PredicateLockExists(const PREDICATELOCKTARGETTAG *targettag);
static bool GetParentPredicateLockTag(const PREDICATELOCKTARGETTAG *tag,
PREDICATELOCKTARGETTAG *parent);
static bool CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag);
+static void RemoveScratchTarget(bool lockheld);
+static void RestoreScratchTarget(bool lockheld);
static void RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target,
uint32 targettaghash);
static void DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag);
@@ -434,6 +440,8 @@ static bool TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldta
const PREDICATELOCKTARGETTAG newtargettag,
bool removeOld);
static void PredicateLockAcquire(const PREDICATELOCKTARGETTAG *targettag);
+static void DropAllPredicateLocksFromTable(const Relation relation,
+ bool transfer);
static void SetNewSxactGlobalXmin(void);
static bool ReleasePredicateLocksIfROSafe(void);
static void ClearOldPredicateLocks(void);
@@ -977,8 +985,8 @@ InitPredicateLocks(void)
bool found;
/*
- * Compute size of predicate lock target hashtable.
- * Note these calculations must agree with PredicateLockShmemSize!
+ * Compute size of predicate lock target hashtable. Note these
+ * calculations must agree with PredicateLockShmemSize!
*/
max_table_size = NPREDICATELOCKTARGETENTS();
@@ -1003,14 +1011,12 @@ InitPredicateLocks(void)
max_table_size *= 2;
/*
- * Reserve an entry in the hash table; we use it to make sure there's
+ * Reserve a dummy entry in the hash table; we use it to make sure there's
* always one entry available when we need to split or combine a page,
* because running out of space there could mean aborting a
* non-serializable transaction.
*/
- hash_search(PredicateLockTargetHash, &ReservedTargetTag,
- HASH_ENTER, NULL);
-
+ hash_search(PredicateLockTargetHash, &ScratchTargetTag, HASH_ENTER, NULL);
/*
* Allocate hash table for PREDICATELOCK structs. This stores per
@@ -1030,8 +1036,8 @@ InitPredicateLocks(void)
hash_flags);
/*
- * Compute size for serializable transaction hashtable.
- * Note these calculations must agree with PredicateLockShmemSize!
+ * Compute size for serializable transaction hashtable. Note these
+ * calculations must agree with PredicateLockShmemSize!
*/
max_table_size = (MaxBackends + max_prepared_xacts);
@@ -1165,6 +1171,10 @@ InitPredicateLocks(void)
* transactions.
*/
OldSerXidInit();
+
+ /* Pre-calculate the hash and partition lock of the scratch entry */
+ ScratchTargetTagHash = PredicateLockTargetTagHashCode(&ScratchTargetTag);
+ ScratchPartitionLock = PredicateLockHashPartitionLock(ScratchTargetTagHash);
}
/*
@@ -1759,6 +1769,54 @@ CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
}
/*
+ * Remove the dummy entry from the predicate lock target hash, to free up some
+ * scratch space. The caller must be holding SerializablePredicateLockListLock,
+ * and must restore the entry with RestoreScratchTarget() before releasing the
+ * lock.
+ *
+ * If lockheld is true, the caller is already holding the partition lock
+ * of the partition containing the scratch entry.
+ */
+static void
+RemoveScratchTarget(bool lockheld)
+{
+ bool found;
+
+ Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+
+ if (!lockheld)
+ LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
+ hash_search_with_hash_value(PredicateLockTargetHash,
+ &ScratchTargetTag,
+ ScratchTargetTagHash,
+ HASH_REMOVE, &found);
+ Assert(found);
+ if (!lockheld)
+ LWLockRelease(ScratchPartitionLock);
+}
+
+/*
+ * Re-insert the dummy entry in predicate lock target hash.
+ */
+static void
+RestoreScratchTarget(bool lockheld)
+{
+ bool found;
+
+ Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+
+ if (!lockheld)
+ LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
+ hash_search_with_hash_value(PredicateLockTargetHash,
+ &ScratchTargetTag,
+ ScratchTargetTagHash,
+ HASH_ENTER, &found);
+ Assert(!found);
+ if (!lockheld)
+ LWLockRelease(ScratchPartitionLock);
+}
+
+/*
* Check whether the list of related predicate locks is empty for a
* predicate lock target, and remove the target if it is.
*/
@@ -2317,8 +2375,8 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
*
* Returns true on success, or false if we ran out of shared memory to
* allocate the new target or locks. Guaranteed to always succeed if
- * removeOld is set (by using the reserved entry in
- * PredicateLockTargetHash for scratch space).
+ * removeOld is set (by using the scratch entry in PredicateLockTargetHash
+ * for scratch space).
*
* Warning: the "removeOld" option should be used only with care,
* because this function does not (indeed, can not) update other
@@ -2345,9 +2403,6 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
LWLockId newpartitionLock;
bool found;
bool outOfShmem = false;
- uint32 reservedtargettaghash;
- LWLockId reservedpartitionLock;
-
Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
@@ -2356,24 +2411,13 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
oldpartitionLock = PredicateLockHashPartitionLock(oldtargettaghash);
newpartitionLock = PredicateLockHashPartitionLock(newtargettaghash);
- reservedtargettaghash = 0; /* Quiet compiler warnings. */
- reservedpartitionLock = 0; /* Quiet compiler warnings. */
-
if (removeOld)
{
/*
- * Remove the reserved entry to give us scratch space, so we know
- * we'll be able to create the new lock target.
+ * Remove the dummy entry to give us scratch space, so we know we'll
+ * be able to create the new lock target.
*/
- reservedtargettaghash = PredicateLockTargetTagHashCode(&ReservedTargetTag);
- reservedpartitionLock = PredicateLockHashPartitionLock(reservedtargettaghash);
- LWLockAcquire(reservedpartitionLock, LW_EXCLUSIVE);
- hash_search_with_hash_value(PredicateLockTargetHash,
- &ReservedTargetTag,
- reservedtargettaghash,
- HASH_REMOVE, &found);
- Assert(found);
- LWLockRelease(reservedpartitionLock);
+ RemoveScratchTarget(false);
}
/*
@@ -2431,6 +2475,10 @@ TransferPredicateLocksToNewTarget(const PREDICATELOCKTARGETTAG oldtargettag,
newpredlocktag.myTarget = newtarget;
+ /*
+ * Loop through all the locks on the old target, replacing them with
+ * locks on the new target.
+ */
oldpredlock = (PREDICATELOCK *)
SHMQueueNext(&(oldtarget->predicateLocks),
&(oldtarget->predicateLocks),
@@ -2530,19 +2578,238 @@ exit:
/* We shouldn't run out of memory if we're moving locks */
Assert(!outOfShmem);
- /* Put the reserved entry back */
- LWLockAcquire(reservedpartitionLock, LW_EXCLUSIVE);
- hash_search_with_hash_value(PredicateLockTargetHash,
- &ReservedTargetTag,
- reservedtargettaghash,
- HASH_ENTER, &found);
- Assert(!found);
- LWLockRelease(reservedpartitionLock);
+ /* Put the scratch entry back */
+ RestoreScratchTarget(false);
}
return !outOfShmem;
}
+/*
+ * Drop all predicate locks of any granularity from the specified relation,
+ * which can be a heap relation or an index relation. If 'transfer' is true,
+ * acquire a relation lock on the heap for any transactions with any lock(s)
+ * on the specified relation.
+ *
+ * This requires grabbing a lot of LW locks and scanning the entire lock
+ * target table for matches. That makes this more expensive than most
+ * predicate lock management functions, but it will only be called for DDL
+ * type commands that are expensive anyway, and there are fast returns when
+ * no serializable transactions are active or the relation is temporary.
+ *
+ * We don't use the TransferPredicateLocksToNewTarget function because it
+ * acquires its own locks on the partitions of the two targets involved,
+ * and we'll already be holding all partition locks.
+ *
+ * We can't throw an error from here, because the call could be from a
+ * transaction which is not serializable.
+ *
+ * NOTE: This is currently only called with transfer set to true, but that may
+ * change. If we decide to clean up the locks from a table on commit of a
+ * transaction which executed DROP TABLE, the false condition will be useful.
+ */
+static void
+DropAllPredicateLocksFromTable(const Relation relation, bool transfer)
+{
+ HASH_SEQ_STATUS seqstat;
+ PREDICATELOCKTARGET *oldtarget;
+ PREDICATELOCKTARGET *heaptarget;
+ Oid dbId;
+ Oid relId;
+ Oid heapId;
+ int i;
+ bool isIndex;
+ bool found;
+ uint32 heaptargettaghash;
+
+ /*
+ * Bail out quickly if there are no serializable transactions running.
+ * It's safe to check this without taking locks because the caller is
+ * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
+ * would matter here can be acquired while that is held.
+ */
+ if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+ return;
+
+ if (SkipPredicateLocksForRelation(relation))
+ return;
+
+ dbId = relation->rd_node.dbNode;
+ relId = relation->rd_id;
+ if (relation->rd_index == NULL)
+ {
+ isIndex = false;
+ heapId = relId;
+ }
+ else
+ {
+ isIndex = true;
+ heapId = relation->rd_index->indrelid;
+ }
+ Assert(heapId != InvalidOid);
+ Assert(transfer || !isIndex); /* index OID only makes sense with
+ * transfer */
+
+ /* Retrieve first time needed, then keep. */
+ heaptargettaghash = 0;
+ heaptarget = NULL;
+
+ /* Acquire locks on all lock partitions */
+ LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
+ for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
+ LWLockAcquire(FirstPredicateLockMgrLock + i, LW_EXCLUSIVE);
+ LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
+
+ /*
+ * Remove the dummy entry to give us scratch space, so we know we'll be
+ * able to create the new lock target.
+ */
+ if (transfer)
+ RemoveScratchTarget(true);
+
+ /* Scan through target map */
+ hash_seq_init(&seqstat, PredicateLockTargetHash);
+
+ while ((oldtarget = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+ {
+ PREDICATELOCK *oldpredlock;
+
+ /*
+ * Check whether this is a target which needs attention.
+ */
+ if (GET_PREDICATELOCKTARGETTAG_RELATION(oldtarget->tag) != relId)
+ continue; /* wrong relation id */
+ if (GET_PREDICATELOCKTARGETTAG_DB(oldtarget->tag) != dbId)
+ continue; /* wrong database id */
+ if (transfer && !isIndex
+ && GET_PREDICATELOCKTARGETTAG_TYPE(oldtarget->tag) == PREDLOCKTAG_RELATION)
+ continue; /* already the right lock */
+
+ /*
+ * If we made it here, we have work to do. We make sure the heap
+ * relation lock exists, then we walk the list of predicate locks for
+ * the old target we found, moving all locks to the heap relation lock
+ * -- unless they already hold that.
+ */
+
+ /*
+ * First make sure we have the heap relation target. We only need to
+ * do this once.
+ */
+ if (transfer && heaptarget == NULL)
+ {
+ PREDICATELOCKTARGETTAG heaptargettag;
+
+ SET_PREDICATELOCKTARGETTAG_RELATION(heaptargettag, dbId, heapId);
+ heaptargettaghash = PredicateLockTargetTagHashCode(&heaptargettag);
+ heaptarget = hash_search_with_hash_value(PredicateLockTargetHash,
+ &heaptargettag,
+ heaptargettaghash,
+ HASH_ENTER, &found);
+ if (!found)
+ SHMQueueInit(&heaptarget->predicateLocks);
+ }
+
+ /*
+ * Loop through all the locks on the old target, replacing them with
+ * locks on the new target.
+ */
+ oldpredlock = (PREDICATELOCK *)
+ SHMQueueNext(&(oldtarget->predicateLocks),
+ &(oldtarget->predicateLocks),
+ offsetof(PREDICATELOCK, targetLink));
+ while (oldpredlock)
+ {
+ PREDICATELOCK *nextpredlock;
+ PREDICATELOCK *newpredlock;
+ SerCommitSeqNo oldCommitSeqNo;
+ SERIALIZABLEXACT *oldXact;
+
+ nextpredlock = (PREDICATELOCK *)
+ SHMQueueNext(&(oldtarget->predicateLocks),
+ &(oldpredlock->targetLink),
+ offsetof(PREDICATELOCK, targetLink));
+
+ /*
+ * Remove the old lock first. This avoids the chance of running
+ * out of lock structure entries for the hash table.
+ */
+ oldCommitSeqNo = oldpredlock->commitSeqNo;
+ oldXact = oldpredlock->tag.myXact;
+
+ SHMQueueDelete(&(oldpredlock->xactLink));
+
+ /*
+ * No need for retail delete from oldtarget list, we're removing
+ * the whole target anyway.
+ */
+ hash_search(PredicateLockHash,
+ &oldpredlock->tag,
+ HASH_REMOVE, &found);
+ Assert(found);
+
+ if (transfer)
+ {
+ PREDICATELOCKTAG newpredlocktag;
+
+ newpredlocktag.myTarget = heaptarget;
+ newpredlocktag.myXact = oldXact;
+ newpredlock = (PREDICATELOCK *)
+ hash_search_with_hash_value
+ (PredicateLockHash,
+ &newpredlocktag,
+ PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
+ heaptargettaghash),
+ HASH_ENTER, &found);
+ if (!found)
+ {
+ SHMQueueInsertBefore(&(heaptarget->predicateLocks),
+ &(newpredlock->targetLink));
+ SHMQueueInsertBefore(&(newpredlocktag.myXact->predicateLocks),
+ &(newpredlock->xactLink));
+ newpredlock->commitSeqNo = oldCommitSeqNo;
+ }
+ else
+ {
+ if (newpredlock->commitSeqNo < oldCommitSeqNo)
+ newpredlock->commitSeqNo = oldCommitSeqNo;
+ }
+
+ Assert(newpredlock->commitSeqNo != 0);
+ Assert((newpredlock->commitSeqNo == InvalidSerCommitSeqNo)
+ || (newpredlock->tag.myXact == OldCommittedSxact));
+ }
+
+ oldpredlock = nextpredlock;
+ }
+
+ hash_search(PredicateLockTargetHash, &oldtarget->tag, HASH_REMOVE,
+ &found);
+ Assert(found);
+ }
+
+ /* Put the scratch entry back */
+ if (transfer)
+ RestoreScratchTarget(true);
+
+ /* Release locks in reverse order */
+ LWLockRelease(SerializableXactHashLock);
+ for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
+ LWLockRelease(FirstPredicateLockMgrLock + i);
+ LWLockRelease(SerializablePredicateLockListLock);
+}
+
+/*
+ * TransferPredicateLocksToHeapRelation
+ * For all transactions, transfer all predicate locks for the given
+ * relation to a single relation lock on the heap.
+ */
+void
+TransferPredicateLocksToHeapRelation(const Relation relation)
+{
+ DropAllPredicateLocksFromTable(relation, true);
+}
+
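The transfer is observable from SQL (a sketch; table foo and its contents are hypothetical): predicate locks show up in pg_locks with mode SIReadLock, and CLUSTER, DROP INDEX, REINDEX, or a rewriting ALTER TABLE collapses the fine-grained entries into a single relation-level lock on the heap:

    -- session 1: BEGIN ISOLATION LEVEL SERIALIZABLE;
    --            SELECT * FROM foo WHERE k = 1;
    --            COMMIT;  -- SIREAD locks outlive the commit while an
    --                     -- overlapping serializable transaction is running
    -- session 2: CLUSTER foo;
    -- any session:
    SELECT locktype, relation::regclass AS rel, page, tuple
    FROM pg_locks WHERE mode = 'SIReadLock';
    --  locktype | rel | page | tuple
    -- ----------+-----+------+-------
    --  relation | foo |      |          <- promoted from page/tuple entries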
/*
* PredicateLockPageSplit
@@ -2567,21 +2834,19 @@ PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno,
bool success;
/*
- * Bail out quickly if there are no serializable transactions
- * running.
+ * Bail out quickly if there are no serializable transactions running.
*
- * It's safe to do this check without taking any additional
- * locks. Even if a serializable transaction starts concurrently,
- * we know it can't take any SIREAD locks on the page being split
- * because the caller is holding the associated buffer page lock.
- * Memory reordering isn't an issue; the memory barrier in the
- * LWLock acquisition guarantees that this read occurs while the
- * buffer page lock is held.
+ * It's safe to do this check without taking any additional locks. Even if
+ * a serializable transaction starts concurrently, we know it can't take
+ * any SIREAD locks on the page being split because the caller is holding
+ * the associated buffer page lock. Memory reordering isn't an issue; the
+ * memory barrier in the LWLock acquisition guarantees that this read
+ * occurs while the buffer page lock is held.
*/
if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
return;
- if (SkipSplitTracking(relation))
+ if (SkipPredicateLocksForRelation(relation))
return;
Assert(oldblkno != newblkno);
@@ -2764,7 +3029,7 @@ ReleasePredicateLocks(const bool isCommit)
* If this value is changing, we don't care that much whether we get the
* old or new value -- it is just used to determine how far
 * GlobalSerializableXmin must advance before this transaction can be fully
- * cleaned up. The worst that could happen is we wait for one more
+ * cleaned up. The worst that could happen is we wait for one more
* transaction to complete before freeing some RAM; correctness of visible
* behavior is not affected.
*/
@@ -3610,15 +3875,14 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
if (sxact == MySerializableXact)
{
/*
- * If we're getting a write lock on a tuple, we don't need
- * a predicate (SIREAD) lock on the same tuple. We can
- * safely remove our SIREAD lock, but we'll defer doing so
- * until after the loop because that requires upgrading to
- * an exclusive partition lock.
+ * If we're getting a write lock on a tuple, we don't need a
+ * predicate (SIREAD) lock on the same tuple. We can safely remove
+ * our SIREAD lock, but we'll defer doing so until after the loop
+ * because that requires upgrading to an exclusive partition lock.
*
- * We can't use this optimization within a subtransaction
- * because the subtransaction could roll back, and we
- * would be left without any lock at the top level.
+ * We can't use this optimization within a subtransaction because
+ * the subtransaction could roll back, and we would be left
+ * without any lock at the top level.
*/
if (!IsSubTransaction()
&& GET_PREDICATELOCKTARGETTAG_OFFSET(*targettag))
@@ -3660,14 +3924,12 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
LWLockRelease(partitionLock);
/*
- * If we found one of our own SIREAD locks to remove, remove it
- * now.
+ * If we found one of our own SIREAD locks to remove, remove it now.
*
- * At this point our transaction already has an ExclusiveRowLock
- * on the relation, so we are OK to drop the predicate lock on the
- * tuple, if found, without fearing that another write against the
- * tuple will occur before the MVCC information makes it to the
- * buffer.
+ * At this point our transaction already has an ExclusiveRowLock on the
+ * relation, so we are OK to drop the predicate lock on the tuple, if
+ * found, without fearing that another write against the tuple will occur
+ * before the MVCC information makes it to the buffer.
*/
if (mypredlock != NULL)
{
@@ -3679,9 +3941,9 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
/*
- * Remove the predicate lock from shared memory, if it wasn't
- * removed while the locks were released. One way that could
- * happen is from autovacuum cleaning up an index.
+ * Remove the predicate lock from shared memory, if it wasn't removed
+ * while the locks were released. One way that could happen is from
+ * autovacuum cleaning up an index.
*/
predlockhashcode = PredicateLockHashCodeFromTargetHashCode
(&mypredlocktag, targettaghash);
@@ -3710,13 +3972,13 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
LWLockRelease(SerializableXactHashLock);
LWLockRelease(partitionLock);
LWLockRelease(SerializablePredicateLockListLock);
-
+
if (rmpredlock != NULL)
{
/*
- * Remove entry in local lock table if it exists. It's OK
- * if it doesn't exist; that means the lock was
- * transferred to a new target by a different backend.
+ * Remove entry in local lock table if it exists. It's OK if it
+ * doesn't exist; that means the lock was transferred to a new
+ * target by a different backend.
*/
hash_search_with_hash_value(LocalPredicateLockHash,
targettag, targettaghash,
@@ -3792,6 +4054,113 @@ CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple,
}
/*
+ * CheckTableForSerializableConflictIn
+ * The entire table is going through a DDL-style logical mass delete
+ * like TRUNCATE or DROP TABLE. If that causes a rw-conflict in from
+ * another serializable transaction, take appropriate action.
+ *
+ * While these operations do not operate entirely within the bounds of
+ * snapshot isolation, they can occur inside a serializable transaction, and
+ * will logically occur after any reads which saw rows which were destroyed
+ * by these operations, so we do what we can to serialize properly under
+ * SSI.
+ *
+ * The relation passed in must be a heap relation. Any predicate lock of any
+ * granularity on the heap will cause a rw-conflict in to this transaction.
+ * Predicate locks on indexes do not matter because they only exist to guard
+ * against conflicting inserts into the index, and this is a mass *delete*.
+ * When a table is truncated or dropped, the index will also be truncated
+ * or dropped, and we'll deal with locks on the index when that happens.
+ *
+ * Dropping or truncating a table also needs to drop any existing predicate
+ * locks on heap tuples or pages, because they're about to go away. This
+ * should be done before altering the predicate locks because the transaction
+ * could be rolled back because of a conflict, in which case the lock changes
+ * are not needed. (At the moment, we don't actually bother to drop the
+ * existing locks on a dropped or truncated table. That might
+ * lead to some false positives, but it doesn't seem worth the trouble.)
+ */
+void
+CheckTableForSerializableConflictIn(const Relation relation)
+{
+ HASH_SEQ_STATUS seqstat;
+ PREDICATELOCKTARGET *target;
+ Oid dbId;
+ Oid heapId;
+ int i;
+
+ /*
+ * Bail out quickly if there are no serializable transactions running.
+ * It's safe to check this without taking locks because the caller is
+ * holding an ACCESS EXCLUSIVE lock on the relation. No new locks which
+ * would matter here can be acquired while that is held.
+ */
+ if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
+ return;
+
+ if (SkipSerialization(relation))
+ return;
+
+ Assert(relation->rd_index == NULL); /* not an index relation */
+
+ dbId = relation->rd_node.dbNode;
+ heapId = relation->rd_id;
+
+ LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
+ for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
+ LWLockAcquire(FirstPredicateLockMgrLock + i, LW_SHARED);
+ LWLockAcquire(SerializableXactHashLock, LW_SHARED);
+
+ /* Scan through target list */
+ hash_seq_init(&seqstat, PredicateLockTargetHash);
+
+ while ((target = (PREDICATELOCKTARGET *) hash_seq_search(&seqstat)))
+ {
+ PREDICATELOCK *predlock;
+
+ /*
+ * Check whether this is a target which needs attention.
+ */
+ if (GET_PREDICATELOCKTARGETTAG_RELATION(target->tag) != heapId)
+ continue; /* wrong relation id */
+ if (GET_PREDICATELOCKTARGETTAG_DB(target->tag) != dbId)
+ continue; /* wrong database id */
+
+ /*
+ * Loop through locks for this target and flag conflicts.
+ */
+ predlock = (PREDICATELOCK *)
+ SHMQueueNext(&(target->predicateLocks),
+ &(target->predicateLocks),
+ offsetof(PREDICATELOCK, targetLink));
+ while (predlock)
+ {
+ PREDICATELOCK *nextpredlock;
+
+ nextpredlock = (PREDICATELOCK *)
+ SHMQueueNext(&(target->predicateLocks),
+ &(predlock->targetLink),
+ offsetof(PREDICATELOCK, targetLink));
+
+ if (predlock->tag.myXact != MySerializableXact
+ && !RWConflictExists(predlock->tag.myXact,
+ (SERIALIZABLEXACT *) MySerializableXact))
+ FlagRWConflict(predlock->tag.myXact,
+ (SERIALIZABLEXACT *) MySerializableXact);
+
+ predlock = nextpredlock;
+ }
+ }
+
+ /* Release locks in reverse order */
+ LWLockRelease(SerializableXactHashLock);
+ for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
+ LWLockRelease(FirstPredicateLockMgrLock + i);
+ LWLockRelease(SerializablePredicateLockListLock);
+}
+
+
+/*
* Flag a rw-dependency between two serializable transactions.
*
* The caller is responsible for ensuring that we have a LW lock on
@@ -3814,7 +4183,7 @@ FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
SetRWConflict(reader, writer);
}
-/*
+/*----------------------------------------------------------------------------
* We are about to add a RW-edge to the dependency graph - check that we don't
* introduce a dangerous structure by doing so, and abort one of the
* transactions if so.
@@ -3823,13 +4192,14 @@ FlagRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
* in the dependency graph:
*
* Tin ------> Tpivot ------> Tout
- * rw rw
+ * rw rw
*
* Furthermore, Tout must commit first.
*
* One more optimization is that if Tin is declared READ ONLY (or commits
* without writing), we can only have a problem if Tout committed before Tin
* acquired its snapshot.
+ *----------------------------------------------------------------------------
*/
static void
OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
@@ -3842,32 +4212,34 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
failure = false;
- /*
+ /*------------------------------------------------------------------------
* Check for already-committed writer with rw-conflict out flagged
* (conflict-flag on W means that T2 committed before W):
*
* R ------> W ------> T2
- * rw rw
+ * rw rw
*
* That is a dangerous structure, so we must abort. (Since the writer
* has already committed, we must be the reader)
+ *------------------------------------------------------------------------
*/
if (SxactIsCommitted(writer)
&& (SxactHasConflictOut(writer) || SxactHasSummaryConflictOut(writer)))
failure = true;
- /*
+ /*------------------------------------------------------------------------
* Check whether the writer has become a pivot with an out-conflict
* committed transaction (T2), and T2 committed first:
*
* R ------> W ------> T2
- * rw rw
+ * rw rw
*
* Because T2 must've committed first, there is no anomaly if:
* - the reader committed before T2
* - the writer committed before T2
* - the reader is a READ ONLY transaction and the reader was concurrent
* with T2 (= reader acquired its snapshot before T2 committed)
+ *------------------------------------------------------------------------
*/
if (!failure)
{
@@ -3891,7 +4263,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(writer)
|| t2->commitSeqNo <= writer->commitSeqNo)
&& (!SxactIsReadOnly(reader)
- || t2->commitSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
+ || t2->commitSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
{
failure = true;
break;
@@ -3903,16 +4275,17 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
}
}
- /*
+ /*------------------------------------------------------------------------
* Check whether the reader has become a pivot with a committed writer:
*
* T0 ------> R ------> W
- * rw rw
+ * rw rw
*
* Because W must've committed first for an anomaly to occur, there is no
* anomaly if:
* - T0 committed before the writer
* - T0 is READ ONLY, and overlaps the writer
+ *------------------------------------------------------------------------
*/
if (!failure && SxactIsCommitted(writer) && !SxactIsReadOnly(reader))
{
@@ -3934,7 +4307,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(t0)
|| t0->commitSeqNo >= writer->commitSeqNo)
&& (!SxactIsReadOnly(t0)
- || t0->SeqNo.lastCommitBeforeSnapshot >= writer->commitSeqNo))
+ || t0->SeqNo.lastCommitBeforeSnapshot >= writer->commitSeqNo))
{
failure = true;
break;
@@ -3950,8 +4323,8 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
{
/*
* We have to kill a transaction to avoid a possible anomaly from
- * occurring. If the writer is us, we can just ereport() to cause
- * a transaction abort. Otherwise we flag the writer for termination,
+ * occurring. If the writer is us, we can just ereport() to cause a
+ * transaction abort. Otherwise we flag the writer for termination,
* causing it to abort when it tries to commit. However, if the writer
* is a prepared transaction, already prepared, we can't abort it
* anymore, so we have to kill the reader instead.
@@ -3962,7 +4335,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
- errdetail("Cancelled on identification as a pivot, during write."),
+ errdetail("Cancelled on identification as a pivot, during write."),
errhint("The transaction might succeed if retried.")));
}
else if (SxactIsPrepared(writer))
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index e01fb7bdeb..6f603d4512 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1746,10 +1746,8 @@ describeOneTableDetails(const char *schemaname,
{
printfPQExpBuffer(&buf,
"SELECT conname,\n"
- " pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n");
- if (pset.sversion >= 90100)
- appendPQExpBuffer(&buf, " ,convalidated\n");
- appendPQExpBuffer(&buf, "FROM pg_catalog.pg_constraint r\n"
+ " pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
+ "FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
oid);
result = PSQLexec(buf.data, false);
@@ -1768,9 +1766,6 @@ describeOneTableDetails(const char *schemaname,
PQgetvalue(result, i, 0),
PQgetvalue(result, i, 1));
- if (pset.sversion >= 90100 && strcmp(PQgetvalue(result, i, 2), "f") == 0)
- appendPQExpBuffer(&buf, " NOT VALID");
-
printTableAddFooter(&cont, buf.data);
}
}
diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h
index 77ae8f904d..760c76cff0 100644
--- a/src/include/storage/predicate.h
+++ b/src/include/storage/predicate.h
@@ -49,11 +49,13 @@ extern void PredicateLockPage(const Relation relation, const BlockNumber blkno);
extern void PredicateLockTuple(const Relation relation, const HeapTuple tuple);
extern void PredicateLockPageSplit(const Relation relation, const BlockNumber oldblkno, const BlockNumber newblkno);
extern void PredicateLockPageCombine(const Relation relation, const BlockNumber oldblkno, const BlockNumber newblkno);
+extern void TransferPredicateLocksToHeapRelation(const Relation relation);
extern void ReleasePredicateLocks(const bool isCommit);
/* conflict detection (may also trigger rollback) */
extern void CheckForSerializableConflictOut(const bool valid, const Relation relation, const HeapTuple tuple, const Buffer buffer);
extern void CheckForSerializableConflictIn(const Relation relation, const HeapTuple tuple, const Buffer buffer);
+extern void CheckTableForSerializableConflictIn(const Relation relation);
/* final rollback checking */
extern void PreCommit_CheckForSerializationFailure(void);
diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h
index b144ab319a..56a01f0b91 100644
--- a/src/include/storage/predicate_internals.h
+++ b/src/include/storage/predicate_internals.h
@@ -273,9 +273,7 @@ typedef struct PREDICATELOCKTARGETTAG
* up the targets as the related tuples are pruned or vacuumed, we check the
* xmin on access. This should be far less costly.
*/
-typedef struct PREDICATELOCKTARGET PREDICATELOCKTARGET;
-
-struct PREDICATELOCKTARGET
+typedef struct PREDICATELOCKTARGET
{
/* hash key */
PREDICATELOCKTARGETTAG tag; /* unique identifier of lockable object */
@@ -283,7 +281,7 @@ struct PREDICATELOCKTARGET
/* data */
SHM_QUEUE predicateLocks; /* list of PREDICATELOCK objects assoc. with
* predicate lock target */
-};
+} PREDICATELOCKTARGET;
/*
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index f2e8ad2207..d2c672c7bb 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -926,7 +926,7 @@ plperl_trusted_init(void)
if (!isGV_with_GP(sv) || !GvCV(sv))
continue;
SvREFCNT_dec(GvCV(sv)); /* free the CV */
- GvCV(sv) = NULL; /* prevent call via GV */
+ GvCV_set(sv, NULL); /* prevent call via GV */
}
hv_clear(stash);
diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h
index a375bb5e31..c1236b7efc 100644
--- a/src/pl/plperl/plperl.h
+++ b/src/pl/plperl/plperl.h
@@ -85,6 +85,11 @@
(U32)HeKUTF8(he))
#endif
+/* supply GvCV_set if it's missing - ppport.h doesn't supply it, unfortunately */
+#ifndef GvCV_set
+#define GvCV_set(gv, cv) (GvCV(gv) = cv)
+#endif
+
/* declare routines from plperl.c for access by .xs files */
HV *plperl_spi_exec(char *, int);
void plperl_return_next(SV *);
diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out
index b31d58f816..a1b089921d 100644
--- a/src/test/regress/expected/with.out
+++ b/src/test/regress/expected/with.out
@@ -1397,6 +1397,46 @@ SELECT * FROM y;
(17 rows)
DROP RULE y_rule ON y;
+-- check merging of outer CTE with CTE in a rule action
+CREATE TEMP TABLE bug6051 AS
+ select i from generate_series(1,3) as t(i);
+SELECT * FROM bug6051;
+ i
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
+INSERT INTO bug6051 SELECT * FROM t1;
+SELECT * FROM bug6051;
+ i
+---
+ 1
+ 2
+ 3
+(3 rows)
+
+CREATE TEMP TABLE bug6051_2 (i int);
+CREATE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD
+ INSERT INTO bug6051_2
+ SELECT NEW.i;
+WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
+INSERT INTO bug6051 SELECT * FROM t1;
+SELECT * FROM bug6051;
+ i
+---
+(0 rows)
+
+SELECT * FROM bug6051_2;
+ i
+---
+ 1
+ 2
+ 3
+(3 rows)
+
-- a truly recursive CTE in the same list
WITH RECURSIVE t(a) AS (
SELECT 0
diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source
index 0d278212c0..b84d51e9e5 100644
--- a/src/test/regress/input/constraints.source
+++ b/src/test/regress/input/constraints.source
@@ -397,6 +397,9 @@ INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>');
ALTER TABLE circles ADD EXCLUDE USING gist
(c1 WITH &&, (c2::circle) WITH &&);
+-- try reindexing an existing constraint
+REINDEX INDEX circles_c1_c2_excl;
+
DROP TABLE circles;
-- Check deferred exclusion constraint
diff --git a/src/test/regress/output/constraints.source b/src/test/regress/output/constraints.source
index d164b90af7..e2f2939931 100644
--- a/src/test/regress/output/constraints.source
+++ b/src/test/regress/output/constraints.source
@@ -543,6 +543,8 @@ ALTER TABLE circles ADD EXCLUDE USING gist
NOTICE: ALTER TABLE / ADD EXCLUDE will create implicit index "circles_c1_c2_excl1" for table "circles"
ERROR: could not create exclusion constraint "circles_c1_c2_excl1"
DETAIL: Key (c1, (c2::circle))=(<(0,0),5>, <(0,0),5>) conflicts with key (c1, (c2::circle))=(<(0,0),5>, <(0,0),4>).
+-- try reindexing an existing constraint
+REINDEX INDEX circles_c1_c2_excl;
DROP TABLE circles;
-- Check deferred exclusion constraint
CREATE TABLE deferred_excl (
diff --git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql
index 4f6b517103..bc340e4543 100644
--- a/src/test/regress/sql/with.sql
+++ b/src/test/regress/sql/with.sql
@@ -610,6 +610,29 @@ SELECT * FROM y;
DROP RULE y_rule ON y;
+-- check merging of outer CTE with CTE in a rule action
+CREATE TEMP TABLE bug6051 AS
+ select i from generate_series(1,3) as t(i);
+
+SELECT * FROM bug6051;
+
+WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
+INSERT INTO bug6051 SELECT * FROM t1;
+
+SELECT * FROM bug6051;
+
+CREATE TEMP TABLE bug6051_2 (i int);
+
+CREATE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD
+ INSERT INTO bug6051_2
+ SELECT NEW.i;
+
+WITH t1 AS ( DELETE FROM bug6051 RETURNING * )
+INSERT INTO bug6051 SELECT * FROM t1;
+
+SELECT * FROM bug6051;
+SELECT * FROM bug6051_2;
+
-- a truly recursive CTE in the same list
WITH RECURSIVE t(a) AS (
SELECT 0