From e89bd02f58ac07e44e0388a32b7ee1b42f1fd7c6 Mon Sep 17 00:00:00 2001
From: Stephen Frost
Date: Fri, 24 Apr 2015 20:34:26 -0400
Subject: Perform RLS WITH CHECK before constraints, etc
The RLS capability is built on top of the WITH CHECK OPTION
system which was added for auto-updatable views, however, unlike
WCOs on views (which are mandated by the SQL spec to not fire until
after all other constraints and checks are done), it makes much more
sense for RLS checks to happen earlier than constraint and uniqueness
checks.
This patch reworks the structure which holds the WCOs a bit to be
explicitly either VIEW or RLS checks and the RLS-related checks are
done prior to the constraint and uniqueness checks. This also allows
better error reporting as we are now reporting when a violation is due
to a WITH CHECK OPTION and when it's due to an RLS policy violation,
which was independently noted by Craig Ringer as being confusing.
The documentation is also updated to include a paragraph about when RLS
WITH CHECK handling is performed, as there have been a number of
questions regarding that and the documentation was previously silent on
the matter.
Author: Dean Rasheed, with some kibitzing and comment changes by me.
---
src/backend/executor/nodeModifyTable.c | 58 ++++++++++++++++++++++++++++------
1 file changed, 49 insertions(+), 9 deletions(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index f96fb2432b..06ec82e246 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -252,6 +252,16 @@ ExecInsert(TupleTableSlot *slot,
*/
tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
+ /*
+ * Check any RLS INSERT WITH CHECK policies
+ *
+ * ExecWithCheckOptions() will skip any WCOs which are not of
+ * the kind we are looking for at this point.
+ */
+ if (resultRelInfo->ri_WithCheckOptions != NIL)
+ ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
+ resultRelInfo, slot, estate);
+
/*
* Check the constraints of the tuple
*/
@@ -287,9 +297,21 @@ ExecInsert(TupleTableSlot *slot,
list_free(recheckIndexes);
- /* Check any WITH CHECK OPTION constraints */
+ /*
+ * Check any WITH CHECK OPTION constraints from parent views. We
+ * are required to do this after testing all constraints and
+ * uniqueness violations per the SQL spec, so we do it after actually
+ * inserting the record into the heap and all indexes.
+ *
+ * ExecWithCheckOptions will elog(ERROR) if a violation is found, so
+ * the tuple will never be seen, if it violates the the WITH CHECK
+ * OPTION.
+ *
+ * ExecWithCheckOptions() will skip any WCOs which are not of
+ * the kind we are looking for at this point.
+ */
if (resultRelInfo->ri_WithCheckOptions != NIL)
- ExecWithCheckOptions(resultRelInfo, slot, estate);
+ ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
/* Process RETURNING if present */
if (resultRelInfo->ri_projectReturning)
@@ -653,15 +675,25 @@ ExecUpdate(ItemPointer tupleid,
tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
/*
- * Check the constraints of the tuple
+ * Check any RLS UPDATE WITH CHECK policies
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
- * must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
- * trigger.c will have done heap_lock_tuple to lock the correct tuple,
- * so there's no need to do them again.)
+ * must loop back here and recheck any RLS policies and constraints.
+ * (We don't need to redo triggers, however. If there are any BEFORE
+ * triggers then trigger.c will have done heap_lock_tuple to lock the
+ * correct tuple, so there's no need to do them again.)
+ *
+ * ExecWithCheckOptions() will skip any WCOs which are not of
+ * the kind we are looking for at this point.
*/
lreplace:;
+ if (resultRelInfo->ri_WithCheckOptions != NIL)
+ ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
+ resultRelInfo, slot, estate);
+
+ /*
+ * Check the constraints of the tuple
+ */
if (resultRelationDesc->rd_att->constr)
ExecConstraints(resultRelInfo, slot, estate);
@@ -780,9 +812,17 @@ lreplace:;
list_free(recheckIndexes);
- /* Check any WITH CHECK OPTION constraints */
+ /*
+ * Check any WITH CHECK OPTION constraints from parent views. We
+ * are required to do this after testing all constraints and
+ * uniqueness violations per the SQL spec, so we do it after actually
+ * updating the record in the heap and all indexes.
+ *
+ * ExecWithCheckOptions() will skip any WCOs which are not of
+ * the kind we are looking for at this point.
+ */
if (resultRelInfo->ri_WithCheckOptions != NIL)
- ExecWithCheckOptions(resultRelInfo, slot, estate);
+ ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
/* Process RETURNING if present */
if (resultRelInfo->ri_projectReturning)
--
cgit v1.2.3
From 6aab1f45acaa4cf90e62357ebdf5e6a38829204e Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sun, 26 Apr 2015 18:42:31 +0200
Subject: Fix various typos and grammar errors in comments.
Author: Dmitriy Olshevskiy
Discussion: 553D00A6.4090205@bk.ru
---
src/backend/access/brin/brin_tuple.c | 2 +-
src/backend/access/nbtree/nbtree.c | 2 +-
src/backend/access/transam/twophase.c | 2 +-
src/backend/catalog/objectaddress.c | 4 ++--
src/backend/commands/indexcmds.c | 2 +-
src/backend/executor/nodeModifyTable.c | 2 +-
src/backend/postmaster/bgworker.c | 2 +-
src/backend/replication/logical/snapbuild.c | 2 +-
src/backend/storage/lmgr/lwlock.c | 4 ++--
src/backend/utils/cache/inval.c | 2 +-
src/bin/pg_archivecleanup/pg_archivecleanup.c | 2 +-
src/bin/pg_basebackup/pg_recvlogical.c | 4 ++--
src/bin/pg_upgrade/parallel.c | 2 +-
src/bin/pg_upgrade/relfilenode.c | 2 +-
src/include/access/attnum.h | 2 +-
src/include/access/xact.h | 2 +-
src/include/mb/pg_wchar.h | 2 +-
src/include/storage/s_lock.h | 4 ++--
src/interfaces/ecpg/pgtypeslib/datetime.c | 2 +-
src/interfaces/ecpg/pgtypeslib/numeric.c | 2 +-
src/port/pgmkdirp.c | 2 +-
21 files changed, 25 insertions(+), 25 deletions(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 93f00f6a8f..08fa998a52 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -304,7 +304,7 @@ brin_free_tuple(BrinTuple *tuple)
}
/*
- * Create an palloc'd copy of a BrinTuple.
+ * Create a palloc'd copy of a BrinTuple.
*/
BrinTuple *
brin_copy_tuple(BrinTuple *tuple, Size len)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 9a6dcdd4b1..c2d52faa96 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -40,7 +40,7 @@ typedef struct
BTSpool *spool;
/*
- * spool2 is needed only when the index is an unique index. Dead tuples
+ * spool2 is needed only when the index is a unique index. Dead tuples
* are put into spool2 instead of spool in order to avoid uniqueness
* check.
*/
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 3ac339bebf..d9a3fabb8f 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -291,7 +291,7 @@ AtAbort_Twophase(void)
}
/*
- * This is called after we have finished transfering state to the prepared
+ * This is called after we have finished transferring state to the prepared
* PGXACT entry.
*/
void
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 5e1bda4ed2..10f0396561 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -100,7 +100,7 @@ typedef struct
AclObjectKind acl_kind; /* ACL_KIND_* of this object type */
bool is_nsp_name_unique; /* can the nsp/name combination (or
* name alone, if there's no
- * namespace) be considered an unique
+ * namespace) be considered a unique
* identifier for an object of this
* class? */
} ObjectPropertyType;
@@ -3241,7 +3241,7 @@ pg_identify_object(PG_FUNCTION_ARGS)
/*
* We only return the object name if it can be used (together with
- * the schema name, if any) as an unique identifier.
+ * the schema name, if any) as a unique identifier.
*/
if (get_object_namensp_unique(address.classId))
{
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 99acd4a6a2..351d48ece6 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -1051,7 +1051,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
*/
/*
- * A expression using mutable functions is probably wrong,
+ * An expression using mutable functions is probably wrong,
* since if you aren't going to get the same result for the
* same data every time, it's not clear what the index entries
* mean at all.
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 06ec82e246..31666edfa8 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -304,7 +304,7 @@ ExecInsert(TupleTableSlot *slot,
* inserting the record into the heap and all indexes.
*
* ExecWithCheckOptions will elog(ERROR) if a violation is found, so
- * the tuple will never be seen, if it violates the the WITH CHECK
+ * the tuple will never be seen, if it violates the WITH CHECK
* OPTION.
*
* ExecWithCheckOptions() will skip any WCOs which are not of
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 99f4b65ea6..d4939415f0 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -130,7 +130,7 @@ BackgroundWorkerShmemInit(void)
/*
* Copy contents of worker list into shared memory. Record the shared
* memory slot assigned to each worker. This ensures a 1-to-1
- * correspondence betwen the postmaster's private list and the array
+ * correspondence between the postmaster's private list and the array
* in shared memory.
*/
slist_foreach(siter, &BackgroundWorkerList)
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 9b40bc8eca..c878f62985 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -1597,7 +1597,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn)
/*
* We may overwrite the work from some other backend, but that's ok, our
- * snapshot is valid as well, we'll just have done some superflous work.
+ * snapshot is valid as well, we'll just have done some superfluous work.
*/
if (rename(tmppath, path) != 0)
{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5813e71da0..1acd2f090b 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -848,7 +848,7 @@ LWLockDequeueSelf(LWLock *lock)
/*
* Somebody else dequeued us and has or will wake us up. Deal with the
- * superflous absorption of a wakeup.
+ * superfluous absorption of a wakeup.
*/
/*
@@ -1183,7 +1183,7 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{
/*
* Wait until awakened. Like in LWLockAcquire, be prepared for bogus
- * wakups, because we share the semaphore with ProcWaitForSignal.
+ * wakeups, because we share the semaphore with ProcWaitForSignal.
*/
LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 8826a5d50b..1907a87458 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -516,7 +516,7 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
/*
* RegisterSnapshotInvalidation
*
- * Register a invalidation event for MVCC scans against a given catalog.
+ * Register an invalidation event for MVCC scans against a given catalog.
* Only needed for catalogs that don't have catcaches.
*/
static void
diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c
index 2ff2a270b6..ba6e242f15 100644
--- a/src/bin/pg_archivecleanup/pg_archivecleanup.c
+++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c
@@ -46,7 +46,7 @@ char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we
* accessible directory. If you want to make other assumptions,
* such as using a vendor-specific archive and access API, these
* routines are the ones you'll need to change. You're
- * enouraged to submit any changes to pgsql-hackers@postgresql.org
+ * encouraged to submit any changes to pgsql-hackers@postgresql.org
* or personally to the current maintainer. Those changes may be
* folded in to later versions of this program.
*/
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index fa44d0362b..e4e16d7dc1 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -603,7 +603,7 @@ main(int argc, char **argv)
{"verbose", no_argument, NULL, 'v'},
{"version", no_argument, NULL, 'V'},
{"help", no_argument, NULL, '?'},
-/* connnection options */
+/* connection options */
{"dbname", required_argument, NULL, 'd'},
{"host", required_argument, NULL, 'h'},
{"port", required_argument, NULL, 'p'},
@@ -670,7 +670,7 @@ main(int argc, char **argv)
case 'v':
verbose++;
break;
-/* connnection options */
+/* connection options */
case 'd':
dbname = pg_strdup(optarg);
break;
diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c
index c6978b596b..97fa0dc356 100644
--- a/src/bin/pg_upgrade/parallel.c
+++ b/src/bin/pg_upgrade/parallel.c
@@ -175,7 +175,7 @@ win32_exec_prog(exec_thread_arg *args)
* parallel_transfer_all_new_dbs
*
* This has the same API as transfer_all_new_dbs, except it does parallel execution
- * by transfering multiple tablespaces in parallel
+ * by transferring multiple tablespaces in parallel
*/
void
parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c
index fe058807b6..7b3215af56 100644
--- a/src/bin/pg_upgrade/relfilenode.c
+++ b/src/bin/pg_upgrade/relfilenode.c
@@ -35,7 +35,7 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/*
- * Transfering files by tablespace is tricky because a single database can
+ * Transferring files by tablespace is tricky because a single database can
* use multiple tablespaces. For non-parallel mode, we just pass a NULL
* tablespace path, which matches all tablespaces. In parallel mode, we
* pass the default tablespace and all user-created tablespaces and let
diff --git a/src/include/access/attnum.h b/src/include/access/attnum.h
index 82e811dd0f..0fe27ecfba 100644
--- a/src/include/access/attnum.h
+++ b/src/include/access/attnum.h
@@ -46,7 +46,7 @@ typedef int16 AttrNumber;
* Returns the attribute offset for an attribute number.
*
* Note:
- * Assumes the attribute number is for an user defined attribute.
+ * Assumes the attribute number is for a user defined attribute.
*/
#define AttrNumberGetAttrOffset(attNum) \
( \
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index fdf3ea3228..8da6aa952f 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -168,7 +168,7 @@ typedef struct xl_xact_assignment
* 'xl_xact_xinfo->xinfo'. The presence of the xinfo field itself is signalled
* by a set XLOG_XACT_HAS_INFO bit in the xl_info field.
*
- * NB: All the individual data chunks should be be sized to multiples of
+ * NB: All the individual data chunks should be sized to multiples of
* sizeof(int) and only require int32 alignment.
*/
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 254cf678d6..f7222fc177 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -88,7 +88,7 @@ typedef unsigned int pg_wchar;
* interoperable anyway).
*
* Note that XEmacs's implementation is different from what emacs does.
- * We follow emacs's implementaion, rathter than XEmacs's.
+ * We follow emacs's implementation, rather than XEmacs's.
*----------------------------------------------------
*/
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index f4dc0dbedc..c63cf54c8e 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -356,8 +356,8 @@ tas(volatile slock_t *lock)
/*
* Solaris has always run sparc processors in TSO (total store) mode, but
* linux didn't use to and the *BSDs still don't. So, be careful about
- * acquire/release semantics. The CPU will treat superflous membars as NOPs,
- * so it's just code space.
+ * acquire/release semantics. The CPU will treat superfluous membars as
+ * NOPs, so it's just code space.
*/
#define HAS_TEST_AND_SET
diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c
index 49cb817a50..3b0855f722 100644
--- a/src/interfaces/ecpg/pgtypeslib/datetime.c
+++ b/src/interfaces/ecpg/pgtypeslib/datetime.c
@@ -323,7 +323,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf)
* PGTYPESdate_defmt_asc
*
* function works as follows:
- * - first we analyze the paramters
+ * - first we analyze the parameters
* - if this is a special case with no delimiters, add delimters
* - find the tokens. First we look for numerical values. If we have found
* less than 3 tokens, we check for the months' names and thereafter for
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 84d73b7bb7..0504f3cac8 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -1376,7 +1376,7 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
{
/*
- * instead of inverting the result, we invert the paramter ordering
+ * instead of inverting the result, we invert the parameter ordering
*/
return cmp_abs(var2, var1);
}
diff --git a/src/port/pgmkdirp.c b/src/port/pgmkdirp.c
index d9c95b522c..d943559760 100644
--- a/src/port/pgmkdirp.c
+++ b/src/port/pgmkdirp.c
@@ -93,7 +93,7 @@ pg_mkdir_p(char *path, int omode)
/*
* POSIX 1003.2: For each dir operand that does not name an existing
* directory, effects equivalent to those caused by the following command
- * shall occcur:
+ * shall occur:
*
* mkdir -p -m $(umask -S),u+wx $(dirname dir) && mkdir [-m mode] dir
*
--
cgit v1.2.3
From 168d5805e4c08bed7b95d351bf097cff7c07dd65 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Fri, 8 May 2015 05:31:36 +0200
Subject: Add support for INSERT ... ON CONFLICT DO NOTHING/UPDATE.
The newly added ON CONFLICT clause allows specifying an alternative to
raising a unique or exclusion constraint violation error when inserting.
ON CONFLICT refers to constraints that can either be specified using an
inference clause (by specifying the columns of a unique constraint) or
by naming a unique or exclusion constraint. DO NOTHING avoids the
constraint violation, without touching the pre-existing row. DO UPDATE
SET ... [WHERE ...] updates the pre-existing tuple, and has access to
both the tuple proposed for insertion and the existing tuple; the
optional WHERE clause can be used to prevent an update from being
executed. The UPDATE SET and WHERE clauses have access to the tuple
proposed for insertion using the "magic" EXCLUDED alias, and to the
pre-existing tuple using the table name or its alias.
This feature is often referred to as upsert.
This is implemented using a new infrastructure called "speculative
insertion". It is an optimistic variant of regular insertion that first
does a pre-check for existing tuples and then attempts an insert. If a
violating tuple was inserted concurrently, the speculatively inserted
tuple is deleted and a new attempt is made. If the pre-check finds a
matching tuple the alternative DO NOTHING or DO UPDATE action is taken.
If the insertion succeeds without detecting a conflict, the tuple is
deemed inserted.
To handle the possible ambiguity between the excluded alias and a table
named excluded, and for convenience with long relation names, INSERT
INTO now can alias its target table.
Bumps catversion as stored rules change.
Author: Peter Geoghegan, with significant contributions from Heikki
Linnakangas and Andres Freund. Testing infrastructure by Jeff Janes.
Reviewed-By: Heikki Linnakangas, Andres Freund, Robert Haas, Simon Riggs,
Dean Rasheed, Stephen Frost and many others.
---
contrib/pg_stat_statements/pg_stat_statements.c | 25 ++
contrib/postgres_fdw/deparse.c | 7 +-
contrib/postgres_fdw/expected/postgres_fdw.out | 5 +
contrib/postgres_fdw/postgres_fdw.c | 15 +-
contrib/postgres_fdw/postgres_fdw.h | 2 +-
contrib/postgres_fdw/sql/postgres_fdw.sql | 3 +
contrib/test_decoding/expected/ddl.out | 34 ++
contrib/test_decoding/expected/toast.out | 9 +-
contrib/test_decoding/sql/ddl.sql | 22 +
contrib/test_decoding/sql/toast.sql | 5 +
doc/src/sgml/fdwhandler.sgml | 7 +
doc/src/sgml/keywords.sgml | 7 +
doc/src/sgml/mvcc.sgml | 23 +-
doc/src/sgml/plpgsql.sgml | 14 +-
doc/src/sgml/postgres-fdw.sgml | 8 +
doc/src/sgml/protocol.sgml | 13 +-
doc/src/sgml/ref/create_policy.sgml | 63 ++-
doc/src/sgml/ref/create_rule.sgml | 6 +-
doc/src/sgml/ref/create_table.sgml | 4 +-
doc/src/sgml/ref/create_trigger.sgml | 5 +-
doc/src/sgml/ref/create_view.sgml | 9 +-
doc/src/sgml/ref/insert.sgml | 403 ++++++++++++++++-
doc/src/sgml/trigger.sgml | 48 ++-
src/backend/access/heap/heapam.c | 377 ++++++++++++++--
src/backend/access/heap/hio.c | 27 +-
src/backend/access/heap/tuptoaster.c | 8 +
src/backend/access/nbtree/nbtinsert.c | 28 +-
src/backend/access/rmgrdesc/heapdesc.c | 9 +
src/backend/catalog/index.c | 53 ++-
src/backend/catalog/indexing.c | 2 +-
src/backend/catalog/sql_features.txt | 2 +-
src/backend/commands/constraint.c | 2 +-
src/backend/commands/copy.c | 7 +-
src/backend/commands/explain.c | 70 ++-
src/backend/commands/trigger.c | 19 +-
src/backend/executor/execIndexing.c | 417 +++++++++++++++---
src/backend/executor/execMain.c | 53 ++-
src/backend/executor/nodeLockRows.c | 12 +-
src/backend/executor/nodeModifyTable.c | 459 +++++++++++++++++++-
src/backend/nodes/copyfuncs.c | 84 ++++
src/backend/nodes/equalfuncs.c | 62 +++
src/backend/nodes/nodeFuncs.c | 87 ++++
src/backend/nodes/outfuncs.c | 41 +-
src/backend/nodes/readfuncs.c | 40 ++
src/backend/optimizer/plan/createplan.c | 26 +-
src/backend/optimizer/plan/planner.c | 27 ++
src/backend/optimizer/plan/setrefs.c | 52 ++-
src/backend/optimizer/plan/subselect.c | 4 +
src/backend/optimizer/prep/prepjointree.c | 6 +
src/backend/optimizer/prep/preptlist.c | 13 +
src/backend/optimizer/util/plancat.c | 352 +++++++++++++++
src/backend/parser/analyze.c | 149 ++++++-
src/backend/parser/gram.y | 121 +++++-
src/backend/parser/parse_clause.c | 203 +++++++++
src/backend/parser/parse_collate.c | 2 +
src/backend/parser/parse_target.c | 11 +-
src/backend/replication/logical/decode.c | 66 ++-
src/backend/replication/logical/reorderbuffer.c | 159 +++++--
src/backend/rewrite/rewriteHandler.c | 87 +++-
src/backend/rewrite/rowsecurity.c | 82 +++-
src/backend/storage/lmgr/lmgr.c | 91 ++++
src/backend/tcop/pquery.c | 17 +-
src/backend/utils/adt/lockfuncs.c | 1 +
src/backend/utils/adt/ruleutils.c | 108 +++--
src/backend/utils/time/tqual.c | 29 +-
src/bin/psql/common.c | 5 +-
src/include/access/heapam.h | 3 +
src/include/access/heapam_xlog.h | 54 ++-
src/include/access/hio.h | 2 +-
src/include/access/htup_details.h | 36 +-
src/include/catalog/catversion.h | 2 +-
src/include/catalog/index.h | 2 +
src/include/executor/executor.h | 13 +-
src/include/nodes/execnodes.h | 15 +
src/include/nodes/nodes.h | 17 +
src/include/nodes/parsenodes.h | 45 +-
src/include/nodes/plannodes.h | 8 +
src/include/nodes/primnodes.h | 42 ++
src/include/optimizer/plancat.h | 2 +
src/include/optimizer/planmain.h | 2 +-
src/include/optimizer/prep.h | 3 +
src/include/parser/kwlist.h | 1 +
src/include/parser/parse_clause.h | 4 +
src/include/replication/reorderbuffer.h | 9 +-
src/include/rewrite/rowsecurity.h | 3 +-
src/include/storage/lmgr.h | 5 +
src/include/storage/lock.h | 10 +
src/include/utils/snapshot.h | 22 +-
.../expected/insert-conflict-do-nothing.out | 23 +
.../expected/insert-conflict-do-update-2.out | 23 +
.../expected/insert-conflict-do-update-3.out | 26 ++
.../expected/insert-conflict-do-update.out | 23 +
src/test/isolation/isolation_schedule | 4 +
.../specs/insert-conflict-do-nothing.spec | 41 ++
.../specs/insert-conflict-do-update-2.spec | 41 ++
.../specs/insert-conflict-do-update-3.spec | 69 +++
.../isolation/specs/insert-conflict-do-update.spec | 40 ++
src/test/regress/expected/errors.out | 4 +-
src/test/regress/expected/insert_conflict.out | 476 +++++++++++++++++++++
src/test/regress/expected/privileges.out | 29 +-
src/test/regress/expected/returning.out | 24 ++
src/test/regress/expected/rowsecurity.out | 132 ++++++
src/test/regress/expected/rules.out | 90 ++++
src/test/regress/expected/subselect.out | 22 +
src/test/regress/expected/triggers.out | 102 ++++-
src/test/regress/expected/updatable_views.out | 61 +++
src/test/regress/expected/update.out | 34 ++
src/test/regress/expected/with.out | 82 ++++
src/test/regress/input/constraints.source | 12 +
src/test/regress/output/constraints.source | 24 +-
src/test/regress/parallel_schedule | 1 +
src/test/regress/serial_schedule | 1 +
src/test/regress/sql/insert_conflict.sql | 284 ++++++++++++
src/test/regress/sql/privileges.sql | 19 +-
src/test/regress/sql/returning.sql | 6 +
src/test/regress/sql/rowsecurity.sql | 112 +++++
src/test/regress/sql/rules.sql | 59 +++
src/test/regress/sql/subselect.sql | 14 +
src/test/regress/sql/triggers.sql | 69 ++-
src/test/regress/sql/updatable_views.sql | 9 +
src/test/regress/sql/update.sql | 21 +
src/test/regress/sql/with.sql | 57 +++
122 files changed, 6106 insertions(+), 435 deletions(-)
create mode 100644 src/test/isolation/expected/insert-conflict-do-nothing.out
create mode 100644 src/test/isolation/expected/insert-conflict-do-update-2.out
create mode 100644 src/test/isolation/expected/insert-conflict-do-update-3.out
create mode 100644 src/test/isolation/expected/insert-conflict-do-update.out
create mode 100644 src/test/isolation/specs/insert-conflict-do-nothing.spec
create mode 100644 src/test/isolation/specs/insert-conflict-do-update-2.spec
create mode 100644 src/test/isolation/specs/insert-conflict-do-update-3.spec
create mode 100644 src/test/isolation/specs/insert-conflict-do-update.spec
create mode 100644 src/test/regress/expected/insert_conflict.out
create mode 100644 src/test/regress/sql/insert_conflict.sql
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 76d9e0a5ec..6abe3f0770 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2264,6 +2264,7 @@ JumbleQuery(pgssJumbleState *jstate, Query *query)
JumbleRangeTable(jstate, query->rtable);
JumbleExpr(jstate, (Node *) query->jointree);
JumbleExpr(jstate, (Node *) query->targetList);
+ JumbleExpr(jstate, (Node *) query->onConflict);
JumbleExpr(jstate, (Node *) query->returningList);
JumbleExpr(jstate, (Node *) query->groupClause);
JumbleExpr(jstate, query->havingQual);
@@ -2631,6 +2632,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
APP_JUMB(ce->cursor_param);
}
break;
+ case T_InferenceElem:
+ {
+ InferenceElem *ie = (InferenceElem *) node;
+
+ APP_JUMB(ie->infercollid);
+ APP_JUMB(ie->inferopfamily);
+ APP_JUMB(ie->inferopcinputtype);
+ JumbleExpr(jstate, ie->expr);
+ }
+ break;
case T_TargetEntry:
{
TargetEntry *tle = (TargetEntry *) node;
@@ -2667,6 +2678,20 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
JumbleExpr(jstate, from->quals);
}
break;
+ case T_OnConflictExpr:
+ {
+ OnConflictExpr *conf = (OnConflictExpr *) node;
+
+ APP_JUMB(conf->action);
+ JumbleExpr(jstate, (Node *) conf->arbiterElems);
+ JumbleExpr(jstate, conf->arbiterWhere);
+ JumbleExpr(jstate, (Node *) conf->onConflictSet);
+ JumbleExpr(jstate, conf->onConflictWhere);
+ APP_JUMB(conf->constraint);
+ APP_JUMB(conf->exclRelIndex);
+ JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+ }
+ break;
case T_List:
foreach(temp, (List *) node)
{
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 94fab18c42..81cb2b447d 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -847,8 +847,8 @@ appendWhereClause(StringInfo buf,
void
deparseInsertSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
- List **retrieved_attrs)
+ List *targetAttrs, bool doNothing,
+ List *returningList, List **retrieved_attrs)
{
AttrNumber pindex;
bool first;
@@ -892,6 +892,9 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root,
else
appendStringInfoString(buf, " DEFAULT VALUES");
+ if (doNothing)
+ appendStringInfoString(buf, " ON CONFLICT DO NOTHING");
+
deparseReturningList(buf, root, rtindex, rel,
rel->trigdesc && rel->trigdesc->trig_insert_after_row,
returningList, retrieved_attrs);
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 93e9836cf0..1f417b30be 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2327,6 +2327,11 @@ INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
ERROR: duplicate key value violates unique constraint "t1_pkey"
DETAIL: Key ("C 1")=(11) already exists.
CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
ERROR: new row for relation "T 1" violates check constraint "c2positive"
DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null).
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index de732319d7..173b4f06e6 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -1171,6 +1171,7 @@ postgresPlanForeignModify(PlannerInfo *root,
List *targetAttrs = NIL;
List *returningList = NIL;
List *retrieved_attrs = NIL;
+ bool doNothing = false;
initStringInfo(&sql);
@@ -1222,6 +1223,18 @@ postgresPlanForeignModify(PlannerInfo *root,
if (plan->returningLists)
returningList = (List *) list_nth(plan->returningLists, subplan_index);
+ /*
+ * ON CONFLICT DO UPDATE and DO NOTHING case with inference specification
+ * should have already been rejected in the optimizer, as presently there
+ * is no way to recognize an arbiter index on a foreign table. Only DO
+ * NOTHING is supported without an inference specification.
+ */
+ if (plan->onConflictAction == ONCONFLICT_NOTHING)
+ doNothing = true;
+ else if (plan->onConflictAction != ONCONFLICT_NONE)
+ elog(ERROR, "unexpected ON CONFLICT specification: %d",
+ (int) plan->onConflictAction);
+
/*
* Construct the SQL command string.
*/
@@ -1229,7 +1242,7 @@ postgresPlanForeignModify(PlannerInfo *root,
{
case CMD_INSERT:
deparseInsertSql(&sql, root, resultRelation, rel,
- targetAttrs, returningList,
+ targetAttrs, doNothing, returningList,
&retrieved_attrs);
break;
case CMD_UPDATE:
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 950c6f79a2..3835ddb79a 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -60,7 +60,7 @@ extern void appendWhereClause(StringInfo buf,
List **params);
extern void deparseInsertSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
- List *targetAttrs, List *returningList,
+ List *targetAttrs, bool doNothing, List *returningList,
List **retrieved_attrs);
extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 4a23457e79..fcdd92e280 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -372,6 +372,9 @@ UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported
+INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO UPDATE SET c3 = 'ffg'; -- unsupported
INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out
index 2041ba80b5..463cb5efb9 100644
--- a/contrib/test_decoding/expected/ddl.out
+++ b/contrib/test_decoding/expected/ddl.out
@@ -148,6 +148,24 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
COMMIT
(9 rows)
+-- ON CONFLICT DO UPDATE support
+BEGIN;
+INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM generate_series(-15, 15) i
+ ON CONFLICT (id) DO UPDATE SET somenum = excluded.somenum + 1;
+COMMIT;
+/* display results, but hide most of the output */
+SELECT count(*), min(data), max(data)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY substring(data, 1, 40)
+ORDER BY 1,2;
+ count | min | max
+-------+----------------------------------------------------------------------------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------
+ 1 | BEGIN | BEGIN
+ 1 | COMMIT | COMMIT
+ 15 | table public.replication_example: UPDATE: id[integer]:10 somedata[integer]:4 somenum[integer]:11 zaphod1[integer]:null zaphod2[integer]:null | table public.replication_example: UPDATE: id[integer]:9 somedata[integer]:3 somenum[integer]:10 zaphod1[integer]:null zaphod2[integer]:null
+ 16 | table public.replication_example: INSERT: id[integer]:0 somedata[integer]:0 somenum[integer]:0 zaphod1[integer]:null zaphod2[integer]:null | table public.replication_example: INSERT: id[integer]:-9 somedata[integer]:-9 somenum[integer]:-9 zaphod1[integer]:null zaphod2[integer]:null
+(4 rows)
+
-- hide changes bc of oid visible in full table rewrites
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
@@ -196,6 +214,22 @@ ORDER BY 1,2;
20467 | table public.tr_etoomuch: DELETE: id[integer]:1 | table public.tr_etoomuch: UPDATE: id[integer]:9999 data[integer]:-9999
(3 rows)
+-- check that a large, spooled, upsert works
+INSERT INTO tr_etoomuch (id, data)
+SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i)
+ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data;
+SELECT substring(data, 1, 29), count(*)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY 1
+ORDER BY min(location - '0/0');
+ substring | count
+-------------------------------+-------
+ BEGIN | 1
+ table public.tr_etoomuch: UPD | 2235
+ table public.tr_etoomuch: INS | 1766
+ COMMIT | 1
+(4 rows)
+
/*
* check whether we decode subtransactions correctly in relation with each
* other
diff --git a/contrib/test_decoding/expected/toast.out b/contrib/test_decoding/expected/toast.out
index 0a850b7acd..735b14c978 100644
--- a/contrib/test_decoding/expected/toast.out
+++ b/contrib/test_decoding/expected/toast.out
@@ -23,6 +23,10 @@ INSERT INTO xpto (toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'),
-- update of existing column
UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1;
UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+-- updating external via INSERT ... ON CONFLICT DO UPDATE
+INSERT INTO xpto(id, toasted_col2) VALUES (2, 'toasted2-upsert')
+ON CONFLICT (id)
+DO UPDATE SET toasted_col2 = EXCLUDED.toasted_col2 || xpto.toasted_col2;
DELETE FROM xpto WHERE id = 1;
DROP TABLE IF EXISTS toasted_key;
NOTICE: table "toasted_key" does not exist, skipping
@@ -64,6 +68,9 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot',
table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:123.456 toasted_col2[text]:unchanged-toast-datum rand2[double precision]:1578
COMMIT
BEGIN
+ table public.xpto: UPDATE: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077 toasted_col2[text]:'toasted2-upsert00010002000300040005000600070008000900100011001200130014001500160017001
+ COMMIT
+ BEGIN
table public.xpto: DELETE: id[integer]:1
COMMIT
BEGIN
@@ -283,7 +290,7 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot',
table public.toasted_copy: INSERT: id[integer]:202 data[text]:'untoasted199'
table public.toasted_copy: INSERT: id[integer]:203 data[text]:'untoasted200'
COMMIT
-(232 rows)
+(235 rows)
SELECT pg_drop_replication_slot('regression_slot');
pg_drop_replication_slot
diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql
index 03314d18ac..6baad9267a 100644
--- a/contrib/test_decoding/sql/ddl.sql
+++ b/contrib/test_decoding/sql/ddl.sql
@@ -84,6 +84,18 @@ COMMIT;
-- show changes
SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+-- ON CONFLICT DO UPDATE support
+BEGIN;
+INSERT INTO replication_example(id, somedata, somenum) SELECT i, i, i FROM generate_series(-15, 15) i
+ ON CONFLICT (id) DO UPDATE SET somenum = excluded.somenum + 1;
+COMMIT;
+
+/* display results, but hide most of the output */
+SELECT count(*), min(data), max(data)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY substring(data, 1, 40)
+ORDER BY 1,2;
+
-- hide changes bc of oid visible in full table rewrites
CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
@@ -114,6 +126,16 @@ FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids',
GROUP BY substring(data, 1, 24)
ORDER BY 1,2;
+-- check that a large, spooled, upsert works
+INSERT INTO tr_etoomuch (id, data)
+SELECT g.i, -g.i FROM generate_series(8000, 12000) g(i)
+ON CONFLICT(id) DO UPDATE SET data = EXCLUDED.data;
+
+SELECT substring(data, 1, 29), count(*)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
+GROUP BY 1
+ORDER BY min(location - '0/0');
+
/*
* check whether we decode subtransactions correctly in relation with each
* other
diff --git a/contrib/test_decoding/sql/toast.sql b/contrib/test_decoding/sql/toast.sql
index 09293865df..26d6b4fbdd 100644
--- a/contrib/test_decoding/sql/toast.sql
+++ b/contrib/test_decoding/sql/toast.sql
@@ -25,6 +25,11 @@ UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_s
UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+-- updating external via INSERT ... ON CONFLICT DO UPDATE
+INSERT INTO xpto(id, toasted_col2) VALUES (2, 'toasted2-upsert')
+ON CONFLICT (id)
+DO UPDATE SET toasted_col2 = EXCLUDED.toasted_col2 || xpto.toasted_col2;
+
DELETE FROM xpto WHERE id = 1;
DROP TABLE IF EXISTS toasted_key;
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index 04f3c22433..bc06d2cbb2 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -1050,6 +1050,13 @@ GetForeignServerByName(const char *name, bool missing_ok);
source provides.
+
+ INSERT> with an ON CONFLICT> clause does not
+ support specifying the conflict target, as remote constraints are not
+ locally known. This in turn implies that ON CONFLICT DO
+ UPDATE> is not supported, since the specification is mandatory there.
+
+
diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml
index b0dfd5ff75..ea582116ab 100644
--- a/doc/src/sgml/keywords.sgml
+++ b/doc/src/sgml/keywords.sgml
@@ -853,6 +853,13 @@
+
+ CONFLICT
+ non-reserved
+
+
+
+
CONNECT
diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml
index f88b16e778..313198800c 100644
--- a/doc/src/sgml/mvcc.sgml
+++ b/doc/src/sgml/mvcc.sgml
@@ -326,8 +326,27 @@
- Because of the above rule, it is possible for an updating command to see an
- inconsistent snapshot: it can see the effects of concurrent updating
+ INSERT with an ON CONFLICT DO UPDATE> clause
+ behaves similarly. In Read Committed mode, each row proposed for insertion
+ will either insert or update. Unless there are unrelated errors, one of
+ those two outcomes is guaranteed. If a conflict originates in another
+ transaction whose effects are not yet visible to the INSERT
+ , the UPDATE clause will affect that row,
+ even though possibly no> version of that row is
+ conventionally visible to the command.
+
+
+
+ INSERT with an ON CONFLICT DO
+ NOTHING> clause may have insertion not proceed for a row due to
+ the outcome of another transaction whose effects are not visible
+ to the INSERT snapshot. Again, this is only
+ the case in Read Committed mode.
+
+
+
+ Because of the above rules, it is possible for an updating command to see
+ an inconsistent snapshot: it can see the effects of concurrent updating
commands on the same rows it is trying to update, but it
does not see effects of those commands on other rows in the database.
This behavior makes Read Committed mode unsuitable for commands that
diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml
index d36acf6d99..9a7763d18c 100644
--- a/doc/src/sgml/plpgsql.sgml
+++ b/doc/src/sgml/plpgsql.sgml
@@ -2623,7 +2623,11 @@ END;
This example uses exception handling to perform either
- UPDATE> or INSERT>, as appropriate:
+ UPDATE> or INSERT>, as appropriate. It is
+ recommended that applications use INSERT> with
+ ON CONFLICT DO UPDATE> rather than actually using
+ this pattern. This example serves primarily to illustrate use of
+ PL/pgSQL control flow structures:
CREATE TABLE db (a INT PRIMARY KEY, b TEXT);
@@ -3852,9 +3856,11 @@ ASSERT condition , INSERT> and UPDATE> operations, the return value
should be NEW>, which the trigger function may modify to
support INSERT RETURNING> and UPDATE RETURNING>
- (this will also affect the row value passed to any subsequent triggers).
- For DELETE> operations, the return value should be
- OLD>.
+ (this will also affect the row value passed to any subsequent triggers,
+ or passed to a special EXCLUDED> alias reference within
+ an INSERT> statement with an ON CONFLICT DO
+ UPDATE> clause). For DELETE> operations, the return
+ value should be OLD>.
diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml
index 43adb61455..1079140de2 100644
--- a/doc/src/sgml/postgres-fdw.sgml
+++ b/doc/src/sgml/postgres-fdw.sgml
@@ -68,6 +68,14 @@
in your user mapping must have privileges to do these things.)
+
+ Note that postgres_fdw> currently lacks support for
+ INSERT statements with an ON CONFLICT DO
+ UPDATE> clause. However, the ON CONFLICT DO NOTHING>
+ clause is supported, provided a unique index inference specification
+ is omitted.
+
+
It is generally recommended that the columns of a foreign table be declared
with exactly the same data types, and collations if applicable, as the
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index 3a753a0b9b..ac13d3201c 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -2998,9 +2998,16 @@ CommandComplete (B)
INSERT oid
rows, where
rows is the number of rows
- inserted. oid is the object ID
- of the inserted row if rows is 1
- and the target table has OIDs;
+ inserted. However, if and only if ON CONFLICT
+ UPDATE> is specified, then the tag is UPSERT
+ oid
+ rows, where
+ rows is the number of rows inserted
+ or updated.
+ oid is the object ID of the
+ inserted row if rows is 1 and the
+ target table has OIDs, and (for the UPSERT
+ tag), the row was actually inserted rather than updated;
otherwise oid is 0.
diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml
index 457911e0c3..e826984633 100644
--- a/doc/src/sgml/ref/create_policy.sgml
+++ b/doc/src/sgml/ref/create_policy.sgml
@@ -78,11 +78,13 @@ CREATE POLICY name ON ON CONFLICT DO
+ UPDATE> and INSERT> policies are not combined in this way, but
+ rather enforced as noted at each stage of ON CONFLICT> execution).
+ Further, for commands which can have both USING and WITH CHECK policies (ALL
+ and UPDATE), if no WITH CHECK policy is defined then the USING policy will be
+ used for both what rows are visible (normal USING case) and which rows will
+ be allowed to be added (WITH CHECK case).
@@ -263,6 +265,12 @@ CREATE POLICY name ON
+
+ Note that INSERT with ON CONFLICT DO
+ UPDATE requires that any INSERT policy
+ WITH CHECK expression passes for any rows appended to the relation by
+ the INSERT path only.
+
@@ -271,22 +279,39 @@ CREATE POLICY name ON
Using UPDATE for a policy means that it will apply
- to UPDATE commands. As UPDATE
- involves pulling an existing record and then making changes to some
- portion (but possibly not all) of the record, the
- UPDATE policy accepts both a USING expression and
- a WITH CHECK expression. The USING expression will be used to
- determine which records the UPDATE command will
- see to operate against, while the WITH CHECK
- expression defines what rows are allowed to be added back into the
- relation (similar to the INSERT policy).
- Any rows whose resulting values do not pass the
- WITH CHECK expression will cause an ERROR and the
- entire command will be aborted. Note that if only a
- USING clause is specified then that clause will be
- used for both USING and
+ to UPDATE commands (or auxiliary ON
+ CONFLICT DO UPDATE clauses of INSERT
+ commands). As UPDATE involves pulling an existing
+ record and then making changes to some portion (but possibly not all)
+ of the record, the UPDATE policy accepts both a
+ USING expression and a WITH CHECK
+ expression. The USING expression will be used to
+ determine which records the UPDATE command will see
+ to operate against, while the WITH CHECK expression
+ defines what rows are allowed to be added back into the relation
+ (similar to the INSERT policy). Any rows whose
+ resulting values do not pass the WITH CHECK
+ expression will cause an ERROR and the entire command will be aborted.
+ Note that if only a USING clause is specified then
+ that clause will be used for both USING and
WITH CHECK cases.
+
+ Note, however, that INSERT with ON CONFLICT
+ DO UPDATE requires that an UPDATE policy
+ USING expression always be enforced as a
+ WITH CHECK expression. This
+ UPDATE policy must always pass when the
+ UPDATE path is taken. Any existing row that
+ necessitates that the UPDATE path be taken must pass
+ the (UPDATE or ALL) USING qualifications (combined
+ using OR), which are always enforced as WITH CHECK
+ options in this context (the UPDATE path will
+ never> be silently avoided; an error will be thrown
+ instead). Finally, the final row appended to the relation must pass
+ any WITH CHECK options that a conventional
+ UPDATE is required to pass.
+
diff --git a/doc/src/sgml/ref/create_rule.sgml b/doc/src/sgml/ref/create_rule.sgml
index 677766a2d5..53fdf56621 100644
--- a/doc/src/sgml/ref/create_rule.sgml
+++ b/doc/src/sgml/ref/create_rule.sgml
@@ -136,7 +136,11 @@ CREATE [ OR REPLACE ] RULE name AS
The event is one of SELECT,
INSERT, UPDATE, or
- DELETE.
+ DELETE. Note that an
+ INSERT containing an ON
+ CONFLICT clause cannot be used on tables that have
+ either INSERT or UPDATE
+ rules. Consider using an updatable view instead.
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index be7ebd5f54..fac7e1ec5e 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -717,7 +717,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
EXCLUDE>, and
REFERENCES> (foreign key) constraints accept this
clause. NOT NULL> and CHECK> constraints are not
- deferrable.
+ deferrable. Note that deferrable constraints cannot be used as
+ conflict arbitrators in an INSERT statement that
+ includes an ON CONFLICT DO UPDATE> clause.
diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml
index aae0b41cd2..4bde815012 100644
--- a/doc/src/sgml/ref/create_trigger.sgml
+++ b/doc/src/sgml/ref/create_trigger.sgml
@@ -76,7 +76,10 @@ CREATE [ CONSTRAINT ] TRIGGER name
executes once for any given operation, regardless of how many rows
it modifies (in particular, an operation that modifies zero rows
will still result in the execution of any applicable FOR
- EACH STATEMENT triggers).
+ EACH STATEMENT triggers). Note that with an
+ INSERT with an ON CONFLICT DO UPDATE>
+ clause, both INSERT and
+ UPDATE statement level triggers will be fired.
diff --git a/doc/src/sgml/ref/create_view.sgml b/doc/src/sgml/ref/create_view.sgml
index 5dadab1dee..8fa3564021 100644
--- a/doc/src/sgml/ref/create_view.sgml
+++ b/doc/src/sgml/ref/create_view.sgml
@@ -333,7 +333,8 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
If the view is automatically updatable the system will convert any
INSERT>, UPDATE> or DELETE> statement
on the view into the corresponding statement on the underlying base
- relation.
+ relation. INSERT> statements that have an ON
+ CONFLICT UPDATE> clause are fully supported.
@@ -345,8 +346,10 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
condition, and thus is no longer visible through the view. Similarly,
an INSERT> command can potentially insert base-relation rows
that do not satisfy the WHERE> condition and thus are not
- visible through the view. The CHECK OPTION> may be used to
- prevent INSERT> and UPDATE> commands from creating
+ visible through the view (ON CONFLICT UPDATE> may
+ similarly affect an existing row not visible through the view).
+ The CHECK OPTION> may be used to prevent
+ INSERT> and UPDATE> commands from creating
such rows that are not visible through the view.
diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml
index a3cccb9f7c..c88d1b7b50 100644
--- a/doc/src/sgml/ref/insert.sgml
+++ b/doc/src/sgml/ref/insert.sgml
@@ -22,9 +22,24 @@ PostgreSQL documentation
[ WITH [ RECURSIVE ] with_query [, ...] ]
-INSERT INTO table_name [ ( column_name [, ...] ) ]
+INSERT INTO table_name [ AS alias ] [ ( column_name [, ...] ) ]
{ DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query }
+ [ ON CONFLICT [ conflict_target ] conflict_action ]
[ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]
+
+where conflict_target can be one of:
+
+ ( { column_name_index | ( expression_index ) } [ COLLATE collation ] [ opclass ] [, ...] ) [ WHERE index_predicate ]
+ ON CONSTRAINT constraint_name
+
+and conflict_action is one of:
+
+ DO NOTHING
+ DO UPDATE SET { column_name = { expression | DEFAULT } |
+ ( column_name [, ...] ) = ( { expression | DEFAULT } [, ...] ) |
+ ( column_name [, ...] ) = ( sub-SELECT )
+ } [, ...]
+ [ WHERE condition ]
@@ -58,20 +73,47 @@ INSERT INTO table_name [ (
+
+ ON CONFLICT> can be used to specify an alternative
+ action to raising a unique constraint or exclusion constraint
+ violation error. (See below.)
+
+
The optional RETURNING> clause causes INSERT>
- to compute and return value(s) based on each row actually inserted.
- This is primarily useful for obtaining values that were supplied by
- defaults, such as a serial sequence number. However, any expression
- using the table's columns is allowed. The syntax of the
- RETURNING> list is identical to that of the output list
- of SELECT>.
+ to compute and return value(s) based on each row actually inserted
+ (or updated, if an ON CONFLICT DO UPDATE> clause was
+ used). This is primarily useful for obtaining values that were
+ supplied by defaults, such as a serial sequence number. However,
+ any expression using the table's columns is allowed. The syntax of
+ the RETURNING> list is identical to that of the output
+ list of SELECT>. Only rows that were successfully
+ inserted or updated will be returned. For example, if a row was
+ locked but not updated because an ON CONFLICT DO UPDATE
+ ... WHERE clause condition was not satisfied, the
+ row will not be returned.
You must have INSERT privilege on a table in
- order to insert into it. If a column list is specified, you only
- need INSERT privilege on the listed columns.
+ order to insert into it. If ON CONFLICT DO UPDATE> is
+ present the UPDATE privilege is also required.
+
+
+
+ If a column list is specified, you only need
+ INSERT privilege on the listed columns.
+ Similarly, when ON CONFLICT DO UPDATE> is specified, you
+ only need UPDATE> privilege on the column(s) that are
+ listed to be updated. However, ON CONFLICT DO UPDATE>
+ also requires SELECT> privilege on any column whose
+ values are read in the ON CONFLICT DO UPDATE>
+ expressions or condition>.
+
+
+
Use of the RETURNING> clause requires SELECT>
privilege on all columns mentioned in RETURNING>.
If you use the table_name [ (
+
+ alias
+
+
+ A substitute name for the target table. When an alias is provided, it
+ completely hides the actual name of the table. This is particularly
+ useful when using ON CONFLICT DO UPDATE into a table
+ named excluded as that's also the name of the
+ pseudo-relation containing the proposed row.
+
+
+
+
+
column_name
@@ -121,7 +177,12 @@ INSERT INTO table_name [ ( table_name.
The column name can be qualified with a subfield name or array
subscript, if needed. (Inserting into only some fields of a
- composite column leaves the other fields null.)
+ composite column leaves the other fields null.) When
+ referencing a column with ON CONFLICT DO UPDATE>, do
+ not include the table's name in the specification of a target
+ column. For example, INSERT INTO tab ... ON CONFLICT DO
+ UPDATE SET table_name.col = 1> is invalid (this follows the general
+ behavior for UPDATE>).
@@ -171,13 +232,34 @@ INSERT INTO table_name [ (
An expression to be computed and returned by the INSERT>
- command after each row is inserted. The expression can use any
- column names of the table named by table_name.
+ command after each row is inserted (not updated). The
+ expression can use any column names of the table named by
+ table_name.
Write *> to return all columns of the inserted row(s).
+
+ conflict_target
+
+
+ Specify which conflicts ON CONFLICT refers to.
+
+
+
+
+
+ conflict_action
+
+
+ DO NOTHING or DO UPDATE
+ SET clause specifying the action to be performed in
+ case of a conflict.
+
+
+
+
output_name
@@ -186,9 +268,226 @@ INSERT INTO table_name [ (
+
+
+ column_name_index
+
+
+ The name of a table_name column. Part of a
+ unique index inference clause. Follows CREATE
+ INDEX format. SELECT> privilege on
+ column_name_index
+ is required.
+
+
+
+
+
+ expression_index
+
+
+ Similar to column_name_index, but used to
+ infer expressions on table_name columns appearing
+ within index definitions (not simple columns). Part of unique
+ index inference clause. Follows CREATE INDEX
+ format. SELECT> privilege on any column appearing
+ within expression_index is required.
+
+
+
+
+
+ collation
+
+
+ When specified, mandates that corresponding column_name_index or
+ expression_index use a
+ particular collation in order to be matched in the inference clause.
+ Typically this is omitted, as collations usually do not affect whether or
+ not a constraint violation occurs. Follows CREATE
+ INDEX format.
+
+
+
+
+
+ opclass
+
+
+ When specified, mandates that corresponding column_name_index or
+ expression_index use
+ particular operator class in order to be matched by the inference
+ clause. Sometimes this is omitted because the
+ equality semantics are often equivalent across a
+ type's operator classes anyway, or because it's sufficient to trust that
+ the defined unique indexes have the pertinent definition of equality.
+ Follows CREATE INDEX format.
+
+
+
+
+
+ index_predicate
+
+
+ Used to allow inference of partial unique indexes. Any indexes
+ that satisfy the predicate (which need not actually be partial
+ indexes) can be matched by the rest of the inference clause.
+ Follows CREATE INDEX format.
+ SELECT> privilege on any column appearing within
+ index_predicate is
+ required.
+
+
+
+
+
+ constraint_name
+
+
+ Explicitly specifies an arbiter constraint
+ by name, rather than inferring a constraint or index. This is
+ mostly useful for exclusion constraints, that cannot be chosen
+ in the conventional way (with an inference clause).
+
+
+
+
+
+ condition
+
+
+ An expression that returns a value of type boolean. Only
+ rows for which this expression returns true will be
+ updated, although all rows will be locked when the
+ ON CONFLICT DO UPDATE> action is taken.
+
+
+
+
+ ON CONFLICT Clause
+
+ UPSERT
+
+
+ ON CONFLICT
+
+
+ The optional ON CONFLICT clause specifies an
+ alternative action to raising a unique violation or exclusion
+ constraint violation error. For each individual row proposed for
+ insertion, either the insertion proceeds, or, if a constraint
+ specified by the conflict_target is
+ violated, the alternative conflict_action is
+ taken.
+
+
+
+ conflict_target describes which conflicts
+ are handled by the ON CONFLICT clause. Either a
+ unique index inference clause or an explicitly
+ named constraint can be used. For ON CONFLICT DO
+ NOTHING, it is optional to specify a
+ conflict_target; when omitted, conflicts
+ with all usable constraints (and unique indexes) are handled. For
+ ON CONFLICT DO UPDATE, a conflict target
+ must be specified.
+
+ Every time an insertion without ON CONFLICT
+ would ordinarily raise an error due to violating one of the
+ inferred (or explicitly named) constraints, a conflict (as in
+ ON CONFLICT) occurs, and the alternative action,
+ as specified by conflict_action is taken.
+ This happens on a row-by-row basis.
+
+
+
+ A unique index inference clause consists of
+ one or more column_name_index columns and/or
+ expression_index
+ expressions, and an optional
+ index_predicate.
+
+
+
+ All the table_name
+ unique indexes that, without regard to order, contain exactly the
+ specified columns/expressions and, if specified, whose predicate
+ implies the
+ index_predicate are chosen as arbiter indexes. Note
+ that this means an index without a predicate will be used if a
+ non-partial index matching every other criteria happens to be
+ available.
+
+
+
+ If no index matches the inference clause (nor is there a constraint
+ explicitly named), an error is raised. Deferred constraints are
+ not supported as arbiters.
+
+
+
+ conflict_action defines the action to be
+ taken in case of conflict. ON CONFLICT DO
+ NOTHING simply avoids inserting a row as its alternative
+ action. ON CONFLICT DO UPDATE updates the
+ existing row that conflicts with the row proposed for insertion as
+ its alternative action.
+
+ ON CONFLICT DO UPDATE guarantees an atomic
+ INSERT or UPDATE outcome - provided
+ there is no independent error, one of those two outcomes is guaranteed,
+ even under high concurrency. This feature is also known as
+ UPSERT.
+
+ Note that exclusion constraints are not supported with
+ ON CONFLICT DO UPDATE.
+
+
+
+ ON CONFLICT DO UPDATE optionally accepts
+ a WHERE clause condition.
+ When provided, the statement only proceeds with updating if
+ the condition is satisfied. Otherwise, unlike a
+ conventional UPDATE, the row is still locked for update.
+ Note that the condition is evaluated last, after
+ a conflict has been identified as a candidate to update.
+
+
+
+ The SET and WHERE clauses in
+ ON CONFLICT UPDATE have access to the existing
+ row, using the table's name, and to the row
+ proposed for insertion, using the excluded
+ alias. The excluded alias requires
+ SELECT> privilege on any column whose values are read.
+
+ Note that the effects of all per-row BEFORE INSERT
+ triggers are reflected in excluded values, since those
+ effects may have contributed to the row being excluded from insertion.
+
+
+
+ INSERT with an ON CONFLICT DO UPDATE>
+ clause is a deterministic
statement. This means
+ that the command will not be allowed to affect any single existing
+ row more than once; a cardinality violation error will be raised
+ when this situation arises. Rows proposed for insertion should not
+ duplicate each other in terms of attributes constrained by the
+ conflict-arbitrating unique index.
+
+
+
Outputs
@@ -197,21 +496,30 @@ INSERT INTO table_name [ (
INSERT oid count
+
+ However, in the event of an ON CONFLICT DO UPDATE> clause
+ (but not in the event of an ON
+ CONFLICT DO NOTHING> clause), the command tag reports the number of
+ rows inserted or updated together, of the form
+
+UPSERT oid count
The count is the number
of rows inserted. If count
is exactly one, and the target table has OIDs, then
oid is the
- OID assigned to the inserted row. Otherwise
- oid is zero.
+ OID
+ assigned to the inserted row (but not if there is only a single
+ updated row). Otherwise oid is zero.
If the INSERT> command contains a RETURNING>
clause, the result will be similar to that of a SELECT>
statement containing the columns and values defined in the
- RETURNING> list, computed over the row(s) inserted by the
- command.
+ RETURNING> list, computed over the row(s) inserted or
+ updated by the command.
@@ -311,7 +619,65 @@ WITH upd AS (
RETURNING *
)
INSERT INTO employees_log SELECT *, current_timestamp FROM upd;
-
+
+
+
+ Insert or update new distributors as appropriate. Assumes a unique
+ index has been defined that constrains values appearing in the
+ did column. Note that an EXCLUDED>
+ expression is used to reference values originally proposed for
+ insertion:
+
+ INSERT INTO distributors (did, dname)
+ VALUES (5, 'Gizmo transglobal'), (6, 'Associated Computing, inc')
+ ON CONFLICT (did) DO UPDATE SET dname = EXCLUDED.dname;
+
+
+
+ Insert a distributor, or do nothing for rows proposed for insertion
+ when an existing, excluded row (a row with a matching constrained
+ column or columns after before row insert triggers fire) exists.
+ Example assumes a unique index has been defined that constrains
+ values appearing in the did column:
+
+ INSERT INTO distributors (did, dname) VALUES (7, 'Redline GmbH')
+ ON CONFLICT (did) DO NOTHING;
+
+
+
+ Insert or update new distributors as appropriate. Example assumes
+ a unique index has been defined that constrains values appearing in
+ the did column. WHERE> clause is
+ used to limit the rows actually updated (any existing row not
+ updated will still be locked, though):
+
+ -- Don't update existing distributors based in a certain ZIP code
+ INSERT INTO distributors AS d (did, dname) VALUES (8, 'Anvil Distribution')
+ ON CONFLICT (did) DO UPDATE
+ SET dname = EXCLUDED.dname || ' (formerly ' || d.dname || ')'
+ WHERE d.zipcode != '21201';
+
+ -- Name a constraint directly in the statement (uses associated
+ -- index to arbitrate taking the DO NOTHING action)
+ INSERT INTO distributors (did, dname) VALUES (9, 'Antwerp Design')
+ ON CONFLICT ON CONSTRAINT distributors_pkey DO NOTHING;
+
+
+
+ Insert new distributor if possible; otherwise
+ DO NOTHING. Example assumes a unique index has been
+ defined that constrains values appearing in the
+ did column on a subset of rows where the
+ is_active boolean column evaluates to
+ true:
+
+ -- This statement could infer a partial unique index on "did"
+ -- with a predicate of "WHERE is_active", but it could also
+ -- just use a regular unique constraint on "did"
+ INSERT INTO distributors (did, dname) VALUES (10, 'Conrad International')
+ ON CONFLICT (did) WHERE is_active DO NOTHING;
+
+
@@ -321,7 +687,8 @@ INSERT INTO employees_log SELECT *, current_timestamp FROM upd;
INSERT conforms to the SQL standard, except that
the RETURNING> clause is a
PostgreSQL extension, as is the ability
- to use WITH> with INSERT>.
+ to use WITH> with INSERT>, and the ability to
+ specify an alternative action with ON CONFLICT>.
Also, the case in
which a column name list is omitted, but not all the columns are
filled from the VALUES> clause or query>,
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index f94aea174a..bd0d71e0d9 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -40,14 +40,17 @@
On tables and foreign tables, triggers can be defined to execute either
before or after any INSERT, UPDATE,
or DELETE operation, either once per modified row,
- or once per SQL statement.
- UPDATE triggers can moreover be set to fire only if
- certain columns are mentioned in the SET clause of the
- UPDATE statement.
- Triggers can also fire for TRUNCATE statements.
- If a trigger event occurs, the trigger's function is called at the
- appropriate time to handle the event. Foreign tables do not support the
- TRUNCATE statement at all.
+ or once per SQL statement. If an
+ INSERT contains an ON CONFLICT DO UPDATE>
+ clause, it is possible that the effects of a BEFORE insert trigger and
+ a BEFORE update trigger can both be applied together, if a reference to
+ an EXCLUDED> column appears. UPDATE
+ triggers can moreover be set to fire only if certain columns are
+ mentioned in the SET clause of the
+ UPDATE statement. Triggers can also fire for
+ TRUNCATE statements. If a trigger event occurs,
+ the trigger's function is called at the appropriate time to handle the
+ event. Foreign tables do not support the TRUNCATE statement at all.
@@ -118,6 +121,35 @@
be operated on.
+
+ If an INSERT contains an ON CONFLICT
+ DO UPDATE> clause, it is possible that the effects of all
+ row-level BEFORE> INSERT triggers
+ and all row-level BEFORE UPDATE triggers can
+ both be applied in a way that is apparent from the final state of
+ the updated row, if an EXCLUDED> column is referenced.
+ There need not be an EXCLUDED> column reference for
+ both sets of BEFORE row-level triggers to execute, though. The
+ possibility of surprising outcomes should be considered when there
+ are both BEFORE> INSERT and
+ BEFORE> UPDATE row-level triggers
+ that both affect a row being inserted/updated (this can be
+ problematic even if the modifications are more or less equivalent,
+ if they're not also idempotent). Note that statement-level
+ UPDATE triggers are executed when ON
+ CONFLICT DO UPDATE> is specified, regardless of whether or not
+ any rows were affected by the UPDATE (and
+ regardless of whether the alternative UPDATE
+ path was ever taken). An INSERT with an
+ ON CONFLICT DO UPDATE> clause will execute
+ statement-level BEFORE> INSERT
+ triggers first, then statement-level BEFORE>
+ UPDATE triggers, followed by statement-level
+ AFTER> UPDATE triggers and finally
+ statement-level AFTER> INSERT
+ triggers.
+
+
Trigger functions invoked by per-statement triggers should always
return NULL. Trigger functions invoked by per-row
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index e84c1743f4..7ea9a77e7e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2061,8 +2061,17 @@ FreeBulkInsertState(BulkInsertState bistate)
* This causes rows to be frozen, which is an MVCC violation and
* requires explicit options chosen by user.
*
+ * HEAP_INSERT_IS_SPECULATIVE is used on so-called "speculative insertions",
+ * which can be backed out afterwards without aborting the whole transaction.
+ * Other sessions can wait for the speculative insertion to be confirmed,
+ * turning it into a regular tuple, or aborted, as if it never existed.
+ * Speculatively inserted tuples behave as "value locks" of short duration,
+ * used to implement INSERT .. ON CONFLICT.
+ *
* Note that these options will be applied when inserting into the heap's
* TOAST table, too, if the tuple requires any out-of-line data.
+ * FIXME: Do we mark TOAST tuples as speculative too? What about confirming
+ * or aborting them?
*
* The BulkInsertState object (if any; bistate can be NULL for default
* behavior) is also just passed through to RelationGetBufferForTuple.
@@ -2115,7 +2124,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, buffer, heaptup);
+ RelationPutHeapTuple(relation, buffer, heaptup,
+ (options & HEAP_INSERT_SPECULATIVE) != 0);
if (PageIsAllVisible(BufferGetPage(buffer)))
{
@@ -2169,7 +2179,11 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
}
xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
- xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
+ xlrec.flags = 0;
+ if (all_visible_cleared)
+ xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
+ if (options & HEAP_INSERT_SPECULATIVE)
+ xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
/*
@@ -2179,7 +2193,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
*/
if (RelationIsLogicallyLogged(relation))
{
- xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
+ xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
bufflags |= REGBUF_KEEP_DATA;
}
@@ -2224,6 +2238,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
*/
CacheInvalidateHeapTuple(relation, heaptup, NULL);
+ /* Note: speculative insertions are counted too, even if aborted later */
pgstat_count_heap_insert(relation, 1);
/*
@@ -2395,7 +2410,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* RelationGetBufferForTuple has ensured that the first tuple fits.
* Put that on the page, and then as many other tuples as fit.
*/
- RelationPutHeapTuple(relation, buffer, heaptuples[ndone]);
+ RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
{
HeapTuple heaptup = heaptuples[ndone + nthispage];
@@ -2403,7 +2418,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
break;
- RelationPutHeapTuple(relation, buffer, heaptup);
+ RelationPutHeapTuple(relation, buffer, heaptup, false);
/*
* We don't use heap_multi_insert for catalog tuples yet, but
@@ -2463,7 +2478,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
/* the rest of the scratch space is used for tuple data */
tupledata = scratchptr;
- xlrec->flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
+ xlrec->flags = all_visible_cleared ? XLH_INSERT_ALL_VISIBLE_CLEARED : 0;
xlrec->ntuples = nthispage;
/*
@@ -2498,7 +2513,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
Assert((scratchptr - scratch) < BLCKSZ);
if (need_tuple_data)
- xlrec->flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
+ xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
/*
* Signal that this is the last xl_heap_multi_insert record
@@ -2506,7 +2521,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* decoding so it knows when to cleanup temporary data.
*/
if (ndone + nthispage == ntuples)
- xlrec->flags |= XLOG_HEAP_LAST_MULTI_INSERT;
+ xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
if (init)
{
@@ -2914,7 +2929,12 @@ l1:
MarkBufferDirty(buffer);
- /* XLOG stuff */
+ /*
+ * XLOG stuff
+ *
+ * NB: heap_abort_speculative() uses the same xlog record and replay
+ * routines.
+ */
if (RelationNeedsWAL(relation))
{
xl_heap_delete xlrec;
@@ -2924,7 +2944,7 @@ l1:
if (RelationIsAccessibleInLogicalDecoding(relation))
log_heap_new_cid(relation, &tp);
- xlrec.flags = all_visible_cleared ? XLOG_HEAP_ALL_VISIBLE_CLEARED : 0;
+ xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
tp.t_data->t_infomask2);
xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
@@ -2933,9 +2953,9 @@ l1:
if (old_key_tuple != NULL)
{
if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
- xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
+ xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
else
- xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
+ xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
}
XLogBeginInsert();
@@ -3742,7 +3762,7 @@ l2:
HeapTupleClearHeapOnly(newtup);
}
- RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
if (!already_marked)
{
@@ -4133,14 +4153,16 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
*
* Function result may be:
* HeapTupleMayBeUpdated: lock was successfully acquired
+ * HeapTupleInvisible: lock failed because tuple was never visible to us
* HeapTupleSelfUpdated: lock failed because tuple updated by self
* HeapTupleUpdated: lock failed because tuple updated by other xact
* HeapTupleWouldBlock: lock couldn't be acquired and wait_policy is skip
*
- * In the failure cases, the routine fills *hufd with the tuple's t_ctid,
- * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax
- * (the last only for HeapTupleSelfUpdated, since we
- * cannot obtain cmax from a combocid generated by another transaction).
+ * In the failure cases other than HeapTupleInvisible, the routine fills
+ * *hufd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
+ * if necessary), and t_cmax (the last only for HeapTupleSelfUpdated,
+ * since we cannot obtain cmax from a combocid generated by another
+ * transaction).
* See comments for struct HeapUpdateFailureData for additional info.
*
* See README.tuplock for a thorough explanation of this mechanism.
@@ -4179,8 +4201,15 @@ l3:
if (result == HeapTupleInvisible)
{
- UnlockReleaseBuffer(*buffer);
- elog(ERROR, "attempted to lock invisible tuple");
+ LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * This is possible, but only when locking a tuple for ON CONFLICT
+ * UPDATE. We return this value here rather than throwing an error in
+ * order to give that case the opportunity to throw a more specific
+ * error.
+ */
+ return HeapTupleInvisible;
}
else if (result == HeapTupleBeingUpdated)
{
@@ -5417,6 +5446,234 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
return HeapTupleMayBeUpdated;
}
+/*
+ * heap_finish_speculative - mark speculative insertion as successful
+ *
+ * To successfully finish a speculative insertion we have to clear speculative
+ * token from tuple. To do so the t_ctid field, which will contain a
+ * speculative token value, is modified in place to point to the tuple itself,
+ * which is characteristic of a newly inserted ordinary tuple.
+ *
+ * NB: It is not ok to commit without either finishing or aborting a
+ * speculative insertion. We could treat speculative tuples of committed
+ * transactions implicitly as completed, but then we would have to be prepared
+ * to deal with speculative tokens on committed tuples. That wouldn't be
+ * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
+ * but clearing the token at completion isn't very expensive either.
+ * An explicit confirmation WAL record also makes logical decoding simpler.
+ */
+void
+heap_finish_speculative(Relation relation, HeapTuple tuple)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+ page = (Page) BufferGetPage(buffer);
+
+ offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(ERROR, "heap_confirm_insert: invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /* SpecTokenOffsetNumber should be distinguishable from any real offset */
+ StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
+ "invalid speculative token constant");
+
+ /* NO EREPORT(ERROR) from here till changes are logged */
+ START_CRIT_SECTION();
+
+ Assert(HeapTupleHeaderIsSpeculative(tuple->t_data));
+
+ MarkBufferDirty(buffer);
+
+ /*
+ * Replace the speculative insertion token with a real t_ctid,
+ * pointing to itself like it does on regular tuples.
+ */
+ htup->t_ctid = tuple->t_self;
+
+ /* XLOG stuff */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_confirm xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
+
+ XLogBeginInsert();
+
+ /* We want the same filtering on this as on a plain insert */
+ XLogIncludeOrigin();
+
+ XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ UnlockReleaseBuffer(buffer);
+}
+
+/*
+ * heap_abort_speculative - kill a speculatively inserted tuple
+ *
+ * Marks a tuple that was speculatively inserted in the same command as dead,
+ * by setting its xmin as invalid. That makes it immediately appear as dead
+ * to all transactions, including our own. In particular, it makes
+ * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
+ * inserting a duplicate key value won't unnecessarily wait for our whole
+ * transaction to finish (it'll just wait for our speculative insertion to
+ * finish).
+ *
+ * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
+ * that arise due to a mutual dependency that is not user visible. By
+ * definition, unprincipled deadlocks cannot be prevented by the user
+ * reordering lock acquisition in client code, because the implementation level
+ * lock acquisitions are not under the user's direct control. If speculative
+ * inserters did not take this precaution, then under high concurrency they
+ * could deadlock with each other, which would not be acceptable.
+ *
+ * This is somewhat redundant with heap_delete, but we prefer to have a
+ * dedicated routine with stripped down requirements.
+ *
+ * This routine does not affect logical decoding as it only looks at
+ * confirmation records.
+ */
+void
+heap_abort_speculative(Relation relation, HeapTuple tuple)
+{
+ TransactionId xid = GetCurrentTransactionId();
+ ItemPointer tid = &(tuple->t_self);
+ ItemId lp;
+ HeapTupleData tp;
+ Page page;
+ BlockNumber block;
+ Buffer buffer;
+
+ Assert(ItemPointerIsValid(tid));
+
+ block = ItemPointerGetBlockNumber(tid);
+ buffer = ReadBuffer(relation, block);
+ page = BufferGetPage(buffer);
+
+ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+ /*
+ * Page can't be all visible, we just inserted into it, and are still
+ * running.
+ */
+ Assert(!PageIsAllVisible(page));
+
+ lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
+ Assert(ItemIdIsNormal(lp));
+
+ tp.t_tableOid = RelationGetRelid(relation);
+ tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
+ tp.t_len = ItemIdGetLength(lp);
+ tp.t_self = *tid;
+
+ /*
+ * Sanity check that the tuple really is a speculatively inserted tuple,
+ * inserted by us.
+ */
+ if (tp.t_data->t_choice.t_heap.t_xmin != xid)
+ elog(ERROR, "attempted to kill a tuple inserted by another transaction");
+ if (!HeapTupleHeaderIsSpeculative(tp.t_data))
+ elog(ERROR, "attempted to kill a non-speculative tuple");
+ Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
+
+ /*
+ * No need to check for serializable conflicts here. There is never a
+ * need for a combocid, either. No need to extract replica identity, or
+ * do anything special with infomask bits.
+ */
+
+ START_CRIT_SECTION();
+
+ /*
+ * The tuple will become DEAD immediately. Flag that this page
+ * immediately is a candidate for pruning by setting xmin to
+ * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
+ * inventing a nicer API for this.
+ */
+ Assert(TransactionIdIsValid(RecentGlobalXmin));
+ PageSetPrunable(page, RecentGlobalXmin);
+
+ /* store transaction information of xact deleting the tuple */
+ tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
+ tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
+
+ /*
+ * Set the tuple header xmin to InvalidTransactionId. This makes the
+ * tuple immediately invisible to everyone. (In particular, to any
+ * transactions waiting on the speculative token, woken up later.)
+ */
+ HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
+
+ /* Clear the speculative insertion token too */
+ tp.t_data->t_ctid = tp.t_self;
+
+ MarkBufferDirty(buffer);
+
+ /*
+ * XLOG stuff
+ *
+ * The WAL records generated here match heap_delete(). The same recovery
+ * routines are used.
+ */
+ if (RelationNeedsWAL(relation))
+ {
+ xl_heap_delete xlrec;
+ XLogRecPtr recptr;
+
+ xlrec.flags = XLH_DELETE_IS_SUPER;
+ xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
+ tp.t_data->t_infomask2);
+ xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
+ xlrec.xmax = xid;
+
+ XLogBeginInsert();
+ XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
+ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+
+ /* No replica identity & replication origin logged */
+
+ recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
+
+ PageSetLSN(page, recptr);
+ }
+
+ END_CRIT_SECTION();
+
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ if (HeapTupleHasExternal(&tp))
+ toast_delete(relation, &tp);
+
+ /*
+ * Never need to mark tuple for invalidation, since catalogs don't support
+ * speculative insertion
+ */
+
+ /* Now we can release the buffer */
+ ReleaseBuffer(buffer);
+
+ /* count deletion, as we counted the insertion too */
+ pgstat_count_heap_delete(relation);
+}
/*
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
@@ -6732,22 +6989,22 @@ log_heap_update(Relation reln, Buffer oldbuf,
/* Prepare main WAL data chain */
xlrec.flags = 0;
if (all_visible_cleared)
- xlrec.flags |= XLOG_HEAP_ALL_VISIBLE_CLEARED;
+ xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
if (new_all_visible_cleared)
- xlrec.flags |= XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED;
+ xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
if (prefixlen > 0)
- xlrec.flags |= XLOG_HEAP_PREFIX_FROM_OLD;
+ xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
if (suffixlen > 0)
- xlrec.flags |= XLOG_HEAP_SUFFIX_FROM_OLD;
+ xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
if (need_tuple_data)
{
- xlrec.flags |= XLOG_HEAP_CONTAINS_NEW_TUPLE;
+ xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
if (old_key_tuple)
{
if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
- xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_TUPLE;
+ xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
else
- xlrec.flags |= XLOG_HEAP_CONTAINS_OLD_KEY;
+ xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
}
}
@@ -7378,7 +7635,7 @@ heap_xlog_delete(XLogReaderState *record)
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Buffer vmbuffer = InvalidBuffer;
@@ -7406,13 +7663,16 @@ heap_xlog_delete(XLogReaderState *record)
HeapTupleHeaderClearHotUpdated(htup);
fix_infomask_from_infobits(xlrec->infobits_set,
&htup->t_infomask, &htup->t_infomask2);
- HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+ if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
+ HeapTupleHeaderSetXmax(htup, xlrec->xmax);
+ else
+ HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
/* Mark the page as a candidate for pruning */
PageSetPrunable(page, XLogRecGetXid(record));
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
/* Make sure there is no forward chain link in t_ctid */
@@ -7453,7 +7713,7 @@ heap_xlog_insert(XLogReaderState *record)
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Buffer vmbuffer = InvalidBuffer;
@@ -7516,7 +7776,7 @@ heap_xlog_insert(XLogReaderState *record)
PageSetLSN(page, lsn);
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
MarkBufferDirty(buffer);
@@ -7573,7 +7833,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Buffer vmbuffer = InvalidBuffer;
@@ -7655,7 +7915,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
PageSetLSN(page, lsn);
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
MarkBufferDirty(buffer);
@@ -7728,7 +7988,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Buffer vmbuffer = InvalidBuffer;
@@ -7783,7 +8043,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
/* Mark the page as a candidate for pruning */
PageSetPrunable(page, XLogRecGetXid(record));
- if (xlrec->flags & XLOG_HEAP_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
PageSetLSN(page, lsn);
@@ -7812,7 +8072,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
*/
- if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Buffer vmbuffer = InvalidBuffer;
@@ -7840,13 +8100,13 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
elog(PANIC, "heap_update_redo: invalid max offset number");
- if (xlrec->flags & XLOG_HEAP_PREFIX_FROM_OLD)
+ if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
{
Assert(newblk == oldblk);
memcpy(&prefixlen, recdata, sizeof(uint16));
recdata += sizeof(uint16);
}
- if (xlrec->flags & XLOG_HEAP_SUFFIX_FROM_OLD)
+ if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
{
Assert(newblk == oldblk);
memcpy(&suffixlen, recdata, sizeof(uint16));
@@ -7918,7 +8178,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
if (offnum == InvalidOffsetNumber)
elog(PANIC, "heap_update_redo: failed to add tuple");
- if (xlrec->flags & XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED)
+ if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
@@ -7951,6 +8211,42 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
}
+static void
+heap_xlog_confirm(XLogReaderState *record)
+{
+ XLogRecPtr lsn = record->EndRecPtr;
+ xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
+
+ if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
+ {
+ page = BufferGetPage(buffer);
+
+ offnum = xlrec->offnum;
+ if (PageGetMaxOffsetNumber(page) >= offnum)
+ lp = PageGetItemId(page, offnum);
+
+ if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
+ elog(PANIC, "heap_confirm_redo: invalid lp");
+
+ htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+ /*
+ * Confirm tuple as actually inserted
+ */
+ ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
+
+ PageSetLSN(page, lsn);
+ MarkBufferDirty(buffer);
+ }
+ if (BufferIsValid(buffer))
+ UnlockReleaseBuffer(buffer);
+}
+
static void
heap_xlog_lock(XLogReaderState *record)
{
@@ -8101,6 +8397,9 @@ heap_redo(XLogReaderState *record)
case XLOG_HEAP_HOT_UPDATE:
heap_xlog_update(record, true);
break;
+ case XLOG_HEAP_CONFIRM:
+ heap_xlog_confirm(record);
+ break;
case XLOG_HEAP_LOCK:
heap_xlog_lock(record);
break;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 6d091f63af..a9f0ca35e4 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -35,12 +35,17 @@
void
RelationPutHeapTuple(Relation relation,
Buffer buffer,
- HeapTuple tuple)
+ HeapTuple tuple,
+ bool token)
{
Page pageHeader;
OffsetNumber offnum;
- ItemId itemId;
- Item item;
+
+ /*
+ * A tuple that's being inserted speculatively should already have its
+ * token set.
+ */
+ Assert(!token || HeapTupleHeaderIsSpeculative(tuple->t_data));
/* Add the tuple to the page */
pageHeader = BufferGetPage(buffer);
@@ -54,10 +59,18 @@ RelationPutHeapTuple(Relation relation,
/* Update tuple->t_self to the actual position where it was stored */
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
- /* Insert the correct position into CTID of the stored tuple, too */
- itemId = PageGetItemId(pageHeader, offnum);
- item = PageGetItem(pageHeader, itemId);
- ((HeapTupleHeader) item)->t_ctid = tuple->t_self;
+ /*
+ * Insert the correct position into CTID of the stored tuple, too
+ * (unless this is a speculative insertion, in which case the token is
+ * held in CTID field instead)
+ */
+ if (!token)
+ {
+ ItemId itemId = PageGetItemId(pageHeader, offnum);
+ Item item = PageGetItem(pageHeader, itemId);
+
+ ((HeapTupleHeader) item)->t_ctid = tuple->t_self;
+ }
}
/*
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 8464e8794f..274155ad0c 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -522,6 +522,14 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
bool toast_free[MaxHeapAttributeNumber];
bool toast_delold[MaxHeapAttributeNumber];
+ /*
+ * Ignore the INSERT_SPECULATIVE option. Speculative insertions/super
+ * deletions just normally insert/delete the toast values. It seems
+ * easiest to deal with that here, instead of in, potentially, multiple
+ * callers.
+ */
+ options &= ~HEAP_INSERT_SPECULATIVE;
+
/*
* We should only ever be called for tuples of plain relations or
* materialized views --- recursing on a toast rel is bad news.
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index ef68a7145f..4a60c5fa2c 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -51,7 +51,8 @@ static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
static TransactionId _bt_check_unique(Relation rel, IndexTuple itup,
Relation heapRel, Buffer buf, OffsetNumber offset,
ScanKey itup_scankey,
- IndexUniqueCheck checkUnique, bool *is_unique);
+ IndexUniqueCheck checkUnique, bool *is_unique,
+ uint32 *speculativeToken);
static void _bt_findinsertloc(Relation rel,
Buffer *bufptr,
OffsetNumber *offsetptr,
@@ -159,17 +160,27 @@ top:
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
- TransactionId xwait;
+ TransactionId xwait;
+ uint32 speculativeToken;
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
- checkUnique, &is_unique);
+ checkUnique, &is_unique, &speculativeToken);
if (TransactionIdIsValid(xwait))
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
- XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
+ /*
+ * If it's a speculative insertion, wait for it to finish (ie.
+ * to go ahead with the insertion, or kill the tuple). Otherwise
+ * wait for the transaction to finish as usual.
+ */
+ if (speculativeToken)
+ SpeculativeInsertionWait(xwait, speculativeToken);
+ else
+ XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
+
/* start over... */
_bt_freestack(stack);
goto top;
@@ -213,7 +224,10 @@ top:
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
* we must wait for to see if it commits a conflicting tuple. If an actual
- * conflict is detected, no return --- just ereport().
+ * conflict is detected, no return --- just ereport(). If an xact ID is
+ * returned, and the conflicting tuple still has a speculative insertion in
+ * progress, *speculativeToken is set to non-zero, and the caller can wait for
+ * the verdict on the insertion using SpeculativeInsertionWait().
*
* However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
* InvalidTransactionId because we don't want to wait. In this case we
@@ -223,7 +237,8 @@ top:
static TransactionId
_bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
Buffer buf, OffsetNumber offset, ScanKey itup_scankey,
- IndexUniqueCheck checkUnique, bool *is_unique)
+ IndexUniqueCheck checkUnique, bool *is_unique,
+ uint32 *speculativeToken)
{
TupleDesc itupdesc = RelationGetDescr(rel);
int natts = rel->rd_rel->relnatts;
@@ -340,6 +355,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
if (nbuf != InvalidBuffer)
_bt_relbuf(rel, nbuf);
/* Tell _bt_doinsert to wait... */
+ *speculativeToken = SnapshotDirty.speculativeToken;
return xwait;
}
diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c
index 4f06a2637a..f4a1b002cf 100644
--- a/src/backend/access/rmgrdesc/heapdesc.c
+++ b/src/backend/access/rmgrdesc/heapdesc.c
@@ -75,6 +75,12 @@ heap_desc(StringInfo buf, XLogReaderState *record)
xlrec->new_offnum,
xlrec->new_xmax);
}
+ else if (info == XLOG_HEAP_CONFIRM)
+ {
+ xl_heap_confirm *xlrec = (xl_heap_confirm *) rec;
+
+ appendStringInfo(buf, "off %u", xlrec->offnum);
+ }
else if (info == XLOG_HEAP_LOCK)
{
xl_heap_lock *xlrec = (xl_heap_lock *) rec;
@@ -177,6 +183,9 @@ heap_identify(uint8 info)
case XLOG_HEAP_HOT_UPDATE | XLOG_HEAP_INIT_PAGE:
id = "HOT_UPDATE+INIT";
break;
+ case XLOG_HEAP_CONFIRM:
+ id = "HEAP_CONFIRM";
+ break;
case XLOG_HEAP_LOCK:
id = "LOCK";
break;
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index ac3b785b5a..8c8a9eafee 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1665,6 +1665,10 @@ BuildIndexInfo(Relation index)
/* other info */
ii->ii_Unique = indexStruct->indisunique;
ii->ii_ReadyForInserts = IndexIsReady(indexStruct);
+ /* assume not doing speculative insertion for now */
+ ii->ii_UniqueOps = NULL;
+ ii->ii_UniqueProcs = NULL;
+ ii->ii_UniqueStrats = NULL;
/* initialize index-build state to default */
ii->ii_Concurrent = false;
@@ -1673,6 +1677,53 @@ BuildIndexInfo(Relation index)
return ii;
}
+/* ----------------
+ * BuildSpeculativeIndexInfo
+ * Add extra state to IndexInfo record
+ *
+ * For unique indexes, we usually don't want to add info to the IndexInfo for
+ * checking uniqueness, since the B-Tree AM handles that directly. However,
+ * in the case of speculative insertion, additional support is required.
+ *
+ * Do this processing here rather than in BuildIndexInfo() to not incur the
+ * overhead in the common non-speculative cases.
+ * ----------------
+ */
+void
+BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
+{
+ int ncols = index->rd_rel->relnatts;
+ int i;
+
+ /*
+ * fetch info for checking unique indexes
+ */
+ Assert(ii->ii_Unique);
+
+ if (index->rd_rel->relam != BTREE_AM_OID)
+ elog(ERROR, "unexpected non-btree speculative unique index");
+
+ ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * ncols);
+ ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * ncols);
+ ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
+
+ /*
+ * We have to look up the operator's strategy number. This
+ * provides a cross-check that the operator does match the index.
+ */
+ /* We need the func OIDs and strategy numbers too */
+ for (i = 0; i < ncols; i++)
+ {
+ ii->ii_UniqueStrats[i] = BTEqualStrategyNumber;
+ ii->ii_UniqueOps[i] =
+ get_opfamily_member(index->rd_opfamily[i],
+ index->rd_opcintype[i],
+ index->rd_opcintype[i],
+ ii->ii_UniqueStrats[i]);
+ ii->ii_UniqueProcs[i] = get_opcode(ii->ii_UniqueOps[i]);
+ }
+}
+
/* ----------------
* FormIndexDatum
* Construct values[] and isnull[] arrays for a new index tuple.
@@ -2612,7 +2663,7 @@ IndexCheckExclusion(Relation heapRelation,
check_exclusion_constraint(heapRelation,
indexRelation, indexInfo,
&(heapTuple->t_self), values, isnull,
- estate, true, false);
+ estate, true);
}
heap_endscan(scan);
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index fe123addac..0231084c7c 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -46,7 +46,7 @@ CatalogOpenIndexes(Relation heapRel)
resultRelInfo->ri_RelationDesc = heapRel;
resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
- ExecOpenIndices(resultRelInfo);
+ ExecOpenIndices(resultRelInfo, false);
return resultRelInfo;
}
diff --git a/src/backend/catalog/sql_features.txt b/src/backend/catalog/sql_features.txt
index 332926424b..cc0f8c45a6 100644
--- a/src/backend/catalog/sql_features.txt
+++ b/src/backend/catalog/sql_features.txt
@@ -229,7 +229,7 @@ F311 Schema definition statement 02 CREATE TABLE for persistent base tables YES
F311 Schema definition statement 03 CREATE VIEW YES
F311 Schema definition statement 04 CREATE VIEW: WITH CHECK OPTION YES
F311 Schema definition statement 05 GRANT statement YES
-F312 MERGE statement NO
+F312 MERGE statement NO Consider INSERT ... ON CONFLICT DO UPDATE
F313 Enhanced MERGE statement NO
F314 MERGE statement with DELETE branch NO
F321 User authorization YES
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index 561d8fae57..e49affba9e 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -172,7 +172,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
*/
check_exclusion_constraint(trigdata->tg_relation, indexRel, indexInfo,
&(new_row->t_self), values, isnull,
- estate, false, false);
+ estate, false);
}
/*
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index aa8ae4b9bc..00a2417a09 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2284,7 +2284,7 @@ CopyFrom(CopyState cstate)
1, /* dummy rangetable index */
0);
- ExecOpenIndices(resultRelInfo);
+ ExecOpenIndices(resultRelInfo, false);
estate->es_result_relations = resultRelInfo;
estate->es_num_result_relations = 1;
@@ -2439,7 +2439,8 @@ CopyFrom(CopyState cstate)
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate);
+ estate, false, NULL,
+ NIL);
/* AFTER ROW INSERT Triggers */
ExecARInsertTriggers(estate, resultRelInfo, tuple,
@@ -2553,7 +2554,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false);
recheckIndexes =
ExecInsertIndexTuples(myslot, &(bufferedTuples[i]->t_self),
- estate);
+ estate, false, NULL, NIL);
ExecARInsertTriggers(estate, resultRelInfo,
bufferedTuples[i],
recheckIndexes);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index f4cc90183a..c5452e3cb6 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -103,7 +103,8 @@ static void ExplainIndexScanDetails(Oid indexid, ScanDirection indexorderdir,
static void ExplainScanTarget(Scan *plan, ExplainState *es);
static void ExplainModifyTarget(ModifyTable *plan, ExplainState *es);
static void ExplainTargetRel(Plan *plan, Index rti, ExplainState *es);
-static void show_modifytable_info(ModifyTableState *mtstate, ExplainState *es);
+static void show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
+ ExplainState *es);
static void ExplainMemberNodes(List *plans, PlanState **planstates,
List *ancestors, ExplainState *es);
static void ExplainSubPlans(List *plans, List *ancestors,
@@ -744,6 +745,9 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
case T_ModifyTable:
*rels_used = bms_add_member(*rels_used,
((ModifyTable *) plan)->nominalRelation);
+ if (((ModifyTable *) plan)->exclRelRTI)
+ *rels_used = bms_add_member(*rels_used,
+ ((ModifyTable *) plan)->exclRelRTI);
break;
default:
break;
@@ -1466,7 +1470,8 @@ ExplainNode(PlanState *planstate, List *ancestors,
planstate, es);
break;
case T_ModifyTable:
- show_modifytable_info((ModifyTableState *) planstate, es);
+ show_modifytable_info((ModifyTableState *) planstate, ancestors,
+ es);
break;
case T_Hash:
show_hash_info((HashState *) planstate, es);
@@ -2317,18 +2322,22 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es)
/*
* Show extra information for a ModifyTable node
*
- * We have two objectives here. First, if there's more than one target table
- * or it's different from the nominal target, identify the actual target(s).
- * Second, give FDWs a chance to display extra info about foreign targets.
+ * We have three objectives here. First, if there's more than one target
+ * table or it's different from the nominal target, identify the actual
+ * target(s). Second, give FDWs a chance to display extra info about foreign
+ * targets. Third, show information about ON CONFLICT.
*/
static void
-show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
+show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
+ ExplainState *es)
{
ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
const char *operation;
const char *foperation;
bool labeltargets;
int j;
+ List *idxNames = NIL;
+ ListCell *lst;
switch (node->operation)
{
@@ -2414,6 +2423,55 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es)
}
}
+ /* Gather names of ON CONFLICT arbiter indexes */
+ foreach(lst, node->arbiterIndexes)
+ {
+ char *indexname = get_rel_name(lfirst_oid(lst));
+
+ idxNames = lappend(idxNames, indexname);
+ }
+
+ if (node->onConflictAction != ONCONFLICT_NONE)
+ {
+ ExplainProperty("Conflict Resolution",
+ node->onConflictAction == ONCONFLICT_NOTHING ?
+ "NOTHING" : "UPDATE",
+ false, es);
+
+ /*
+ * Don't display arbiter indexes at all when DO NOTHING variant
+ * implicitly ignores all conflicts
+ */
+ if (idxNames)
+ ExplainPropertyList("Conflict Arbiter Indexes", idxNames, es);
+
+ /* ON CONFLICT DO UPDATE WHERE qual is specially displayed */
+ if (node->onConflictWhere)
+ {
+ show_upper_qual((List *) node->onConflictWhere, "Conflict Filter",
+ &mtstate->ps, ancestors, es);
+ show_instrumentation_count("Rows Removed by Conflict Filter", 1, &mtstate->ps, es);
+ }
+
+ /* EXPLAIN ANALYZE display of actual outcome for each tuple proposed */
+ if (es->analyze && mtstate->ps.instrument)
+ {
+ double total;
+ double insert_path;
+ double other_path;
+
+ InstrEndLoop(mtstate->mt_plans[0]->instrument);
+
+ /* count the number of source rows */
+ total = mtstate->mt_plans[0]->instrument->ntuples;
+ other_path = mtstate->ps.instrument->nfiltered2;
+ insert_path = total - other_path;
+
+ ExplainPropertyFloat("Tuples Inserted", insert_path, 0, es);
+ ExplainPropertyFloat("Conflicting Tuples", other_path, 0, es);
+ }
+ }
+
if (labeltargets)
ExplainCloseGroup("Target Tables", "Target Tables", false, es);
}
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 222e7fce85..b537ca5e66 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2421,21 +2421,10 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
TupleTableSlot *newSlot;
int i;
Bitmapset *updatedCols;
- Bitmapset *keyCols;
LockTupleMode lockmode;
- /*
- * Compute lock mode to use. If columns that are part of the key have not
- * been modified, then we can use a weaker lock, allowing for better
- * concurrency.
- */
- updatedCols = GetUpdatedColumns(relinfo, estate);
- keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
- INDEX_ATTR_BITMAP_KEY);
- if (bms_overlap(keyCols, updatedCols))
- lockmode = LockTupleExclusive;
- else
- lockmode = LockTupleNoKeyExclusive;
+ /* Determine lock mode to use */
+ lockmode = ExecUpdateLockMode(estate, relinfo);
Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
if (fdw_trigtuple == NULL)
@@ -2476,6 +2465,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
TRIGGER_EVENT_ROW |
TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ updatedCols = GetUpdatedColumns(relinfo, estate);
for (i = 0; i < trigdesc->numtriggers; i++)
{
Trigger *trigger = &trigdesc->triggers[i];
@@ -2783,6 +2773,9 @@ ltrmark:;
*/
return NULL;
+ case HeapTupleInvisible:
+ elog(ERROR, "attempted to lock invisible tuple");
+
default:
ReleaseBuffer(buffer);
elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index a697682b20..e7cf72b387 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -50,6 +50,50 @@
* to the caller. The caller must re-check them later by calling
* check_exclusion_constraint().
*
+ * Speculative insertion
+ * ---------------------
+ *
+ * Speculative insertion is a two-phase mechanism, used to implement
+ * INSERT ... ON CONFLICT DO UPDATE/NOTHING. The tuple is first inserted
+ * into the heap and the indexes are updated as usual, but if a constraint is
+ * violated, we can still back out the insertion without aborting the whole
+ * transaction. In an INSERT ... ON CONFLICT statement, if a conflict is
+ * detected, the inserted tuple is backed out and the ON CONFLICT action is
+ * executed instead.
+ *
+ * Insertion to a unique index works as usual: the index AM checks for
+ * duplicate keys atomically with the insertion. But instead of throwing
+ * an error on a conflict, the speculatively inserted heap tuple is backed
+ * out.
+ *
+ * Exclusion constraints are slightly more complicated. As mentioned
+ * earlier, there is a risk of deadlock when two backends insert the same
+ * key concurrently. That was not a problem for regular insertions, when
+ * one of the transactions has to be aborted anyway, but with a speculative
+ * insertion we cannot let a deadlock happen, because we only want to back
+ * out the speculatively inserted tuple on conflict, not abort the whole
+ * transaction.
+ *
+ * When a backend detects that the speculative insertion conflicts with
+ * another in-progress tuple, it has two options:
+ *
+ * 1. back out the speculatively inserted tuple, then wait for the other
+ * transaction, and retry. Or,
+ * 2. wait for the other transaction, with the speculatively inserted tuple
+ * still in place.
+ *
+ * If two backends insert at the same time, and both try to wait for each
+ * other, they will deadlock. So option 2 is not acceptable. Option 1
+ * avoids the deadlock, but it is prone to a livelock instead. Both
+ * transactions will wake up immediately as the other transaction backs
+ * out. Then they both retry, and conflict with each other again, lather,
+ * rinse, repeat.
+ *
+ * To avoid the livelock, one of the backends must back out first, and then
+ * wait, while the other one waits without backing out. It doesn't matter
+ * which one backs out, so we employ an arbitrary rule that the transaction
+ * with the higher XID backs out.
+ *
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -63,12 +107,30 @@
#include "postgres.h"
#include "access/relscan.h"
+#include "access/xact.h"
#include "catalog/index.h"
#include "executor/executor.h"
#include "nodes/nodeFuncs.h"
#include "storage/lmgr.h"
#include "utils/tqual.h"
+/* waitMode argument to check_exclusion_or_unique_constraint() */
+typedef enum
+{
+ CEOUC_WAIT,
+ CEOUC_NOWAIT,
+ CEOUC_LIVELOCK_PREVENTING_WAIT
+} CEOUC_WAIT_MODE;
+
+static bool check_exclusion_or_unique_constraint(Relation heap, Relation index,
+ IndexInfo *indexInfo,
+ ItemPointer tupleid,
+ Datum *values, bool *isnull,
+ EState *estate, bool newIndex,
+ CEOUC_WAIT_MODE waitMode,
+ bool errorOK,
+ ItemPointer conflictTid);
+
static bool index_recheck_constraint(Relation index, Oid *constr_procs,
Datum *existing_values, bool *existing_isnull,
Datum *new_values);
@@ -84,7 +146,7 @@ static bool index_recheck_constraint(Relation index, Oid *constr_procs,
* ----------------------------------------------------------------
*/
void
-ExecOpenIndices(ResultRelInfo *resultRelInfo)
+ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
{
Relation resultRelation = resultRelInfo->ri_RelationDesc;
List *indexoidlist;
@@ -137,6 +199,13 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
/* extract index key information from the index's pg_index info */
ii = BuildIndexInfo(indexDesc);
+ /*
+ * If the indexes are to be used for speculative insertion, add extra
+ * information required by unique index entries.
+ */
+ if (speculative && ii->ii_Unique)
+ BuildSpeculativeIndexInfo(indexDesc, ii);
+
relationDescs[i] = indexDesc;
indexInfoArray[i] = ii;
i++;
@@ -186,7 +255,9 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
* Unique and exclusion constraints are enforced at the same
* time. This returns a list of index OIDs for any unique or
* exclusion constraints that are deferred and that had
- * potential (unconfirmed) conflicts.
+ * potential (unconfirmed) conflicts. (if noDupErr == true,
+ * the same is done for non-deferred constraints, but report
+ * if conflict was speculative or deferred conflict to caller)
*
* CAUTION: this must not be called for a HOT update.
* We can't defend against that here for lack of info.
@@ -196,7 +267,10 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
List *
ExecInsertIndexTuples(TupleTableSlot *slot,
ItemPointer tupleid,
- EState *estate)
+ EState *estate,
+ bool noDupErr,
+ bool *specConflict,
+ List *arbiterIndexes)
{
List *result = NIL;
ResultRelInfo *resultRelInfo;
@@ -236,12 +310,17 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
IndexInfo *indexInfo;
IndexUniqueCheck checkUnique;
bool satisfiesConstraint;
+ bool arbiter;
if (indexRelation == NULL)
continue;
indexInfo = indexInfoArray[i];
+ /* Record if speculative insertion arbiter */
+ arbiter = list_member_oid(arbiterIndexes,
+ indexRelation->rd_index->indexrelid);
+
/* If the index is marked as read-only, ignore it */
if (!indexInfo->ii_ReadyForInserts)
continue;
@@ -288,9 +367,14 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* For a deferrable unique index, we tell the index AM to just detect
* possible non-uniqueness, and we add the index OID to the result
* list if further checking is needed.
+ *
+ * For a speculative insertion (used by INSERT ... ON CONFLICT), do
+ * the same as for a deferrable unique index.
*/
if (!indexRelation->rd_index->indisunique)
checkUnique = UNIQUE_CHECK_NO;
+ else if (noDupErr && (arbiterIndexes == NIL || arbiter))
+ checkUnique = UNIQUE_CHECK_PARTIAL;
else if (indexRelation->rd_index->indimmediate)
checkUnique = UNIQUE_CHECK_YES;
else
@@ -308,8 +392,11 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* If the index has an associated exclusion constraint, check that.
* This is simpler than the process for uniqueness checks since we
* always insert first and then check. If the constraint is deferred,
- * we check now anyway, but don't throw error on violation; instead
- * we'll queue a recheck event.
+ * we check now anyway, but don't throw error on violation or wait for
+ * a conclusive outcome from a concurrent insertion; instead we'll
+ * queue a recheck event. Similarly, noDupErr callers (speculative
+ * inserters) will recheck later, and wait for a conclusive outcome
+ * then.
*
* An index for an exclusion constraint can't also be UNIQUE (not an
* essential property, we just don't allow it in the grammar), so no
@@ -317,13 +404,31 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
*/
if (indexInfo->ii_ExclusionOps != NULL)
{
- bool errorOK = !indexRelation->rd_index->indimmediate;
+ bool violationOK;
+ CEOUC_WAIT_MODE waitMode;
+
+ if (noDupErr)
+ {
+ violationOK = true;
+ waitMode = CEOUC_LIVELOCK_PREVENTING_WAIT;
+ }
+ else if (!indexRelation->rd_index->indimmediate)
+ {
+ violationOK = true;
+ waitMode = CEOUC_NOWAIT;
+ }
+ else
+ {
+ violationOK = false;
+ waitMode = CEOUC_WAIT;
+ }
satisfiesConstraint =
- check_exclusion_constraint(heapRelation,
- indexRelation, indexInfo,
- tupleid, values, isnull,
- estate, false, errorOK);
+ check_exclusion_or_unique_constraint(heapRelation,
+ indexRelation, indexInfo,
+ tupleid, values, isnull,
+ estate, false,
+ waitMode, violationOK, NULL);
}
if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@@ -333,46 +438,213 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* The tuple potentially violates the uniqueness or exclusion
* constraint, so make a note of the index so that we can re-check
- * it later.
+ * it later. Speculative inserters are told if there was a
+ * speculative conflict, since that always requires a restart.
*/
result = lappend_oid(result, RelationGetRelid(indexRelation));
+ if (indexRelation->rd_index->indimmediate && specConflict)
+ *specConflict = true;
}
}
return result;
}
+/* ----------------------------------------------------------------
+ * ExecCheckIndexConstraints
+ *
+ * This routine checks if a tuple violates any unique or
+ * exclusion constraints. Returns true if there is no conflict.
+ * Otherwise returns false, and the TID of the conflicting
+ * tuple is returned in *conflictTid.
+ *
+ * If 'arbiterIndexes' is given, only those indexes are checked.
+ * NIL means all indexes.
+ *
+ * Note that this doesn't lock the values in any way, so it's
+ * possible that a conflicting tuple is inserted immediately
+ * after this returns. But this can be used for a pre-check
+ * before insertion.
+ * ----------------------------------------------------------------
+ */
+bool
+ExecCheckIndexConstraints(TupleTableSlot *slot,
+ EState *estate, ItemPointer conflictTid,
+ List *arbiterIndexes)
+{
+ ResultRelInfo *resultRelInfo;
+ int i;
+ int numIndices;
+ RelationPtr relationDescs;
+ Relation heapRelation;
+ IndexInfo **indexInfoArray;
+ ExprContext *econtext;
+ Datum values[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
+ ItemPointerData invalidItemPtr;
+ bool checkedIndex = false;
+
+ ItemPointerSetInvalid(conflictTid);
+ ItemPointerSetInvalid(&invalidItemPtr);
+
+ /*
+ * Get information from the result relation info structure.
+ */
+ resultRelInfo = estate->es_result_relation_info;
+ numIndices = resultRelInfo->ri_NumIndices;
+ relationDescs = resultRelInfo->ri_IndexRelationDescs;
+ indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
+ heapRelation = resultRelInfo->ri_RelationDesc;
+
+ /*
+ * We will use the EState's per-tuple context for evaluating predicates
+ * and index expressions (creating it if it's not already there).
+ */
+ econtext = GetPerTupleExprContext(estate);
+
+ /* Arrange for econtext's scan tuple to be the tuple under test */
+ econtext->ecxt_scantuple = slot;
+
+ /*
+ * For each index, form index tuple and check if it satisfies the
+ * constraint.
+ */
+ for (i = 0; i < numIndices; i++)
+ {
+ Relation indexRelation = relationDescs[i];
+ IndexInfo *indexInfo;
+ bool satisfiesConstraint;
+
+ if (indexRelation == NULL)
+ continue;
+
+ indexInfo = indexInfoArray[i];
+
+ if (!indexInfo->ii_Unique && !indexInfo->ii_ExclusionOps)
+ continue;
+
+ /* If the index is marked as read-only, ignore it */
+ if (!indexInfo->ii_ReadyForInserts)
+ continue;
+
+ /* When specific arbiter indexes requested, only examine them */
+ if (arbiterIndexes != NIL &&
+ !list_member_oid(arbiterIndexes,
+ indexRelation->rd_index->indexrelid))
+ continue;
+
+ if (!indexRelation->rd_index->indimmediate)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters"),
+ errtableconstraint(heapRelation,
+ RelationGetRelationName(indexRelation))));
+
+ checkedIndex = true;
+
+ /* Check for partial index */
+ if (indexInfo->ii_Predicate != NIL)
+ {
+ List *predicate;
+
+ /*
+ * If predicate state not set up yet, create it (in the estate's
+ * per-query context)
+ */
+ predicate = indexInfo->ii_PredicateState;
+ if (predicate == NIL)
+ {
+ predicate = (List *)
+ ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
+ estate);
+ indexInfo->ii_PredicateState = predicate;
+ }
+
+ /* Skip this index-update if the predicate isn't satisfied */
+ if (!ExecQual(predicate, econtext, false))
+ continue;
+ }
+
+ /*
+ * FormIndexDatum fills in its values and isnull parameters with the
+ * appropriate values for the column(s) of the index.
+ */
+ FormIndexDatum(indexInfo,
+ slot,
+ estate,
+ values,
+ isnull);
+
+ satisfiesConstraint =
+ check_exclusion_or_unique_constraint(heapRelation, indexRelation,
+ indexInfo, &invalidItemPtr,
+ values, isnull, estate, false,
+ CEOUC_WAIT, true,
+ conflictTid);
+ if (!satisfiesConstraint)
+ return false;
+ }
+
+ if (arbiterIndexes != NIL && !checkedIndex)
+ elog(ERROR, "unexpected failure to find arbiter index");
+
+ return true;
+}
+
/*
- * Check for violation of an exclusion constraint
+ * Check for violation of an exclusion or unique constraint
*
* heap: the table containing the new tuple
- * index: the index supporting the exclusion constraint
+ * index: the index supporting the constraint
* indexInfo: info about the index, including the exclusion properties
- * tupleid: heap TID of the new tuple we have just inserted
+ * tupleid: heap TID of the new tuple we have just inserted (invalid if we
+ * haven't inserted a new tuple yet)
* values, isnull: the *index* column values computed for the new tuple
* estate: an EState we can do evaluation in
* newIndex: if true, we are trying to build a new index (this affects
* only the wording of error messages)
- * errorOK: if true, don't throw error for violation
+ * waitMode: whether to wait for concurrent inserters/deleters
+ * violationOK: if true, don't throw error for violation
+ * conflictTid: if not-NULL, the TID of the conflicting tuple is returned here
*
* Returns true if OK, false if actual or potential violation
*
- * When errorOK is true, we report violation without waiting to see if any
- * concurrent transaction has committed or not; so the violation is only
- * potential, and the caller must recheck sometime later. This behavior
- * is convenient for deferred exclusion checks; we need not bother queuing
- * a deferred event if there is definitely no conflict at insertion time.
+ * 'waitMode' determines what happens if a conflict is detected with a tuple
+ * that was inserted or deleted by a transaction that's still running.
+ * CEOUC_WAIT means that we wait for the transaction to commit, before
+ * throwing an error or returning. CEOUC_NOWAIT means that we report the
+ * violation immediately; so the violation is only potential, and the caller
+ * must recheck sometime later. This behavior is convenient for deferred
+ * exclusion checks; we need not bother queuing a deferred event if there is
+ * definitely no conflict at insertion time.
+ *
+ * CEOUC_LIVELOCK_PREVENTING_WAIT is like CEOUC_NOWAIT, but we will sometimes
+ * wait anyway, to prevent livelocking if two transactions try inserting at
+ * the same time. This is used with speculative insertions, for INSERT ON
+ * CONFLICT statements. (See notes in file header)
*
- * When errorOK is false, we'll throw error on violation, so a false result
- * is impossible.
+ * If violationOK is true, we just report the potential or actual violation to
+ * the caller by returning 'false'. Otherwise we throw a descriptive error
+ * message here. When violationOK is false, a false result is impossible.
+ *
+ * Note: The indexam is normally responsible for checking unique constraints,
+ * so this normally only needs to be used for exclusion constraints. But this
+ * function is also called when doing a "pre-check" for conflicts on a unique
+ * constraint, when doing speculative insertion. Caller may use the returned
+ * conflict TID to take further steps.
*/
-bool
-check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
- ItemPointer tupleid, Datum *values, bool *isnull,
- EState *estate, bool newIndex, bool errorOK)
+static bool
+check_exclusion_or_unique_constraint(Relation heap, Relation index,
+ IndexInfo *indexInfo,
+ ItemPointer tupleid,
+ Datum *values, bool *isnull,
+ EState *estate, bool newIndex,
+ CEOUC_WAIT_MODE waitMode,
+ bool violationOK,
+ ItemPointer conflictTid)
{
- Oid *constr_procs = indexInfo->ii_ExclusionProcs;
- uint16 *constr_strats = indexInfo->ii_ExclusionStrats;
+ Oid *constr_procs;
+ uint16 *constr_strats;
Oid *index_collations = index->rd_indcollation;
int index_natts = index->rd_index->indnatts;
IndexScanDesc index_scan;
@@ -386,6 +658,17 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
TupleTableSlot *existing_slot;
TupleTableSlot *save_scantuple;
+ if (indexInfo->ii_ExclusionOps)
+ {
+ constr_procs = indexInfo->ii_ExclusionProcs;
+ constr_strats = indexInfo->ii_ExclusionStrats;
+ }
+ else
+ {
+ constr_procs = indexInfo->ii_UniqueProcs;
+ constr_strats = indexInfo->ii_UniqueStrats;
+ }
+
/*
* If any of the input values are NULL, the constraint check is assumed to
* pass (i.e., we assume the operators are strict).
@@ -450,7 +733,8 @@ retry:
/*
* Ignore the entry for the tuple we're trying to check.
*/
- if (ItemPointerEquals(tupleid, &tup->t_self))
+ if (ItemPointerIsValid(tupleid) &&
+ ItemPointerEquals(tupleid, &tup->t_self))
{
if (found_self) /* should not happen */
elog(ERROR, "found self tuple multiple times in index \"%s\"",
@@ -480,39 +764,47 @@ retry:
}
/*
- * At this point we have either a conflict or a potential conflict. If
- * we're not supposed to raise error, just return the fact of the
- * potential conflict without waiting to see if it's real.
- */
- if (errorOK)
- {
- conflict = true;
- break;
- }
-
- /*
+ * At this point we have either a conflict or a potential conflict.
+ *
* If an in-progress transaction is affecting the visibility of this
- * tuple, we need to wait for it to complete and then recheck. For
- * simplicity we do rechecking by just restarting the whole scan ---
- * this case probably doesn't happen often enough to be worth trying
- * harder, and anyway we don't want to hold any index internal locks
- * while waiting.
+ * tuple, we need to wait for it to complete and then recheck (unless
+ * the caller requested not to). For simplicity we do rechecking by
+ * just restarting the whole scan --- this case probably doesn't
+ * happen often enough to be worth trying harder, and anyway we don't
+ * want to hold any index internal locks while waiting.
*/
xwait = TransactionIdIsValid(DirtySnapshot.xmin) ?
DirtySnapshot.xmin : DirtySnapshot.xmax;
- if (TransactionIdIsValid(xwait))
+ if (TransactionIdIsValid(xwait) &&
+ (waitMode == CEOUC_WAIT ||
+ (waitMode == CEOUC_LIVELOCK_PREVENTING_WAIT &&
+ DirtySnapshot.speculativeToken &&
+ TransactionIdPrecedes(GetCurrentTransactionId(), xwait))))
{
ctid_wait = tup->t_data->t_ctid;
index_endscan(index_scan);
- XactLockTableWait(xwait, heap, &ctid_wait,
- XLTW_RecheckExclusionConstr);
+ if (DirtySnapshot.speculativeToken)
+ SpeculativeInsertionWait(DirtySnapshot.xmin,
+ DirtySnapshot.speculativeToken);
+ else
+ XactLockTableWait(xwait, heap, &ctid_wait,
+ XLTW_RecheckExclusionConstr);
goto retry;
}
/*
- * We have a definite conflict. Report it.
+ * We have a definite conflict (or a potential one, but the caller
+ * didn't want to wait). Return it to caller, or report it.
*/
+ if (violationOK)
+ {
+ conflict = true;
+ if (conflictTid)
+ *conflictTid = tup->t_self;
+ break;
+ }
+
error_new = BuildIndexValueDescription(index, values, isnull);
error_existing = BuildIndexValueDescription(index, existing_values,
existing_isnull);
@@ -544,10 +836,10 @@ retry:
/*
* Ordinarily, at this point the search should have found the originally
- * inserted tuple, unless we exited the loop early because of conflict.
- * However, it is possible to define exclusion constraints for which that
- * wouldn't be true --- for instance, if the operator is <>. So we no
- * longer complain if found_self is still false.
+ * inserted tuple (if any), unless we exited the loop early because of
+ * conflict. However, it is possible to define exclusion constraints for
+ * which that wouldn't be true --- for instance, if the operator is <>.
+ * So we no longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;
@@ -557,6 +849,25 @@ retry:
return !conflict;
}
+/*
+ * Check for violation of an exclusion constraint
+ *
+ * This is a dumbed down version of check_exclusion_or_unique_constraint
+ * for external callers. They don't need all the special modes.
+ */
+void
+check_exclusion_constraint(Relation heap, Relation index,
+ IndexInfo *indexInfo,
+ ItemPointer tupleid,
+ Datum *values, bool *isnull,
+ EState *estate, bool newIndex)
+{
+ (void) check_exclusion_or_unique_constraint(heap, index, indexInfo, tupleid,
+ values, isnull,
+ estate, newIndex,
+ CEOUC_WAIT, false, NULL);
+}
+
/*
* Check existing tuple's index values to see if it really matches the
* exclusion condition against the new_values. Returns true if conflict.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 4272d9bc15..0dee949178 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1813,6 +1813,12 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
errmsg("new row violates row level security policy for \"%s\"",
wco->relname)));
break;
+ case WCO_RLS_CONFLICT_CHECK:
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("new row violates row level security policy (USING expression) for \"%s\"",
+ wco->relname)));
+ break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
break;
@@ -1972,6 +1978,31 @@ ExecBuildSlotValueDescription(Oid reloid,
}
+/*
+ * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
+ * given ResultRelInfo
+ */
+LockTupleMode
+ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
+{
+ Bitmapset *keyCols;
+ Bitmapset *updatedCols;
+
+ /*
+ * Compute lock mode to use. If columns that are part of the key have not
+ * been modified, then we can use a weaker lock, allowing for better
+ * concurrency.
+ */
+ updatedCols = GetUpdatedColumns(relinfo, estate);
+ keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
+ INDEX_ATTR_BITMAP_KEY);
+
+ if (bms_overlap(keyCols, updatedCols))
+ return LockTupleExclusive;
+
+ return LockTupleNoKeyExclusive;
+}
+
/*
* ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
*/
@@ -2186,8 +2217,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
* recycled and reused for an unrelated tuple. This implies that
* the latest version of the row was deleted, so we need do
* nothing. (Should be safe to examine xmin without getting
- * buffer's content lock, since xmin never changes in an existing
- * tuple.)
+ * buffer's content lock. We assume reading a TransactionId to be
+ * atomic, and Xmin never changes in an existing tuple, except to
+ * invalid or frozen, and neither of those can match priorXmax.)
*/
if (!TransactionIdEquals(HeapTupleHeaderGetXmin(tuple.t_data),
priorXmax))
@@ -2268,11 +2300,12 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
* case, so as to avoid the "Halloween problem" of
* repeated update attempts. In the latter case it might
* be sensible to fetch the updated tuple instead, but
- * doing so would require changing heap_lock_tuple as well
- * as heap_update and heap_delete to not complain about
- * updating "invisible" tuples, which seems pretty scary.
- * So for now, treat the tuple as deleted and do not
- * process.
+ * doing so would require changing heap_update and
+ * heap_delete to not complain about updating "invisible"
+ * tuples, which seems pretty scary (heap_lock_tuple will
+ * not complain, but few callers expect HeapTupleInvisible,
+ * and we're not one of them). So for now, treat the tuple
+ * as deleted and do not process.
*/
ReleaseBuffer(buffer);
return NULL;
@@ -2287,6 +2320,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to concurrent update")));
+
+ /* Should not encounter speculative tuple on recheck */
+ Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data));
if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self))
{
/* it was updated, so look at the updated version */
@@ -2302,6 +2338,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
ReleaseBuffer(buffer);
return NULL;
+ case HeapTupleInvisible:
+ elog(ERROR, "attempted to lock invisible tuple");
+
default:
ReleaseBuffer(buffer);
elog(ERROR, "unrecognized heap_lock_tuple status: %u",
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index bb6df47a95..5ae106c06a 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -152,10 +152,11 @@ lnext:
* case, so as to avoid the "Halloween problem" of repeated
* update attempts. In the latter case it might be sensible
* to fetch the updated tuple instead, but doing so would
- * require changing heap_lock_tuple as well as heap_update and
- * heap_delete to not complain about updating "invisible"
- * tuples, which seems pretty scary. So for now, treat the
- * tuple as deleted and do not process.
+ * require changing heap_update and heap_delete to not complain
+ * about updating "invisible" tuples, which seems pretty scary
+ * (heap_lock_tuple will not complain, but few callers expect
+ * HeapTupleInvisible, and we're not one of them). So for now,
+ * treat the tuple as deleted and do not process.
*/
goto lnext;
@@ -228,6 +229,9 @@ lnext:
/* Continue loop until we have all target tuples */
break;
+ case HeapTupleInvisible:
+ elog(ERROR, "attempted to lock invisible tuple");
+
default:
elog(ERROR, "unrecognized heap_lock_tuple status: %u",
test);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 31666edfa8..34435c7e50 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -46,12 +46,22 @@
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tqual.h"
+static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ ItemPointer conflictTid,
+ TupleTableSlot *planSlot,
+ TupleTableSlot *excludedSlot,
+ EState *estate,
+ bool canSetTag,
+ TupleTableSlot **returning);
+
/*
* Verify that the tuples to be produced by INSERT or UPDATE match the
* target relation's rowtype
@@ -151,6 +161,51 @@ ExecProcessReturning(ProjectionInfo *projectReturning,
return ExecProject(projectReturning, NULL);
}
+/*
+ * ExecCheckHeapTupleVisible -- verify heap tuple is visible
+ *
+ * It would not be consistent with guarantees of the higher isolation levels to
+ * proceed with avoiding insertion (taking speculative insertion's alternative
+ * path) on the basis of another tuple that is not visible to MVCC snapshot.
+ * Check for the need to raise a serialization failure, and do so as necessary.
+ */
+static void
+ExecCheckHeapTupleVisible(EState *estate,
+ HeapTuple tuple,
+ Buffer buffer)
+{
+ if (!IsolationUsesXactSnapshot())
+ return;
+
+ if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+}
+
+/*
+ * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
+ */
+static void
+ExecCheckTIDVisible(EState *estate,
+ ResultRelInfo *relinfo,
+ ItemPointer tid)
+{
+ Relation rel = relinfo->ri_RelationDesc;
+ Buffer buffer;
+ HeapTupleData tuple;
+
+ /* Redundantly check isolation level */
+ if (!IsolationUsesXactSnapshot())
+ return;
+
+ tuple.t_self = *tid;
+ if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
+ elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
+ ExecCheckHeapTupleVisible(estate, &tuple, buffer);
+ ReleaseBuffer(buffer);
+}
+
/* ----------------------------------------------------------------
* ExecInsert
*
@@ -161,8 +216,11 @@ ExecProcessReturning(ProjectionInfo *projectReturning,
* ----------------------------------------------------------------
*/
static TupleTableSlot *
-ExecInsert(TupleTableSlot *slot,
+ExecInsert(ModifyTableState *mtstate,
+ TupleTableSlot *slot,
TupleTableSlot *planSlot,
+ List *arbiterIndexes,
+ OnConflictAction onconflict,
EState *estate,
bool canSetTag)
{
@@ -199,7 +257,15 @@ ExecInsert(TupleTableSlot *slot,
if (resultRelationDesc->rd_rel->relhasoids)
HeapTupleSetOid(tuple, InvalidOid);
- /* BEFORE ROW INSERT Triggers */
+ /*
+ * BEFORE ROW INSERT Triggers.
+ *
+ * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
+ * INSERT ... ON CONFLICT statement. We cannot check for constraint
+ * violations before firing these triggers, because they can change the
+ * values to insert. Also, they can run arbitrary user-defined code with
+ * side-effects that we can't cancel by just not inserting the tuple.
+ */
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->trig_insert_before_row)
{
@@ -268,21 +334,132 @@ ExecInsert(TupleTableSlot *slot,
if (resultRelationDesc->rd_att->constr)
ExecConstraints(resultRelInfo, slot, estate);
- /*
- * insert the tuple
- *
- * Note: heap_insert returns the tid (location) of the new tuple in
- * the t_self field.
- */
- newId = heap_insert(resultRelationDesc, tuple,
- estate->es_output_cid, 0, NULL);
+ if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
+ {
+ /* Perform a speculative insertion. */
+ uint32 specToken;
+ ItemPointerData conflictTid;
+ bool specConflict;
- /*
- * insert index entries for tuple
- */
- if (resultRelInfo->ri_NumIndices > 0)
+ /*
+ * Do a non-conclusive check for conflicts first.
+ *
+ * We're not holding any locks yet, so this doesn't guarantee that
+ * the later insert won't conflict. But it avoids leaving behind
+ * a lot of canceled speculative insertions, if you run a lot of
+ * INSERT ON CONFLICT statements that do conflict.
+ *
+ * We loop back here if we find a conflict below, either during
+ * the pre-check, or when we re-check after inserting the tuple
+ * speculatively. See the executor README for a full discussion
+ * of speculative insertion.
+ */
+ vlock:
+ specConflict = false;
+ if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
+ arbiterIndexes))
+ {
+ /* committed conflict tuple found */
+ if (onconflict == ONCONFLICT_UPDATE)
+ {
+ /*
+ * In case of ON CONFLICT DO UPDATE, execute the UPDATE
+ * part. Be prepared to retry if the UPDATE fails because
+ * of another concurrent UPDATE/DELETE to the conflict
+ * tuple.
+ */
+ TupleTableSlot *returning = NULL;
+
+ if (ExecOnConflictUpdate(mtstate, resultRelInfo,
+ &conflictTid, planSlot, slot,
+ estate, canSetTag, &returning))
+ {
+ InstrCountFiltered2(&mtstate->ps, 1);
+ return returning;
+ }
+ else
+ goto vlock;
+ }
+ else
+ {
+ /*
+ * In case of ON CONFLICT DO NOTHING, do nothing.
+ * However, verify that the tuple is visible to the
+ * executor's MVCC snapshot at higher isolation levels.
+ */
+ Assert(onconflict == ONCONFLICT_NOTHING);
+ ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
+ InstrCountFiltered2(&mtstate->ps, 1);
+ return NULL;
+ }
+ }
+
+ /*
+ * Before we start insertion proper, acquire our "speculative
+ * insertion lock". Others can use that to wait for us to decide
+ * if we're going to go ahead with the insertion, instead of
+ * waiting for the whole transaction to complete.
+ */
+ specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
+ HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
+
+ /* insert the tuple, with the speculative token */
+ newId = heap_insert(resultRelationDesc, tuple,
+ estate->es_output_cid,
+ HEAP_INSERT_SPECULATIVE,
+ NULL);
+
+ /* insert index entries for tuple */
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate);
+ estate, true, &specConflict,
+ arbiterIndexes);
+
+ /* adjust the tuple's state accordingly */
+ if (!specConflict)
+ heap_finish_speculative(resultRelationDesc, tuple);
+ else
+ heap_abort_speculative(resultRelationDesc, tuple);
+
+ /*
+ * Wake up anyone waiting for our decision. They will re-check
+ * the tuple, see that it's no longer speculative, and wait on our
+ * XID as if this was a regularly inserted tuple all along. Or if
+ * we killed the tuple, they will see it's dead, and proceed as if
+ * the tuple never existed.
+ */
+ SpeculativeInsertionLockRelease(GetCurrentTransactionId());
+
+ /*
+ * If there was a conflict, start from the beginning. We'll do
+ * the pre-check again, which will now find the conflicting tuple
+ * (unless it aborts before we get there).
+ */
+ if (specConflict)
+ {
+ list_free(recheckIndexes);
+ goto vlock;
+ }
+
+ /* Since there was no insertion conflict, we're done */
+ }
+ else
+ {
+ /*
+ * insert the tuple normally.
+ *
+ * Note: heap_insert returns the tid (location) of the new tuple
+ * in the t_self field.
+ */
+ newId = heap_insert(resultRelationDesc, tuple,
+ estate->es_output_cid,
+ 0, NULL);
+
+ /* insert index entries for tuple */
+ if (resultRelInfo->ri_NumIndices > 0)
+ recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
+ estate, false, NULL,
+ arbiterIndexes);
+ }
}
if (canSetTag)
@@ -800,7 +977,7 @@ lreplace:;
*/
if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate);
+ estate, false, NULL, NIL);
}
if (canSetTag)
@@ -832,6 +1009,190 @@ lreplace:;
return NULL;
}
+/*
+ * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
+ *
+ * Try to lock tuple for update as part of speculative insertion. If
+ * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
+ * (but still lock row, even though it may not satisfy estate's
+ * snapshot).
+ *
+ * Returns true if we're done (with or without an update), or false if
+ * the caller must retry the INSERT from scratch.
+ */
+static bool
+ExecOnConflictUpdate(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ ItemPointer conflictTid,
+ TupleTableSlot *planSlot,
+ TupleTableSlot *excludedSlot,
+ EState *estate,
+ bool canSetTag,
+ TupleTableSlot **returning)
+{
+ ExprContext *econtext = mtstate->ps.ps_ExprContext;
+ Relation relation = resultRelInfo->ri_RelationDesc;
+ List *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere;
+ HeapTupleData tuple;
+ HeapUpdateFailureData hufd;
+ LockTupleMode lockmode;
+ HTSU_Result test;
+ Buffer buffer;
+
+ /* Determine lock mode to use */
+ lockmode = ExecUpdateLockMode(estate, resultRelInfo);
+
+ /*
+ * Lock tuple for update. Don't follow updates when tuple cannot be
+ * locked without doing so. A row locking conflict here means our
+ * previous conclusion that the tuple is conclusively committed is not
+ * true anymore.
+ */
+ tuple.t_self = *conflictTid;
+ test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
+ lockmode, LockWaitBlock, false, &buffer,
+ &hufd);
+ switch (test)
+ {
+ case HeapTupleMayBeUpdated:
+ /* success! */
+ break;
+
+ case HeapTupleInvisible:
+
+ /*
+ * This can occur when a just inserted tuple is updated again in
+ * the same command. E.g. because multiple rows with the same
+ * conflicting key values are inserted.
+ *
+ * This is somewhat similar to the ExecUpdate()
+ * HeapTupleSelfUpdated case. We do not want to proceed because
+ * it would lead to the same row being updated a second time in
+ * some unspecified order, and in contrast to plain UPDATEs
+ * there's no historical behavior to break.
+ *
+ * It is the user's responsibility to prevent this situation from
+ * occurring. These problems are why SQL-2003 similarly specifies
+ * that for SQL MERGE, an exception must be raised in the event of
+ * an attempt to update the same row twice.
+ */
+ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
+ ereport(ERROR,
+ (errcode(ERRCODE_CARDINALITY_VIOLATION),
+ errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
+ errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
+
+ /* This shouldn't happen */
+ elog(ERROR, "attempted to lock invisible tuple");
+
+ case HeapTupleSelfUpdated:
+
+ /*
+ * This state should never be reached. As a dirty snapshot is used
+ * to find conflicting tuples, speculative insertion wouldn't have
+ * seen this row to conflict with.
+ */
+ elog(ERROR, "unexpected self-updated tuple");
+
+ case HeapTupleUpdated:
+ if (IsolationUsesXactSnapshot())
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+ errmsg("could not serialize access due to concurrent update")));
+
+ /*
+ * Tell caller to try again from the very start.
+ *
+ * It does not make sense to use the usual EvalPlanQual() style
+ * loop here, as the new version of the row might not conflict
+ * anymore, or the conflicting tuple has actually been deleted.
+ */
+ ReleaseBuffer(buffer);
+ return false;
+
+ default:
+ elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
+ }
+
+ /*
+ * Success, the tuple is locked.
+ *
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous cycle.
+ */
+ ResetExprContext(econtext);
+
+ /*
+ * Verify that the tuple is visible to our MVCC snapshot if the current
+ * isolation level mandates that.
+ *
+ * It's not sufficient to rely on the check within ExecUpdate() as e.g.
+ * CONFLICT ... WHERE clause may prevent us from reaching that.
+ *
+ * This means we only ever continue when a new command in the current
+ * transaction could see the row, even though in READ COMMITTED mode the
+ * tuple will not be visible according to the current statement's
+ * snapshot. This is in line with the way UPDATE deals with newer tuple
+ * versions.
+ */
+ ExecCheckHeapTupleVisible(estate, &tuple, buffer);
+
+ /* Store target's existing tuple in the state's dedicated slot */
+ ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false);
+
+ /*
+ * Make tuple and any needed join variables available to ExecQual and
+ * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
+ * the target's existing tuple is installed in the scantuple. EXCLUDED has
+ * been made to reference INNER_VAR in setrefs.c, but there is no other
+ * redirection.
+ */
+ econtext->ecxt_scantuple = mtstate->mt_existing;
+ econtext->ecxt_innertuple = excludedSlot;
+ econtext->ecxt_outertuple = NULL;
+
+ if (!ExecQual(onConflictSetWhere, econtext, false))
+ {
+ ReleaseBuffer(buffer);
+ InstrCountFiltered1(&mtstate->ps, 1);
+ return true; /* done with the tuple */
+ }
+
+ if (resultRelInfo->ri_WithCheckOptions != NIL)
+ {
+ /*
+ * Check target's existing tuple against UPDATE-applicable USING
+ * security barrier quals (if any), enforced here as RLS checks/WCOs.
+ *
+ * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
+ * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
+ * but that's almost the extent of its special handling for ON
+ * CONFLICT DO UPDATE.
+ *
+ * The rewriter will also have associated UPDATE applicable straight
+ * RLS checks/WCOs for the benefit of the ExecUpdate() call that
+ * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
+ * kinds, so there is no danger of spurious over-enforcement in the
+ * INSERT or UPDATE path.
+ */
+ ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
+ mtstate->mt_existing,
+ mtstate->ps.state);
+ }
+
+ /* Project the new tuple version */
+ ExecProject(resultRelInfo->ri_onConflictSetProj, NULL);
+
+ /* Execute UPDATE with projection */
+ *returning = ExecUpdate(&tuple.t_data->t_ctid, NULL,
+ mtstate->mt_conflproj, planSlot,
+ &mtstate->mt_epqstate, mtstate->ps.state,
+ canSetTag);
+
+ ReleaseBuffer(buffer);
+ return true;
+}
+
/*
* Process BEFORE EACH STATEMENT triggers
@@ -843,6 +1204,9 @@ fireBSTriggers(ModifyTableState *node)
{
case CMD_INSERT:
ExecBSInsertTriggers(node->ps.state, node->resultRelInfo);
+ if (node->mt_onconflict == ONCONFLICT_UPDATE)
+ ExecBSUpdateTriggers(node->ps.state,
+ node->resultRelInfo);
break;
case CMD_UPDATE:
ExecBSUpdateTriggers(node->ps.state, node->resultRelInfo);
@@ -865,6 +1229,9 @@ fireASTriggers(ModifyTableState *node)
switch (node->operation)
{
case CMD_INSERT:
+ if (node->mt_onconflict == ONCONFLICT_UPDATE)
+ ExecASUpdateTriggers(node->ps.state,
+ node->resultRelInfo);
ExecASInsertTriggers(node->ps.state, node->resultRelInfo);
break;
case CMD_UPDATE:
@@ -1062,7 +1429,9 @@ ExecModifyTable(ModifyTableState *node)
switch (operation)
{
case CMD_INSERT:
- slot = ExecInsert(slot, planSlot, estate, node->canSetTag);
+ slot = ExecInsert(node, slot, planSlot,
+ node->mt_arbiterindexes, node->mt_onconflict,
+ estate, node->canSetTag);
break;
case CMD_UPDATE:
slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
@@ -1137,6 +1506,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
mtstate->mt_nplans = nplans;
+ mtstate->mt_onconflict = node->onConflictAction;
+ mtstate->mt_arbiterindexes = node->arbiterIndexes;
/* set up epqstate with dummy subplan data for the moment */
EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
@@ -1175,7 +1546,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE &&
resultRelInfo->ri_IndexRelationDescs == NULL)
- ExecOpenIndices(resultRelInfo);
+ ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);
/* Now init the plan for this result rel */
estate->es_result_relation_info = resultRelInfo;
@@ -1279,6 +1650,58 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
mtstate->ps.ps_ExprContext = NULL;
}
+ /*
+ * If needed, initialize target list, projection and qual for ON CONFLICT
+ * DO UPDATE.
+ */
+ resultRelInfo = mtstate->resultRelInfo;
+ if (node->onConflictAction == ONCONFLICT_UPDATE)
+ {
+ ExprContext *econtext;
+ ExprState *setexpr;
+ TupleDesc tupDesc;
+
+ /* insert may only have one plan, inheritance is not expanded */
+ Assert(nplans == 1);
+
+ /* already exists if created by RETURNING processing above */
+ if (mtstate->ps.ps_ExprContext == NULL)
+ ExecAssignExprContext(estate, &mtstate->ps);
+
+ econtext = mtstate->ps.ps_ExprContext;
+
+ /* initialize slot for the existing tuple */
+ mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state);
+ ExecSetSlotDescriptor(mtstate->mt_existing,
+ resultRelInfo->ri_RelationDesc->rd_att);
+
+ mtstate->mt_excludedtlist = node->exclRelTlist;
+
+ /* create target slot for UPDATE SET projection */
+ tupDesc = ExecTypeFromTL((List *) node->onConflictSet,
+ false);
+ mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
+ ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc);
+
+ /* build UPDATE SET expression and projection state */
+ setexpr = ExecInitExpr((Expr *) node->onConflictSet, &mtstate->ps);
+ resultRelInfo->ri_onConflictSetProj =
+ ExecBuildProjectionInfo((List *) setexpr, econtext,
+ mtstate->mt_conflproj,
+ resultRelInfo->ri_RelationDesc->rd_att);
+
+ /* build DO UPDATE WHERE clause expression */
+ if (node->onConflictWhere)
+ {
+ ExprState *qualexpr;
+
+ qualexpr = ExecInitExpr((Expr *) node->onConflictWhere,
+ mtstate->mt_plans[0]);
+
+ resultRelInfo->ri_onConflictSetWhere = (List *) qualexpr;
+ }
+ }
+
/*
* If we have any secondary relations in an UPDATE or DELETE, they need to
* be treated like non-locked relations in SELECT FOR UPDATE, ie, the
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 1b02be287c..a3139d3eb5 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -81,6 +81,7 @@ _copyPlannedStmt(const PlannedStmt *from)
COPY_SCALAR_FIELD(queryId);
COPY_SCALAR_FIELD(hasReturning);
COPY_SCALAR_FIELD(hasModifyingCTE);
+ COPY_SCALAR_FIELD(isUpsert);
COPY_SCALAR_FIELD(canSetTag);
COPY_SCALAR_FIELD(transientPlan);
COPY_NODE_FIELD(planTree);
@@ -185,6 +186,12 @@ _copyModifyTable(const ModifyTable *from)
COPY_NODE_FIELD(fdwPrivLists);
COPY_NODE_FIELD(rowMarks);
COPY_SCALAR_FIELD(epqParam);
+ COPY_SCALAR_FIELD(onConflictAction);
+ COPY_NODE_FIELD(arbiterIndexes);
+ COPY_NODE_FIELD(onConflictSet);
+ COPY_NODE_FIELD(onConflictWhere);
+ COPY_SCALAR_FIELD(exclRelRTI);
+ COPY_NODE_FIELD(exclRelTlist);
return newnode;
}
@@ -1786,6 +1793,22 @@ _copyCurrentOfExpr(const CurrentOfExpr *from)
return newnode;
}
+/*
+ * _copyInferenceElem
+ */
+static InferenceElem *
+_copyInferenceElem(const InferenceElem *from)
+{
+ InferenceElem *newnode = makeNode(InferenceElem);
+
+ COPY_NODE_FIELD(expr);
+ COPY_SCALAR_FIELD(infercollid);
+ COPY_SCALAR_FIELD(inferopfamily);
+ COPY_SCALAR_FIELD(inferopcinputtype);
+
+ return newnode;
+}
+
/*
* _copyTargetEntry
*/
@@ -1852,6 +1875,26 @@ _copyFromExpr(const FromExpr *from)
return newnode;
}
+/*
+ * _copyOnConflictExpr
+ */
+static OnConflictExpr *
+_copyOnConflictExpr(const OnConflictExpr *from)
+{
+ OnConflictExpr *newnode = makeNode(OnConflictExpr);
+
+ COPY_SCALAR_FIELD(action);
+ COPY_NODE_FIELD(arbiterElems);
+ COPY_NODE_FIELD(arbiterWhere);
+ COPY_NODE_FIELD(onConflictSet);
+ COPY_NODE_FIELD(onConflictWhere);
+ COPY_SCALAR_FIELD(constraint);
+ COPY_SCALAR_FIELD(exclRelIndex);
+ COPY_NODE_FIELD(exclRelTlist);
+
+ return newnode;
+}
+
/* ****************************************************************
* relation.h copy functions
*
@@ -2135,6 +2178,33 @@ _copyWithClause(const WithClause *from)
return newnode;
}
+static InferClause *
+_copyInferClause(const InferClause *from)
+{
+ InferClause *newnode = makeNode(InferClause);
+
+ COPY_NODE_FIELD(indexElems);
+ COPY_NODE_FIELD(whereClause);
+ COPY_STRING_FIELD(conname);
+ COPY_LOCATION_FIELD(location);
+
+ return newnode;
+}
+
+static OnConflictClause *
+_copyOnConflictClause(const OnConflictClause *from)
+{
+ OnConflictClause *newnode = makeNode(OnConflictClause);
+
+ COPY_SCALAR_FIELD(action);
+ COPY_NODE_FIELD(infer);
+ COPY_NODE_FIELD(targetList);
+ COPY_NODE_FIELD(whereClause);
+ COPY_LOCATION_FIELD(location);
+
+ return newnode;
+}
+
static CommonTableExpr *
_copyCommonTableExpr(const CommonTableExpr *from)
{
@@ -2552,6 +2622,7 @@ _copyQuery(const Query *from)
COPY_NODE_FIELD(jointree);
COPY_NODE_FIELD(targetList);
COPY_NODE_FIELD(withCheckOptions);
+ COPY_NODE_FIELD(onConflict);
COPY_NODE_FIELD(returningList);
COPY_NODE_FIELD(groupClause);
COPY_NODE_FIELD(havingQual);
@@ -2575,6 +2646,7 @@ _copyInsertStmt(const InsertStmt *from)
COPY_NODE_FIELD(relation);
COPY_NODE_FIELD(cols);
COPY_NODE_FIELD(selectStmt);
+ COPY_NODE_FIELD(onConflictClause);
COPY_NODE_FIELD(returningList);
COPY_NODE_FIELD(withClause);
@@ -4283,6 +4355,9 @@ copyObject(const void *from)
case T_CurrentOfExpr:
retval = _copyCurrentOfExpr(from);
break;
+ case T_InferenceElem:
+ retval = _copyInferenceElem(from);
+ break;
case T_TargetEntry:
retval = _copyTargetEntry(from);
break;
@@ -4295,6 +4370,9 @@ copyObject(const void *from)
case T_FromExpr:
retval = _copyFromExpr(from);
break;
+ case T_OnConflictExpr:
+ retval = _copyOnConflictExpr(from);
+ break;
/*
* RELATION NODES
@@ -4753,6 +4831,12 @@ copyObject(const void *from)
case T_WithClause:
retval = _copyWithClause(from);
break;
+ case T_InferClause:
+ retval = _copyInferClause(from);
+ break;
+ case T_OnConflictClause:
+ retval = _copyOnConflictClause(from);
+ break;
case T_CommonTableExpr:
retval = _copyCommonTableExpr(from);
break;
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 1b9a83b93e..7c86e919a4 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -682,6 +682,17 @@ _equalCurrentOfExpr(const CurrentOfExpr *a, const CurrentOfExpr *b)
return true;
}
+static bool
+_equalInferenceElem(const InferenceElem *a, const InferenceElem *b)
+{
+ COMPARE_NODE_FIELD(expr);
+ COMPARE_SCALAR_FIELD(infercollid);
+ COMPARE_SCALAR_FIELD(inferopfamily);
+ COMPARE_SCALAR_FIELD(inferopcinputtype);
+
+ return true;
+}
+
static bool
_equalTargetEntry(const TargetEntry *a, const TargetEntry *b)
{
@@ -728,6 +739,20 @@ _equalFromExpr(const FromExpr *a, const FromExpr *b)
return true;
}
+static bool
+_equalOnConflictExpr(const OnConflictExpr *a, const OnConflictExpr *b)
+{
+ COMPARE_SCALAR_FIELD(action);
+ COMPARE_NODE_FIELD(arbiterElems);
+ COMPARE_NODE_FIELD(arbiterWhere);
+ COMPARE_NODE_FIELD(onConflictSet);
+ COMPARE_NODE_FIELD(onConflictWhere);
+ COMPARE_SCALAR_FIELD(constraint);
+ COMPARE_SCALAR_FIELD(exclRelIndex);
+ COMPARE_NODE_FIELD(exclRelTlist);
+
+ return true;
+}
/*
* Stuff from relation.h
@@ -868,6 +893,7 @@ _equalQuery(const Query *a, const Query *b)
COMPARE_NODE_FIELD(jointree);
COMPARE_NODE_FIELD(targetList);
COMPARE_NODE_FIELD(withCheckOptions);
+ COMPARE_NODE_FIELD(onConflict);
COMPARE_NODE_FIELD(returningList);
COMPARE_NODE_FIELD(groupClause);
COMPARE_NODE_FIELD(havingQual);
@@ -889,6 +915,7 @@ _equalInsertStmt(const InsertStmt *a, const InsertStmt *b)
COMPARE_NODE_FIELD(relation);
COMPARE_NODE_FIELD(cols);
COMPARE_NODE_FIELD(selectStmt);
+ COMPARE_NODE_FIELD(onConflictClause);
COMPARE_NODE_FIELD(returningList);
COMPARE_NODE_FIELD(withClause);
@@ -2433,6 +2460,29 @@ _equalWithClause(const WithClause *a, const WithClause *b)
return true;
}
+static bool
+_equalInferClause(const InferClause *a, const InferClause *b)
+{
+ COMPARE_NODE_FIELD(indexElems);
+ COMPARE_NODE_FIELD(whereClause);
+ COMPARE_STRING_FIELD(conname);
+ COMPARE_LOCATION_FIELD(location);
+
+ return true;
+}
+
+static bool
+_equalOnConflictClause(const OnConflictClause *a, const OnConflictClause *b)
+{
+ COMPARE_SCALAR_FIELD(action);
+ COMPARE_NODE_FIELD(infer);
+ COMPARE_NODE_FIELD(targetList);
+ COMPARE_NODE_FIELD(whereClause);
+ COMPARE_LOCATION_FIELD(location);
+
+ return true;
+}
+
static bool
_equalCommonTableExpr(const CommonTableExpr *a, const CommonTableExpr *b)
{
@@ -2712,6 +2762,9 @@ equal(const void *a, const void *b)
case T_CurrentOfExpr:
retval = _equalCurrentOfExpr(a, b);
break;
+ case T_InferenceElem:
+ retval = _equalInferenceElem(a, b);
+ break;
case T_TargetEntry:
retval = _equalTargetEntry(a, b);
break;
@@ -2721,6 +2774,9 @@ equal(const void *a, const void *b)
case T_FromExpr:
retval = _equalFromExpr(a, b);
break;
+ case T_OnConflictExpr:
+ retval = _equalOnConflictExpr(a, b);
+ break;
case T_JoinExpr:
retval = _equalJoinExpr(a, b);
break;
@@ -3169,6 +3225,12 @@ equal(const void *a, const void *b)
case T_WithClause:
retval = _equalWithClause(a, b);
break;
+ case T_InferClause:
+ retval = _equalInferClause(a, b);
+ break;
+ case T_OnConflictClause:
+ retval = _equalOnConflictClause(a, b);
+ break;
case T_CommonTableExpr:
retval = _equalCommonTableExpr(a, b);
break;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index d6f1f5bb6d..4135f9c3cf 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -235,6 +235,13 @@ exprType(const Node *expr)
case T_CurrentOfExpr:
type = BOOLOID;
break;
+ case T_InferenceElem:
+ {
+ const InferenceElem *n = (const InferenceElem *) expr;
+
+ type = exprType((Node *) n->expr);
+ }
+ break;
case T_PlaceHolderVar:
type = exprType((Node *) ((const PlaceHolderVar *) expr)->phexpr);
break;
@@ -894,6 +901,9 @@ exprCollation(const Node *expr)
case T_CurrentOfExpr:
coll = InvalidOid; /* result is always boolean */
break;
+ case T_InferenceElem:
+ coll = exprCollation((Node *) ((const InferenceElem *) expr)->expr);
+ break;
case T_PlaceHolderVar:
coll = exprCollation((Node *) ((const PlaceHolderVar *) expr)->phexpr);
break;
@@ -1484,6 +1494,12 @@ exprLocation(const Node *expr)
case T_WithClause:
loc = ((const WithClause *) expr)->location;
break;
+ case T_InferClause:
+ loc = ((const InferClause *) expr)->location;
+ break;
+ case T_OnConflictClause:
+ loc = ((const OnConflictClause *) expr)->location;
+ break;
case T_CommonTableExpr:
loc = ((const CommonTableExpr *) expr)->location;
break;
@@ -1491,6 +1507,10 @@ exprLocation(const Node *expr)
/* just use argument's location */
loc = exprLocation((Node *) ((const PlaceHolderVar *) expr)->phexpr);
break;
+ case T_InferenceElem:
+ /* just use nested expr's location */
+ loc = exprLocation((Node *) ((const InferenceElem *) expr)->expr);
+ break;
default:
/* for any other node type it's just unknown... */
loc = -1;
@@ -1890,6 +1910,20 @@ expression_tree_walker(Node *node,
return true;
}
break;
+ case T_OnConflictExpr:
+ {
+ OnConflictExpr *onconflict = (OnConflictExpr *) node;
+
+ if (walker((Node *) onconflict->arbiterElems, context))
+ return true;
+ if (walker(onconflict->arbiterWhere, context))
+ return true;
+ if (walker(onconflict->onConflictSet, context))
+ return true;
+ if (walker(onconflict->onConflictWhere, context))
+ return true;
+ }
+ break;
case T_JoinExpr:
{
JoinExpr *join = (JoinExpr *) node;
@@ -1920,6 +1954,8 @@ expression_tree_walker(Node *node,
break;
case T_PlaceHolderVar:
return walker(((PlaceHolderVar *) node)->phexpr, context);
+ case T_InferenceElem:
+ return walker(((InferenceElem *) node)->expr, context);
case T_AppendRelInfo:
{
AppendRelInfo *appinfo = (AppendRelInfo *) node;
@@ -1968,6 +2004,8 @@ query_tree_walker(Query *query,
return true;
if (walker((Node *) query->withCheckOptions, context))
return true;
+ if (walker((Node *) query->onConflict, context))
+ return true;
if (walker((Node *) query->returningList, context))
return true;
if (walker((Node *) query->jointree, context))
@@ -2594,6 +2632,20 @@ expression_tree_mutator(Node *node,
return (Node *) newnode;
}
break;
+ case T_OnConflictExpr:
+ {
+ OnConflictExpr *oc = (OnConflictExpr *) node;
+ OnConflictExpr *newnode;
+
+ FLATCOPY(newnode, oc, OnConflictExpr);
+ MUTATE(newnode->arbiterElems, oc->arbiterElems, List *);
+ MUTATE(newnode->arbiterWhere, oc->arbiterWhere, Node *);
+ MUTATE(newnode->onConflictSet, oc->onConflictSet, List *);
+ MUTATE(newnode->onConflictWhere, oc->onConflictWhere, Node *);
+
+ return (Node *) newnode;
+ }
+ break;
case T_JoinExpr:
{
JoinExpr *join = (JoinExpr *) node;
@@ -2630,6 +2682,16 @@ expression_tree_mutator(Node *node,
return (Node *) newnode;
}
break;
+ case T_InferenceElem:
+ {
+ InferenceElem *inferenceelemdexpr = (InferenceElem *) node;
+ InferenceElem *newnode;
+
+ FLATCOPY(newnode, inferenceelemdexpr, InferenceElem);
+ MUTATE(newnode->expr, newnode->expr, Node *);
+ return (Node *) newnode;
+ }
+ break;
case T_AppendRelInfo:
{
AppendRelInfo *appinfo = (AppendRelInfo *) node;
@@ -2709,6 +2771,7 @@ query_tree_mutator(Query *query,
MUTATE(query->targetList, query->targetList, List *);
MUTATE(query->withCheckOptions, query->withCheckOptions, List *);
+ MUTATE(query->onConflict, query->onConflict, OnConflictExpr *);
MUTATE(query->returningList, query->returningList, List *);
MUTATE(query->jointree, query->jointree, FromExpr *);
MUTATE(query->setOperations, query->setOperations, Node *);
@@ -2978,6 +3041,8 @@ raw_expression_tree_walker(Node *node,
return true;
if (walker(stmt->selectStmt, context))
return true;
+ if (walker(stmt->onConflictClause, context))
+ return true;
if (walker(stmt->returningList, context))
return true;
if (walker(stmt->withClause, context))
@@ -3217,6 +3282,28 @@ raw_expression_tree_walker(Node *node,
break;
case T_WithClause:
return walker(((WithClause *) node)->ctes, context);
+ case T_InferClause:
+ {
+ InferClause *stmt = (InferClause *) node;
+
+ if (walker(stmt->indexElems, context))
+ return true;
+ if (walker(stmt->whereClause, context))
+ return true;
+ }
+ break;
+ case T_OnConflictClause:
+ {
+ OnConflictClause *stmt = (OnConflictClause *) node;
+
+ if (walker(stmt->infer, context))
+ return true;
+ if (walker(stmt->targetList, context))
+ return true;
+ if (walker(stmt->whereClause, context))
+ return true;
+ }
+ break;
case T_CommonTableExpr:
return walker(((CommonTableExpr *) node)->ctequery, context);
default:
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index d5ddd0b359..bc891d391f 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -243,6 +243,7 @@ _outPlannedStmt(StringInfo str, const PlannedStmt *node)
WRITE_UINT_FIELD(queryId);
WRITE_BOOL_FIELD(hasReturning);
WRITE_BOOL_FIELD(hasModifyingCTE);
+ WRITE_BOOL_FIELD(isUpsert);
WRITE_BOOL_FIELD(canSetTag);
WRITE_BOOL_FIELD(transientPlan);
WRITE_NODE_FIELD(planTree);
@@ -337,6 +338,12 @@ _outModifyTable(StringInfo str, const ModifyTable *node)
WRITE_NODE_FIELD(fdwPrivLists);
WRITE_NODE_FIELD(rowMarks);
WRITE_INT_FIELD(epqParam);
+ WRITE_ENUM_FIELD(onConflictAction, OnConflictAction);
+ WRITE_NODE_FIELD(arbiterIndexes);
+ WRITE_NODE_FIELD(onConflictSet);
+ WRITE_NODE_FIELD(onConflictWhere);
+ WRITE_INT_FIELD(exclRelRTI);
+ WRITE_NODE_FIELD(exclRelTlist);
}
static void
@@ -1436,6 +1443,17 @@ _outCurrentOfExpr(StringInfo str, const CurrentOfExpr *node)
WRITE_INT_FIELD(cursor_param);
}
+static void
+_outInferenceElem(StringInfo str, const InferenceElem *node)
+{
+ WRITE_NODE_TYPE("INFERENCEELEM");
+
+ WRITE_NODE_FIELD(expr);
+ WRITE_OID_FIELD(infercollid);
+ WRITE_OID_FIELD(inferopfamily);
+ WRITE_OID_FIELD(inferopcinputtype);
+}
+
static void
_outTargetEntry(StringInfo str, const TargetEntry *node)
{
@@ -1482,6 +1500,21 @@ _outFromExpr(StringInfo str, const FromExpr *node)
WRITE_NODE_FIELD(quals);
}
+static void
+_outOnConflictExpr(StringInfo str, const OnConflictExpr *node)
+{
+ WRITE_NODE_TYPE("ONCONFLICTEXPR");
+
+ WRITE_ENUM_FIELD(action, OnConflictAction);
+ WRITE_NODE_FIELD(arbiterElems);
+ WRITE_NODE_FIELD(arbiterWhere);
+ WRITE_NODE_FIELD(onConflictSet);
+ WRITE_NODE_FIELD(onConflictWhere);
+ WRITE_OID_FIELD(constraint);
+ WRITE_INT_FIELD(exclRelIndex);
+ WRITE_NODE_FIELD(exclRelTlist);
+}
+
/*****************************************************************************
*
* Stuff from relation.h.
@@ -2319,6 +2352,7 @@ _outQuery(StringInfo str, const Query *node)
WRITE_NODE_FIELD(jointree);
WRITE_NODE_FIELD(targetList);
WRITE_NODE_FIELD(withCheckOptions);
+ WRITE_NODE_FIELD(onConflict);
WRITE_NODE_FIELD(returningList);
WRITE_NODE_FIELD(groupClause);
WRITE_NODE_FIELD(havingQual);
@@ -3112,6 +3146,9 @@ _outNode(StringInfo str, const void *obj)
case T_CurrentOfExpr:
_outCurrentOfExpr(str, obj);
break;
+ case T_InferenceElem:
+ _outInferenceElem(str, obj);
+ break;
case T_TargetEntry:
_outTargetEntry(str, obj);
break;
@@ -3124,7 +3161,9 @@ _outNode(StringInfo str, const void *obj)
case T_FromExpr:
_outFromExpr(str, obj);
break;
-
+ case T_OnConflictExpr:
+ _outOnConflictExpr(str, obj);
+ break;
case T_Path:
_outPath(str, obj);
break;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index d1ced0cc4b..8136306e1e 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -214,6 +214,7 @@ _readQuery(void)
READ_NODE_FIELD(jointree);
READ_NODE_FIELD(targetList);
READ_NODE_FIELD(withCheckOptions);
+ READ_NODE_FIELD(onConflict);
READ_NODE_FIELD(returningList);
READ_NODE_FIELD(groupClause);
READ_NODE_FIELD(havingQual);
@@ -1130,6 +1131,22 @@ _readCurrentOfExpr(void)
READ_DONE();
}
+/*
+ * _readInferenceElem
+ */
+static InferenceElem *
+_readInferenceElem(void)
+{
+ READ_LOCALS(InferenceElem);
+
+ READ_NODE_FIELD(expr);
+ READ_OID_FIELD(infercollid);
+ READ_OID_FIELD(inferopfamily);
+ READ_OID_FIELD(inferopcinputtype);
+
+ READ_DONE();
+}
+
/*
* _readTargetEntry
*/
@@ -1196,6 +1213,25 @@ _readFromExpr(void)
READ_DONE();
}
+/*
+ * _readOnConflictExpr
+ */
+static OnConflictExpr *
+_readOnConflictExpr(void)
+{
+ READ_LOCALS(OnConflictExpr);
+
+ READ_ENUM_FIELD(action, OnConflictAction);
+ READ_NODE_FIELD(arbiterElems);
+ READ_NODE_FIELD(arbiterWhere);
+ READ_NODE_FIELD(onConflictSet);
+ READ_NODE_FIELD(onConflictWhere);
+ READ_OID_FIELD(constraint);
+ READ_INT_FIELD(exclRelIndex);
+ READ_NODE_FIELD(exclRelTlist);
+
+ READ_DONE();
+}
/*
* Stuff from parsenodes.h.
@@ -1395,6 +1431,8 @@ parseNodeString(void)
return_value = _readSetToDefault();
else if (MATCH("CURRENTOFEXPR", 13))
return_value = _readCurrentOfExpr();
+ else if (MATCH("INFERENCEELEM", 13))
+ return_value = _readInferenceElem();
else if (MATCH("TARGETENTRY", 11))
return_value = _readTargetEntry();
else if (MATCH("RANGETBLREF", 11))
@@ -1403,6 +1441,8 @@ parseNodeString(void)
return_value = _readJoinExpr();
else if (MATCH("FROMEXPR", 8))
return_value = _readFromExpr();
+ else if (MATCH("ONCONFLICTEXPR", 14))
+ return_value = _readOnConflictExpr();
else if (MATCH("RTE", 3))
return_value = _readRangeTblEntry();
else if (MATCH("RANGETBLFUNCTION", 16))
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index eeb2a41764..3246332d6e 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -4868,7 +4868,7 @@ make_modifytable(PlannerInfo *root,
Index nominalRelation,
List *resultRelations, List *subplans,
List *withCheckOptionLists, List *returningLists,
- List *rowMarks, int epqParam)
+ List *rowMarks, OnConflictExpr *onconflict, int epqParam)
{
ModifyTable *node = makeNode(ModifyTable);
Plan *plan = &node->plan;
@@ -4918,6 +4918,30 @@ make_modifytable(PlannerInfo *root,
node->resultRelations = resultRelations;
node->resultRelIndex = -1; /* will be set correctly in setrefs.c */
node->plans = subplans;
+ if (!onconflict)
+ {
+ node->onConflictAction = ONCONFLICT_NONE;
+ node->onConflictSet = NIL;
+ node->onConflictWhere = NULL;
+ node->arbiterIndexes = NIL;
+ }
+ else
+ {
+ node->onConflictAction = onconflict->action;
+ node->onConflictSet = onconflict->onConflictSet;
+ node->onConflictWhere = onconflict->onConflictWhere;
+
+ /*
+ * If a set of unique index inference elements was provided (an
+ * INSERT...ON CONFLICT "inference specification"), then infer
+ * appropriate unique indexes (or throw an error if none are
+ * available).
+ */
+ node->arbiterIndexes = infer_arbiter_indexes(root);
+
+ node->exclRelRTI = onconflict->exclRelIndex;
+ node->exclRelTlist = onconflict->exclRelTlist;
+ }
node->withCheckOptionLists = withCheckOptionLists;
node->returningLists = returningLists;
node->rowMarks = rowMarks;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index ea4d4c55cb..c80d45acaa 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -243,6 +243,8 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
result->queryId = parse->queryId;
result->hasReturning = (parse->returningList != NIL);
result->hasModifyingCTE = parse->hasModifyingCTE;
+ result->isUpsert =
+ (parse->onConflict && parse->onConflict->action == ONCONFLICT_UPDATE);
result->canSetTag = parse->canSetTag;
result->transientPlan = glob->transientPlan;
result->planTree = top_plan;
@@ -462,6 +464,17 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
parse->limitCount = preprocess_expression(root, parse->limitCount,
EXPRKIND_LIMIT);
+ if (parse->onConflict)
+ {
+ parse->onConflict->onConflictSet = (List *)
+ preprocess_expression(root, (Node *) parse->onConflict->onConflictSet,
+ EXPRKIND_TARGET);
+
+ parse->onConflict->onConflictWhere =
+ preprocess_expression(root, (Node *) parse->onConflict->onConflictWhere,
+ EXPRKIND_QUAL);
+ }
+
root->append_rel_list = (List *)
preprocess_expression(root, (Node *) root->append_rel_list,
EXPRKIND_APPINFO);
@@ -612,6 +625,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
withCheckOptionLists,
returningLists,
rowMarks,
+ parse->onConflict,
SS_assign_special_param(root));
}
}
@@ -802,6 +816,8 @@ inheritance_planner(PlannerInfo *root)
List *rowMarks;
ListCell *lc;
+ Assert(parse->commandType != CMD_INSERT);
+
/*
* We generate a modified instance of the original Query for each target
* relation, plan that, and put all the plans into a list that will be
@@ -1046,6 +1062,8 @@ inheritance_planner(PlannerInfo *root)
if (parse->returningList)
returningLists = lappend(returningLists,
subroot.parse->returningList);
+
+ Assert(!parse->onConflict);
}
/* Mark result as unordered (probably unnecessary) */
@@ -1095,6 +1113,7 @@ inheritance_planner(PlannerInfo *root)
withCheckOptionLists,
returningLists,
rowMarks,
+ NULL,
SS_assign_special_param(root));
}
@@ -1228,6 +1247,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
bool use_hashed_grouping = false;
WindowFuncLists *wflists = NULL;
List *activeWindows = NIL;
+ OnConflictExpr *onconfl;
MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
@@ -1242,6 +1262,13 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/* Preprocess targetlist */
tlist = preprocess_targetlist(root, tlist);
+ onconfl = parse->onConflict;
+ if (onconfl)
+ onconfl->onConflictSet =
+ preprocess_onconflict_targetlist(onconfl->onConflictSet,
+ parse->resultRelation,
+ parse->rtable);
+
/*
* Expand any rangetable entries that have security barrier quals.
* This may add new security barrier subquery RTEs to the rangetable.
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index b7d6ff1122..612d32571a 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -739,7 +739,35 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
splan->plan.targetlist = copyObject(linitial(newRL));
}
+ /*
+ * We treat ModifyTable with ON CONFLICT as a form of 'pseudo
+ * join', where the inner side is the EXCLUDED tuple. Therefore
+ * use fix_join_expr to set up the relevant variables to
+ * INNER_VAR. We explicitly don't create any OUTER_VARs as
+ * those are already used by RETURNING and it seems better to
+ * be non-conflicting.
+ */
+ if (splan->onConflictSet)
+ {
+ indexed_tlist *itlist;
+
+ itlist = build_tlist_index(splan->exclRelTlist);
+
+ splan->onConflictSet =
+ fix_join_expr(root, splan->onConflictSet,
+ NULL, itlist,
+ linitial_int(splan->resultRelations),
+ rtoffset);
+
+ splan->onConflictWhere = (Node *)
+ fix_join_expr(root, (List *) splan->onConflictWhere,
+ NULL, itlist,
+ linitial_int(splan->resultRelations),
+ rtoffset);
+ }
+
splan->nominalRelation += rtoffset;
+ splan->exclRelRTI += rtoffset;
foreach(l, splan->resultRelations)
{
@@ -1846,7 +1874,8 @@ search_indexed_tlist_for_sortgroupref(Node *node,
* inner_itlist = NULL and acceptable_rel = the ID of the target relation.
*
* 'clauses' is the targetlist or list of join clauses
- * 'outer_itlist' is the indexed target list of the outer join relation
+ * 'outer_itlist' is the indexed target list of the outer join relation,
+ * or NULL
* 'inner_itlist' is the indexed target list of the inner join relation,
* or NULL
* 'acceptable_rel' is either zero or the rangetable index of a relation
@@ -1886,12 +1915,17 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
Var *var = (Var *) node;
/* First look for the var in the input tlists */
- newvar = search_indexed_tlist_for_var(var,
- context->outer_itlist,
- OUTER_VAR,
- context->rtoffset);
- if (newvar)
- return (Node *) newvar;
+ if (context->outer_itlist)
+ {
+ newvar = search_indexed_tlist_for_var(var,
+ context->outer_itlist,
+ OUTER_VAR,
+ context->rtoffset);
+ if (newvar)
+ return (Node *) newvar;
+ }
+
+ /* Then in the inner tlist */
if (context->inner_itlist)
{
newvar = search_indexed_tlist_for_var(var,
@@ -1920,7 +1954,7 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
PlaceHolderVar *phv = (PlaceHolderVar *) node;
/* See if the PlaceHolderVar has bubbled up from a lower plan node */
- if (context->outer_itlist->has_ph_vars)
+ if (context->outer_itlist && context->outer_itlist->has_ph_vars)
{
newvar = search_indexed_tlist_for_non_var((Node *) phv,
context->outer_itlist,
@@ -1943,7 +1977,7 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context)
if (IsA(node, Param))
return fix_param_node(context->root, (Param *) node);
/* Try matching more complex expressions too, if tlists have any */
- if (context->outer_itlist->has_non_vars)
+ if (context->outer_itlist && context->outer_itlist->has_non_vars)
{
newvar = search_indexed_tlist_for_non_var(node,
context->outer_itlist,
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index acfd0bcfbe..0220672fc4 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2340,6 +2340,10 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
locally_added_param);
finalize_primnode((Node *) mtplan->returningLists,
&context);
+ finalize_primnode((Node *) mtplan->onConflictSet,
+ &context);
+ finalize_primnode((Node *) mtplan->onConflictWhere,
+ &context);
foreach(l, mtplan->plans)
{
context.paramids =
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 50acfe40e9..4f0dc80d02 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -1030,6 +1030,9 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
pullup_replace_vars((Node *) parse->targetList, &rvcontext);
parse->returningList = (List *)
pullup_replace_vars((Node *) parse->returningList, &rvcontext);
+ if (parse->onConflict)
+ parse->onConflict->onConflictSet = (List *)
+ pullup_replace_vars((Node *) parse->onConflict->onConflictSet, &rvcontext);
replace_vars_in_jointree((Node *) parse->jointree, &rvcontext,
lowest_nulling_outer_join);
Assert(parse->setOperations == NULL);
@@ -1605,6 +1608,9 @@ pull_up_simple_values(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
pullup_replace_vars((Node *) parse->targetList, &rvcontext);
parse->returningList = (List *)
pullup_replace_vars((Node *) parse->returningList, &rvcontext);
+ if (parse->onConflict)
+ parse->onConflict->onConflictSet = (List *)
+ pullup_replace_vars((Node *) parse->onConflict->onConflictSet, &rvcontext);
replace_vars_in_jointree((Node *) parse->jointree, &rvcontext, NULL);
Assert(parse->setOperations == NULL);
parse->havingQual = pullup_replace_vars(parse->havingQual, &rvcontext);
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index 580c846770..6b0c689e0c 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -181,6 +181,19 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
return tlist;
}
+/*
+ * preprocess_onconflict_targetlist
+ * Process ON CONFLICT SET targetlist.
+ *
+ * Returns the new targetlist.
+ */
+List *
+preprocess_onconflict_targetlist(List *tlist, int result_relation, List *range_table)
+{
+ return expand_targetlist(tlist, CMD_UPDATE, result_relation, range_table);
+}
+
+
/*****************************************************************************
*
* TARGETLIST EXPANSION
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 068ab39dd4..8bcc5064a3 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -25,6 +25,7 @@
#include "access/transam.h"
#include "access/xlog.h"
#include "catalog/catalog.h"
+#include "catalog/dependency.h"
#include "catalog/heap.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
@@ -50,6 +51,8 @@ int constraint_exclusion = CONSTRAINT_EXCLUSION_PARTITION;
get_relation_info_hook_type get_relation_info_hook = NULL;
+static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+ Bitmapset *inferAttrs, List *idxExprs);
static int32 get_rel_data_width(Relation rel, int32 *attr_widths);
static List *get_relation_constraints(PlannerInfo *root,
Oid relationObjectId, RelOptInfo *rel,
@@ -399,6 +402,355 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
(*get_relation_info_hook) (root, relationObjectId, inhparent, rel);
}
+/*
+ * infer_arbiter_indexes -
+ * Determine the unique indexes used to arbitrate speculative insertion.
+ *
+ * Uses user-supplied inference clause expressions and predicate to match a
+ * unique index from those defined and ready on the heap relation (target).
+ * An exact match is required on columns/expressions (although they can appear
+ * in any order). However, the predicate given by the user need only restrict
+ * insertion to a subset of some part of the table covered by some particular
+ * unique index (in particular, a partial unique index) in order to be
+ * inferred.
+ *
+ * The implementation does not consider which B-Tree operator class any
+ * particular available unique index attribute uses, unless one was specified
+ * in the inference specification. The same is true of collations. In
+ * particular, there is no system dependency on the default operator class for
+ * the purposes of inference. If no opclass (or collation) is specified, then
+ * all matching indexes (that may or may not match the default in terms of
+ * each attribute opclass/collation) are used for inference.
+ */
+List *
+infer_arbiter_indexes(PlannerInfo *root)
+{
+ OnConflictExpr *onconflict = root->parse->onConflict;
+ /* Iteration state */
+ Relation relation;
+ Oid relationObjectId;
+ Oid indexOidFromConstraint = InvalidOid;
+ List *indexList;
+ ListCell *l;
+
+ /* Normalized inference attributes and inference expressions: */
+ Bitmapset *inferAttrs = NULL;
+ List *inferElems = NIL;
+
+ /* Result */
+ List *candidates = NIL;
+
+ /*
+ * Quickly return NIL for ON CONFLICT DO NOTHING without an inference
+ * specification or named constraint. ON CONFLICT DO UPDATE statements
+ * must always provide one or the other (but parser ought to have caught
+ * that already).
+ */
+ if (onconflict->arbiterElems == NIL &&
+ onconflict->constraint == InvalidOid)
+ return NIL;
+
+ /*
+ * We need not lock the relation since it was already locked, either by
+ * the rewriter or when expand_inherited_rtentry() added it to the query's
+ * rangetable.
+ */
+ relationObjectId = rt_fetch(root->parse->resultRelation,
+ root->parse->rtable)->relid;
+
+ relation = heap_open(relationObjectId, NoLock);
+
+ /*
+ * Build normalized/BMS representation of plain indexed attributes, as
+ * well as direct list of inference elements. This is required for
+ * matching the cataloged definition of indexes.
+ */
+ foreach(l, onconflict->arbiterElems)
+ {
+ InferenceElem *elem;
+ Var *var;
+ int attno;
+
+ elem = (InferenceElem *) lfirst(l);
+
+ /*
+ * Parse analysis of inference elements performs full parse analysis
+ * of Vars, even for non-expression indexes (in contrast with utility
+ * command related use of IndexElem). However, indexes are cataloged
+ * with simple attribute numbers for non-expression indexes. Those
+ * are handled later.
+ */
+ if (!IsA(elem->expr, Var))
+ {
+ inferElems = lappend(inferElems, elem->expr);
+ continue;
+ }
+
+ var = (Var *) elem->expr;
+ attno = var->varattno;
+
+ if (attno < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+ errmsg("system columns cannot be used in an ON CONFLICT clause")));
+ else if (attno == 0)
+ elog(ERROR, "whole row unique index inference specifications are not valid");
+
+ inferAttrs = bms_add_member(inferAttrs, attno);
+ }
+
+ /*
+ * Lookup named constraint's index. This is not immediately returned
+ * because some additional sanity checks are required.
+ */
+ if (onconflict->constraint != InvalidOid)
+ {
+ indexOidFromConstraint = get_constraint_index(onconflict->constraint);
+
+ if (indexOidFromConstraint == InvalidOid)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("constraint in ON CONFLICT clause has no associated index")));
+ }
+
+ indexList = RelationGetIndexList(relation);
+
+ /*
+ * Using that representation, iterate through the list of indexes on the
+ * target relation to try to find a match
+ */
+ foreach(l, indexList)
+ {
+ Oid indexoid = lfirst_oid(l);
+ Relation idxRel;
+ Form_pg_index idxForm;
+ Bitmapset *indexedAttrs = NULL;
+ List *idxExprs;
+ List *predExprs;
+ List *whereExplicit;
+ AttrNumber natt;
+ ListCell *el;
+
+ /*
+ * Extract info from the relation descriptor for the index. We know
+ * that this is a target, so acquire the lock type that the executor
+ * is known to ultimately require.
+ *
+ * Let executor complain about !indimmediate case directly, because
+ * enforcement needs to occur there anyway when an inference clause is
+ * omitted.
+ */
+ idxRel = index_open(indexoid, RowExclusiveLock);
+ idxForm = idxRel->rd_index;
+
+ if (!IndexIsValid(idxForm))
+ goto next;
+
+ /*
+ * If the index is valid, but cannot yet be used, ignore it. See
+ * src/backend/access/heap/README.HOT for discussion.
+ */
+ if (idxForm->indcheckxmin &&
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(idxRel->rd_indextuple->t_data),
+ TransactionXmin))
+ goto next;
+
+ /*
+ * Look for match on "ON constraint_name" variant, which may not be
+ * a unique constraint. This can only be a constraint name.
+ */
+ if (indexOidFromConstraint == idxForm->indexrelid)
+ {
+ if (!idxForm->indisunique && onconflict->action == ONCONFLICT_UPDATE)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("ON CONFLICT DO UPDATE not supported with exclusion constraints")));
+
+ list_free(indexList);
+ index_close(idxRel, NoLock);
+ heap_close(relation, NoLock);
+ candidates = lappend_oid(candidates, idxForm->indexrelid);
+ return candidates;
+ }
+ else if (indexOidFromConstraint != InvalidOid)
+ {
+ /* No point in further work for index in named constraint case */
+ goto next;
+ }
+
+ /*
+ * Only considering conventional inference at this point (not named
+ * constraints), so index under consideration can be immediately
+ * skipped if it's not unique
+ */
+ if (!idxForm->indisunique)
+ goto next;
+
+ /* Build BMS representation of cataloged index attributes */
+ for (natt = 0; natt < idxForm->indnatts; natt++)
+ {
+ int attno = idxRel->rd_index->indkey.values[natt];
+
+ if (attno < 0)
+ elog(ERROR, "system column in index");
+
+ if (attno != 0)
+ indexedAttrs = bms_add_member(indexedAttrs, attno);
+ }
+
+ /* Non-expression attributes (if any) must match */
+ if (!bms_equal(indexedAttrs, inferAttrs))
+ goto next;
+
+ /* Expression attributes (if any) must match */
+ idxExprs = RelationGetIndexExpressions(idxRel);
+ foreach(el, onconflict->arbiterElems)
+ {
+ InferenceElem *elem = (InferenceElem *) lfirst(el);
+
+ /*
+ * Ensure that collation/opclass aspects of inference expression
+ * element match. Even though this loop is primarily concerned
+ * with matching expressions, it is a convenient point to check
+ * this for both expressions and ordinary (non-expression)
+ * attributes appearing as inference elements.
+ */
+ if (!infer_collation_opclass_match(elem, idxRel, inferAttrs,
+ idxExprs))
+ goto next;
+
+ /*
+ * Plain Vars don't factor into count of expression elements, and
+ * the question of whether or not they satisfy the index
+ * definition has already been considered (they must).
+ */
+ if (IsA(elem->expr, Var))
+ continue;
+
+ /*
+ * Might as well avoid redundant check in the rare cases where
+ * infer_collation_opclass_match() is required to do real work.
+ * Otherwise, check that element expression appears in cataloged
+ * index definition.
+ */
+ if (elem->infercollid != InvalidOid ||
+ elem->inferopfamily != InvalidOid ||
+ list_member(idxExprs, elem->expr))
+ continue;
+
+ goto next;
+ }
+
+ /*
+ * Now that all inference elements were matched, ensure that the
+ * expression elements from inference clause are not missing any
+ * cataloged expressions. This does the right thing when unique
+ * indexes redundantly repeat the same attribute, or if attributes
+ * redundantly appear multiple times within an inference clause.
+ */
+ if (list_difference(idxExprs, inferElems) != NIL)
+ goto next;
+
+ /*
+ * Any user-supplied ON CONFLICT unique index inference WHERE clause
+ * need only be implied by the cataloged index definitions predicate.
+ */
+ predExprs = RelationGetIndexPredicate(idxRel);
+ whereExplicit = make_ands_implicit((Expr *) onconflict->arbiterWhere);
+
+ if (!predicate_implied_by(predExprs, whereExplicit))
+ goto next;
+
+ candidates = lappend_oid(candidates, idxForm->indexrelid);
+next:
+ index_close(idxRel, NoLock);
+ }
+
+ list_free(indexList);
+ heap_close(relation, NoLock);
+
+ if (candidates == NIL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+ errmsg("there is no unique or exclusion constraint matching the ON CONFLICT specification")));
+
+ return candidates;
+}
+
+/*
+ * infer_collation_opclass_match - ensure infer element opclass/collation match
+ *
+ * Given unique index inference element from inference specification, if
+ * collation was specified, or if opclass (represented here as opfamily +
+ * opcintype) was specified, verify that there is at least one matching
+ * indexed attribute (occasionally, there may be more). Skip this in the
+ * common case where inference specification does not include collation or
+ * opclass (instead matching everything, regardless of cataloged
+ * collation/opclass of indexed attribute).
+ *
+ * At least historically, Postgres has not offered collations or opclasses
+ * with alternative-to-default notions of equality, so these additional
+ * criteria should only be required infrequently.
+ *
+ * Don't give up immediately when an inference element matches some attribute
+ * cataloged as indexed but not matching additional opclass/collation
+ * criteria. This is done so that the implementation is as forgiving as
+ * possible of redundancy within cataloged index attributes (or, less
+ * usefully, within inference specification elements). If collations actually
+ * differ between apparently redundantly indexed attributes (redundant within
+ * or across indexes), then there really is no redundancy as such.
+ *
+ * Note that if an inference element specifies an opclass and a collation at
+ * once, both must match in at least one particular attribute within index
+ * catalog definition in order for that inference element to be considered
+ * inferred/satisfied.
+ */
+static bool
+infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+ Bitmapset *inferAttrs, List *idxExprs)
+{
+ AttrNumber natt;
+
+ /*
+ * If inference specification element lacks collation/opclass, then no
+ * need to check for exact match.
+ */
+ if (elem->infercollid == InvalidOid && elem->inferopfamily == InvalidOid)
+ return true;
+
+ for (natt = 1; natt <= idxRel->rd_att->natts; natt++)
+ {
+ Oid opfamily = idxRel->rd_opfamily[natt - 1];
+ Oid opcinputtype = idxRel->rd_opcintype[natt - 1];
+ Oid collation = idxRel->rd_indcollation[natt - 1];
+
+ if (elem->inferopfamily != InvalidOid &&
+ (elem->inferopfamily != opfamily ||
+ elem->inferopcinputtype != opcinputtype))
+ {
+ /* Attribute needed to match opclass, but didn't */
+ continue;
+ }
+
+ if (elem->infercollid != InvalidOid &&
+ elem->infercollid != collation)
+ {
+ /* Attribute needed to match collation, but didn't */
+ continue;
+ }
+
+ if ((IsA(elem->expr, Var) &&
+ bms_is_member(((Var *) elem->expr)->varattno, inferAttrs)) ||
+ list_member(idxExprs, elem->expr))
+ {
+ /* Found one match - good enough */
+ return true;
+ }
+ }
+
+ return false;
+}
+
/*
* estimate_rel_size - estimate # pages and # tuples in a table or index
*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 2d320d100b..3eb4feabfd 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -52,6 +52,8 @@ static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt);
static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt);
static List *transformInsertRow(ParseState *pstate, List *exprlist,
List *stmtcols, List *icolumns, List *attrnos);
+static OnConflictExpr *transformOnConflictClause(ParseState *pstate,
+ OnConflictClause *onConflictClause);
static int count_rowexpr_columns(ParseState *pstate, Node *expr);
static Query *transformSelectStmt(ParseState *pstate, SelectStmt *stmt);
static Query *transformValuesClause(ParseState *pstate, SelectStmt *stmt);
@@ -62,6 +64,8 @@ static void determineRecursiveColTypes(ParseState *pstate,
Node *larg, List *nrtargetlist);
static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt);
static List *transformReturningList(ParseState *pstate, List *returningList);
+static List *transformUpdateTargetList(ParseState *pstate,
+ List *targetList);
static Query *transformDeclareCursorStmt(ParseState *pstate,
DeclareCursorStmt *stmt);
static Query *transformExplainStmt(ParseState *pstate,
@@ -419,6 +423,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
ListCell *icols;
ListCell *attnos;
ListCell *lc;
+ bool isOnConflictUpdate;
+ AclMode targetPerms;
/* There can't be any outer WITH to worry about */
Assert(pstate->p_ctenamespace == NIL);
@@ -434,6 +440,9 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
qry->hasModifyingCTE = pstate->p_hasModifyingCTE;
}
+ isOnConflictUpdate = (stmt->onConflictClause &&
+ stmt->onConflictClause->action == ONCONFLICT_UPDATE);
+
/*
* We have three cases to deal with: DEFAULT VALUES (selectStmt == NULL),
* VALUES list, or general SELECT input. We special-case VALUES, both for
@@ -478,8 +487,11 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* mentioned in the SELECT part. Note that the target table is not added
* to the joinlist or namespace.
*/
+ targetPerms = ACL_INSERT;
+ if (isOnConflictUpdate)
+ targetPerms |= ACL_UPDATE;
qry->resultRelation = setTargetTable(pstate, stmt->relation,
- false, false, ACL_INSERT);
+ false, false, targetPerms);
/* Validate stmt->cols list, or build default list if no list given */
icolumns = checkInsertTargets(pstate, stmt->cols, &attrnos);
@@ -740,6 +752,11 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
attnos = lnext(attnos);
}
+ /* Process ON CONFLICT, if any. */
+ if (stmt->onConflictClause)
+ qry->onConflict = transformOnConflictClause(pstate,
+ stmt->onConflictClause);
+
/*
* If we have a RETURNING clause, we need to add the target relation to
* the query namespace before processing it, so that Var references in
@@ -849,6 +866,85 @@ transformInsertRow(ParseState *pstate, List *exprlist,
return result;
}
+/*
+ * transformOnConflictClause -
+ * transforms an OnConflictClause in an INSERT
+ */
+static OnConflictExpr *
+transformOnConflictClause(ParseState *pstate,
+ OnConflictClause *onConflictClause)
+{
+ List *arbiterElems;
+ Node *arbiterWhere;
+ Oid arbiterConstraint;
+ List *onConflictSet = NIL;
+ Node *onConflictWhere = NULL;
+ RangeTblEntry *exclRte = NULL;
+ int exclRelIndex = 0;
+ List *exclRelTlist = NIL;
+ OnConflictExpr *result;
+
+ /* Process the arbiter clause, ON CONFLICT ON (...) */
+ transformOnConflictArbiter(pstate, onConflictClause, &arbiterElems,
+ &arbiterWhere, &arbiterConstraint);
+
+ /* Process DO UPDATE */
+ if (onConflictClause->action == ONCONFLICT_UPDATE)
+ {
+ exclRte = addRangeTableEntryForRelation(pstate,
+ pstate->p_target_relation,
+ makeAlias("excluded", NIL),
+ false, false);
+ exclRelIndex = list_length(pstate->p_rtable);
+
+ /*
+ * Build a targetlist for the EXCLUDED pseudo relation. Out of
+ * simplicity we do that here, because expandRelAttrs() happens to
+ * nearly do the right thing; specifically it also works with views.
+ * It'd be more proper to instead scan some pseudo scan node, but it
+ * doesn't seem worth the amount of code required.
+ *
+ * The only caveat of this hack is that the permissions expandRelAttrs
+ * adds have to be reset. markVarForSelectPriv() will add the exact
+ * required permissions back.
+ */
+ exclRelTlist = expandRelAttrs(pstate, exclRte,
+ exclRelIndex, 0, -1);
+ exclRte->requiredPerms = 0;
+ exclRte->selectedCols = NULL;
+
+ /*
+ * Add EXCLUDED and the target RTE to the namespace, so that they can
+ * be used in the UPDATE statement.
+ */
+ addRTEtoQuery(pstate, exclRte, false, true, true);
+ addRTEtoQuery(pstate, pstate->p_target_rangetblentry,
+ false, true, true);
+
+ onConflictSet =
+ transformUpdateTargetList(pstate, onConflictClause->targetList);
+
+ onConflictWhere = transformWhereClause(pstate,
+ onConflictClause->whereClause,
+ EXPR_KIND_WHERE, "WHERE");
+ }
+
+ /* Finally, build ON CONFLICT DO [NOTHING | UPDATE] expression */
+ result = makeNode(OnConflictExpr);
+
+ result->action = onConflictClause->action;
+ result->arbiterElems = arbiterElems;
+ result->arbiterWhere = arbiterWhere;
+ result->constraint = arbiterConstraint;
+ result->onConflictSet = onConflictSet;
+ result->onConflictWhere = onConflictWhere;
+ result->exclRelIndex = exclRelIndex;
+ result->exclRelTlist = exclRelTlist;
+
+ return result;
+}
+
+
/*
* count_rowexpr_columns -
* get number of columns contained in a ROW() expression;
@@ -1899,10 +1995,7 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
{
Query *qry = makeNode(Query);
ParseNamespaceItem *nsitem;
- RangeTblEntry *target_rte;
Node *qual;
- ListCell *origTargetList;
- ListCell *tl;
qry->commandType = CMD_UPDATE;
pstate->p_is_update = true;
@@ -1937,23 +2030,41 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
nsitem->p_lateral_only = false;
nsitem->p_lateral_ok = true;
- qry->targetList = transformTargetList(pstate, stmt->targetList,
- EXPR_KIND_UPDATE_SOURCE);
-
qual = transformWhereClause(pstate, stmt->whereClause,
EXPR_KIND_WHERE, "WHERE");
qry->returningList = transformReturningList(pstate, stmt->returningList);
+ /*
+ * Now we are done with SELECT-like processing, and can get on with
+ * transforming the target list to match the UPDATE target columns.
+ */
+ qry->targetList = transformUpdateTargetList(pstate, stmt->targetList);
+
qry->rtable = pstate->p_rtable;
qry->jointree = makeFromExpr(pstate->p_joinlist, qual);
qry->hasSubLinks = pstate->p_hasSubLinks;
- /*
- * Now we are done with SELECT-like processing, and can get on with
- * transforming the target list to match the UPDATE target columns.
- */
+ assign_query_collations(pstate, qry);
+
+ return qry;
+}
+
+/*
+ * transformUpdateTargetList -
+ * handle SET clause in UPDATE/INSERT ... ON CONFLICT UPDATE
+ */
+static List *
+transformUpdateTargetList(ParseState *pstate, List *origTlist)
+{
+ List *tlist = NIL;
+ RangeTblEntry *target_rte;
+ ListCell *orig_tl;
+ ListCell *tl;
+
+ tlist = transformTargetList(pstate, origTlist,
+ EXPR_KIND_UPDATE_SOURCE);
/* Prepare to assign non-conflicting resnos to resjunk attributes */
if (pstate->p_next_resno <= pstate->p_target_relation->rd_rel->relnatts)
@@ -1961,9 +2072,9 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
/* Prepare non-junk columns for assignment to target table */
target_rte = pstate->p_target_rangetblentry;
- origTargetList = list_head(stmt->targetList);
+ orig_tl = list_head(origTlist);
- foreach(tl, qry->targetList)
+ foreach(tl, tlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
ResTarget *origTarget;
@@ -1981,9 +2092,9 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
tle->resname = NULL;
continue;
}
- if (origTargetList == NULL)
+ if (orig_tl == NULL)
elog(ERROR, "UPDATE target count mismatch --- internal error");
- origTarget = (ResTarget *) lfirst(origTargetList);
+ origTarget = (ResTarget *) lfirst(orig_tl);
Assert(IsA(origTarget, ResTarget));
attrno = attnameAttNum(pstate->p_target_relation,
@@ -2005,14 +2116,12 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
target_rte->updatedCols = bms_add_member(target_rte->updatedCols,
attrno - FirstLowInvalidHeapAttributeNumber);
- origTargetList = lnext(origTargetList);
+ orig_tl = lnext(orig_tl);
}
- if (origTargetList != NULL)
+ if (orig_tl != NULL)
elog(ERROR, "UPDATE target count mismatch --- internal error");
- assign_query_collations(pstate, qry);
-
- return qry;
+ return tlist;
}
/*
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 0180530a30..7a4c07365c 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -217,6 +217,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
RangeVar *range;
IntoClause *into;
WithClause *with;
+ InferClause *infer;
+ OnConflictClause *onconflict;
A_Indices *aind;
ResTarget *target;
struct PrivTarget *privtarget;
@@ -318,7 +320,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
opt_class opt_inline_handler opt_validator validator_clause
opt_collate
-%type qualified_name OptConstrFromTable
+%type qualified_name insert_target OptConstrFromTable
%type all_Op MathOp
@@ -344,7 +346,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
OptTableElementList TableElementList OptInherit definition
OptTypedTableElementList TypedTableElementList
reloptions opt_reloptions
- OptWith opt_distinct opt_definition func_args func_args_list
+ OptWith distinct_clause opt_all_clause opt_definition func_args func_args_list
func_args_with_defaults func_args_with_defaults_list
aggr_args aggr_args_list
func_as createfunc_opt_list alterfunc_opt_list
@@ -389,7 +391,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type for_locking_item
%type for_locking_clause opt_for_locking_clause for_locking_items
%type locked_rels_list
-%type opt_all
+%type all_or_distinct
%type join_outer join_qual
%type join_type
@@ -418,6 +420,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type SeqOptElem
%type insert_rest
+%type opt_conf_expr
+%type opt_on_conflict
%type generic_set set_rest set_rest_more generic_reset reset_rest
SetResetClause FunctionSetResetClause
@@ -557,8 +561,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
CACHE CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P
CHARACTER CHARACTERISTICS CHECK CHECKPOINT CLASS CLOSE
CLUSTER COALESCE COLLATE COLLATION COLUMN COMMENT COMMENTS COMMIT
- COMMITTED CONCURRENTLY CONFIGURATION CONNECTION CONSTRAINT CONSTRAINTS
- CONTENT_P CONTINUE_P CONVERSION_P COPY COST CREATE
+ COMMITTED CONCURRENTLY CONFIGURATION CONFLICT CONNECTION CONSTRAINT
+ CONSTRAINTS CONTENT_P CONTINUE_P CONVERSION_P COPY COST CREATE
CROSS CSV CURRENT_P
CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA
CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR CYCLE
@@ -9436,15 +9440,35 @@ DeallocateStmt: DEALLOCATE name
*****************************************************************************/
InsertStmt:
- opt_with_clause INSERT INTO qualified_name insert_rest returning_clause
+ opt_with_clause INSERT INTO insert_target insert_rest
+ opt_on_conflict returning_clause
{
$5->relation = $4;
- $5->returningList = $6;
+ $5->onConflictClause = $6;
+ $5->returningList = $7;
$5->withClause = $1;
$$ = (Node *) $5;
}
;
+/*
+ * Can't easily make AS optional here, because VALUES in insert_rest would
+ * have a shift/reduce conflict with VALUES as an optional alias. We could
+ * easily allow unreserved_keywords as optional aliases, but that'd be an odd
+ * divergence from other places. So just require AS for now.
+ */
+insert_target:
+ qualified_name
+ {
+ $$ = $1;
+ }
+ | qualified_name AS ColId
+ {
+ $1->alias = makeAlias($3, NIL);
+ $$ = $1;
+ }
+ ;
+
insert_rest:
SelectStmt
{
@@ -9484,6 +9508,56 @@ insert_column_item:
}
;
+opt_on_conflict:
+ ON CONFLICT opt_conf_expr DO UPDATE SET set_clause_list where_clause
+ {
+ $$ = makeNode(OnConflictClause);
+ $$->action = ONCONFLICT_UPDATE;
+ $$->infer = $3;
+ $$->targetList = $7;
+ $$->whereClause = $8;
+ $$->location = @1;
+ }
+ |
+ ON CONFLICT opt_conf_expr DO NOTHING
+ {
+ $$ = makeNode(OnConflictClause);
+ $$->action = ONCONFLICT_NOTHING;
+ $$->infer = $3;
+ $$->targetList = NIL;
+ $$->whereClause = NULL;
+ $$->location = @1;
+ }
+ | /*EMPTY*/
+ {
+ $$ = NULL;
+ }
+ ;
+
+opt_conf_expr:
+ '(' index_params ')' where_clause
+ {
+ $$ = makeNode(InferClause);
+ $$->indexElems = $2;
+ $$->whereClause = $4;
+ $$->conname = NULL;
+ $$->location = @1;
+ }
+ |
+ ON CONSTRAINT name
+ {
+ $$ = makeNode(InferClause);
+ $$->indexElems = NIL;
+ $$->whereClause = NULL;
+ $$->conname = $3;
+ $$->location = @1;
+ }
+ | /*EMPTY*/
+ {
+ $$ = NULL;
+ }
+ ;
+
returning_clause:
RETURNING target_list { $$ = $2; }
| /* EMPTY */ { $$ = NIL; }
@@ -9870,7 +9944,21 @@ select_clause:
* However, this is not checked by the grammar; parse analysis must check it.
*/
simple_select:
- SELECT opt_distinct opt_target_list
+ SELECT opt_all_clause opt_target_list
+ into_clause from_clause where_clause
+ group_clause having_clause window_clause
+ {
+ SelectStmt *n = makeNode(SelectStmt);
+ n->targetList = $3;
+ n->intoClause = $4;
+ n->fromClause = $5;
+ n->whereClause = $6;
+ n->groupClause = $7;
+ n->havingClause = $8;
+ n->windowClause = $9;
+ $$ = (Node *)n;
+ }
+ | SELECT distinct_clause target_list
into_clause from_clause where_clause
group_clause having_clause window_clause
{
@@ -9905,15 +9993,15 @@ simple_select:
n->fromClause = list_make1($2);
$$ = (Node *)n;
}
- | select_clause UNION opt_all select_clause
+ | select_clause UNION all_or_distinct select_clause
{
$$ = makeSetOp(SETOP_UNION, $3, $1, $4);
}
- | select_clause INTERSECT opt_all select_clause
+ | select_clause INTERSECT all_or_distinct select_clause
{
$$ = makeSetOp(SETOP_INTERSECT, $3, $1, $4);
}
- | select_clause EXCEPT opt_all select_clause
+ | select_clause EXCEPT all_or_distinct select_clause
{
$$ = makeSetOp(SETOP_EXCEPT, $3, $1, $4);
}
@@ -10052,7 +10140,8 @@ opt_table: TABLE {}
| /*EMPTY*/ {}
;
-opt_all: ALL { $$ = TRUE; }
+all_or_distinct:
+ ALL { $$ = TRUE; }
| DISTINCT { $$ = FALSE; }
| /*EMPTY*/ { $$ = FALSE; }
;
@@ -10060,10 +10149,13 @@ opt_all: ALL { $$ = TRUE; }
/* We use (NIL) as a placeholder to indicate that all target expressions
* should be placed in the DISTINCT list during parsetree analysis.
*/
-opt_distinct:
+distinct_clause:
DISTINCT { $$ = list_make1(NIL); }
| DISTINCT ON '(' expr_list ')' { $$ = $4; }
- | ALL { $$ = NIL; }
+ ;
+
+opt_all_clause:
+ ALL { $$ = NIL;}
| /*EMPTY*/ { $$ = NIL; }
;
@@ -13367,6 +13459,7 @@ unreserved_keyword:
| COMMIT
| COMMITTED
| CONFIGURATION
+ | CONFLICT
| CONNECTION
| CONSTRAINTS
| CONTENT_P
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 8d90b5098a..73c505ed85 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -16,7 +16,9 @@
#include "postgres.h"
#include "access/heapam.h"
+#include "catalog/catalog.h"
#include "catalog/heap.h"
+#include "catalog/pg_constraint.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "nodes/makefuncs.h"
@@ -32,6 +34,7 @@
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "parser/parse_target.h"
+#include "parser/parse_type.h"
#include "rewrite/rewriteManip.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
@@ -75,6 +78,8 @@ static TargetEntry *findTargetlistEntrySQL99(ParseState *pstate, Node *node,
List **tlist, ParseExprKind exprKind);
static int get_matching_location(int sortgroupref,
List *sortgrouprefs, List *exprs);
+static List *resolve_unique_index_expr(ParseState *pstate, InferClause * infer,
+ Relation heapRel);
static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle,
List *grouplist, List *targetlist, int location,
bool resolveUnknown);
@@ -2166,6 +2171,204 @@ get_matching_location(int sortgroupref, List *sortgrouprefs, List *exprs)
return -1; /* keep compiler quiet */
}
+/*
+ * resolve_unique_index_expr
+ * Infer a unique index from a list of indexElems, for ON
+ * CONFLICT clause
+ *
+ * Perform parse analysis of expressions and columns appearing within ON
+ * CONFLICT clause. During planning, the returned list of expressions is used
+ * to infer which unique index to use.
+ */
+static List *
+resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
+ Relation heapRel)
+{
+ List *result = NIL;
+ ListCell *l;
+
+ foreach(l, infer->indexElems)
+ {
+ IndexElem *ielem = (IndexElem *) lfirst(l);
+ InferenceElem *pInfer = makeNode(InferenceElem);
+ Node *parse;
+
+ /*
+ * Raw grammar re-uses CREATE INDEX infrastructure for unique index
+ * inference clause, and so will accept opclasses by name and so on.
+ *
+ * Make no attempt to match ASC or DESC ordering or NULLS FIRST/NULLS
+ * LAST ordering, since those are not significant for inference
+ * purposes (any unique index matching the inference specification in
+ * other regards is accepted indifferently). Actively reject this as
+ * wrong-headed.
+ */
+ if (ielem->ordering != SORTBY_DEFAULT)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+ errmsg("ASC/DESC is not allowed in ON CONFLICT clause"),
+ parser_errposition(pstate,
+ exprLocation((Node *) infer))));
+ if (ielem->nulls_ordering != SORTBY_NULLS_DEFAULT)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
+ errmsg("NULLS FIRST/LAST is not allowed in ON CONFLICT clause"),
+ parser_errposition(pstate,
+ exprLocation((Node *) infer))));
+
+ if (!ielem->expr)
+ {
+ /* Simple index attribute */
+ ColumnRef *n;
+
+ /*
+ * Grammar won't have built raw expression for us in event of
+ * plain column reference. Create one directly, and perform
+ * expression transformation. Planner expects this, and performs
+ * its own normalization for the purposes of matching against
+ * pg_index.
+ */
+ n = makeNode(ColumnRef);
+ n->fields = list_make1(makeString(ielem->name));
+ /* Location is approximately that of inference specification */
+ n->location = infer->location;
+ parse = (Node *) n;
+ }
+ else
+ {
+ /* Do parse transformation of the raw expression */
+ parse = (Node *) ielem->expr;
+ }
+
+ /*
+ * transformExpr() should have already rejected subqueries,
+ * aggregates, and window functions, based on the EXPR_KIND_ for an
+ * index expression. Expressions returning sets won't have been
+ * rejected, but don't bother doing so here; there should be no
+ * available expression unique index to match any such expression
+ * against anyway.
+ */
+ pInfer->expr = transformExpr(pstate, parse, EXPR_KIND_INDEX_EXPRESSION);
+
+ /* Perform lookup of collation and operator class as required */
+ if (!ielem->collation)
+ pInfer->infercollid = InvalidOid;
+ else
+ pInfer->infercollid = LookupCollation(pstate, ielem->collation,
+ exprLocation(pInfer->expr));
+
+ if (!ielem->opclass)
+ {
+ pInfer->inferopfamily = InvalidOid;
+ pInfer->inferopcinputtype = InvalidOid;
+ }
+ else
+ {
+ Oid opclass = get_opclass_oid(BTREE_AM_OID, ielem->opclass,
+ false);
+
+ pInfer->inferopfamily = get_opclass_family(opclass);
+ pInfer->inferopcinputtype = get_opclass_input_type(opclass);
+ }
+
+ result = lappend(result, pInfer);
+ }
+
+ return result;
+}
+
+/*
+ * transformOnConflictArbiter -
+ * transform arbiter expressions in an ON CONFLICT clause.
+ *
+ * Transformed expressions are used to infer one unique index relation to
+ * serve as an ON CONFLICT arbiter. Partial unique indexes may be inferred
+ * clause from inference specification clause.
+ */
+void
+transformOnConflictArbiter(ParseState *pstate,
+ OnConflictClause *onConflictClause,
+ List **arbiterExpr, Node **arbiterWhere,
+ Oid *constraint)
+{
+ InferClause *infer = onConflictClause->infer;
+
+ *arbiterExpr = NIL;
+ *arbiterWhere = NULL;
+ *constraint = InvalidOid;
+
+ if (onConflictClause->action == ONCONFLICT_UPDATE && !infer)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("ON CONFLICT DO UPDATE requires inference specification or constraint name"),
+ errhint("For example, ON CONFLICT (column_name)."),
+ parser_errposition(pstate,
+ exprLocation((Node *) onConflictClause))));
+
+ /*
+ * To simplify certain aspects of its design, speculative insertion into
+ * system catalogs is disallowed
+ */
+ if (IsCatalogRelation(pstate->p_target_relation))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ON CONFLICT not supported with system catalog tables"),
+ parser_errposition(pstate,
+ exprLocation((Node *) onConflictClause))));
+
+ /* Same applies to table used by logical decoding as catalog table */
+ if (RelationIsUsedAsCatalogTable(pstate->p_target_relation))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("ON CONFLICT not supported on table \"%s\" used as a catalog table",
+ RelationGetRelationName(pstate->p_target_relation)),
+ parser_errposition(pstate,
+ exprLocation((Node *) onConflictClause))));
+
+ /* ON CONFLICT DO NOTHING does not require an inference clause */
+ if (infer)
+ {
+ List *save_namespace;
+
+ /*
+ * While we process the arbiter expressions, accept only
+ * non-qualified references to the target table. Hide any other
+ * relations.
+ */
+ save_namespace = pstate->p_namespace;
+ pstate->p_namespace = NIL;
+ addRTEtoQuery(pstate, pstate->p_target_rangetblentry,
+ false, false, true);
+
+ if (infer->indexElems)
+ *arbiterExpr = resolve_unique_index_expr(pstate, infer,
+ pstate->p_target_relation);
+
+ /*
+ * Handling inference WHERE clause (for partial unique index
+ * inference)
+ */
+ if (infer->whereClause)
+ *arbiterWhere = transformExpr(pstate, infer->whereClause,
+ EXPR_KIND_INDEX_PREDICATE);
+
+ pstate->p_namespace = save_namespace;
+
+ if (infer->conname)
+ *constraint = get_relation_constraint_oid(RelationGetRelid(pstate->p_target_relation),
+ infer->conname, false);
+ }
+
+ /*
+ * It's convenient to form a list of expressions based on the
+ * representation used by CREATE INDEX, since the same restrictions are
+ * appropriate (e.g. on subqueries). However, from here on, a dedicated
+ * primnode representation is used for inference elements, and so
+ * assign_query_collations() can be trusted to do the right thing with the
+ * post parse analysis query tree inference clause representation.
+ */
+}
+
/*
* addTargetToSortList
* If the given targetlist entry isn't already in the SortGroupClause
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
index 7c6a11c757..4c85b708d3 100644
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -479,9 +479,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
parser_errposition(context->pstate,
loccontext.location2)));
break;
+ case T_InferenceElem:
case T_RangeTblRef:
case T_JoinExpr:
case T_FromExpr:
+ case T_OnConflictExpr:
case T_SortGroupClause:
(void) expression_tree_walker(node,
assign_collations_walker,
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 2d85cf08e7..59973ba9c3 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -537,11 +537,12 @@ transformAssignedExpr(ParseState *pstate,
/*
* updateTargetListEntry()
- * This is used in UPDATE statements only. It prepares an UPDATE
- * TargetEntry for assignment to a column of the target table.
- * This includes coercing the given value to the target column's type
- * (if necessary), and dealing with any subfield names or subscripts
- * attached to the target column itself.
+ * This is used in UPDATE statements (and ON CONFLICT DO UPDATE)
+ * only. It prepares an UPDATE TargetEntry for assignment to a
+ * column of the target table. This includes coercing the given
+ * value to the target column's type (if necessary), and dealing with
+ * any subfield names or subscripts attached to the target column
+ * itself.
*
* pstate parse state
* tle target list entry to be modified
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 88424964ef..ea38818269 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -64,6 +64,8 @@ static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+static void DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
+
static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
xl_xact_parsed_commit *parsed, TransactionId xid);
static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
@@ -414,6 +416,11 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
break;
+ case XLOG_HEAP_CONFIRM:
+ if (SnapBuildProcessChange(builder, xid, buf->origptr))
+ DecodeSpecConfirm(ctx, buf);
+ break;
+
case XLOG_HEAP_LOCK:
/* we don't care about row level locks for now */
break;
@@ -564,11 +571,15 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
return;
change = ReorderBufferGetChange(ctx->reorder);
- change->action = REORDER_BUFFER_CHANGE_INSERT;
+ if (!(xlrec->flags & XLH_INSERT_IS_SPECULATIVE))
+ change->action = REORDER_BUFFER_CHANGE_INSERT;
+ else
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT;
change->origin_id = XLogRecGetOrigin(r);
+
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
- if (xlrec->flags & XLOG_HEAP_CONTAINS_NEW_TUPLE)
+ if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE)
{
Size tuplelen;
char *tupledata = XLogRecGetBlockData(r, 0, &tuplelen);
@@ -615,7 +626,7 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
- if (xlrec->flags & XLOG_HEAP_CONTAINS_NEW_TUPLE)
+ if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE)
{
data = XLogRecGetBlockData(r, 0, &datalen);
@@ -624,7 +635,7 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
DecodeXLogTuple(data, datalen, change->data.tp.newtuple);
}
- if (xlrec->flags & XLOG_HEAP_CONTAINS_OLD)
+ if (xlrec->flags & XLH_UPDATE_CONTAINS_OLD)
{
/* caution, remaining data in record is not aligned */
data = XLogRecGetData(r) + SizeOfHeapUpdate;
@@ -660,6 +671,13 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
if (target_node.dbNode != ctx->slot->data.database)
return;
+ /*
+ * Super deletions are irrelevant for logical decoding, it's driven by the
+ * confirmation records.
+ */
+ if (xlrec->flags & XLH_DELETE_IS_SUPER)
+ return;
+
/* output plugin doesn't look for this origin, no need to queue */
if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
return;
@@ -671,7 +689,7 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
/* old primary key stored */
- if (xlrec->flags & XLOG_HEAP_CONTAINS_OLD)
+ if (xlrec->flags & XLH_DELETE_CONTAINS_OLD)
{
Assert(XLogRecGetDataLen(r) > (SizeOfHeapDelete + SizeOfHeapHeader));
@@ -737,7 +755,7 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* We decode the tuple in pretty much the same way as DecodeXLogTuple,
* but since the layout is slightly different, we can't use it here.
*/
- if (xlrec->flags & XLOG_HEAP_CONTAINS_NEW_TUPLE)
+ if (xlrec->flags & XLH_INSERT_CONTAINS_NEW_TUPLE)
{
change->data.tp.newtuple = ReorderBufferGetTupleBuf(ctx->reorder);
@@ -775,7 +793,7 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
* xl_multi_insert_tuple record emitted by one heap_multi_insert()
* call.
*/
- if (xlrec->flags & XLOG_HEAP_LAST_MULTI_INSERT &&
+ if (xlrec->flags & XLH_INSERT_LAST_IN_MULTI &&
(i + 1) == xlrec->ntuples)
change->data.tp.clear_toast_afterwards = true;
else
@@ -787,6 +805,40 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
Assert(data == tupledata + tuplelen);
}
+/*
+ * Parse XLOG_HEAP_CONFIRM from wal into a confirmation change.
+ *
+ * This is pretty trivial, all the state is essentially already set up by
+ * the speculative insertion.
+ */
+static void
+DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
+{
+ XLogReaderState *r = buf->record;
+ ReorderBufferChange *change;
+ RelFileNode target_node;
+
+ /* only interested in our database */
+ XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
+ if (target_node.dbNode != ctx->slot->data.database)
+ return;
+
+ /* output plugin doesn't look for this origin, no need to queue */
+ if (FilterByOrigin(ctx, XLogRecGetOrigin(r)))
+ return;
+
+ change = ReorderBufferGetChange(ctx->reorder);
+ change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM;
+ change->origin_id = XLogRecGetOrigin(r);
+
+ memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+
+ change->data.tp.clear_toast_afterwards = true;
+
+ ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change);
+}
+
+
/*
* Read a HeapTuple as WAL logged by heap_insert, heap_update and heap_delete
* (but not by heap_multi_insert) into a tuplebuf.
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index c9c1d1036e..57854b0aa5 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -401,6 +401,7 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
case REORDER_BUFFER_CHANGE_INSERT:
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
if (change->data.tp.newtuple)
{
ReorderBufferReturnTupleBuf(rb, change->data.tp.newtuple);
@@ -420,8 +421,9 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change)
change->data.snapshot = NULL;
}
break;
+ /* no data in addition to the struct itself */
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
- break;
case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
break;
}
@@ -1317,6 +1319,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
PG_TRY();
{
ReorderBufferChange *change;
+ ReorderBufferChange *specinsert = NULL;
if (using_subtxn)
BeginInternalSubTransaction("replay");
@@ -1333,6 +1336,17 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
switch (change->action)
{
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+ /*
+ * Confirmation for speculative insertion arrived. Simply
+ * use as a normal record. It'll be cleaned up at the end
+ * of INSERT processing.
+ */
+ Assert(specinsert->data.tp.oldtuple == NULL);
+ change = specinsert;
+ change->action = REORDER_BUFFER_CHANGE_INSERT;
+
+ /* intentionally fall through */
case REORDER_BUFFER_CHANGE_INSERT:
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_DELETE:
@@ -1348,7 +1362,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
if (reloid == InvalidOid &&
change->data.tp.newtuple == NULL &&
change->data.tp.oldtuple == NULL)
- continue;
+ goto change_done;
else if (reloid == InvalidOid)
elog(ERROR, "could not map filenode \"%s\" to relation OID",
relpathperm(change->data.tp.relnode,
@@ -1362,50 +1376,92 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
relpathperm(change->data.tp.relnode,
MAIN_FORKNUM));
- if (RelationIsLogicallyLogged(relation))
+ if (!RelationIsLogicallyLogged(relation))
+ goto change_done;
+
+ /*
+ * For now ignore sequence changes entirely. Most of
+ * the time they don't log changes using records we
+ * understand, so it doesn't make sense to handle the
+ * few cases we do.
+ */
+ if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
+ goto change_done;
+
+ /* user-triggered change */
+ if (!IsToastRelation(relation))
{
+ ReorderBufferToastReplace(rb, txn, relation, change);
+ rb->apply_change(rb, txn, relation, change);
+
/*
- * For now ignore sequence changes entirely. Most of
- * the time they don't log changes using records we
- * understand, so it doesn't make sense to handle the
- * few cases we do.
+ * Only clear reassembled toast chunks if we're
+ * sure they're not required anymore. The creator
+ * of the tuple tells us.
*/
- if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
- {
- }
- /* user-triggered change */
- else if (!IsToastRelation(relation))
- {
- ReorderBufferToastReplace(rb, txn, relation, change);
- rb->apply_change(rb, txn, relation, change);
-
- /*
- * Only clear reassembled toast chunks if we're
- * sure they're not required anymore. The creator
- * of the tuple tells us.
- */
- if (change->data.tp.clear_toast_afterwards)
- ReorderBufferToastReset(rb, txn);
- }
- /* we're not interested in toast deletions */
- else if (change->action == REORDER_BUFFER_CHANGE_INSERT)
- {
- /*
- * Need to reassemble the full toasted Datum in
- * memory, to ensure the chunks don't get reused
- * till we're done remove it from the list of this
- * transaction's changes. Otherwise it will get
- * freed/reused while restoring spooled data from
- * disk.
- */
- dlist_delete(&change->node);
- ReorderBufferToastAppendChunk(rb, txn, relation,
- change);
- }
+ if (change->data.tp.clear_toast_afterwards)
+ ReorderBufferToastReset(rb, txn);
+ }
+ /* we're not interested in toast deletions */
+ else if (change->action == REORDER_BUFFER_CHANGE_INSERT)
+ {
+ /*
+ * Need to reassemble the full toasted Datum in
+ * memory, to ensure the chunks don't get reused till
+ * we're done; remove it from the list of this
+ * transaction's changes. Otherwise it will get
+ * freed/reused while restoring spooled data from
+ * disk.
+ */
+ dlist_delete(&change->node);
+ ReorderBufferToastAppendChunk(rb, txn, relation,
+ change);
+ }
+
+ change_done:
+ /*
+ * Either speculative insertion was confirmed, or it was
+ * unsuccessful and the record isn't needed anymore.
+ */
+ if (specinsert != NULL)
+ {
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+ if (relation != NULL)
+ {
+ RelationClose(relation);
+ relation = NULL;
}
- RelationClose(relation);
break;
+
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+ /*
+ * Speculative insertions are dealt with by delaying the
+ * processing of the insert until the confirmation record
+ * arrives. For that we simply unlink the record from the
+ * chain, so it does not get freed/reused while restoring
+ * spooled data from disk.
+ *
+ * This is safe in the face of concurrent catalog changes
+ * because the relevant relation can't be changed between
+ * speculative insertion and confirmation due to
+ * CheckTableNotInUse() and locking.
+ */
+
+ /* clear out a pending (and thus failed) speculation */
+ if (specinsert != NULL)
+ {
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+
+ /* and memorize the pending insertion */
+ dlist_delete(&change->node);
+ specinsert = change;
+ break;
+
case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT:
/* get rid of the old */
TeardownHistoricSnapshot(false);
@@ -1474,6 +1530,17 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
}
}
+ /*
+ * There's a speculative insertion remaining; just clean it up, as it
+ * can't have been successful, otherwise we'd have gotten a confirmation
+ * record.
+ */
+ if (specinsert)
+ {
+ ReorderBufferReturnChange(rb, specinsert);
+ specinsert = NULL;
+ }
+
/* clean up the iterator */
ReorderBufferIterTXNFinish(rb, iterstate);
iterstate = NULL;
@@ -2001,11 +2068,11 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
switch (change->action)
{
+ /* fall through these, they're all similar enough */
case REORDER_BUFFER_CHANGE_INSERT:
- /* fall through */
case REORDER_BUFFER_CHANGE_UPDATE:
- /* fall through */
case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
{
char *data;
ReorderBufferTupleBuf *oldtup,
@@ -2083,9 +2150,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
}
break;
}
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
- /* ReorderBufferChange contains everything important */
- break;
case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
/* ReorderBufferChange contains everything important */
break;
@@ -2256,11 +2322,11 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
/* restore individual stuff */
switch (change->action)
{
+ /* fall through these, they're all similar enough */
case REORDER_BUFFER_CHANGE_INSERT:
- /* fall through */
case REORDER_BUFFER_CHANGE_UPDATE:
- /* fall through */
case REORDER_BUFFER_CHANGE_DELETE:
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
if (change->data.tp.newtuple)
{
Size len = offsetof(ReorderBufferTupleBuf, t_data) +
@@ -2309,6 +2375,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
break;
}
/* the base struct contains all the data, easy peasy */
+ case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID:
case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID:
break;
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 0fc47cb786..39302a410b 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -52,7 +52,10 @@ static Query *rewriteRuleAction(Query *parsetree,
CmdType event,
bool *returning_flag);
static List *adjustJoinTreeList(Query *parsetree, bool removert, int rt_index);
-static void rewriteTargetListIU(Query *parsetree, Relation target_relation,
+static List *rewriteTargetListIU(List *targetList,
+ CmdType commandType,
+ Relation target_relation,
+ int result_rti,
List **attrno_list);
static TargetEntry *process_matched_tle(TargetEntry *src_tle,
TargetEntry *prior_tle,
@@ -66,7 +69,7 @@ static void markQueryForLocking(Query *qry, Node *jtnode,
LockClauseStrength strength, LockWaitPolicy waitPolicy,
bool pushedDown);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
- int varno, Query *parsetree);
+ int varno, Query *parsetree, bool *hasUpdate);
static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
bool forUpdatePushedDown);
static bool view_has_instead_trigger(Relation view, CmdType event);
@@ -679,11 +682,13 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* order of the original tlist's non-junk entries. This is needed for
* processing VALUES RTEs.
*/
-static void
-rewriteTargetListIU(Query *parsetree, Relation target_relation,
+static List*
+rewriteTargetListIU(List *targetList,
+ CmdType commandType,
+ Relation target_relation,
+ int result_rti,
List **attrno_list)
{
- CmdType commandType = parsetree->commandType;
TargetEntry **new_tles;
List *new_tlist = NIL;
List *junk_tlist = NIL;
@@ -709,7 +714,7 @@ rewriteTargetListIU(Query *parsetree, Relation target_relation,
new_tles = (TargetEntry **) palloc0(numattrs * sizeof(TargetEntry *));
next_junk_attrno = numattrs + 1;
- foreach(temp, parsetree->targetList)
+ foreach(temp, targetList)
{
TargetEntry *old_tle = (TargetEntry *) lfirst(temp);
@@ -827,7 +832,7 @@ rewriteTargetListIU(Query *parsetree, Relation target_relation,
{
Node *new_expr;
- new_expr = (Node *) makeVar(parsetree->resultRelation,
+ new_expr = (Node *) makeVar(result_rti,
attrno,
att_tup->atttypid,
att_tup->atttypmod,
@@ -846,7 +851,7 @@ rewriteTargetListIU(Query *parsetree, Relation target_relation,
pfree(new_tles);
- parsetree->targetList = list_concat(new_tlist, junk_tlist);
+ return list_concat(new_tlist, junk_tlist);
}
@@ -1288,7 +1293,8 @@ static List *
matchLocks(CmdType event,
RuleLock *rulelocks,
int varno,
- Query *parsetree)
+ Query *parsetree,
+ bool *hasUpdate)
{
List *matching_locks = NIL;
int nlocks;
@@ -1309,6 +1315,9 @@ matchLocks(CmdType event,
{
RewriteRule *oneLock = rulelocks->rules[i];
+ if (oneLock->event == CMD_UPDATE)
+ *hasUpdate = true;
+
/*
* Suppress ON INSERT/UPDATE/DELETE rules that are disabled or
* configured to not fire during the current sessions replication
@@ -1766,8 +1775,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
/*
* Fetch any new security quals that must be applied to this RTE.
*/
- get_row_security_policies(parsetree, rte, rt_index,
- &securityQuals, &withCheckOptions,
+ get_row_security_policies(parsetree, parsetree->commandType, rte,
+ rt_index, &securityQuals, &withCheckOptions,
&hasRowSecurity, &hasSubLinks);
if (securityQuals != NIL || withCheckOptions != NIL)
@@ -2642,6 +2651,18 @@ rewriteTargetView(Query *parsetree, Relation view)
tle->resno - FirstLowInvalidHeapAttributeNumber);
}
+ if (parsetree->onConflict)
+ {
+ foreach(lc, parsetree->onConflict->onConflictSet)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+
+ if (!tle->resjunk)
+ modified_cols = bms_add_member(modified_cols,
+ tle->resno - FirstLowInvalidHeapAttributeNumber);
+ }
+ }
+
auto_update_detail = view_cols_are_auto_updatable(viewquery,
modified_cols,
NULL,
@@ -2999,6 +3020,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
CmdType event = parsetree->commandType;
bool instead = false;
bool returning = false;
+ bool updatableview = false;
Query *qual_product = NULL;
List *rewritten = NIL;
ListCell *lc1;
@@ -3081,6 +3103,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
Relation rt_entry_relation;
List *locks;
List *product_queries;
+ bool hasUpdate = false;
result_relation = parsetree->resultRelation;
Assert(result_relation != 0);
@@ -3123,19 +3146,41 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
List *attrnos;
/* Process the main targetlist ... */
- rewriteTargetListIU(parsetree, rt_entry_relation, &attrnos);
+ parsetree->targetList = rewriteTargetListIU(parsetree->targetList,
+ parsetree->commandType,
+ rt_entry_relation,
+ parsetree->resultRelation,
+ &attrnos);
/* ... and the VALUES expression lists */
rewriteValuesRTE(values_rte, rt_entry_relation, attrnos);
}
else
{
/* Process just the main targetlist */
- rewriteTargetListIU(parsetree, rt_entry_relation, NULL);
+ parsetree->targetList =
+ rewriteTargetListIU(parsetree->targetList,
+ parsetree->commandType,
+ rt_entry_relation,
+ parsetree->resultRelation, NULL);
+ }
+
+ if (parsetree->onConflict &&
+ parsetree->onConflict->action == ONCONFLICT_UPDATE)
+ {
+ parsetree->onConflict->onConflictSet =
+ rewriteTargetListIU(parsetree->onConflict->onConflictSet,
+ CMD_UPDATE,
+ rt_entry_relation,
+ parsetree->resultRelation,
+ NULL);
}
}
else if (event == CMD_UPDATE)
{
- rewriteTargetListIU(parsetree, rt_entry_relation, NULL);
+ parsetree->targetList =
+ rewriteTargetListIU(parsetree->targetList,
+ parsetree->commandType, rt_entry_relation,
+ parsetree->resultRelation, NULL);
rewriteTargetListUD(parsetree, rt_entry, rt_entry_relation);
}
else if (event == CMD_DELETE)
@@ -3149,7 +3194,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
* Collect and apply the appropriate rules.
*/
locks = matchLocks(event, rt_entry_relation->rd_rules,
- result_relation, parsetree);
+ result_relation, parsetree, &hasUpdate);
product_queries = fireRules(parsetree,
result_relation,
@@ -3198,6 +3243,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
*/
instead = true;
returning = true;
+ updatableview = true;
}
/*
@@ -3278,6 +3324,17 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
}
+ /*
+ * Updatable views are supported by ON CONFLICT, so don't prevent that
+ * case from proceeding
+ */
+ if (parsetree->onConflict &&
+ (product_queries != NIL || hasUpdate) &&
+ !updatableview)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules")));
+
heap_close(rt_entry_relation, NoLock);
}
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index b0b308118f..2c095ce88a 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -89,9 +89,10 @@ row_security_policy_hook_type row_security_policy_hook_restrictive = NULL;
* set to true if any of the quals returned contain sublinks.
*/
void
-get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
- List **securityQuals, List **withCheckOptions,
- bool *hasRowSecurity, bool *hasSubLinks)
+get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
+ int rt_index, List **securityQuals,
+ List **withCheckOptions, bool *hasRowSecurity,
+ bool *hasSubLinks)
{
Expr *rowsec_expr = NULL;
Expr *rowsec_with_check_expr = NULL;
@@ -159,7 +160,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
/* Grab the built-in policies which should be applied to this relation. */
rel = heap_open(rte->relid, NoLock);
- rowsec_policies = pull_row_security_policies(root->commandType, rel,
+ rowsec_policies = pull_row_security_policies(commandType, rel,
user_id);
/*
@@ -201,7 +202,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
*/
if (row_security_policy_hook_restrictive)
{
- hook_policies_restrictive = (*row_security_policy_hook_restrictive)(root->commandType, rel);
+ hook_policies_restrictive = (*row_security_policy_hook_restrictive)(commandType, rel);
/* Build the expression from any policies returned. */
if (hook_policies_restrictive != NIL)
@@ -214,7 +215,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
if (row_security_policy_hook_permissive)
{
- hook_policies_permissive = (*row_security_policy_hook_permissive)(root->commandType, rel);
+ hook_policies_permissive = (*row_security_policy_hook_permissive)(commandType, rel);
/* Build the expression from any policies returned. */
if (hook_policies_permissive != NIL)
@@ -242,7 +243,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
* WITH CHECK policy (this will be a copy of the USING policy, if no
* explicit WITH CHECK policy exists).
*/
- if (root->commandType == CMD_INSERT || root->commandType == CMD_UPDATE)
+ if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
{
/*
* WITH CHECK OPTIONS wants a WCO node which wraps each Expr, so
@@ -259,7 +260,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
- wco->kind = root->commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
+ wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) hook_with_check_expr_restrictive;
@@ -276,7 +277,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
- wco->kind = root->commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
+ wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) rowsec_with_check_expr;
@@ -289,7 +290,7 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
- wco->kind = root->commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
+ wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) hook_with_check_expr_permissive;
@@ -312,19 +313,72 @@ get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
combined_qual_eval = makeBoolExpr(OR_EXPR, combined_quals, -1);
wco = (WithCheckOption *) makeNode(WithCheckOption);
- wco->kind = root->commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
+ wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) combined_qual_eval;
wco->cascaded = false;
*withCheckOptions = lappend(*withCheckOptions, wco);
}
+
+ /*
+ * ON CONFLICT DO UPDATE has an RTE that is subject to both INSERT and
+ * UPDATE RLS enforcement. Those are enforced (as a special, distinct
+ * kind of WCO) on the target tuple.
+ *
+ * Make a second, recursive pass over the RTE for this, gathering
+ * UPDATE-applicable RLS checks/WCOs, and gathering and converting
+ * UPDATE-applicable security quals into WCO_RLS_CONFLICT_CHECK RLS
+ * checks/WCOs. Finally, these distinct kinds of RLS checks/WCOs are
+ * concatenated with our own INSERT-applicable list.
+ */
+ if (root->onConflict && root->onConflict->action == ONCONFLICT_UPDATE &&
+ commandType == CMD_INSERT)
+ {
+ List *conflictSecurityQuals = NIL;
+ List *conflictWCOs = NIL;
+ ListCell *item;
+ bool conflictHasRowSecurity = false;
+ bool conflictHasSublinks = false;
+
+ /* Assume that RTE is target resultRelation */
+ get_row_security_policies(root, CMD_UPDATE, rte, rt_index,
+ &conflictSecurityQuals, &conflictWCOs,
+ &conflictHasRowSecurity,
+ &conflictHasSublinks);
+
+ if (conflictHasRowSecurity)
+ *hasRowSecurity = true;
+ if (conflictHasSublinks)
+ *hasSubLinks = true;
+
+ /*
+ * Append WITH CHECK OPTIONs/RLS checks, which should not conflict
+ * between this INSERT and the auxiliary UPDATE
+ */
+ *withCheckOptions = list_concat(*withCheckOptions,
+ conflictWCOs);
+
+ foreach(item, conflictSecurityQuals)
+ {
+ Expr *conflict_rowsec_expr = (Expr *) lfirst(item);
+ WithCheckOption *wco;
+
+ wco = (WithCheckOption *) makeNode(WithCheckOption);
+
+ wco->kind = WCO_RLS_CONFLICT_CHECK;
+ wco->relname = pstrdup(RelationGetRelationName(rel));
+ wco->qual = (Node *) copyObject(conflict_rowsec_expr);
+ wco->cascaded = false;
+ *withCheckOptions = lappend(*withCheckOptions, wco);
+ }
+ }
}
/* For SELECT, UPDATE, and DELETE, set the security quals */
- if (root->commandType == CMD_SELECT
- || root->commandType == CMD_UPDATE
- || root->commandType == CMD_DELETE)
+ if (commandType == CMD_SELECT
+ || commandType == CMD_UPDATE
+ || commandType == CMD_DELETE)
{
/* restrictive policies can simply be added to the list first */
if (hook_expr_restrictive)
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index d13a167334..c052949749 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -25,6 +25,24 @@
#include "utils/inval.h"
+/*
+ * Per-backend counter for generating speculative insertion tokens.
+ *
+ * This may wrap around, but that's OK as it's only used for the short
+ * duration between inserting a tuple and checking that there are no (unique)
+ * constraint violations. It's theoretically possible that a backend sees a
+ * tuple that was speculatively inserted by another backend, but before it has
+ * started waiting on the token, the other backend completes its insertion,
+ * and then performs 2^32 unrelated insertions. And after all that, the
+ * first backend finally calls SpeculativeInsertionLockAcquire(), with the
+ * intention of waiting for the first insertion to complete, but ends up
+ * waiting for the latest unrelated insertion instead. Even then, nothing
+ * particularly bad happens: in the worst case they deadlock, causing one of
+ * the transactions to abort.
+ */
+static uint32 speculativeInsertionToken = 0;
+
+
/*
* Struct to hold context info for transaction lock waits.
*
@@ -575,6 +593,73 @@ ConditionalXactLockTableWait(TransactionId xid)
return true;
}
+/*
+ * SpeculativeInsertionLockAcquire
+ *
+ * Insert a lock showing that the given transaction ID is inserting a tuple,
+ * but hasn't yet decided whether it's going to keep it. The lock can then be
+ * used to wait for the decision to go ahead with the insertion, or aborting
+ * it.
+ *
+ * The token is used to distinguish multiple insertions by the same
+ * transaction. It is returned to caller.
+ */
+uint32
+SpeculativeInsertionLockAcquire(TransactionId xid)
+{
+ LOCKTAG tag;
+
+ speculativeInsertionToken++;
+
+ /*
+ * Check for wrap-around. Zero means no token is held, so don't use that.
+ */
+ if (speculativeInsertionToken == 0)
+ speculativeInsertionToken = 1;
+
+ SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, speculativeInsertionToken);
+
+ (void) LockAcquire(&tag, ExclusiveLock, false, false);
+
+ return speculativeInsertionToken;
+}
+
+/*
+ * SpeculativeInsertionLockRelease
+ *
+ * Delete the lock showing that the given transaction is speculatively
+ * inserting a tuple.
+ */
+void
+SpeculativeInsertionLockRelease(TransactionId xid)
+{
+ LOCKTAG tag;
+
+ SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, speculativeInsertionToken);
+
+ LockRelease(&tag, ExclusiveLock, false);
+}
+
+/*
+ * SpeculativeInsertionWait
+ *
+ * Wait for the specified transaction to finish or abort the insertion of a
+ * tuple.
+ */
+void
+SpeculativeInsertionWait(TransactionId xid, uint32 token)
+{
+ LOCKTAG tag;
+
+ SET_LOCKTAG_SPECULATIVE_INSERTION(tag, xid, token);
+
+ Assert(TransactionIdIsValid(xid));
+ Assert(token != 0);
+
+ (void) LockAcquire(&tag, ShareLock, false, false);
+ LockRelease(&tag, ShareLock, false);
+}
+
/*
* XactLockTableWaitErrorContextCb
* Error context callback for transaction lock waits.
@@ -873,6 +958,12 @@ DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
tag->locktag_field1,
tag->locktag_field2);
break;
+ case LOCKTAG_SPECULATIVE_TOKEN:
+ appendStringInfo(buf,
+ _("speculative token %u of transaction %u"),
+ tag->locktag_field2,
+ tag->locktag_field1);
+ break;
case LOCKTAG_OBJECT:
appendStringInfo(buf,
_("object %u of class %u of database %u"),
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 9c14e8abdf..bcffd85754 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -202,8 +202,14 @@ ProcessQuery(PlannedStmt *plan,
lastOid = queryDesc->estate->es_lastoid;
else
lastOid = InvalidOid;
- snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+ if (plan->isUpsert)
+ snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
+ "UPSERT %u %u",
+ lastOid, queryDesc->estate->es_processed);
+ else
+ snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
+ "INSERT %u %u",
+ lastOid, queryDesc->estate->es_processed);
break;
case CMD_UPDATE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
@@ -1356,7 +1362,10 @@ PortalRunMulti(Portal portal, bool isTopLevel,
* 0" here because technically there is no query of the matching tag type,
* and printing a non-zero count for a different query type seems wrong,
* e.g. an INSERT that does an UPDATE instead should not print "0 1" if
- * one row was updated. See QueryRewrite(), step 3, for details.
+ * one row was updated (unless the ON CONFLICT DO UPDATE, or "UPSERT"
+ * variant of INSERT was used to update the row, where it's logically a
+ * direct effect of the top level command). See QueryRewrite(), step 3,
+ * for details.
*/
if (completionTag && completionTag[0] == '\0')
{
@@ -1366,6 +1375,8 @@ PortalRunMulti(Portal portal, bool isTopLevel,
sprintf(completionTag, "SELECT 0 0");
else if (strcmp(completionTag, "INSERT") == 0)
strcpy(completionTag, "INSERT 0 0");
+ else if (strcmp(completionTag, "UPSERT") == 0)
+ strcpy(completionTag, "UPSERT 0 0");
else if (strcmp(completionTag, "UPDATE") == 0)
strcpy(completionTag, "UPDATE 0");
else if (strcmp(completionTag, "DELETE") == 0)
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 491824dd6b..9d53a8b6a3 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -29,6 +29,7 @@ static const char *const LockTagTypeNames[] = {
"tuple",
"transactionid",
"virtualxid",
+ "speculative token",
"object",
"userlock",
"advisory"
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 69267bdb91..4b3cd85ad9 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -354,6 +354,9 @@ static void get_select_query_def(Query *query, deparse_context *context,
TupleDesc resultDesc);
static void get_insert_query_def(Query *query, deparse_context *context);
static void get_update_query_def(Query *query, deparse_context *context);
+static void get_update_query_targetlist_def(Query *query, List *targetList,
+ deparse_context *context,
+ RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context,
@@ -3846,15 +3849,23 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* For a SubqueryScan, pretend the subplan is INNER referent. (We don't
* use OUTER because that could someday conflict with the normal meaning.)
* Likewise, for a CteScan, pretend the subquery's plan is INNER referent.
+ * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the
+ * excluded expression's tlist. (Similar to the SubqueryScan we don't want
+ * to reuse OUTER, it's used for RETURNING in some modify table cases,
+ * although not INSERT .. CONFLICT).
*/
if (IsA(ps, SubqueryScanState))
dpns->inner_planstate = ((SubqueryScanState *) ps)->subplan;
else if (IsA(ps, CteScanState))
dpns->inner_planstate = ((CteScanState *) ps)->cteplanstate;
+ else if (IsA(ps, ModifyTableState))
+ dpns->inner_planstate = ps;
else
dpns->inner_planstate = innerPlanState(ps);
- if (dpns->inner_planstate)
+ if (IsA(ps, ModifyTableState))
+ dpns->inner_tlist = ((ModifyTableState *) ps)->mt_excludedtlist;
+ else if (dpns->inner_planstate)
dpns->inner_tlist = dpns->inner_planstate->plan->targetlist;
else
dpns->inner_tlist = NIL;
@@ -5302,6 +5313,32 @@ get_insert_query_def(Query *query, deparse_context *context)
appendStringInfoString(buf, "DEFAULT VALUES");
}
+ /* Add ON CONFLICT if present */
+ if (query->onConflict)
+ {
+ OnConflictExpr *confl = query->onConflict;
+
+ if (confl->action == ONCONFLICT_NOTHING)
+ {
+ appendStringInfoString(buf, " ON CONFLICT DO NOTHING");
+ }
+ else
+ {
+ appendStringInfoString(buf, " ON CONFLICT DO UPDATE SET ");
+ /* Deparse targetlist */
+ get_update_query_targetlist_def(query, confl->onConflictSet,
+ context, rte);
+
+ /* Add a WHERE clause if given */
+ if (confl->onConflictWhere != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(confl->onConflictWhere, context, false);
+ }
+ }
+ }
+
/* Add RETURNING if present */
if (query->returningList)
{
@@ -5321,12 +5358,6 @@ get_update_query_def(Query *query, deparse_context *context)
{
StringInfo buf = context->buf;
RangeTblEntry *rte;
- List *ma_sublinks;
- ListCell *next_ma_cell;
- SubLink *cur_ma_sublink;
- int remaining_ma_columns;
- const char *sep;
- ListCell *l;
/* Insert the WITH clause if given */
get_with_clause(query, context);
@@ -5349,6 +5380,46 @@ get_update_query_def(Query *query, deparse_context *context)
quote_identifier(rte->alias->aliasname));
appendStringInfoString(buf, " SET ");
+ /* Deparse targetlist */
+ get_update_query_targetlist_def(query, query->targetList, context, rte);
+
+ /* Add the FROM clause if needed */
+ get_from_clause(query, " FROM ", context);
+
+ /* Add a WHERE clause if given */
+ if (query->jointree->quals != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(query->jointree->quals, context, false);
+ }
+
+ /* Add RETURNING if present */
+ if (query->returningList)
+ {
+ appendContextKeyword(context, " RETURNING",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_target_list(query->returningList, context, NULL);
+ }
+}
+
+
+/* ----------
+ * get_update_query_targetlist_def - Parse back an UPDATE targetlist
+ * ----------
+ */
+static void
+get_update_query_targetlist_def(Query *query, List *targetList,
+ deparse_context *context, RangeTblEntry *rte)
+{
+ StringInfo buf = context->buf;
+ ListCell *l;
+ ListCell *next_ma_cell;
+ int remaining_ma_columns;
+ const char *sep;
+ SubLink *cur_ma_sublink;
+ List *ma_sublinks;
+
/*
* Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
* into a list. We expect them to appear, in ID order, in resjunk tlist
@@ -5357,7 +5428,7 @@ get_update_query_def(Query *query, deparse_context *context)
ma_sublinks = NIL;
if (query->hasSubLinks) /* else there can't be any */
{
- foreach(l, query->targetList)
+ foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
@@ -5379,7 +5450,7 @@ get_update_query_def(Query *query, deparse_context *context)
/* Add the comma separated list of 'attname = value' */
sep = "";
- foreach(l, query->targetList)
+ foreach(l, targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
Node *expr;
@@ -5470,25 +5541,6 @@ get_update_query_def(Query *query, deparse_context *context)
get_rule_expr(expr, context, false);
}
-
- /* Add the FROM clause if needed */
- get_from_clause(query, " FROM ", context);
-
- /* Add a WHERE clause if given */
- if (query->jointree->quals != NULL)
- {
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(query->jointree->quals, context, false);
- }
-
- /* Add RETURNING if present */
- if (query->returningList)
- {
- appendContextKeyword(context, " RETURNING",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_target_list(query->returningList, context, NULL);
- }
}
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index a4a478d114..b4284d6d94 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -405,6 +405,13 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
}
}
}
+ /*
+ * An invalid Xmin can be left behind by a speculative insertion that
+ * is cancelled by super-deleting the tuple. We shouldn't see any of
+ * those in TOAST tables, but better safe than sorry.
+ */
+ else if (!TransactionIdIsValid(HeapTupleHeaderGetXmin(tuple)))
+ return false;
}
/* otherwise assume the tuple is valid for TOAST. */
@@ -714,8 +721,11 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
* output argument to return the xids of concurrent xacts that affected the
* tuple. snapshot->xmin is set to the tuple's xmin if that is another
* transaction that's still in progress; or to InvalidTransactionId if the
- * tuple's xmin is committed good, committed dead, or my own xact. Similarly
- * for snapshot->xmax and the tuple's xmax.
+ * tuple's xmin is committed good, committed dead, or my own xact.
+ * Similarly for snapshot->xmax and the tuple's xmax. If the tuple was
+ * inserted speculatively, meaning that the inserter might still back down
+ * on the insertion without aborting the whole transaction, the associated
+ * token is also returned in snapshot->speculativeToken.
*/
bool
HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
@@ -727,6 +737,7 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
Assert(htup->t_tableOid != InvalidOid);
snapshot->xmin = snapshot->xmax = InvalidTransactionId;
+ snapshot->speculativeToken = 0;
if (!HeapTupleHeaderXminCommitted(tuple))
{
@@ -808,6 +819,20 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
}
else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
{
+ /*
+ * Return the speculative token to caller. Caller can worry
+ * about xmax, since it requires a conclusively locked row
+ * version, and a concurrent update to this tuple is a conflict
+ * of its purposes.
+ */
+ if (HeapTupleHeaderIsSpeculative(tuple))
+ {
+ snapshot->speculativeToken =
+ HeapTupleHeaderGetSpeculativeToken(tuple);
+
+ Assert(snapshot->speculativeToken != 0);
+ }
+
snapshot->xmin = HeapTupleHeaderGetRawXmin(tuple);
/* XXX shouldn't we fall through to look at xmax? */
return true; /* in insertion by other */
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index ff01368531..f4155f7877 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -894,9 +894,12 @@ PrintQueryResults(PGresult *results)
success = StoreQueryTuple(results);
else
success = PrintQueryTuples(results);
- /* if it's INSERT/UPDATE/DELETE RETURNING, also print status */
+ /*
+ * if it's INSERT/UPSERT/UPDATE/DELETE RETURNING, also print status
+ */
cmdstatus = PQcmdStatus(results);
if (strncmp(cmdstatus, "INSERT", 6) == 0 ||
+ strncmp(cmdstatus, "UPSERT", 6) == 0 ||
strncmp(cmdstatus, "UPDATE", 6) == 0 ||
strncmp(cmdstatus, "DELETE", 6) == 0)
PrintQueryStatus(results);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 888cce7a2d..49c8ca4d66 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -28,6 +28,7 @@
#define HEAP_INSERT_SKIP_WAL 0x0001
#define HEAP_INSERT_SKIP_FSM 0x0002
#define HEAP_INSERT_FROZEN 0x0004
+#define HEAP_INSERT_SPECULATIVE 0x0008
typedef struct BulkInsertStateData *BulkInsertState;
@@ -142,6 +143,8 @@ extern void heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
extern HTSU_Result heap_delete(Relation relation, ItemPointer tid,
CommandId cid, Snapshot crosscheck, bool wait,
HeapUpdateFailureData *hufd);
+extern void heap_finish_speculative(Relation relation, HeapTuple tuple);
+extern void heap_abort_speculative(Relation relation, HeapTuple tuple);
extern HTSU_Result heap_update(Relation relation, ItemPointer otid,
HeapTuple newtup,
CommandId cid, Snapshot crosscheck, bool wait,
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index f0f89dec0f..caa0f14f4b 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -34,7 +34,7 @@
#define XLOG_HEAP_UPDATE 0x20
/* 0x030 is free, was XLOG_HEAP_MOVE */
#define XLOG_HEAP_HOT_UPDATE 0x40
-/* 0x050 is free, was XLOG_HEAP_NEWPAGE */
+#define XLOG_HEAP_CONFIRM 0x50
#define XLOG_HEAP_LOCK 0x60
#define XLOG_HEAP_INPLACE 0x70
@@ -60,23 +60,43 @@
#define XLOG_HEAP2_NEW_CID 0x70
/*
- * xl_heap_* ->flag values, 8 bits are available.
+ * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available.
*/
/* PD_ALL_VISIBLE was cleared */
-#define XLOG_HEAP_ALL_VISIBLE_CLEARED (1<<0)
+#define XLH_INSERT_ALL_VISIBLE_CLEARED (1<<0)
+#define XLH_INSERT_LAST_IN_MULTI (1<<1)
+#define XLH_INSERT_IS_SPECULATIVE (1<<2)
+#define XLH_INSERT_CONTAINS_NEW_TUPLE (1<<3)
+
+/*
+ * xl_heap_update flag values, 8 bits are available.
+ */
+/* PD_ALL_VISIBLE was cleared */
+#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED (1<<0)
/* PD_ALL_VISIBLE was cleared in the 2nd page */
-#define XLOG_HEAP_NEW_ALL_VISIBLE_CLEARED (1<<1)
-#define XLOG_HEAP_CONTAINS_OLD_TUPLE (1<<2)
-#define XLOG_HEAP_CONTAINS_OLD_KEY (1<<3)
-#define XLOG_HEAP_CONTAINS_NEW_TUPLE (1<<4)
-#define XLOG_HEAP_PREFIX_FROM_OLD (1<<5)
-#define XLOG_HEAP_SUFFIX_FROM_OLD (1<<6)
-/* last xl_heap_multi_insert record for one heap_multi_insert() call */
-#define XLOG_HEAP_LAST_MULTI_INSERT (1<<7)
+#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED (1<<1)
+#define XLH_UPDATE_CONTAINS_OLD_TUPLE (1<<2)
+#define XLH_UPDATE_CONTAINS_OLD_KEY (1<<3)
+#define XLH_UPDATE_CONTAINS_NEW_TUPLE (1<<4)
+#define XLH_UPDATE_PREFIX_FROM_OLD (1<<5)
+#define XLH_UPDATE_SUFFIX_FROM_OLD (1<<6)
/* convenience macro for checking whether any form of old tuple was logged */
-#define XLOG_HEAP_CONTAINS_OLD \
- (XLOG_HEAP_CONTAINS_OLD_TUPLE | XLOG_HEAP_CONTAINS_OLD_KEY)
+#define XLH_UPDATE_CONTAINS_OLD \
+ (XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY)
+
+/*
+ * xl_heap_delete flag values, 8 bits are available.
+ */
+/* PD_ALL_VISIBLE was cleared */
+#define XLH_DELETE_ALL_VISIBLE_CLEARED (1<<0)
+#define XLH_DELETE_CONTAINS_OLD_TUPLE (1<<1)
+#define XLH_DELETE_CONTAINS_OLD_KEY (1<<2)
+#define XLH_DELETE_IS_SUPER (1<<3)
+
+/* convenience macro for checking whether any form of old tuple was logged */
+#define XLH_DELETE_CONTAINS_OLD \
+ (XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY)
/* This is what we need to know about delete */
typedef struct xl_heap_delete
@@ -243,6 +263,14 @@ typedef struct xl_heap_lock_updated
#define SizeOfHeapLockUpdated (offsetof(xl_heap_lock_updated, infobits_set) + sizeof(uint8))
+/* This is what we need to know about confirmation of speculative insertion */
+typedef struct xl_heap_confirm
+{
+ OffsetNumber offnum; /* confirmed tuple's offset on page */
+} xl_heap_confirm;
+
+#define SizeOfHeapConfirm (offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber))
+
/* This is what we need to know about in-place update */
typedef struct xl_heap_inplace
{
diff --git a/src/include/access/hio.h b/src/include/access/hio.h
index b0140298b1..b3b91e70d5 100644
--- a/src/include/access/hio.h
+++ b/src/include/access/hio.h
@@ -36,7 +36,7 @@ typedef struct BulkInsertStateData
extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
- HeapTuple tuple);
+ HeapTuple tuple, bool token);
extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
BulkInsertState bistate,
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 0a673cd526..80285acc3b 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -96,6 +96,15 @@
* unrelated tuple stored into a slot recently freed by VACUUM. If either
* check fails, one may assume that there is no live descendant version.
*
+ * t_ctid is sometimes used to store a speculative insertion token, instead
+ * of a real TID. A speculative token is set on a tuple that's being
+ * inserted, until the inserter is sure that it wants to go ahead with the
+ * insertion. Hence a token should only be seen on a tuple with an XMAX
+ * that's still in-progress, or invalid/aborted. The token is replaced with
+ * the tuple's real TID when the insertion is confirmed. One should never
+ * see a speculative insertion token while following a chain of t_ctid links,
+ * because they are not used on updates, only insertions.
+ *
* Following the fixed header fields, the nulls bitmap is stored (beginning
* at t_bits). The bitmap is *not* stored if t_infomask shows that there
* are no nulls in the tuple. If an OID field is present (as indicated by
@@ -138,7 +147,8 @@ struct HeapTupleHeaderData
DatumTupleFields t_datum;
} t_choice;
- ItemPointerData t_ctid; /* current TID of this or newer tuple */
+ ItemPointerData t_ctid; /* current TID of this or newer tuple (or a
+ * speculative insertion token) */
/* Fields below here must match MinimalTupleData! */
@@ -241,6 +251,14 @@ struct HeapTupleHeaderData
*/
#define HEAP_TUPLE_HAS_MATCH HEAP_ONLY_TUPLE /* tuple has a join match */
+/*
+ * Special value used in t_ctid.ip_posid, to indicate that it holds a
+ * speculative insertion token rather than a real TID. This must be higher
+ * than MaxOffsetNumber, so that it can be distinguished from a valid
+ * offset number in a regular item pointer.
+ */
+#define SpecTokenOffsetNumber 0xfffe
+
/*
* HeapTupleHeader accessor macros
*
@@ -377,6 +395,22 @@ do { \
(tup)->t_choice.t_heap.t_field3.t_xvac = (xid); \
} while (0)
+#define HeapTupleHeaderIsSpeculative(tup) \
+( \
+ (tup)->t_ctid.ip_posid == SpecTokenOffsetNumber \
+)
+
+#define HeapTupleHeaderGetSpeculativeToken(tup) \
+( \
+ AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
+ ItemPointerGetBlockNumber(&(tup)->t_ctid) \
+)
+
+#define HeapTupleHeaderSetSpeculativeToken(tup, token) \
+( \
+ ItemPointerSet(&(tup)->t_ctid, token, SpecTokenOffsetNumber) \
+)
+
#define HeapTupleHeaderGetDatumLength(tup) \
VARSIZE(tup)
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 3d2c5b2a29..969a8e3c35 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 201505081
+#define CATALOG_VERSION_NO 201505082
#endif
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index a04def96e4..06f38202a5 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -81,6 +81,8 @@ extern void index_drop(Oid indexId, bool concurrent);
extern IndexInfo *BuildIndexInfo(Relation index);
+extern void BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii);
+
extern void FormIndexDatum(IndexInfo *indexInfo,
TupleTableSlot *slot,
EState *estate,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 33c8fad844..1b68b54c7d 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -195,6 +195,7 @@ extern void ExecConstraints(ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate);
extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate);
+extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo);
extern ExecRowMark *ExecFindRowMark(EState *estate, Index rti);
extern ExecAuxRowMark *ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist);
extern TupleTableSlot *EvalPlanQual(EState *estate, EPQState *epqstate,
@@ -361,16 +362,18 @@ extern void UnregisterExprContextCallback(ExprContext *econtext,
/*
* prototypes from functions in execIndexing.c
*/
-extern void ExecOpenIndices(ResultRelInfo *resultRelInfo);
+extern void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative);
extern void ExecCloseIndices(ResultRelInfo *resultRelInfo);
extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid,
- EState *estate);
-extern bool check_exclusion_constraint(Relation heap, Relation index,
+ EState *estate, bool noDupErr, bool *specConflict,
+ List *arbiterIndexes);
+extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate,
+ ItemPointer conflictTid, List *arbiterIndexes);
+extern void check_exclusion_constraint(Relation heap, Relation index,
IndexInfo *indexInfo,
ItemPointer tupleid,
Datum *values, bool *isnull,
- EState *estate,
- bool newIndex, bool errorOK);
+ EState *estate, bool newIndex);
#endif /* EXECUTOR_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index dac542fbc1..210dbe4a53 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -41,6 +41,9 @@
* ExclusionOps Per-column exclusion operators, or NULL if none
* ExclusionProcs Underlying function OIDs for ExclusionOps
* ExclusionStrats Opclass strategy numbers for ExclusionOps
+ * UniqueOps These are like Exclusion*, but for unique indexes
+ * UniqueProcs
+ * UniqueStrats
* Unique is it a unique index?
* ReadyForInserts is it valid for inserts?
* Concurrent are we doing a concurrent index build?
@@ -62,6 +65,9 @@ typedef struct IndexInfo
Oid *ii_ExclusionOps; /* array with one entry per column */
Oid *ii_ExclusionProcs; /* array with one entry per column */
uint16 *ii_ExclusionStrats; /* array with one entry per column */
+ Oid *ii_UniqueOps; /* array with one entry per column */
+ Oid *ii_UniqueProcs; /* array with one entry per column */
+ uint16 *ii_UniqueStrats; /* array with one entry per column */
bool ii_Unique;
bool ii_ReadyForInserts;
bool ii_Concurrent;
@@ -308,6 +314,8 @@ typedef struct JunkFilter
* ConstraintExprs array of constraint-checking expr states
* junkFilter for removing junk attributes from tuples
* projectReturning for computing a RETURNING list
+ * onConflictSetProj for computing ON CONFLICT DO UPDATE SET
+ * onConflictSetWhere list of ON CONFLICT DO UPDATE exprs (qual)
* ----------------
*/
typedef struct ResultRelInfo
@@ -329,6 +337,8 @@ typedef struct ResultRelInfo
List **ri_ConstraintExprs;
JunkFilter *ri_junkFilter;
ProjectionInfo *ri_projectReturning;
+ ProjectionInfo *ri_onConflictSetProj;
+ List *ri_onConflictSetWhere;
} ResultRelInfo;
/* ----------------
@@ -1094,6 +1104,11 @@ typedef struct ModifyTableState
List **mt_arowmarks; /* per-subplan ExecAuxRowMark lists */
EPQState mt_epqstate; /* for evaluating EvalPlanQual rechecks */
bool fireBSTriggers; /* do we need to fire stmt triggers? */
+ OnConflictAction mt_onconflict; /* ON CONFLICT type */
+ List *mt_arbiterindexes; /* unique index OIDs to arbitrate taking alt path */
+ TupleTableSlot *mt_existing; /* slot to store existing target tuple in */
+ List *mt_excludedtlist; /* the excluded pseudo relation's tlist */
+ TupleTableSlot *mt_conflproj; /* CONFLICT ... SET ... projection target */
} ModifyTableState;
/* ----------------
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 8991f3f80c..768f413a45 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -168,10 +168,12 @@ typedef enum NodeTag
T_CoerceToDomainValue,
T_SetToDefault,
T_CurrentOfExpr,
+ T_InferenceElem,
T_TargetEntry,
T_RangeTblRef,
T_JoinExpr,
T_FromExpr,
+ T_OnConflictExpr,
T_IntoClause,
/*
@@ -413,6 +415,8 @@ typedef enum NodeTag
T_RowMarkClause,
T_XmlSerialize,
T_WithClause,
+ T_InferClause,
+ T_OnConflictClause,
T_CommonTableExpr,
T_RoleSpec,
@@ -626,4 +630,17 @@ typedef enum JoinType
(1 << JOIN_RIGHT) | \
(1 << JOIN_ANTI))) != 0)
+/*
+ * OnConflictAction -
+ * "ON CONFLICT" clause type of query
+ *
+ * This is needed in both parsenodes.h and plannodes.h, so put it here...
+ */
+typedef enum OnConflictAction
+{
+ ONCONFLICT_NONE, /* No "ON CONFLICT" clause */
+ ONCONFLICT_NOTHING, /* ON CONFLICT ... DO NOTHING */
+ ONCONFLICT_UPDATE /* ON CONFLICT ... DO UPDATE */
+} OnConflictAction;
+
#endif /* NODES_H */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 7d15ef2847..91ca9c6fd0 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -132,6 +132,8 @@ typedef struct Query
List *withCheckOptions; /* a list of WithCheckOption's */
+ OnConflictExpr *onConflict; /* ON CONFLICT DO [NOTHING | UPDATE] */
+
List *returningList; /* return-values list (of TargetEntry) */
List *groupClause; /* a list of SortGroupClause's */
@@ -591,7 +593,7 @@ typedef enum TableLikeOption
} TableLikeOption;
/*
- * IndexElem - index parameters (used in CREATE INDEX)
+ * IndexElem - index parameters (used in CREATE INDEX, and in ON CONFLICT)
*
* For a plain index attribute, 'name' is the name of the table column to
* index, and 'expr' is NULL. For an index expression, 'name' is NULL and
@@ -735,9 +737,9 @@ typedef struct XmlSerialize
* For SELECT/INSERT/UPDATE permissions, if the user doesn't have
* table-wide permissions then it is sufficient to have the permissions
* on all columns identified in selectedCols (for SELECT) and/or
- * insertedCols and/or updatedCols (INSERT with ON CONFLICT UPDATE may
- * have all 3). selectedCols, insertedCols and updatedCols are
- * bitmapsets, which cannot have negative integer members, so we subtract
+ * insertedCols and/or updatedCols (INSERT with ON CONFLICT DO UPDATE may
+ * have all 3). selectedCols, insertedCols and updatedCols are bitmapsets,
+ * which cannot have negative integer members, so we subtract
* FirstLowInvalidHeapAttributeNumber from column numbers before storing
* them in these fields. A whole-row Var reference is represented by
* setting the bit for InvalidAttrNumber.
@@ -881,7 +883,8 @@ typedef enum WCOKind
{
WCO_VIEW_CHECK, /* WCO on an auto-updatable view */
WCO_RLS_INSERT_CHECK, /* RLS INSERT WITH CHECK policy */
- WCO_RLS_UPDATE_CHECK /* RLS UPDATE WITH CHECK policy */
+ WCO_RLS_UPDATE_CHECK, /* RLS UPDATE WITH CHECK policy */
+ WCO_RLS_CONFLICT_CHECK /* RLS ON CONFLICT DO UPDATE USING policy */
} WCOKind;
typedef struct WithCheckOption
@@ -1025,6 +1028,37 @@ typedef struct WithClause
int location; /* token location, or -1 if unknown */
} WithClause;
+/*
+ * InferClause -
+ * ON CONFLICT unique index inference clause
+ *
+ * Note: InferClause does not propagate into the Query representation.
+ */
+typedef struct InferClause
+{
+ NodeTag type;
+ List *indexElems; /* IndexElems to infer unique index */
+ Node *whereClause; /* qualification (partial-index predicate) */
+ char *conname; /* Constraint name, or NULL if unnamed */
+ int location; /* token location, or -1 if unknown */
+} InferClause;
+
+/*
+ * OnConflictClause -
+ * representation of ON CONFLICT clause
+ *
+ * Note: OnConflictClause does not propagate into the Query representation.
+ */
+typedef struct OnConflictClause
+{
+ NodeTag type;
+ OnConflictAction action; /* DO NOTHING or UPDATE? */
+ InferClause *infer; /* Optional index inference clause */
+ List *targetList; /* the target list (of ResTarget) */
+ Node *whereClause; /* qualifications */
+ int location; /* token location, or -1 if unknown */
+} OnConflictClause;
+
/*
* CommonTableExpr -
* representation of WITH list element
@@ -1075,6 +1109,7 @@ typedef struct InsertStmt
RangeVar *relation; /* relation to insert into */
List *cols; /* optional: names of the target columns */
Node *selectStmt; /* the source SELECT/VALUES, or NULL */
+ OnConflictClause *onConflictClause; /* ON CONFLICT clause */
List *returningList; /* list of expressions to return */
WithClause *withClause; /* WITH clause */
} InsertStmt;
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index baeba2d330..c63492fa0b 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -45,6 +45,8 @@ typedef struct PlannedStmt
bool hasModifyingCTE; /* has insert|update|delete in WITH? */
+ bool isUpsert; /* is it insert ... ON CONFLICT UPDATE? */
+
bool canSetTag; /* do I set the command result tag? */
bool transientPlan; /* redo plan when TransactionXmin changes? */
@@ -183,6 +185,12 @@ typedef struct ModifyTable
List *fdwPrivLists; /* per-target-table FDW private data lists */
List *rowMarks; /* PlanRowMarks (non-locking only) */
int epqParam; /* ID of Param for EvalPlanQual re-eval */
+ OnConflictAction onConflictAction; /* ON CONFLICT action */
+ List *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
+ List *onConflictSet; /* SET for INSERT ON CONFLICT DO UPDATE */
+ Node *onConflictWhere;/* WHERE for ON CONFLICT UPDATE */
+ Index exclRelRTI; /* RTI of the EXCLUDED pseudo relation */
+ List *exclRelTlist; /* tlist of the EXCLUDED pseudo relation */
} ModifyTable;
/* ----------------
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 4f1d234d30..8f2c64847e 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1143,6 +1143,22 @@ typedef struct CurrentOfExpr
int cursor_param; /* refcursor parameter number, or 0 */
} CurrentOfExpr;
+/*
+ * InferenceElem - an element of a unique index inference specification
+ *
+ * This mostly matches the structure of IndexElems, but having a dedicated
+ * primnode allows for a clean separation between the use of index parameters
+ * by utility commands, and this node.
+ */
+typedef struct InferenceElem
+{
+ Expr xpr;
+ Node *expr; /* expression to infer from, or NULL */
+ Oid infercollid; /* OID of collation, or InvalidOid */
+ Oid inferopfamily; /* OID of att opfamily, or InvalidOid */
+ Oid inferopcinputtype; /* OID of att input type, or InvalidOid */
+} InferenceElem;
+
/*--------------------
* TargetEntry -
* a target entry (used in query target lists)
@@ -1307,4 +1323,30 @@ typedef struct FromExpr
Node *quals; /* qualifiers on join, if any */
} FromExpr;
+/*----------
+ * OnConflictExpr - represents an ON CONFLICT DO ... expression
+ *
+ * The optimizer requires a list of inference elements, and optionally a WHERE
+ * clause to infer a unique index. The unique index (or, occasionally,
+ * indexes) inferred are used to arbitrate whether or not the alternative ON
+ * CONFLICT path is taken.
+ *----------
+ */
+typedef struct OnConflictExpr
+{
+ NodeTag type;
+ OnConflictAction action; /* DO NOTHING or UPDATE? */
+
+ /* Arbiter */
+ List *arbiterElems; /* unique index arbiter list (of InferenceElem's) */
+ Node *arbiterWhere; /* unique index arbiter WHERE clause */
+ Oid constraint; /* pg_constraint OID for arbiter */
+
+ /* ON CONFLICT UPDATE */
+ List *onConflictSet; /* List of ON CONFLICT SET TargetEntrys */
+ Node *onConflictWhere;/* qualifiers to restrict UPDATE to */
+ int exclRelIndex; /* RT index of 'excluded' relation */
+ List *exclRelTlist; /* tlist of the EXCLUDED pseudo relation */
+} OnConflictExpr;
+
#endif /* PRIMNODES_H */
diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h
index 8eb2e57d7b..11e7d4d26b 100644
--- a/src/include/optimizer/plancat.h
+++ b/src/include/optimizer/plancat.h
@@ -28,6 +28,8 @@ extern PGDLLIMPORT get_relation_info_hook_type get_relation_info_hook;
extern void get_relation_info(PlannerInfo *root, Oid relationObjectId,
bool inhparent, RelOptInfo *rel);
+extern List *infer_arbiter_indexes(PlannerInfo *root);
+
extern void estimate_rel_size(Relation rel, int32 *attr_widths,
BlockNumber *pages, double *tuples, double *allvisfrac);
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 0c8cbcded9..1d4ab0488e 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -86,7 +86,7 @@ extern ModifyTable *make_modifytable(PlannerInfo *root,
Index nominalRelation,
List *resultRelations, List *subplans,
List *withCheckOptionLists, List *returningLists,
- List *rowMarks, int epqParam);
+ List *rowMarks, OnConflictExpr *onconflict, int epqParam);
extern bool is_projection_capable_plan(Plan *plan);
/*
diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h
index 05e46c5b78..dcd078ee43 100644
--- a/src/include/optimizer/prep.h
+++ b/src/include/optimizer/prep.h
@@ -45,6 +45,9 @@ extern void expand_security_quals(PlannerInfo *root, List *tlist);
*/
extern List *preprocess_targetlist(PlannerInfo *root, List *tlist);
+extern List *preprocess_onconflict_targetlist(List *tlist,
+ int result_relation, List *range_table);
+
extern PlanRowMark *get_plan_rowmark(List *rowmarks, Index rtindex);
/*
diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h
index 5b1ee15424..faea99108c 100644
--- a/src/include/parser/kwlist.h
+++ b/src/include/parser/kwlist.h
@@ -87,6 +87,7 @@ PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD)
PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD)
PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD)
PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD)
+PG_KEYWORD("conflict", CONFLICT, UNRESERVED_KEYWORD)
PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD)
PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD)
PG_KEYWORD("constraints", CONSTRAINTS, UNRESERVED_KEYWORD)
diff --git a/src/include/parser/parse_clause.h b/src/include/parser/parse_clause.h
index 6a4438f556..f1b7d3d896 100644
--- a/src/include/parser/parse_clause.h
+++ b/src/include/parser/parse_clause.h
@@ -41,6 +41,10 @@ extern List *transformDistinctClause(ParseState *pstate,
List **targetlist, List *sortClause, bool is_agg);
extern List *transformDistinctOnClause(ParseState *pstate, List *distinctlist,
List **targetlist, List *sortClause);
+extern void transformOnConflictArbiter(ParseState *pstate,
+ OnConflictClause *onConflictClause,
+ List **arbiterExpr, Node **arbiterWhere,
+ Oid *constraint);
extern List *addTargetToSortList(ParseState *pstate, TargetEntry *tle,
List *sortlist, List *targetlist, SortBy *sortby,
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 6a5528a734..928b1ca170 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -43,6 +43,11 @@ typedef struct ReorderBufferTupleBuf
* and ComboCids in the same list with the user visible INSERT/UPDATE/DELETE
* changes. Users of the decoding facilities will never see changes with
* *_INTERNAL_* actions.
+ *
+ * The INTERNAL_SPEC_INSERT and INTERNAL_SPEC_CONFIRM changes concern
+ * "speculative insertions", and their confirmation respectively. They're
+ * used by INSERT .. ON CONFLICT .. UPDATE. Users of logical decoding don't
+ * have to care about these.
*/
enum ReorderBufferChangeType
{
@@ -51,7 +56,9 @@ enum ReorderBufferChangeType
REORDER_BUFFER_CHANGE_DELETE,
REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT,
REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID,
- REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID
+ REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID,
+ REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT,
+ REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM
};
/*
diff --git a/src/include/rewrite/rowsecurity.h b/src/include/rewrite/rowsecurity.h
index 115c9a8e43..eb4b20559f 100644
--- a/src/include/rewrite/rowsecurity.h
+++ b/src/include/rewrite/rowsecurity.h
@@ -41,7 +41,8 @@ extern PGDLLIMPORT row_security_policy_hook_type row_security_policy_hook_permis
extern PGDLLIMPORT row_security_policy_hook_type row_security_policy_hook_restrictive;
-extern void get_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index,
+extern void get_row_security_policies(Query *root, CmdType commandType,
+ RangeTblEntry *rte, int rt_index,
List **securityQuals, List **withCheckOptions,
bool *hasRowSecurity, bool *hasSubLinks);
diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h
index f5d70e5141..7cc75fc106 100644
--- a/src/include/storage/lmgr.h
+++ b/src/include/storage/lmgr.h
@@ -76,6 +76,11 @@ extern bool ConditionalXactLockTableWait(TransactionId xid);
extern void WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode);
extern void WaitForLockersMultiple(List *locktags, LOCKMODE lockmode);
+/* Lock an XID for tuple insertion (used to wait for an insertion to finish) */
+extern uint32 SpeculativeInsertionLockAcquire(TransactionId xid);
+extern void SpeculativeInsertionLockRelease(TransactionId xid);
+extern void SpeculativeInsertionWait(TransactionId xid, uint32 token);
+
/* Lock a general object (other than a relation) of the current database */
extern void LockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
LOCKMODE lockmode);
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index dae517f3fe..b4eb1b4a9e 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -176,6 +176,8 @@ typedef enum LockTagType
/* ID info for a transaction is its TransactionId */
LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
/* ID info for a virtual transaction is its VirtualTransactionId */
+ LOCKTAG_SPECULATIVE_TOKEN, /* speculative insertion Xid and token */
+ /* ID info for a transaction is its TransactionId */
LOCKTAG_OBJECT, /* non-relation database object */
/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
@@ -261,6 +263,14 @@ typedef struct LOCKTAG
(locktag).locktag_type = LOCKTAG_VIRTUALTRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
+#define SET_LOCKTAG_SPECULATIVE_INSERTION(locktag,xid,token) \
+ ((locktag).locktag_field1 = (xid), \
+ (locktag).locktag_field2 = (token), \
+ (locktag).locktag_field3 = 0, \
+ (locktag).locktag_field4 = 0, \
+ (locktag).locktag_type = LOCKTAG_SPECULATIVE_TOKEN, \
+ (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
+
#define SET_LOCKTAG_OBJECT(locktag,dboid,classoid,objoid,objsubid) \
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (classoid), \
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index 26fb2573c7..a734bf0075 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -69,31 +69,41 @@ typedef struct SnapshotData
* progress, unless the snapshot was taken during recovery in which case
* it's empty. For historic MVCC snapshots, the meaning is inverted, i.e.
* it contains *committed* transactions between xmin and xmax.
+ *
+ * note: all ids in xip[] satisfy xmin <= xip[i] < xmax
*/
TransactionId *xip;
uint32 xcnt; /* # of xact ids in xip[] */
- /* note: all ids in xip[] satisfy xmin <= xip[i] < xmax */
- int32 subxcnt; /* # of xact ids in subxip[] */
/*
* For non-historic MVCC snapshots, this contains subxact IDs that are in
* progress (and other transactions that are in progress if taken during
* recovery). For historic snapshot it contains *all* xids assigned to the
* replayed transaction, including the toplevel xid.
+ *
+ * note: all ids in subxip[] are >= xmin, but we don't bother filtering
+ * out any that are >= xmax
*/
TransactionId *subxip;
+ int32 subxcnt; /* # of xact ids in subxip[] */
bool suboverflowed; /* has the subxip array overflowed? */
+
bool takenDuringRecovery; /* recovery-shaped snapshot? */
bool copied; /* false if it's a static snapshot */
+ CommandId curcid; /* in my xact, CID < curcid are visible */
+
/*
- * note: all ids in subxip[] are >= xmin, but we don't bother filtering
- * out any that are >= xmax
+ * An extra return value for HeapTupleSatisfiesDirty, not used in MVCC
+ * snapshots.
+ */
+ uint32 speculativeToken;
+
+ /*
+ * Book-keeping information, used by the snapshot manager
*/
- CommandId curcid; /* in my xact, CID < curcid are visible */
uint32 active_count; /* refcount on ActiveSnapshot stack */
uint32 regd_count; /* refcount on RegisteredSnapshots */
-
pairingheap_node ph_node; /* link in the RegisteredSnapshots heap */
} SnapshotData;
diff --git a/src/test/isolation/expected/insert-conflict-do-nothing.out b/src/test/isolation/expected/insert-conflict-do-nothing.out
new file mode 100644
index 0000000000..0a0958f034
--- /dev/null
+++ b/src/test/isolation/expected/insert-conflict-do-nothing.out
@@ -0,0 +1,23 @@
+Parsed test spec with 2 sessions
+
+starting permutation: donothing1 donothing2 c1 select2 c2
+step donothing1: INSERT INTO ints(key, val) VALUES(1, 'donothing1') ON CONFLICT DO NOTHING;
+step donothing2: INSERT INTO ints(key, val) VALUES(1, 'donothing2') ON CONFLICT DO NOTHING;
+step c1: COMMIT;
+step donothing2: <... completed>
+step select2: SELECT * FROM ints;
+key val
+
+1 donothing1
+step c2: COMMIT;
+
+starting permutation: donothing1 donothing2 a1 select2 c2
+step donothing1: INSERT INTO ints(key, val) VALUES(1, 'donothing1') ON CONFLICT DO NOTHING;
+step donothing2: INSERT INTO ints(key, val) VALUES(1, 'donothing2') ON CONFLICT DO NOTHING;
+step a1: ABORT;
+step donothing2: <... completed>
+step select2: SELECT * FROM ints;
+key val
+
+1 donothing2
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/insert-conflict-do-update-2.out b/src/test/isolation/expected/insert-conflict-do-update-2.out
new file mode 100644
index 0000000000..05fb06f8d8
--- /dev/null
+++ b/src/test/isolation/expected/insert-conflict-do-update-2.out
@@ -0,0 +1,23 @@
+Parsed test spec with 2 sessions
+
+starting permutation: insert1 insert2 c1 select2 c2
+step insert1: INSERT INTO upsert(key, payload) VALUES('FooFoo', 'insert1') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert1';
+step insert2: INSERT INTO upsert(key, payload) VALUES('FOOFOO', 'insert2') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert2';
+step c1: COMMIT;
+step insert2: <... completed>
+step select2: SELECT * FROM upsert;
+key payload
+
+FOOFOO insert1 updated by insert2
+step c2: COMMIT;
+
+starting permutation: insert1 insert2 a1 select2 c2
+step insert1: INSERT INTO upsert(key, payload) VALUES('FooFoo', 'insert1') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert1';
+step insert2: INSERT INTO upsert(key, payload) VALUES('FOOFOO', 'insert2') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert2';
+step a1: ABORT;
+step insert2: <... completed>
+step select2: SELECT * FROM upsert;
+key payload
+
+FOOFOO insert2
+step c2: COMMIT;
diff --git a/src/test/isolation/expected/insert-conflict-do-update-3.out b/src/test/isolation/expected/insert-conflict-do-update-3.out
new file mode 100644
index 0000000000..6600410618
--- /dev/null
+++ b/src/test/isolation/expected/insert-conflict-do-update-3.out
@@ -0,0 +1,26 @@
+Parsed test spec with 2 sessions
+
+starting permutation: update2 insert1 c2 select1surprise c1
+step update2: UPDATE colors SET is_active = true WHERE key = 1;
+step insert1:
+ WITH t AS (
+ INSERT INTO colors(key, color, is_active)
+ VALUES(1, 'Brown', true), (2, 'Gray', true)
+ ON CONFLICT (key) DO UPDATE
+ SET color = EXCLUDED.color
+ WHERE colors.is_active)
+ SELECT * FROM colors ORDER BY key;
+step c2: COMMIT;
+step insert1: <... completed>
+key color is_active
+
+1 Red f
+2 Green f
+3 Blue f
+step select1surprise: SELECT * FROM colors ORDER BY key;
+key color is_active
+
+1 Brown t
+2 Green f
+3 Blue f
+step c1: COMMIT;
diff --git a/src/test/isolation/expected/insert-conflict-do-update.out b/src/test/isolation/expected/insert-conflict-do-update.out
new file mode 100644
index 0000000000..a634918784
--- /dev/null
+++ b/src/test/isolation/expected/insert-conflict-do-update.out
@@ -0,0 +1,23 @@
+Parsed test spec with 2 sessions
+
+starting permutation: insert1 insert2 c1 select2 c2
+step insert1: INSERT INTO upsert(key, val) VALUES(1, 'insert1') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert1';
+step insert2: INSERT INTO upsert(key, val) VALUES(1, 'insert2') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert2';
+step c1: COMMIT;
+step insert2: <... completed>
+step select2: SELECT * FROM upsert;
+key val
+
+1 insert1 updated by insert2
+step c2: COMMIT;
+
+starting permutation: insert1 insert2 a1 select2 c2
+step insert1: INSERT INTO upsert(key, val) VALUES(1, 'insert1') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert1';
+step insert2: INSERT INTO upsert(key, val) VALUES(1, 'insert2') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert2';
+step a1: ABORT;
+step insert2: <... completed>
+step select2: SELECT * FROM upsert;
+key val
+
+1 insert2
+step c2: COMMIT;
diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule
index 3e2614ecac..c0ed637cd2 100644
--- a/src/test/isolation/isolation_schedule
+++ b/src/test/isolation/isolation_schedule
@@ -16,6 +16,10 @@ test: fk-deadlock2
test: eval-plan-qual
test: lock-update-delete
test: lock-update-traversal
+test: insert-conflict-do-nothing
+test: insert-conflict-do-update
+test: insert-conflict-do-update-2
+test: insert-conflict-do-update-3
test: delete-abort-savept
test: delete-abort-savept-2
test: aborted-keyrevoke
diff --git a/src/test/isolation/specs/insert-conflict-do-nothing.spec b/src/test/isolation/specs/insert-conflict-do-nothing.spec
new file mode 100644
index 0000000000..9b92c35cec
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-nothing.spec
@@ -0,0 +1,41 @@
+# INSERT...ON CONFLICT DO NOTHING test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions during INSERT...ON CONFLICT DO NOTHING.
+#
+# The convention here is that session 1 always ends up inserting, and session 2
+# always ends up doing nothing.
+
+setup
+{
+ CREATE TABLE ints (key int primary key, val text);
+}
+
+teardown
+{
+ DROP TABLE ints;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "donothing1" { INSERT INTO ints(key, val) VALUES(1, 'donothing1') ON CONFLICT DO NOTHING; }
+step "c1" { COMMIT; }
+step "a1" { ABORT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "donothing2" { INSERT INTO ints(key, val) VALUES(1, 'donothing2') ON CONFLICT DO NOTHING; }
+step "select2" { SELECT * FROM ints; }
+step "c2" { COMMIT; }
+step "a2" { ABORT; }
+
+# Regular case where one session block-waits on another to determine if it
+# should proceed with an insert or do nothing.
+permutation "donothing1" "donothing2" "c1" "select2" "c2"
+permutation "donothing1" "donothing2" "a1" "select2" "c2"
diff --git a/src/test/isolation/specs/insert-conflict-do-update-2.spec b/src/test/isolation/specs/insert-conflict-do-update-2.spec
new file mode 100644
index 0000000000..cd7e3f42fe
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update-2.spec
@@ -0,0 +1,41 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# This test shows a plausible scenario in which the user might wish to UPDATE a
+# value that is also constrained by the unique index that is the arbiter of
+# whether the alternative path should be taken.
+
+setup
+{
+ CREATE TABLE upsert (key text not null, payload text);
+ CREATE UNIQUE INDEX ON upsert(lower(key));
+}
+
+teardown
+{
+ DROP TABLE upsert;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "insert1" { INSERT INTO upsert(key, payload) VALUES('FooFoo', 'insert1') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert1'; }
+step "c1" { COMMIT; }
+step "a1" { ABORT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "insert2" { INSERT INTO upsert(key, payload) VALUES('FOOFOO', 'insert2') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert2'; }
+step "select2" { SELECT * FROM upsert; }
+step "c2" { COMMIT; }
+step "a2" { ABORT; }
+
+# One session (session 2) block-waits on another (session 1) to determine if it
+# should proceed with an insert or update. The user can still usefully UPDATE
+# a column constrained by a unique index, as the example illustrates.
+permutation "insert1" "insert2" "c1" "select2" "c2"
+permutation "insert1" "insert2" "a1" "select2" "c2"
diff --git a/src/test/isolation/specs/insert-conflict-do-update-3.spec b/src/test/isolation/specs/insert-conflict-do-update-3.spec
new file mode 100644
index 0000000000..e282c3beca
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update-3.spec
@@ -0,0 +1,69 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# Other INSERT...ON CONFLICT DO UPDATE isolation tests illustrate the "MVCC
+# violation" added to facilitate the feature, whereby a
+# not-visible-to-our-snapshot tuple can be updated by our command all the same.
+# This is generally needed to provide a guarantee of a successful INSERT or
+# UPDATE in READ COMMITTED mode. This MVCC violation is quite distinct from
+# the putative "MVCC violation" that has existed in PostgreSQL for many years,
+# the EvalPlanQual() mechanism, because that mechanism always starts from a
+# tuple that is visible to the command's MVCC snapshot. This test illustrates
+# a slightly distinct user-visible consequence of the same MVCC violation
+# generally associated with INSERT...ON CONFLICT DO UPDATE. The impact of the
+# MVCC violation goes a little beyond updating MVCC-invisible tuples.
+#
+# With INSERT...ON CONFLICT DO UPDATE, the UPDATE predicate is only evaluated
+# once, on this conclusively-locked tuple, and not any other version of the
+# same tuple. It is therefore possible (in READ COMMITTED mode) that the
+# predicate "fail to be satisfied" according to the command's MVCC snapshot.
+# It might simply be that there is no row version visible, but it's also
+# possible that there is some row version visible, but only as a version that
+# doesn't satisfy the predicate. If, however, the conclusively-locked version
+# satisfies the predicate, that's good enough, and the tuple is updated. The
+# MVCC-snapshot-visible row version is denied the opportunity to prevent the
+# UPDATE from taking place, because we don't walk the UPDATE chain in the usual
+# way.
+
+setup
+{
+ CREATE TABLE colors (key int4 PRIMARY KEY, color text, is_active boolean);
+ INSERT INTO colors (key, color, is_active) VALUES(1, 'Red', false);
+ INSERT INTO colors (key, color, is_active) VALUES(2, 'Green', false);
+ INSERT INTO colors (key, color, is_active) VALUES(3, 'Blue', false);
+}
+
+teardown
+{
+ DROP TABLE colors;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "insert1" {
+ WITH t AS (
+ INSERT INTO colors(key, color, is_active)
+ VALUES(1, 'Brown', true), (2, 'Gray', true)
+ ON CONFLICT (key) DO UPDATE
+ SET color = EXCLUDED.color
+ WHERE colors.is_active)
+ SELECT * FROM colors ORDER BY key;}
+step "select1surprise" { SELECT * FROM colors ORDER BY key; }
+step "c1" { COMMIT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "update2" { UPDATE colors SET is_active = true WHERE key = 1; }
+step "c2" { COMMIT; }
+
+# Perhaps surprisingly, the session 1 MVCC-snapshot-visible tuple (the tuple
+# with the pre-populated color 'Red') is denied the opportunity to prevent the
+# UPDATE from taking place -- only the conclusively-locked tuple version
+# matters, and so the tuple with key value 1 was updated to 'Brown' (but not
+# tuple with key value 2, since nothing changed there):
+permutation "update2" "insert1" "c2" "select1surprise" "c1"
diff --git a/src/test/isolation/specs/insert-conflict-do-update.spec b/src/test/isolation/specs/insert-conflict-do-update.spec
new file mode 100644
index 0000000000..5d335a3444
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update.spec
@@ -0,0 +1,40 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions.
+
+setup
+{
+ CREATE TABLE upsert (key int primary key, val text);
+}
+
+teardown
+{
+ DROP TABLE upsert;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "insert1" { INSERT INTO upsert(key, val) VALUES(1, 'insert1') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert1'; }
+step "c1" { COMMIT; }
+step "a1" { ABORT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "insert2" { INSERT INTO upsert(key, val) VALUES(1, 'insert2') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert2'; }
+step "select2" { SELECT * FROM upsert; }
+step "c2" { COMMIT; }
+step "a2" { ABORT; }
+
+# One session (session 2) block-waits on another (session 1) to determine if it
+# should proceed with an insert or update. Notably, this entails updating a
+# tuple while there is no version of that tuple visible to the updating
+# session's snapshot. This is permitted only in READ COMMITTED mode.
+permutation "insert1" "insert2" "c1" "select2" "c2"
+permutation "insert1" "insert2" "a1" "select2" "c2"
diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out
index 5f8868da26..210e5ff39c 100644
--- a/src/test/regress/expected/errors.out
+++ b/src/test/regress/expected/errors.out
@@ -32,7 +32,9 @@ LINE 1: select nonesuch from pg_database;
^
-- empty distinct list isn't OK
select distinct from pg_database;
-ERROR: SELECT DISTINCT must have at least one column
+ERROR: syntax error at or near "from"
+LINE 1: select distinct from pg_database;
+ ^
-- bad attribute name on lhs of operator
select * from pg_database where nonesuch = pg_database.datname;
ERROR: column "nonesuch" does not exist
diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out
new file mode 100644
index 0000000000..3273d98793
--- /dev/null
+++ b/src/test/regress/expected/insert_conflict.out
@@ -0,0 +1,476 @@
+--
+-- insert...on conflict do unique index inference
+--
+create table insertconflicttest(key int4, fruit text);
+--
+-- Test unique index inference with operator class specifications and
+-- named collations
+--
+create unique index op_index_key on insertconflicttest(key, fruit text_pattern_ops);
+create unique index collation_index_key on insertconflicttest(key, fruit collate "C");
+create unique index both_index_key on insertconflicttest(key, fruit collate "C" text_pattern_ops);
+create unique index both_index_expr_key on insertconflicttest(key, lower(fruit) collate "C" text_pattern_ops);
+-- fails
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do nothing;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit) do nothing;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+-- succeeds
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do nothing;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key
+ -> Result
+(4 rows)
+
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit, key) do nothing;
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key
+ -> Result
+(4 rows)
+
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit), key, lower(fruit), key) do nothing;
+ QUERY PLAN
+-------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: both_index_expr_key
+ -> Result
+(4 rows)
+
+-- Neither collation nor operator class specifications are required --
+-- supplying them merely *limits* matches to indexes with matching opclasses
+-- used for relevant indexes
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit text_pattern_ops) do nothing;
+ QUERY PLAN
+----------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: op_index_key, both_index_key
+ -> Result
+(4 rows)
+
+-- Okay, arbitrates using both index where text_pattern_ops opclass does and
+-- does not appear.
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit collate "C") do nothing;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: collation_index_key, both_index_key
+ -> Result
+(4 rows)
+
+-- Okay, but only accepts the single index where both opclass and collation are
+-- specified
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit collate "C" text_pattern_ops, key) do nothing;
+ QUERY PLAN
+--------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: both_index_key
+ -> Result
+(4 rows)
+
+-- Okay, but only accepts the single index where both opclass and collation are
+-- specified (plus expression variant)
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", key, key) do nothing;
+ QUERY PLAN
+-------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: both_index_expr_key
+ -> Result
+(4 rows)
+
+-- Attribute appears twice, while not all attributes/expressions on attributes
+-- appearing within index definition match in terms of both opclass and
+-- collation.
+--
+-- Works because every attribute in inference specification needs to be
+-- satisfied once or more by cataloged index attribute, and as always when an
+-- attribute in the cataloged definition has a non-default opclass/collation,
+-- it still satisfied some inference attribute lacking any particular
+-- opclass/collation specification.
+--
+-- The implementation is liberal in accepting inference specifications on the
+-- assumption that multiple inferred unique indexes will prevent problematic
+-- cases. It rolls with unique indexes where attributes redundantly appear
+-- multiple times, too (which is not tested here).
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit text_pattern_ops, key) do nothing;
+ QUERY PLAN
+----------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: op_index_key, both_index_key
+ -> Result
+(4 rows)
+
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C" text_pattern_ops, key, key) do nothing;
+ QUERY PLAN
+-------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: NOTHING
+ Conflict Arbiter Indexes: both_index_expr_key
+ -> Result
+(4 rows)
+
+drop index op_index_key;
+drop index collation_index_key;
+drop index both_index_key;
+drop index both_index_expr_key;
+--
+-- Single key tests
+--
+create unique index key_index on insertconflicttest(key);
+--
+-- Explain tests
+--
+explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit;
+ QUERY PLAN
+---------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: key_index
+ -> Result
+(4 rows)
+
+-- Should display qual actually attributable to internal sequential scan:
+explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Cawesh';
+ QUERY PLAN
+-----------------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: key_index
+ Conflict Filter: (insertconflicttest.fruit <> 'Cawesh'::text)
+ -> Result
+(5 rows)
+
+-- With EXCLUDED.* expression in scan node:
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do update set fruit = excluded.fruit where excluded.fruit != 'Elderberry';
+ QUERY PLAN
+-----------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: key_index
+ Conflict Filter: (excluded.fruit <> 'Elderberry'::text)
+ -> Result
+(5 rows)
+
+-- Does the same, but JSON format shows "Conflict Arbiter Index" as JSON array:
+explain (costs off, format json) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Lime' returning *;
+ QUERY PLAN
+------------------------------------------------------------------------
+ [ +
+ { +
+ "Plan": { +
+ "Node Type": "ModifyTable", +
+ "Operation": "Insert", +
+ "Relation Name": "insertconflicttest", +
+ "Alias": "insertconflicttest", +
+ "Conflict Resolution": "UPDATE", +
+ "Conflict Arbiter Indexes": ["key_index"], +
+ "Conflict Filter": "(insertconflicttest.fruit <> 'Lime'::text)",+
+ "Plans": [ +
+ { +
+ "Node Type": "Result", +
+ "Parent Relationship": "Member" +
+ } +
+ ] +
+ } +
+ } +
+ ]
+(1 row)
+
+-- Fails (no unique index inference specification, required for do update variant):
+insert into insertconflicttest values (1, 'Apple') on conflict do update set fruit = excluded.fruit;
+ERROR: ON CONFLICT DO UPDATE requires inference specification or constraint name
+LINE 1: ...nsert into insertconflicttest values (1, 'Apple') on conflic...
+ ^
+HINT: For example, ON CONFLICT ON CONFLICT ().
+-- inference succeeds:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (2, 'Orange') on conflict (key, key, key) do update set fruit = excluded.fruit;
+-- Succeed, since multi-assignment does not involve subquery:
+insert into insertconflicttest
+values (1, 'Apple'), (2, 'Orange')
+on conflict (key) do update set (fruit, key) = (excluded.fruit, excluded.key);
+-- Give good diagnostic message when EXCLUDED.* spuriously referenced from
+-- RETURNING:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit RETURNING excluded.fruit;
+ERROR: invalid reference to FROM-clause entry for table "excluded"
+LINE 1: ...y) do update set fruit = excluded.fruit RETURNING excluded.f...
+ ^
+HINT: There is an entry for table "excluded", but it cannot be referenced from this part of the query.
+-- Only suggest .* column when inference element misspelled:
+insert into insertconflicttest values (1, 'Apple') on conflict (keyy) do update set fruit = excluded.fruit;
+ERROR: column "keyy" does not exist
+LINE 1: ...nsertconflicttest values (1, 'Apple') on conflict (keyy) do ...
+ ^
+HINT: Perhaps you meant to reference the column "insertconflicttest"."key".
+-- Have useful HINT for EXCLUDED.* RTE within UPDATE:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruitt;
+ERROR: column excluded.fruitt does not exist
+LINE 1: ... 'Apple') on conflict (key) do update set fruit = excluded.f...
+ ^
+HINT: Perhaps you meant to reference the column "excluded"."fruit".
+-- inference fails:
+insert into insertconflicttest values (3, 'Kiwi') on conflict (key, fruit) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (4, 'Mango') on conflict (fruit, key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (5, 'Lemon') on conflict (fruit) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (6, 'Passionfruit') on conflict (lower(fruit)) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+-- Check the target relation can be aliased
+insert into insertconflicttest values (6, 'Passionfruits') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = excluded.fruit; -- ok, no reference to target table
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = ict.fruit; -- ok, alias
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = insertconflicttest.fruit; -- error, references aliased away name
+ERROR: invalid reference to FROM-clause entry for table "insertconflicttest"
+LINE 1: ...onfruit') on conflict (key) do update set fruit = insertconf...
+ ^
+HINT: Perhaps you meant to reference the table alias "ict".
+drop index key_index;
+--
+-- Composite key tests
+--
+create unique index comp_key_index on insertconflicttest(key, fruit);
+-- inference succeeds:
+insert into insertconflicttest values (7, 'Raspberry') on conflict (key, fruit) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (8, 'Lime') on conflict (fruit, key) do update set fruit = excluded.fruit;
+-- inference fails:
+insert into insertconflicttest values (9, 'Banana') on conflict (key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (10, 'Blueberry') on conflict (key, key, key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (11, 'Cherry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+drop index comp_key_index;
+--
+-- Partial index tests, no inference predicate specificied
+--
+create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
+create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
+-- inference fails:
+insert into insertconflicttest values (13, 'Grape') on conflict (key, fruit) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (14, 'Raisin') on conflict (fruit, key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (15, 'Cranberry') on conflict (key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (16, 'Melon') on conflict (key, key, key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (17, 'Mulberry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (18, 'Pineapple') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+drop index part_comp_key_index;
+drop index expr_part_comp_key_index;
+--
+-- Expression index tests
+--
+create unique index expr_key_index on insertconflicttest(lower(fruit));
+-- inference succeeds:
+insert into insertconflicttest values (20, 'Quince') on conflict (lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (21, 'Pomegranate') on conflict (lower(fruit), lower(fruit)) do update set fruit = excluded.fruit;
+-- inference fails:
+insert into insertconflicttest values (22, 'Apricot') on conflict (upper(fruit)) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+drop index expr_key_index;
+--
+-- Expression index tests (with regular column)
+--
+create unique index expr_comp_key_index on insertconflicttest(key, lower(fruit));
+create unique index tricky_expr_comp_key_index on insertconflicttest(key, lower(fruit), upper(fruit));
+-- inference succeeds:
+insert into insertconflicttest values (24, 'Plum') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (25, 'Peach') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+-- Should not infer "tricky_expr_comp_key_index" index:
+explain (costs off) insert into insertconflicttest values (26, 'Fig') on conflict (lower(fruit), key, lower(fruit), key) do update set fruit = excluded.fruit;
+ QUERY PLAN
+-------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: expr_comp_key_index
+ -> Result
+(4 rows)
+
+-- inference fails:
+insert into insertconflicttest values (27, 'Prune') on conflict (key, upper(fruit)) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (28, 'Redcurrant') on conflict (fruit, key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (29, 'Nectarine') on conflict (key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+drop index expr_comp_key_index;
+drop index tricky_expr_comp_key_index;
+--
+-- Non-spurious duplicate violation tests
+--
+create unique index key_index on insertconflicttest(key);
+create unique index fruit_index on insertconflicttest(fruit);
+-- succeeds, since UPDATE happens to update "fruit" to existing value:
+insert into insertconflicttest values (26, 'Fig') on conflict (key) do update set fruit = excluded.fruit;
+-- fails, since UPDATE is to row with key value 26, and we're updating "fruit"
+-- to a value that happens to exist in another row ('peach'):
+insert into insertconflicttest values (26, 'Peach') on conflict (key) do update set fruit = excluded.fruit;
+ERROR: duplicate key value violates unique constraint "fruit_index"
+DETAIL: Key (fruit)=(Peach) already exists.
+-- succeeds, since "key" isn't repeated/referenced in UPDATE, and "fruit"
+-- arbitrates that statement updates existing "Fig" row:
+insert into insertconflicttest values (25, 'Fig') on conflict (fruit) do update set fruit = excluded.fruit;
+drop index key_index;
+drop index fruit_index;
+--
+-- Test partial unique index inference
+--
+create unique index partial_key_index on insertconflicttest(key) where fruit like '%berry';
+-- Succeeds
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' do update set fruit = excluded.fruit;
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' and fruit = 'inconsequential' do nothing;
+-- fails
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' or fruit = 'consequential' do nothing;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) where fruit like '%berry' do update set fruit = excluded.fruit;
+ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
+drop index partial_key_index;
+-- Cleanup
+drop table insertconflicttest;
+-- ******************************************************************
+-- * *
+-- * Test inheritance (example taken from tutorial) *
+-- * *
+-- ******************************************************************
+create table cities (
+ name text,
+ population float8,
+ altitude int -- (in ft)
+);
+create table capitals (
+ state char(2)
+) inherits (cities);
+-- Create unique indexes. Due to a general limitation of inheritance,
+-- uniqueness is only enforced per-relation. Unique index inference
+-- specification will do the right thing, though.
+create unique index cities_names_unique on cities (name);
+create unique index capitals_names_unique on capitals (name);
+-- prepopulate the tables.
+insert into cities values ('San Francisco', 7.24E+5, 63);
+insert into cities values ('Las Vegas', 2.583E+5, 2174);
+insert into cities values ('Mariposa', 1200, 1953);
+insert into capitals values ('Sacramento', 3.694E+5, 30, 'CA');
+insert into capitals values ('Madison', 1.913E+5, 845, 'WI');
+-- Tests proper for inheritance:
+select * from capitals;
+ name | population | altitude | state
+------------+------------+----------+-------
+ Sacramento | 369400 | 30 | CA
+ Madison | 191300 | 845 | WI
+(2 rows)
+
+-- Succeeds:
+insert into cities values ('Las Vegas', 2.583E+5, 2174) on conflict do nothing;
+insert into capitals values ('Sacramento', 4664.E+5, 30, 'CA') on conflict (name) do update set population = excluded.population;
+-- Wrong "Sacramento", so do nothing:
+insert into capitals values ('Sacramento', 50, 2267, 'NE') on conflict (name) do nothing;
+select * from capitals;
+ name | population | altitude | state
+------------+------------+----------+-------
+ Madison | 191300 | 845 | WI
+ Sacramento | 466400000 | 30 | CA
+(2 rows)
+
+insert into cities values ('Las Vegas', 5.83E+5, 2001) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude;
+select tableoid::regclass, * from cities;
+ tableoid | name | population | altitude
+----------+---------------+------------+----------
+ cities | San Francisco | 724000 | 63
+ cities | Mariposa | 1200 | 1953
+ cities | Las Vegas | 583000 | 2001
+ capitals | Madison | 191300 | 845
+ capitals | Sacramento | 466400000 | 30
+(5 rows)
+
+insert into capitals values ('Las Vegas', 5.83E+5, 2222, 'NV') on conflict (name) do update set population = excluded.population;
+-- Capitals will contain new capital, Las Vegas:
+select * from capitals;
+ name | population | altitude | state
+------------+------------+----------+-------
+ Madison | 191300 | 845 | WI
+ Sacramento | 466400000 | 30 | CA
+ Las Vegas | 583000 | 2222 | NV
+(3 rows)
+
+-- Cities contains two instances of "Las Vegas", since unique constraints don't
+-- work across inheritance:
+select tableoid::regclass, * from cities;
+ tableoid | name | population | altitude
+----------+---------------+------------+----------
+ cities | San Francisco | 724000 | 63
+ cities | Mariposa | 1200 | 1953
+ cities | Las Vegas | 583000 | 2001
+ capitals | Madison | 191300 | 845
+ capitals | Sacramento | 466400000 | 30
+ capitals | Las Vegas | 583000 | 2222
+(6 rows)
+
+-- This only affects "cities" version of "Las Vegas":
+insert into cities values ('Las Vegas', 5.86E+5, 2223) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude;
+select tableoid::regclass, * from cities;
+ tableoid | name | population | altitude
+----------+---------------+------------+----------
+ cities | San Francisco | 724000 | 63
+ cities | Mariposa | 1200 | 1953
+ cities | Las Vegas | 586000 | 2223
+ capitals | Madison | 191300 | 845
+ capitals | Sacramento | 466400000 | 30
+ capitals | Las Vegas | 583000 | 2222
+(6 rows)
+
+-- clean up
+drop table capitals;
+drop table cities;
+-- Make sure a table named excluded is handled properly
+create table excluded(key int primary key, data text);
+insert into excluded values(1, '1');
+-- error, ambiguous
+insert into excluded values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *;
+ERROR: table reference "excluded" is ambiguous
+LINE 1: ...es(1, '2') on conflict (key) do update set data = excluded.d...
+ ^
+-- ok, aliased
+insert into excluded AS target values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *;
+ key | data
+-----+------
+ 1 | 2
+(1 row)
+
+-- ok, aliased
+insert into excluded AS target values(1, '2') on conflict (key) do update set data = target.data RETURNING *;
+ key | data
+-----+------
+ 1 | 2
+(1 row)
+
+-- make sure excluded isn't a problem in returning clause
+insert into excluded values(1, '2') on conflict (key) do update set data = 3 RETURNING excluded.*;
+ key | data
+-----+------
+ 1 | 3
+(1 row)
+
+-- clean up
+drop table excluded;
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index 0db1df3040..64a93309eb 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -269,7 +269,7 @@ SELECT * FROM atestv2; -- fail (even though regressuser2 can access underlying a
ERROR: permission denied for relation atest2
-- Test column level permissions
SET SESSION AUTHORIZATION regressuser1;
-CREATE TABLE atest5 (one int, two int, three int);
+CREATE TABLE atest5 (one int, two int unique, three int, four int unique);
CREATE TABLE atest6 (one int, two int, blue int);
GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regressuser4;
GRANT ALL (one) ON atest5 TO regressuser3;
@@ -367,6 +367,33 @@ UPDATE atest5 SET one = 8; -- fail
ERROR: permission denied for relation atest5
UPDATE atest5 SET three = 5, one = 2; -- fail
ERROR: permission denied for relation atest5
+-- Check that column level privs are enforced in RETURNING
+-- Ok.
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10;
+-- Error. No SELECT on column three.
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.three;
+ERROR: permission denied for relation atest5
+-- Ok. May SELECT on column "one":
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.one;
+ one
+-----
+
+(1 row)
+
+-- Check that column level privileges are enforced for EXCLUDED
+-- Ok. we may select one
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.one;
+-- Error. No select rights on three
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three;
+ERROR: permission denied for relation atest5
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE)
+ERROR: permission denied for relation atest5
+INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT)
+ERROR: permission denied for relation atest5
+-- Check that the the columns in the inference require select privileges
+-- Error. No privs on four
+INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10;
+ERROR: permission denied for relation atest5
SET SESSION AUTHORIZATION regressuser1;
REVOKE ALL (one) ON atest5 FROM regressuser4;
GRANT SELECT (one,two,blue) ON atest6 TO regressuser4;
diff --git a/src/test/regress/expected/returning.out b/src/test/regress/expected/returning.out
index 69bdacc103..cb51bb8687 100644
--- a/src/test/regress/expected/returning.out
+++ b/src/test/regress/expected/returning.out
@@ -331,3 +331,27 @@ SELECT * FROM voo;
17 | zoo2
(2 rows)
+-- Check aliased target relation
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING *; -- ok
+ f1 | f2 | f3 | f4
+----+----+----+----
+ 4 | | 42 | 99
+(1 row)
+
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; -- fails, wrong name
+ERROR: invalid reference to FROM-clause entry for table "foo"
+LINE 1: INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*;
+ ^
+HINT: Perhaps you meant to reference the table alias "bar".
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.*; -- ok
+ f1 | f2 | f3 | f4
+----+----+----+----
+ 5 | | 42 | 99
+(1 row)
+
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.f3; -- ok
+ f3
+----
+ 42
+(1 row)
+
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index ad93632174..82bc47895a 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -1490,6 +1490,138 @@ SELECT * FROM b1;
4 | yyy
(21 rows)
+--
+-- INSERT ... ON CONFLICT DO UPDATE and Row-level security
+--
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p1 ON document;
+CREATE POLICY p1 ON document FOR SELECT USING (true);
+CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user);
+CREATE POLICY p3 ON document FOR UPDATE
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'))
+ WITH CHECK (dauthor = current_user);
+SET SESSION AUTHORIZATION rls_regress_user1;
+-- Exists...
+SELECT * FROM document WHERE did = 2;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------
+ 2 | 11 | 2 | rls_regress_user1 | my second novel
+(1 row)
+
+-- ...so violates actual WITH CHECK OPTION within UPDATE (not INSERT, since
+-- alternative UPDATE path happens to be taken):
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user2', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, dauthor = EXCLUDED.dauthor;
+ERROR: new row violates row level security policy for "document"
+-- Violates USING qual for UPDATE policy p3.
+--
+-- UPDATE path is taken, but UPDATE fails purely because *existing* row to be
+-- updated is not a "novel"/cid 11 (row is not leaked, even though we have
+-- SELECT privileges sufficient to see the row in this instance):
+INSERT INTO document VALUES (33, 22, 1, 'rls_regress_user1', 'okay science fiction'); -- preparation for next statement
+INSERT INTO document VALUES (33, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'Some novel, replaces sci-fi') -- takes UPDATE path
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle;
+ERROR: new row violates row level security policy (USING expression) for "document"
+-- Fine (we UPDATE, since INSERT WCOs and UPDATE security barrier quals + WCOs
+-- not violated):
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+----------------
+ 2 | 11 | 2 | rls_regress_user1 | my first novel
+(1 row)
+
+-- Fine (we INSERT, so "cid = 33" ("technology") isn't evaluated):
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------
+ 78 | 11 | 1 | rls_regress_user1 | some technology novel
+(1 row)
+
+-- Fine (same query, but we UPDATE, so "cid = 33", ("technology") is not the
+-- case in respect of *existing* tuple):
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+-----------------------
+ 78 | 33 | 1 | rls_regress_user1 | some technology novel
+(1 row)
+
+-- Same query a third time, but now fails due to existing tuple finally not
+-- passing quals:
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+ERROR: new row violates row level security policy (USING expression) for "document"
+-- Don't fail just because INSERT doesn't satisfy WITH CHECK option that
+-- originated as a barrier/USING() qual from the UPDATE. Note that the UPDATE
+-- path *isn't* taken, and so UPDATE-related policy does not apply:
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+ did | cid | dlevel | dauthor | dtitle
+-----+-----+--------+-------------------+----------------------------------
+ 79 | 33 | 1 | rls_regress_user1 | technology book, can only insert
+(1 row)
+
+-- But this time, the same statement fails, because the UPDATE path is taken,
+-- and updating the row just inserted falls afoul of security barrier qual
+-- (enforced as WCO) -- what we might have updated target tuple to is
+-- irrelevant, in fact.
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+ERROR: new row violates row level security policy (USING expression) for "document"
+-- Test default USING qual enforced as WCO
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p1 ON document;
+DROP POLICY p2 ON document;
+DROP POLICY p3 ON document;
+CREATE POLICY p3_with_default ON document FOR UPDATE
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'));
+SET SESSION AUTHORIZATION rls_regress_user1;
+-- Just because WCO-style enforcement of USING quals occurs with
+-- existing/target tuple does not mean that the implementation can be allowed
+-- to fail to also enforce this qual against the final tuple appended to
+-- relation (since in the absence of an explicit WCO, this is also interpreted
+-- as an UPDATE/ALL WCO in general).
+--
+-- UPDATE path is taken here (fails due to existing tuple). Note that this is
+-- not reported as a "USING expression", because it's an RLS UPDATE check that originated as
+-- a USING qual for the purposes of RLS in general, as opposed to an explicit
+-- USING qual that is ordinarily a security barrier. We leave it up to the
+-- UPDATE to make this fail:
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+ERROR: new row violates row level security policy for "document"
+-- UPDATE path is taken here. Existing tuple passes, since its cid
+-- corresponds to "novel", but default USING qual is enforced against
+-- post-UPDATE tuple too (as always when updating with a policy that lacks an
+-- explicit WCO), and so this fails:
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET cid = EXCLUDED.cid, dtitle = EXCLUDED.dtitle RETURNING *;
+ERROR: new row violates row level security policy for "document"
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p3_with_default ON document;
+--
+-- Test ALL policies with ON CONFLICT DO UPDATE (much the same as existing UPDATE
+-- tests)
+--
+CREATE POLICY p3_with_all ON document FOR ALL
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'))
+ WITH CHECK (dauthor = current_user);
+SET SESSION AUTHORIZATION rls_regress_user1;
+-- Fails, since ALL WCO is enforced in insert path:
+INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user2', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33;
+ERROR: new row violates row level security policy for "document"
+-- Fails, since ALL policy USING qual is enforced (existing, target tuple is in
+-- violation, since it has the "manga" cid):
+INSERT INTO document VALUES (4, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle;
+ERROR: new row violates row level security policy (USING expression) for "document"
+-- Fails, since ALL WCO are enforced:
+INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dauthor = 'rls_regress_user2';
+ERROR: new row violates row level security policy for "document"
--
-- ROLE/GROUP
--
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index f7f016be21..2df24c0de1 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1123,6 +1123,10 @@ SELECT * FROM shoelace_log ORDER BY sl_name;
SELECT * FROM shoelace_obsolete WHERE sl_avail = 0;
insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0);
insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0);
+-- Unsupported (even though a similar updatable view construct is)
+insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0)
+ on conflict do nothing;
+ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules
SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm;
sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm
------------+----------+------------+--------+----------+-----------
@@ -2362,6 +2366,22 @@ DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2".
insert into rule_and_refint_t3 values (1, 13, 11, 'row6');
ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_fkey"
DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1".
+-- Ordinary table
+insert into rule_and_refint_t3 values (1, 13, 11, 'row6')
+ on conflict do nothing;
+ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_fkey"
+DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1".
+-- rule not fired, so fk violation
+insert into rule_and_refint_t3 values (1, 13, 11, 'row6')
+ on conflict (id3a, id3b, id3c) do update
+ set id3b = excluded.id3b;
+ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_fkey"
+DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1".
+-- rule fired, so unsupported
+insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0)
+ on conflict (sl_name) do update
+ set sl_avail = excluded.sl_avail;
+ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules
create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3
where (exists (select 1 from rule_and_refint_t3
where (((rule_and_refint_t3.id3a = new.id3a)
@@ -2743,3 +2763,73 @@ View definition:
FROM ( VALUES (1,2)) v(q, w);
drop view rule_v1;
+--
+-- Check DO INSTEAD rules with ON CONFLICT
+--
+CREATE TABLE hats (
+ hat_name char(10) primary key,
+ hat_color char(10) -- hat color
+);
+CREATE TABLE hat_data (
+ hat_name char(10) primary key,
+ hat_color char(10) -- hat color
+);
+-- okay
+CREATE RULE hat_nosert AS ON INSERT TO hats
+ DO INSTEAD
+ INSERT INTO hat_data VALUES (
+ NEW.hat_name,
+ NEW.hat_color)
+ ON CONFLICT (hat_name) DO NOTHING RETURNING *;
+-- Works (projects row)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+ hat_name | hat_color
+------------+------------
+ h7 | black
+(1 row)
+
+-- Works (does nothing)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+ hat_name | hat_color
+----------+-----------
+(0 rows)
+
+SELECT tablename, rulename, definition FROM pg_rules
+ WHERE tablename = 'hats';
+ tablename | rulename | definition
+-----------+------------+------------------------------------------------------------------------------
+ hats | hat_nosert | CREATE RULE hat_nosert AS +
+ | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+
+ | | VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING +
+ | | RETURNING hat_data.hat_name, +
+ | | hat_data.hat_color;
+(1 row)
+
+DROP RULE hat_nosert ON hats;
+CREATE RULE hat_upsert AS ON INSERT TO hats
+ DO INSTEAD
+ INSERT INTO hat_data VALUES (
+ NEW.hat_name,
+ NEW.hat_color)
+ ON CONFLICT (hat_name) DO UPDATE SET hat_color = 'Orange' RETURNING *;
+-- Works (does upsert)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+ hat_name | hat_color
+------------+------------
+ h7 | Orange
+(1 row)
+
+SELECT tablename, rulename, definition FROM pg_rules
+ WHERE tablename = 'hats';
+ tablename | rulename | definition
+-----------+------------+-----------------------------------------------------------------------------------------------
+ hats | hat_upsert | CREATE RULE hat_upsert AS +
+ | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) +
+ | | VALUES (new.hat_name, new.hat_color) ON CONFLICT DO UPDATE SET hat_color = 'Orange'::bpchar+
+ | | RETURNING hat_data.hat_name, +
+ | | hat_data.hat_color;
+(1 row)
+
+DROP RULE hat_upsert ON hats;
+drop table hats;
+drop table hat_data;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index b14410fd22..de64ca7ec7 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -638,6 +638,28 @@ from
-----
(0 rows)
+--
+-- Test case for subselect within UPDATE of INSERT...ON CONFLICT DO UPDATE
+--
+create temp table upsert(key int4 primary key, val text);
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'not seen';
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'seen with subselect ' || (select f1 from int4_tbl where f1 != 0 limit 1)::text;
+select * from upsert;
+ key | val
+-----+----------------------------
+ 1 | seen with subselect 123456
+(1 row)
+
+with aa as (select 'int4_tbl' u from int4_tbl limit 1)
+insert into upsert values (1, 'x'), (999, 'y')
+on conflict (key) do update set val = (select u from aa)
+returning *;
+ key | val
+-----+----------
+ 1 | int4_tbl
+ 999 | y
+(2 rows)
+
--
-- Test case for cross-type partial matching in hashed subplan (bug #7597)
--
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index f1a5fde107..3b32e8fdfe 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -274,7 +274,7 @@ drop sequence ttdummy_seq;
-- tests for per-statement triggers
--
CREATE TABLE log_table (tstamp timestamp default timeofday()::timestamp);
-CREATE TABLE main_table (a int, b int);
+CREATE TABLE main_table (a int unique, b int);
COPY main_table (a,b) FROM stdin;
CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS '
BEGIN
@@ -291,6 +291,14 @@ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_ins_stmt');
--
CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_table
EXECUTE PROCEDURE trigger_func('after_upd_stmt');
+-- Both insert and update statement level triggers (before and after) should
+-- fire. Doesn't fire UPDATE before trigger, but only because one isn't
+-- defined.
+INSERT INTO main_table (a, b) VALUES (5, 10) ON CONFLICT (a)
+ DO UPDATE SET b = EXCLUDED.b;
+NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT
+NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT
+NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT
CREATE TRIGGER after_upd_row_trig AFTER UPDATE ON main_table
FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_row');
INSERT INTO main_table DEFAULT VALUES;
@@ -305,6 +313,8 @@ NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, lev
-- UPDATE that effects zero rows should still call per-statement trigger
UPDATE main_table SET a = a + 2 WHERE b > 100;
NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT
+-- constraint now unneeded
+ALTER TABLE main_table DROP CONSTRAINT main_table_a_key;
-- COPY should fire per-row and per-statement INSERT triggers
COPY main_table (a, b) FROM stdin;
NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT
@@ -1731,3 +1741,93 @@ select * from self_ref_trigger;
drop table self_ref_trigger;
drop function self_ref_trigger_ins_func();
drop function self_ref_trigger_del_func();
+--
+-- Verify behavior of before and after triggers with INSERT...ON CONFLICT
+-- DO UPDATE
+--
+create table upsert (key int4 primary key, color text);
+create function upsert_before_func()
+ returns trigger language plpgsql as
+$$
+begin
+ if (TG_OP = 'UPDATE') then
+ raise warning 'before update (old): %', old.*::text;
+ raise warning 'before update (new): %', new.*::text;
+ elsif (TG_OP = 'INSERT') then
+ raise warning 'before insert (new): %', new.*::text;
+ if new.key % 2 = 0 then
+ new.key := new.key + 1;
+ new.color := new.color || ' trig modified';
+ raise warning 'before insert (new, modified): %', new.*::text;
+ end if;
+ end if;
+ return new;
+end;
+$$;
+create trigger upsert_before_trig before insert or update on upsert
+ for each row execute procedure upsert_before_func();
+create function upsert_after_func()
+ returns trigger language plpgsql as
+$$
+begin
+ if (TG_OP = 'UPDATE') then
+ raise warning 'after update (old): %', new.*::text;
+ raise warning 'after update (new): %', new.*::text;
+ elsif (TG_OP = 'INSERT') then
+ raise warning 'after insert (new): %', new.*::text;
+ end if;
+ return null;
+end;
+$$;
+create trigger upsert_after_trig after insert or update on upsert
+ for each row execute procedure upsert_after_func();
+insert into upsert values(1, 'black') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (1,black)
+WARNING: after insert (new): (1,black)
+insert into upsert values(2, 'red') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (2,red)
+WARNING: before insert (new, modified): (3,"red trig modified")
+WARNING: after insert (new): (3,"red trig modified")
+insert into upsert values(3, 'orange') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (3,orange)
+WARNING: before update (old): (3,"red trig modified")
+WARNING: before update (new): (3,"updated red trig modified")
+WARNING: after update (old): (3,"updated red trig modified")
+WARNING: after update (new): (3,"updated red trig modified")
+insert into upsert values(4, 'green') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (4,green)
+WARNING: before insert (new, modified): (5,"green trig modified")
+WARNING: after insert (new): (5,"green trig modified")
+insert into upsert values(5, 'purple') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (5,purple)
+WARNING: before update (old): (5,"green trig modified")
+WARNING: before update (new): (5,"updated green trig modified")
+WARNING: after update (old): (5,"updated green trig modified")
+WARNING: after update (new): (5,"updated green trig modified")
+insert into upsert values(6, 'white') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (6,white)
+WARNING: before insert (new, modified): (7,"white trig modified")
+WARNING: after insert (new): (7,"white trig modified")
+insert into upsert values(7, 'pink') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (7,pink)
+WARNING: before update (old): (7,"white trig modified")
+WARNING: before update (new): (7,"updated white trig modified")
+WARNING: after update (old): (7,"updated white trig modified")
+WARNING: after update (new): (7,"updated white trig modified")
+insert into upsert values(8, 'yellow') on conflict (key) do update set color = 'updated ' || upsert.color;
+WARNING: before insert (new): (8,yellow)
+WARNING: before insert (new, modified): (9,"yellow trig modified")
+WARNING: after insert (new): (9,"yellow trig modified")
+select * from upsert;
+ key | color
+-----+-----------------------------
+ 1 | black
+ 3 | updated red trig modified
+ 5 | updated green trig modified
+ 7 | updated white trig modified
+ 9 | yellow trig modified
+(5 rows)
+
+drop table upsert;
+drop function upsert_before_func();
+drop function upsert_after_func();
diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out
index ccabe9e3dc..7eb92612d7 100644
--- a/src/test/regress/expected/updatable_views.out
+++ b/src/test/regress/expected/updatable_views.out
@@ -215,6 +215,67 @@ INSERT INTO rw_view15 VALUES (3, 'ROW 3'); -- should fail
ERROR: cannot insert into column "upper" of view "rw_view15"
DETAIL: View columns that are not columns of their base relation are not updatable.
INSERT INTO rw_view15 (a) VALUES (3); -- should be OK
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT DO NOTHING; -- succeeds
+SELECT * FROM rw_view15;
+ a | upper
+----+-------------
+ -2 | ROW -2
+ -1 | ROW -1
+ 0 | ROW 0
+ 1 | ROW 1
+ 2 | ROW 2
+ 3 | UNSPECIFIED
+(6 rows)
+
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO NOTHING; -- succeeds
+SELECT * FROM rw_view15;
+ a | upper
+----+-------------
+ -2 | ROW -2
+ -1 | ROW -1
+ 0 | ROW 0
+ 1 | ROW 1
+ 2 | ROW 2
+ 3 | UNSPECIFIED
+(6 rows)
+
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set a = excluded.a; -- succeeds
+SELECT * FROM rw_view15;
+ a | upper
+----+-------------
+ -2 | ROW -2
+ -1 | ROW -1
+ 0 | ROW 0
+ 1 | ROW 1
+ 2 | ROW 2
+ 3 | UNSPECIFIED
+(6 rows)
+
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set upper = 'blarg'; -- fails
+ERROR: cannot insert into column "upper" of view "rw_view15"
+DETAIL: View columns that are not columns of their base relation are not updatable.
+SELECT * FROM rw_view15;
+ a | upper
+----+-------------
+ -2 | ROW -2
+ -1 | ROW -1
+ 0 | ROW 0
+ 1 | ROW 1
+ 2 | ROW 2
+ 3 | UNSPECIFIED
+(6 rows)
+
+SELECT * FROM rw_view15;
+ a | upper
+----+-------------
+ -2 | ROW -2
+ -1 | ROW -1
+ 0 | ROW 0
+ 1 | ROW 1
+ 2 | ROW 2
+ 3 | UNSPECIFIED
+(6 rows)
+
ALTER VIEW rw_view15 ALTER COLUMN upper SET DEFAULT 'NOT SET';
INSERT INTO rw_view15 (a) VALUES (4); -- should fail
ERROR: cannot insert into column "upper" of view "rw_view15"
diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out
index 1de2a867a8..adc1fd7c39 100644
--- a/src/test/regress/expected/update.out
+++ b/src/test/regress/expected/update.out
@@ -6,6 +6,10 @@ CREATE TABLE update_test (
b INT,
c TEXT
);
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
INSERT INTO update_test VALUES (5, 10, 'foo');
INSERT INTO update_test(b, a) VALUES (15, 10);
SELECT * FROM update_test;
@@ -147,4 +151,34 @@ SELECT a, b, char_length(c) FROM update_test;
42 | 12 | 10000
(4 rows)
+-- Test ON CONFLICT DO UPDATE
+INSERT INTO upsert_test VALUES(1, 'Boo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+ a | b
+---+-----
+ 1 | Foo
+(1 row)
+
+-- correlated sub-select:
+INSERT INTO upsert_test VALUES (1, 'Baz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Correlated', a from upsert_test i WHERE i.a = upsert_test.a)
+ RETURNING *;
+ a | b
+---+-----------------
+ 1 | Foo, Correlated
+(1 row)
+
+-- correlated sub-select (EXCLUDED.* alias):
+INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING *;
+ a | b
+---+---------------------------
+ 1 | Foo, Correlated, Excluded
+(1 row)
+
DROP TABLE update_test;
+DROP TABLE upsert_test;
diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out
index a31ec341e6..2c9226c3db 100644
--- a/src/test/regress/expected/with.out
+++ b/src/test/regress/expected/with.out
@@ -1806,6 +1806,88 @@ SELECT * FROM y;
-400
(22 rows)
+-- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE
+CREATE TABLE z AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i;
+ALTER TABLE z ADD UNIQUE (k);
+WITH t AS (
+ INSERT INTO z SELECT i, 'insert'
+ FROM generate_series(0, 16) i
+ ON CONFLICT (k) DO UPDATE SET v = z.v || ', now update'
+ RETURNING *
+)
+SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k;
+ k | v | a
+---+--------+---
+ 0 | insert | 0
+ 0 | insert | 0
+(2 rows)
+
+-- Test EXCLUDED.* reference within CTE
+WITH aa AS (
+ INSERT INTO z VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
+ WHERE z.k != EXCLUDED.k
+ RETURNING *
+)
+SELECT * FROM aa;
+ k | v
+---+---
+(0 rows)
+
+-- New query/snapshot demonstrates side-effects of previous query.
+SELECT * FROM z ORDER BY k;
+ k | v
+----+------------------
+ 0 | insert
+ 1 | 1 v, now update
+ 2 | insert
+ 3 | insert
+ 4 | 4 v, now update
+ 5 | insert
+ 6 | insert
+ 7 | 7 v, now update
+ 8 | insert
+ 9 | insert
+ 10 | 10 v, now update
+ 11 | insert
+ 12 | insert
+ 13 | 13 v, now update
+ 14 | insert
+ 15 | insert
+ 16 | 16 v, now update
+(17 rows)
+
+--
+-- Ensure subqueries within the update clause work, even if they
+-- reference outside values
+--
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE z.k = (SELECT a FROM aa);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 ))
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+-- This shows an attempt to update an invisible row, which should really be
+-- reported as a cardinality violation, but it doesn't seem worth fixing:
+WITH simpletup AS (
+ SELECT 2 k, 'Green' v),
+upsert_cte AS (
+ INSERT INTO z VALUES(2, 'Blue') ON CONFLICT (k) DO
+ UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = z.k)
+ RETURNING k, v)
+INSERT INTO z VALUES(2, 'Red') ON CONFLICT (k) DO
+UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = z.k)
+RETURNING k, v;
+ERROR: attempted to update invisible tuple
+DROP TABLE z;
-- check that run to completion happens in proper ordering
TRUNCATE TABLE y;
INSERT INTO y SELECT generate_series(1, 3);
diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source
index c16f65088a..7647544f9b 100644
--- a/src/test/regress/input/constraints.source
+++ b/src/test/regress/input/constraints.source
@@ -292,6 +292,11 @@ INSERT INTO UNIQUE_TBL VALUES (5, 'one');
INSERT INTO UNIQUE_TBL (t) VALUES ('six');
INSERT INTO UNIQUE_TBL (t) VALUES ('seven');
+INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update';
+INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update';
+-- should fail
+INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails';
+
SELECT '' AS five, * FROM UNIQUE_TBL;
DROP TABLE UNIQUE_TBL;
@@ -438,6 +443,12 @@ INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>');
INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>');
-- fail, overlaps
INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>');
+-- succeed, because violation is ignored
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING;
+-- fail, because DO UPDATE variant requires unique index
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2;
-- succeed because c1 doesn't overlap
INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>');
-- succeed because c2 doesn't overlap
@@ -462,6 +473,7 @@ CREATE TABLE deferred_excl (
INSERT INTO deferred_excl VALUES(1);
INSERT INTO deferred_excl VALUES(2);
INSERT INTO deferred_excl VALUES(1); -- fail
+INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail
BEGIN;
INSERT INTO deferred_excl VALUES(2); -- no fail here
COMMIT; -- should fail here
diff --git a/src/test/regress/output/constraints.source b/src/test/regress/output/constraints.source
index d3ec233313..bbe4ed1976 100644
--- a/src/test/regress/output/constraints.source
+++ b/src/test/regress/output/constraints.source
@@ -421,16 +421,23 @@ INSERT INTO UNIQUE_TBL VALUES (4, 'four');
INSERT INTO UNIQUE_TBL VALUES (5, 'one');
INSERT INTO UNIQUE_TBL (t) VALUES ('six');
INSERT INTO UNIQUE_TBL (t) VALUES ('seven');
+INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update';
+INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update';
+-- should fail
+INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails';
+ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time
+HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values.
SELECT '' AS five, * FROM UNIQUE_TBL;
- five | i | t
-------+---+-------
+ five | i | t
+------+---+--------------------
| 1 | one
| 2 | two
| 4 | four
- | 5 | one
| | six
| | seven
-(6 rows)
+ | 5 | five-upsert-update
+ | 6 | six-upsert-insert
+(7 rows)
DROP TABLE UNIQUE_TBL;
CREATE TABLE UNIQUE_TBL (i int, t text,
@@ -605,6 +612,13 @@ INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>');
INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>');
ERROR: conflicting key value violates exclusion constraint "circles_c1_c2_excl"
DETAIL: Key (c1, (c2::circle))=(<(20,20),10>, <(0,0),4>) conflicts with existing key (c1, (c2::circle))=(<(10,10),10>, <(0,0),5>).
+-- succeed, because violation is ignored
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING;
+-- fail, because DO UPDATE variant requires unique index
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2;
+ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints
-- succeed because c1 doesn't overlap
INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>');
-- succeed because c2 doesn't overlap
@@ -627,6 +641,8 @@ INSERT INTO deferred_excl VALUES(2);
INSERT INTO deferred_excl VALUES(1); -- fail
ERROR: conflicting key value violates exclusion constraint "deferred_excl_con"
DETAIL: Key (f1)=(1) conflicts with existing key (f1)=(1).
+INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail
+ERROR: ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters
BEGIN;
INSERT INTO deferred_excl VALUES(2); -- no fail here
COMMIT; -- should fail here
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 6d3b865351..b0ebb6b3f4 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -36,6 +36,7 @@ test: geometry horology regex oidjoins type_sanity opr_sanity
# These four each depend on the previous one
# ----------
test: insert
+test: insert_conflict
test: create_function_1
test: create_type
test: create_table
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 8326894ed9..8409c0f3ef 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -50,6 +50,7 @@ test: oidjoins
test: type_sanity
test: opr_sanity
test: insert
+test: insert_conflict
test: create_function_1
test: create_type
test: create_table
diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql
new file mode 100644
index 0000000000..ba2b66bdb6
--- /dev/null
+++ b/src/test/regress/sql/insert_conflict.sql
@@ -0,0 +1,284 @@
+--
+-- insert...on conflict do unique index inference
+--
+create table insertconflicttest(key int4, fruit text);
+
+--
+-- Test unique index inference with operator class specifications and
+-- named collations
+--
+create unique index op_index_key on insertconflicttest(key, fruit text_pattern_ops);
+create unique index collation_index_key on insertconflicttest(key, fruit collate "C");
+create unique index both_index_key on insertconflicttest(key, fruit collate "C" text_pattern_ops);
+create unique index both_index_expr_key on insertconflicttest(key, lower(fruit) collate "C" text_pattern_ops);
+
+-- fails
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do nothing;
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit) do nothing;
+
+-- succeeds
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do nothing;
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit, key) do nothing;
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit), key, lower(fruit), key) do nothing;
+-- Neither collation nor operator class specifications are required --
+-- supplying them merely *limits* matches to indexes with matching opclasses
+-- used for relevant indexes
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit text_pattern_ops) do nothing;
+-- Okay, arbitrates using both indexes where text_pattern_ops opclass does and
+-- does not appear.
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit collate "C") do nothing;
+-- Okay, but only accepts the single index where both opclass and collation are
+-- specified
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit collate "C" text_pattern_ops, key) do nothing;
+-- Okay, but only accepts the single index where both opclass and collation are
+-- specified (plus expression variant)
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", key, key) do nothing;
+-- Attribute appears twice, while not all attributes/expressions on attributes
+-- appearing within index definition match in terms of both opclass and
+-- collation.
+--
+-- Works because every attribute in inference specification needs to be
+-- satisfied once or more by cataloged index attribute, and as always when an
+-- attribute in the cataloged definition has a non-default opclass/collation,
+-- it still satisfies some inference attribute lacking any particular
+-- opclass/collation specification.
+--
+-- The implementation is liberal in accepting inference specifications on the
+-- assumption that multiple inferred unique indexes will prevent problematic
+-- cases. It rolls with unique indexes where attributes redundantly appear
+-- multiple times, too (which is not tested here).
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit text_pattern_ops, key) do nothing;
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C" text_pattern_ops, key, key) do nothing;
+
+drop index op_index_key;
+drop index collation_index_key;
+drop index both_index_key;
+drop index both_index_expr_key;
+
+--
+-- Single key tests
+--
+create unique index key_index on insertconflicttest(key);
+
+--
+-- Explain tests
+--
+explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit;
+-- Should display qual actually attributable to internal sequential scan:
+explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Cawesh';
+-- With EXCLUDED.* expression in scan node:
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do update set fruit = excluded.fruit where excluded.fruit != 'Elderberry';
+-- Does the same, but JSON format shows "Conflict Arbiter Index" as JSON array:
+explain (costs off, format json) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Lime' returning *;
+
+-- Fails (no unique index inference specification, required for do update variant):
+insert into insertconflicttest values (1, 'Apple') on conflict do update set fruit = excluded.fruit;
+
+-- inference succeeds:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (2, 'Orange') on conflict (key, key, key) do update set fruit = excluded.fruit;
+
+-- Succeed, since multi-assignment does not involve subquery:
+insert into insertconflicttest
+values (1, 'Apple'), (2, 'Orange')
+on conflict (key) do update set (fruit, key) = (excluded.fruit, excluded.key);
+
+-- Give good diagnostic message when EXCLUDED.* spuriously referenced from
+-- RETURNING:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit RETURNING excluded.fruit;
+
+-- Only suggest .* column when inference element misspelled:
+insert into insertconflicttest values (1, 'Apple') on conflict (keyy) do update set fruit = excluded.fruit;
+
+-- Have useful HINT for EXCLUDED.* RTE within UPDATE:
+insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruitt;
+
+-- inference fails:
+insert into insertconflicttest values (3, 'Kiwi') on conflict (key, fruit) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (4, 'Mango') on conflict (fruit, key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (5, 'Lemon') on conflict (fruit) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (6, 'Passionfruit') on conflict (lower(fruit)) do update set fruit = excluded.fruit;
+
+-- Check the target relation can be aliased
+insert into insertconflicttest values (6, 'Passionfruits') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = excluded.fruit; -- ok, no reference to target table
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = ict.fruit; -- ok, alias
+insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = insertconflicttest.fruit; -- error, references aliased away name
+
+drop index key_index;
+
+--
+-- Composite key tests
+--
+create unique index comp_key_index on insertconflicttest(key, fruit);
+
+-- inference succeeds:
+insert into insertconflicttest values (7, 'Raspberry') on conflict (key, fruit) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (8, 'Lime') on conflict (fruit, key) do update set fruit = excluded.fruit;
+
+-- inference fails:
+insert into insertconflicttest values (9, 'Banana') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (10, 'Blueberry') on conflict (key, key, key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (11, 'Cherry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+
+drop index comp_key_index;
+
+--
+-- Partial index tests, no inference predicate specified
+--
+create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
+create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
+
+-- inference fails:
+insert into insertconflicttest values (13, 'Grape') on conflict (key, fruit) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (14, 'Raisin') on conflict (fruit, key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (15, 'Cranberry') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (16, 'Melon') on conflict (key, key, key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (17, 'Mulberry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (18, 'Pineapple') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+
+drop index part_comp_key_index;
+drop index expr_part_comp_key_index;
+
+--
+-- Expression index tests
+--
+create unique index expr_key_index on insertconflicttest(lower(fruit));
+
+-- inference succeeds:
+insert into insertconflicttest values (20, 'Quince') on conflict (lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (21, 'Pomegranate') on conflict (lower(fruit), lower(fruit)) do update set fruit = excluded.fruit;
+
+-- inference fails:
+insert into insertconflicttest values (22, 'Apricot') on conflict (upper(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) do update set fruit = excluded.fruit;
+
+drop index expr_key_index;
+
+--
+-- Expression index tests (with regular column)
+--
+create unique index expr_comp_key_index on insertconflicttest(key, lower(fruit));
+create unique index tricky_expr_comp_key_index on insertconflicttest(key, lower(fruit), upper(fruit));
+
+-- inference succeeds:
+insert into insertconflicttest values (24, 'Plum') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (25, 'Peach') on conflict (lower(fruit), key) do update set fruit = excluded.fruit;
+-- Should not infer "tricky_expr_comp_key_index" index:
+explain (costs off) insert into insertconflicttest values (26, 'Fig') on conflict (lower(fruit), key, lower(fruit), key) do update set fruit = excluded.fruit;
+
+-- inference fails:
+insert into insertconflicttest values (27, 'Prune') on conflict (key, upper(fruit)) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (28, 'Redcurrant') on conflict (fruit, key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (29, 'Nectarine') on conflict (key) do update set fruit = excluded.fruit;
+
+drop index expr_comp_key_index;
+drop index tricky_expr_comp_key_index;
+
+--
+-- Non-spurious duplicate violation tests
+--
+create unique index key_index on insertconflicttest(key);
+create unique index fruit_index on insertconflicttest(fruit);
+
+-- succeeds, since UPDATE happens to update "fruit" to existing value:
+insert into insertconflicttest values (26, 'Fig') on conflict (key) do update set fruit = excluded.fruit;
+-- fails, since UPDATE is to row with key value 26, and we're updating "fruit"
+-- to a value that happens to exist in another row ('peach'):
+insert into insertconflicttest values (26, 'Peach') on conflict (key) do update set fruit = excluded.fruit;
+-- succeeds, since "key" isn't repeated/referenced in UPDATE, and "fruit"
+-- arbitrates that statement updates existing "Fig" row:
+insert into insertconflicttest values (25, 'Fig') on conflict (fruit) do update set fruit = excluded.fruit;
+
+drop index key_index;
+drop index fruit_index;
+
+--
+-- Test partial unique index inference
+--
+create unique index partial_key_index on insertconflicttest(key) where fruit like '%berry';
+
+-- Succeeds
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' do update set fruit = excluded.fruit;
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' and fruit = 'inconsequential' do nothing;
+
+-- fails
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) do update set fruit = excluded.fruit;
+insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' or fruit = 'consequential' do nothing;
+insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) where fruit like '%berry' do update set fruit = excluded.fruit;
+
+drop index partial_key_index;
+
+-- Cleanup
+drop table insertconflicttest;
+
+-- ******************************************************************
+-- * *
+-- * Test inheritance (example taken from tutorial) *
+-- * *
+-- ******************************************************************
+create table cities (
+ name text,
+ population float8,
+ altitude int -- (in ft)
+);
+
+create table capitals (
+ state char(2)
+) inherits (cities);
+
+-- Create unique indexes. Due to a general limitation of inheritance,
+-- uniqueness is only enforced per-relation. Unique index inference
+-- specification will do the right thing, though.
+create unique index cities_names_unique on cities (name);
+create unique index capitals_names_unique on capitals (name);
+
+-- prepopulate the tables.
+insert into cities values ('San Francisco', 7.24E+5, 63);
+insert into cities values ('Las Vegas', 2.583E+5, 2174);
+insert into cities values ('Mariposa', 1200, 1953);
+
+insert into capitals values ('Sacramento', 3.694E+5, 30, 'CA');
+insert into capitals values ('Madison', 1.913E+5, 845, 'WI');
+
+-- Tests proper for inheritance:
+select * from capitals;
+
+-- Succeeds:
+insert into cities values ('Las Vegas', 2.583E+5, 2174) on conflict do nothing;
+insert into capitals values ('Sacramento', 4664.E+5, 30, 'CA') on conflict (name) do update set population = excluded.population;
+-- Wrong "Sacramento", so do nothing:
+insert into capitals values ('Sacramento', 50, 2267, 'NE') on conflict (name) do nothing;
+select * from capitals;
+insert into cities values ('Las Vegas', 5.83E+5, 2001) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude;
+select tableoid::regclass, * from cities;
+insert into capitals values ('Las Vegas', 5.83E+5, 2222, 'NV') on conflict (name) do update set population = excluded.population;
+-- Capitals will contain new capital, Las Vegas:
+select * from capitals;
+-- Cities contains two instances of "Las Vegas", since unique constraints don't
+-- work across inheritance:
+select tableoid::regclass, * from cities;
+-- This only affects "cities" version of "Las Vegas":
+insert into cities values ('Las Vegas', 5.86E+5, 2223) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude;
+select tableoid::regclass, * from cities;
+
+-- clean up
+drop table capitals;
+drop table cities;
+
+
+-- Make sure a table named excluded is handled properly
+create table excluded(key int primary key, data text);
+insert into excluded values(1, '1');
+-- error, ambiguous
+insert into excluded values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *;
+-- ok, aliased
+insert into excluded AS target values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *;
+-- ok, aliased
+insert into excluded AS target values(1, '2') on conflict (key) do update set data = target.data RETURNING *;
+-- make sure excluded isn't a problem in returning clause
+insert into excluded values(1, '2') on conflict (key) do update set data = 3 RETURNING excluded.*;
+
+-- clean up
+drop table excluded;
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index f97a75a5fd..22b54a28c4 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -194,7 +194,7 @@ SELECT * FROM atestv2; -- fail (even though regressuser2 can access underlying a
-- Test column level permissions
SET SESSION AUTHORIZATION regressuser1;
-CREATE TABLE atest5 (one int, two int, three int);
+CREATE TABLE atest5 (one int, two int unique, three int, four int unique);
CREATE TABLE atest6 (one int, two int, blue int);
GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regressuser4;
GRANT ALL (one) ON atest5 TO regressuser3;
@@ -245,6 +245,23 @@ INSERT INTO atest5 VALUES (5,5,5); -- fail
UPDATE atest5 SET three = 10; -- ok
UPDATE atest5 SET one = 8; -- fail
UPDATE atest5 SET three = 5, one = 2; -- fail
+-- Check that column level privs are enforced in RETURNING
+-- Ok.
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10;
+-- Error. No SELECT on column three.
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.three;
+-- Ok. May SELECT on column "one":
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.one;
+-- Check that column level privileges are enforced for EXCLUDED
+-- Ok. we may select one
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.one;
+-- Error. No select rights on three
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three;
+INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE)
+INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT)
+-- Check that the columns in the inference require select privileges
+-- Error. No privs on four
+INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10;
SET SESSION AUTHORIZATION regressuser1;
REVOKE ALL (one) ON atest5 FROM regressuser4;
diff --git a/src/test/regress/sql/returning.sql b/src/test/regress/sql/returning.sql
index 0ed9a48951..a460f82fb7 100644
--- a/src/test/regress/sql/returning.sql
+++ b/src/test/regress/sql/returning.sql
@@ -154,3 +154,9 @@ UPDATE joinview SET f1 = f1 + 1 WHERE f3 = 57 RETURNING *, other + 1;
SELECT * FROM joinview;
SELECT * FROM foo;
SELECT * FROM voo;
+
+-- Check aliased target relation
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING *; -- ok
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; -- fails, wrong name
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.*; -- ok
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.f3; -- ok
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index 7d12dd00a2..e8c5932b20 100644
--- a/src/test/regress/sql/rowsecurity.sql
+++ b/src/test/regress/sql/rowsecurity.sql
@@ -511,6 +511,118 @@ DELETE FROM bv1 WHERE a = 6 AND f_leak(b);
SET SESSION AUTHORIZATION rls_regress_user0;
SELECT * FROM b1;
+--
+-- INSERT ... ON CONFLICT DO UPDATE and Row-level security
+--
+
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p1 ON document;
+
+CREATE POLICY p1 ON document FOR SELECT USING (true);
+CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user);
+CREATE POLICY p3 ON document FOR UPDATE
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'))
+ WITH CHECK (dauthor = current_user);
+
+SET SESSION AUTHORIZATION rls_regress_user1;
+
+-- Exists...
+SELECT * FROM document WHERE did = 2;
+
+-- ...so violates actual WITH CHECK OPTION within UPDATE (not INSERT, since
+-- alternative UPDATE path happens to be taken):
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user2', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, dauthor = EXCLUDED.dauthor;
+
+-- Violates USING qual for UPDATE policy p3.
+--
+-- UPDATE path is taken, but UPDATE fails purely because *existing* row to be
+-- updated is not a "novel"/cid 11 (row is not leaked, even though we have
+-- SELECT privileges sufficient to see the row in this instance):
+INSERT INTO document VALUES (33, 22, 1, 'rls_regress_user1', 'okay science fiction'); -- preparation for next statement
+INSERT INTO document VALUES (33, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'Some novel, replaces sci-fi') -- takes UPDATE path
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle;
+-- Fine (we UPDATE, since INSERT WCOs and UPDATE security barrier quals + WCOs
+-- not violated):
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+-- Fine (we INSERT, so "cid = 33" ("technology") isn't evaluated):
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+-- Fine (same query, but we UPDATE, so "cid = 33", ("technology") is not the
+-- case in respect of *existing* tuple):
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+-- Same query a third time, but now fails due to existing tuple finally not
+-- passing quals:
+INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'some technology novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *;
+-- Don't fail just because INSERT doesn't satisfy WITH CHECK option that
+-- originated as a barrier/USING() qual from the UPDATE. Note that the UPDATE
+-- path *isn't* taken, and so UPDATE-related policy does not apply:
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+-- But this time, the same statement fails, because the UPDATE path is taken,
+-- and updating the row just inserted falls afoul of security barrier qual
+-- (enforced as WCO) -- what we might have updated target tuple to is
+-- irrelevant, in fact.
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+
+-- Test default USING qual enforced as WCO
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p1 ON document;
+DROP POLICY p2 ON document;
+DROP POLICY p3 ON document;
+
+CREATE POLICY p3_with_default ON document FOR UPDATE
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'));
+
+SET SESSION AUTHORIZATION rls_regress_user1;
+-- Just because WCO-style enforcement of USING quals occurs with
+-- existing/target tuple does not mean that the implementation can be allowed
+-- to fail to also enforce this qual against the final tuple appended to
+-- relation (since in the absence of an explicit WCO, this is also interpreted
+-- as an UPDATE/ALL WCO in general).
+--
+-- UPDATE path is taken here (fails due to existing tuple). Note that this is
+-- not reported as a "USING expression", because it's an RLS UPDATE check that originated as
+-- a USING qual for the purposes of RLS in general, as opposed to an explicit
+-- USING qual that is ordinarily a security barrier. We leave it up to the
+-- UPDATE to make this fail:
+INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'technology book, can only insert')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *;
+
+-- UPDATE path is taken here.  Existing tuple passes, since its cid
+-- corresponds to "novel", but default USING qual is enforced against
+-- post-UPDATE tuple too (as always when updating with a policy that lacks an
+-- explicit WCO), and so this fails:
+INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'technology'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET cid = EXCLUDED.cid, dtitle = EXCLUDED.dtitle RETURNING *;
+
+SET SESSION AUTHORIZATION rls_regress_user0;
+DROP POLICY p3_with_default ON document;
+
+--
+-- Test ALL policies with ON CONFLICT DO UPDATE (much the same as existing UPDATE
+-- tests)
+--
+CREATE POLICY p3_with_all ON document FOR ALL
+ USING (cid = (SELECT cid from category WHERE cname = 'novel'))
+ WITH CHECK (dauthor = current_user);
+
+SET SESSION AUTHORIZATION rls_regress_user1;
+
+-- Fails, since ALL WCO is enforced in insert path:
+INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user2', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33;
+-- Fails, since ALL policy USING qual is enforced (existing, target tuple is in
+-- violation, since it has the "manga" cid):
+INSERT INTO document VALUES (4, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle;
+-- Fails, since ALL WCO are enforced:
+INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel'), 1, 'rls_regress_user1', 'my first novel')
+ ON CONFLICT (did) DO UPDATE SET dauthor = 'rls_regress_user2';
--
-- ROLE/GROUP
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index c385e41457..6f1a1b84e7 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -680,6 +680,9 @@ SELECT * FROM shoelace_log ORDER BY sl_name;
insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0);
insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0);
+-- Unsupported (even though a similar updatable view construct is)
+insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0)
+ on conflict do nothing;
SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm;
SELECT * FROM shoelace_candelete;
@@ -844,6 +847,17 @@ insert into rule_and_refint_t3 values (1, 12, 11, 'row3');
insert into rule_and_refint_t3 values (1, 12, 12, 'row4');
insert into rule_and_refint_t3 values (1, 11, 13, 'row5');
insert into rule_and_refint_t3 values (1, 13, 11, 'row6');
+-- Ordinary table
+insert into rule_and_refint_t3 values (1, 13, 11, 'row6')
+ on conflict do nothing;
+-- rule not fired, so fk violation
+insert into rule_and_refint_t3 values (1, 13, 11, 'row6')
+ on conflict (id3a, id3b, id3c) do update
+ set id3b = excluded.id3b;
+-- rule fired, so unsupported
+insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0)
+ on conflict (sl_name) do update
+ set sl_avail = excluded.sl_avail;
create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3
where (exists (select 1 from rule_and_refint_t3
@@ -1025,3 +1039,48 @@ drop view rule_v1;
create view rule_v1(x) as select * from (values(1,2)) v(q,w);
\d+ rule_v1
drop view rule_v1;
+
+--
+-- Check DO INSTEAD rules with ON CONFLICT
+--
+CREATE TABLE hats (
+ hat_name char(10) primary key,
+ hat_color char(10) -- hat color
+);
+
+CREATE TABLE hat_data (
+ hat_name char(10) primary key,
+ hat_color char(10) -- hat color
+);
+
+-- okay
+CREATE RULE hat_nosert AS ON INSERT TO hats
+ DO INSTEAD
+ INSERT INTO hat_data VALUES (
+ NEW.hat_name,
+ NEW.hat_color)
+ ON CONFLICT (hat_name) DO NOTHING RETURNING *;
+
+-- Works (projects row)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+-- Works (does nothing)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+SELECT tablename, rulename, definition FROM pg_rules
+ WHERE tablename = 'hats';
+DROP RULE hat_nosert ON hats;
+
+CREATE RULE hat_upsert AS ON INSERT TO hats
+ DO INSTEAD
+ INSERT INTO hat_data VALUES (
+ NEW.hat_name,
+ NEW.hat_color)
+ ON CONFLICT (hat_name) DO UPDATE SET hat_color = 'Orange' RETURNING *;
+
+-- Works (does upsert)
+INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+SELECT tablename, rulename, definition FROM pg_rules
+ WHERE tablename = 'hats';
+DROP RULE hat_upsert ON hats;
+
+drop table hats;
+drop table hat_data;
diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql
index 4be2e40a00..2991223089 100644
--- a/src/test/regress/sql/subselect.sql
+++ b/src/test/regress/sql/subselect.sql
@@ -373,6 +373,20 @@ from
join
int4_tbl i4 on dummy = i4.f1;
+--
+-- Test case for subselect within UPDATE of INSERT...ON CONFLICT DO UPDATE
+--
+create temp table upsert(key int4 primary key, val text);
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'not seen';
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'seen with subselect ' || (select f1 from int4_tbl where f1 != 0 limit 1)::text;
+
+select * from upsert;
+
+with aa as (select 'int4_tbl' u from int4_tbl limit 1)
+insert into upsert values (1, 'x'), (999, 'y')
+on conflict (key) do update set val = (select u from aa)
+returning *;
+
--
-- Test case for cross-type partial matching in hashed subplan (bug #7597)
--
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index 0ea2c314de..9f66702cee 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -208,7 +208,7 @@ drop sequence ttdummy_seq;
CREATE TABLE log_table (tstamp timestamp default timeofday()::timestamp);
-CREATE TABLE main_table (a int, b int);
+CREATE TABLE main_table (a int unique, b int);
COPY main_table (a,b) FROM stdin;
5 10
@@ -237,6 +237,12 @@ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_ins_stmt');
CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_table
EXECUTE PROCEDURE trigger_func('after_upd_stmt');
+-- Both insert and update statement level triggers (before and after) should
+-- fire. Doesn't fire UPDATE before trigger, but only because one isn't
+-- defined.
+INSERT INTO main_table (a, b) VALUES (5, 10) ON CONFLICT (a)
+ DO UPDATE SET b = EXCLUDED.b;
+
CREATE TRIGGER after_upd_row_trig AFTER UPDATE ON main_table
FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_row');
@@ -246,6 +252,9 @@ UPDATE main_table SET a = a + 1 WHERE b < 30;
-- UPDATE that effects zero rows should still call per-statement trigger
UPDATE main_table SET a = a + 2 WHERE b > 100;
+-- constraint now unneeded
+ALTER TABLE main_table DROP CONSTRAINT main_table_a_key;
+
-- COPY should fire per-row and per-statement INSERT triggers
COPY main_table (a, b) FROM stdin;
30 40
@@ -1173,3 +1182,61 @@ select * from self_ref_trigger;
drop table self_ref_trigger;
drop function self_ref_trigger_ins_func();
drop function self_ref_trigger_del_func();
+
+--
+-- Verify behavior of before and after triggers with INSERT...ON CONFLICT
+-- DO UPDATE
+--
+create table upsert (key int4 primary key, color text);
+
+create function upsert_before_func()
+ returns trigger language plpgsql as
+$$
+begin
+ if (TG_OP = 'UPDATE') then
+ raise warning 'before update (old): %', old.*::text;
+ raise warning 'before update (new): %', new.*::text;
+ elsif (TG_OP = 'INSERT') then
+ raise warning 'before insert (new): %', new.*::text;
+ if new.key % 2 = 0 then
+ new.key := new.key + 1;
+ new.color := new.color || ' trig modified';
+ raise warning 'before insert (new, modified): %', new.*::text;
+ end if;
+ end if;
+ return new;
+end;
+$$;
+create trigger upsert_before_trig before insert or update on upsert
+ for each row execute procedure upsert_before_func();
+
+create function upsert_after_func()
+ returns trigger language plpgsql as
+$$
+begin
+ if (TG_OP = 'UPDATE') then
+ raise warning 'after update (old): %', new.*::text;
+ raise warning 'after update (new): %', new.*::text;
+ elsif (TG_OP = 'INSERT') then
+ raise warning 'after insert (new): %', new.*::text;
+ end if;
+ return null;
+end;
+$$;
+create trigger upsert_after_trig after insert or update on upsert
+ for each row execute procedure upsert_after_func();
+
+insert into upsert values(1, 'black') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(2, 'red') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(3, 'orange') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(4, 'green') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(5, 'purple') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(6, 'white') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(7, 'pink') on conflict (key) do update set color = 'updated ' || upsert.color;
+insert into upsert values(8, 'yellow') on conflict (key) do update set color = 'updated ' || upsert.color;
+
+select * from upsert;
+
+drop table upsert;
+drop function upsert_before_func();
+drop function upsert_after_func();
diff --git a/src/test/regress/sql/updatable_views.sql b/src/test/regress/sql/updatable_views.sql
index 697363665c..8fe96f5c51 100644
--- a/src/test/regress/sql/updatable_views.sql
+++ b/src/test/regress/sql/updatable_views.sql
@@ -69,6 +69,15 @@ DELETE FROM rw_view14 WHERE a=3; -- should be OK
-- Partially updatable view
INSERT INTO rw_view15 VALUES (3, 'ROW 3'); -- should fail
INSERT INTO rw_view15 (a) VALUES (3); -- should be OK
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT DO NOTHING; -- succeeds
+SELECT * FROM rw_view15;
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO NOTHING; -- succeeds
+SELECT * FROM rw_view15;
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set a = excluded.a; -- succeeds
+SELECT * FROM rw_view15;
+INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set upper = 'blarg'; -- fails
+SELECT * FROM rw_view15;
+SELECT * FROM rw_view15;
ALTER VIEW rw_view15 ALTER COLUMN upper SET DEFAULT 'NOT SET';
INSERT INTO rw_view15 (a) VALUES (4); -- should fail
UPDATE rw_view15 SET upper='ROW 3' WHERE a=3; -- should fail
diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql
index e71128c04d..5637c68acf 100644
--- a/src/test/regress/sql/update.sql
+++ b/src/test/regress/sql/update.sql
@@ -8,6 +8,11 @@ CREATE TABLE update_test (
c TEXT
);
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
+
INSERT INTO update_test VALUES (5, 10, 'foo');
INSERT INTO update_test(b, a) VALUES (15, 10);
@@ -74,4 +79,20 @@ UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a = 10;
UPDATE update_test SET c = repeat('x', 10000) WHERE c = 'car';
SELECT a, b, char_length(c) FROM update_test;
+-- Test ON CONFLICT DO UPDATE
+INSERT INTO upsert_test VALUES(1, 'Boo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+-- correlated sub-select:
+INSERT INTO upsert_test VALUES (1, 'Baz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Correlated', a from upsert_test i WHERE i.a = upsert_test.a)
+ RETURNING *;
+-- correlated sub-select (EXCLUDED.* alias):
+INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING *;
+
DROP TABLE update_test;
+DROP TABLE upsert_test;
diff --git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql
index 1687c11983..3fd55f96b3 100644
--- a/src/test/regress/sql/with.sql
+++ b/src/test/regress/sql/with.sql
@@ -795,6 +795,63 @@ SELECT * FROM t LIMIT 10;
SELECT * FROM y;
+-- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE
+CREATE TABLE z AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i;
+ALTER TABLE z ADD UNIQUE (k);
+
+WITH t AS (
+ INSERT INTO z SELECT i, 'insert'
+ FROM generate_series(0, 16) i
+ ON CONFLICT (k) DO UPDATE SET v = z.v || ', now update'
+ RETURNING *
+)
+SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k;
+
+-- Test EXCLUDED.* reference within CTE
+WITH aa AS (
+ INSERT INTO z VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
+ WHERE z.k != EXCLUDED.k
+ RETURNING *
+)
+SELECT * FROM aa;
+
+-- New query/snapshot demonstrates side-effects of previous query.
+SELECT * FROM z ORDER BY k;
+
+--
+-- Ensure subqueries within the update clause work, even if they
+-- reference outside values
+--
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE z.k = (SELECT a FROM aa);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b)
+INSERT INTO z VALUES(1, 'insert')
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1);
+WITH aa AS (SELECT 1 a, 2 b)
+INSERT INTO z VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 ))
+ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1);
+
+-- This shows an attempt to update an invisible row, which should really be
+-- reported as a cardinality violation, but it doesn't seem worth fixing:
+WITH simpletup AS (
+ SELECT 2 k, 'Green' v),
+upsert_cte AS (
+ INSERT INTO z VALUES(2, 'Blue') ON CONFLICT (k) DO
+ UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = z.k)
+ RETURNING k, v)
+INSERT INTO z VALUES(2, 'Red') ON CONFLICT (k) DO
+UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = z.k)
+RETURNING k, v;
+
+DROP TABLE z;
+
-- check that run to completion happens in proper ordering
TRUNCATE TABLE y;
--
cgit v1.2.3
From afb9249d06f47d7a6d4a89fea0c3625fe43c5a5d Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Tue, 12 May 2015 14:10:10 -0400
Subject: Add support for doing late row locking in FDWs.
Previously, FDWs could only do "early row locking", that is lock a row as
soon as it's fetched, even though local restriction/join conditions might
discard the row later. This patch adds callbacks that allow FDWs to do
late locking in the same way that it's done for regular tables.
To make use of this feature, an FDW must support the "ctid" column as a
unique row identifier. Currently, since ctid has to be of type TID,
the feature is of limited use, though in principle it could be used by
postgres_fdw. We may eventually allow FDWs to specify another data type
for ctid, which would make it possible for more FDWs to use this feature.
This commit does not modify postgres_fdw to use late locking. We've
tested some prototype code for that, but it's not in committable shape,
and besides it's quite unclear whether it actually makes sense to do late
locking against a remote server. The extra round trips required are likely
to outweigh any benefit from improved concurrency.
Etsuro Fujita, reviewed by Ashutosh Bapat, and hacked up a lot by me
---
doc/src/sgml/fdwhandler.sgml | 232 ++++++++++++++++++++++++++++++---
src/backend/executor/execMain.c | 79 ++++++++---
src/backend/executor/execUtils.c | 17 +--
src/backend/executor/nodeLockRows.c | 133 +++++++++++++------
src/backend/executor/nodeModifyTable.c | 2 +-
src/backend/optimizer/plan/planner.c | 8 +-
src/include/executor/executor.h | 2 +-
src/include/foreign/fdwapi.h | 12 ++
src/include/nodes/execnodes.h | 12 +-
src/include/nodes/plannodes.h | 31 +++--
10 files changed, 415 insertions(+), 113 deletions(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index 33863f04f8..236157743a 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -665,6 +665,108 @@ IsForeignRelUpdatable (Relation rel);
+
+ FDW Routines For Row Locking
+
+
+ If an FDW wishes to support late row locking> (as described
+ in ), it must provide the following
+ callback functions:
+
+
+
+
+RowMarkType
+GetForeignRowMarkType (RangeTblEntry *rte,
+ LockClauseStrength strength);
+
+
+ Report which row-marking option to use for a foreign table.
+ rte> is the RangeTblEntry> node for the table
+ and strength> describes the lock strength requested by the
+ relevant FOR UPDATE/SHARE> clause, if any. The result must be
+ a member of the RowMarkType> enum type.
+
+
+
+ This function is called during query planning for each foreign table that
+ appears in an UPDATE>, DELETE>, or SELECT
+ FOR UPDATE/SHARE> query and is not the target of UPDATE>
+ or DELETE>.
+
+
+
+ If the GetForeignRowMarkType> pointer is set to
+ NULL>, the ROW_MARK_COPY> option is always used.
+ (This implies that RefetchForeignRow> will never be called,
+ so it need not be provided either.)
+
+
+
+ See for more information.
+
+
+
+
+HeapTuple
+RefetchForeignRow (EState *estate,
+ ExecRowMark *erm,
+ Datum rowid,
+ bool *updated);
+
+
+ Re-fetch one tuple from the foreign table, after locking it if required.
+ estate> is global execution state for the query.
+ erm> is the ExecRowMark> struct describing
+ the target foreign table and the row lock type (if any) to acquire.
+ rowid> identifies the tuple to be fetched.
+ updated> is an output parameter.
+
+
+
+ This function should return a palloc'ed copy of the fetched tuple,
+ or NULL> if the row lock couldn't be obtained. The row lock
+ type to acquire is defined by erm->markType>, which is the
+ value previously returned by GetForeignRowMarkType>.
+ (ROW_MARK_REFERENCE> means to just re-fetch the tuple without
+ acquiring any lock, and ROW_MARK_COPY> will never be seen by
+ this routine.)
+
+
+
+ In addition, *updated> should be set to true>
+ if what was fetched was an updated version of the tuple rather than
+ the same version previously obtained. (If the FDW cannot be sure about
+ this, always returning true> is recommended.)
+
+
+
+ Note that by default, failure to acquire a row lock should result in
+ raising an error; a NULL> return is only appropriate if
+ the SKIP LOCKED> option is specified
+ by erm->waitPolicy>.
+
+
+
+ The rowid> is the ctid> value previously read
+ for the row to be re-fetched. Although the rowid> value is
+ passed as a Datum>, it can currently only be a tid>. The
+ function API is chosen in hopes that it may be possible to allow other
+ datatypes for row IDs in future.
+
+
+
+ If the RefetchForeignRow> pointer is set to
+ NULL>, attempts to re-fetch rows will fail
+ with an error message.
+
+
+
+ See for more information.
+
+
+
+
FDW Routines for EXPLAIN>
@@ -1092,24 +1194,6 @@ GetForeignServerByName(const char *name, bool missing_ok);
structures that copyObject> knows how to copy.
-
- For an UPDATE> or DELETE> against an external data
- source that supports concurrent updates, it is recommended that the
- ForeignScan> operation lock the rows that it fetches, perhaps
- via the equivalent of SELECT FOR UPDATE>. The FDW may also
- choose to lock rows at fetch time when the foreign table is referenced
- in a SELECT FOR UPDATE/SHARE>; if it does not, the
- FOR UPDATE> or FOR SHARE> option is essentially a
- no-op so far as the foreign table is concerned. This behavior may yield
- semantics slightly different from operations on local tables, where row
- locking is customarily delayed as long as possible: remote rows may get
- locked even though they subsequently fail locally-applied restriction or
- join conditions. However, matching the local semantics exactly would
- require an additional remote access for every row, and might be
- impossible anyway depending on what locking semantics the external data
- source provides.
-
-
INSERT> with an ON CONFLICT> clause does not
support specifying the conflict target, as remote constraints are not
@@ -1117,6 +1201,118 @@ GetForeignServerByName(const char *name, bool missing_ok);
UPDATE> is not supported, since the specification is mandatory there.
+
+
+
+ Row Locking in Foreign Data Wrappers
+
+
+ If an FDW's underlying storage mechanism has a concept of locking
+ individual rows to prevent concurrent updates of those rows, it is
+ usually worthwhile for the FDW to perform row-level locking with as
+ close an approximation as practical to the semantics used in
+ ordinary PostgreSQL> tables. There are multiple
+ considerations involved in this.
+
+
+
+ One key decision to be made is whether to perform early
+ locking> or late locking>. In early locking, a row is
+ locked when it is first retrieved from the underlying store, while in
+ late locking, the row is locked only when it is known that it needs to
+ be locked. (The difference arises because some rows may be discarded by
+ locally-checked restriction or join conditions.) Early locking is much
+ simpler and avoids extra round trips to a remote store, but it can cause
+ locking of rows that need not have been locked, resulting in reduced
+ concurrency or even unexpected deadlocks. Also, late locking is only
+ possible if the row to be locked can be uniquely re-identified later.
+ Preferably the row identifier should identify a specific version of the
+ row, as PostgreSQL> TIDs do.
+
+
+
+ By default, PostgreSQL> ignores locking considerations
+ when interfacing to FDWs, but an FDW can perform early locking without
+ any explicit support from the core code. The API functions described
+ in , which were added
+ in PostgreSQL> 9.5, allow an FDW to use late locking if
+ it wishes.
+
+
+
+ An additional consideration is that in READ COMMITTED>
+ isolation mode, PostgreSQL> may need to re-check
+ restriction and join conditions against an updated version of some
+ target tuple. Rechecking join conditions requires re-obtaining copies
+ of the non-target rows that were previously joined to the target tuple.
+ When working with standard PostgreSQL> tables, this is
+ done by including the TIDs of the non-target tables in the column list
+ projected through the join, and then re-fetching non-target rows when
+ required. This approach keeps the join data set compact, but it
+ requires inexpensive re-fetch capability, as well as a TID that can
+ uniquely identify the row version to be re-fetched. By default,
+ therefore, the approach used with foreign tables is to include a copy of
+ the entire row fetched from a foreign table in the column list projected
+ through the join. This puts no special demands on the FDW but can
+ result in reduced performance of merge and hash joins. An FDW that is
+ capable of meeting the re-fetch requirements can choose to do it the
+ first way.
+
+
+
+ For an UPDATE> or DELETE> on a foreign table, it
+ is recommended that the ForeignScan> operation on the target
+ table perform early locking on the rows that it fetches, perhaps via the
+ equivalent of SELECT FOR UPDATE>. An FDW can detect whether
+ a table is an UPDATE>/DELETE> target at plan time
+ by comparing its relid to root->parse->resultRelation>,
+ or at execution time by using ExecRelationIsTargetRelation()>.
+ An alternative possibility is to perform late locking within the
+ ExecForeignUpdate> or ExecForeignDelete>
+ callback, but no special support is provided for this.
+
+
+
+ For foreign tables that are specified to be locked by a SELECT
+ FOR UPDATE/SHARE> command, the ForeignScan> operation can
+ again perform early locking by fetching tuples with the equivalent
+ of SELECT FOR UPDATE/SHARE>. To perform late locking
+ instead, provide the callback functions defined
+ in .
+ In GetForeignRowMarkType>, select rowmark option
+ ROW_MARK_EXCLUSIVE>, ROW_MARK_NOKEYEXCLUSIVE>,
+ ROW_MARK_SHARE>, or ROW_MARK_KEYSHARE> depending
+ on the requested lock strength. (The core code will act the same
+ regardless of which of these four options you choose.)
+ Elsewhere, you can detect whether a foreign table was specified to be
+ locked by this type of command by using get_plan_rowmark> at
+ plan time, or ExecFindRowMark> at execution time; you must
+ check not only whether a non-null rowmark struct is returned, but that
+ its strength> field is not LCS_NONE>.
+
+
+
+ Lastly, for foreign tables that are used in an UPDATE>,
+ DELETE> or SELECT FOR UPDATE/SHARE> command but
+ are not specified to be row-locked, you can override the default choice
+ to copy entire rows by having GetForeignRowMarkType> select
+ option ROW_MARK_REFERENCE> when it sees lock strength
+ LCS_NONE>. This will cause RefetchForeignRow> to
+ be called with that value for markType>; it should then
+ re-fetch the row without acquiring any new lock. (If you have
+ a GetForeignRowMarkType> function but don't wish to re-fetch
+ unlocked rows, select option ROW_MARK_COPY>
+ for LCS_NONE>.)
+
+
+
+ See src/include/nodes/lockoptions.h>, the comments
+ for RowMarkType> and PlanRowMark>
+ in src/include/nodes/plannodes.h>, and the comments for
+ ExecRowMark> in src/include/nodes/execnodes.h> for
+ additional information.
+
+
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 0dee949178..43d3c44c82 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -898,8 +898,11 @@ InitPlan(QueryDesc *queryDesc, int eflags)
erm->prti = rc->prti;
erm->rowmarkId = rc->rowmarkId;
erm->markType = rc->markType;
+ erm->strength = rc->strength;
erm->waitPolicy = rc->waitPolicy;
+ erm->ermActive = false;
ItemPointerSetInvalid(&(erm->curCtid));
+ erm->ermExtra = NULL;
estate->es_rowMarks = lappend(estate->es_rowMarks, erm);
}
@@ -1143,6 +1146,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
+ FdwRoutine *fdwroutine;
+
switch (rel->rd_rel->relkind)
{
case RELKIND_RELATION:
@@ -1178,11 +1183,13 @@ CheckValidRowMarkRel(Relation rel, RowMarkType markType)
RelationGetRelationName(rel))));
break;
case RELKIND_FOREIGN_TABLE:
- /* Should not get here; planner should have used ROW_MARK_COPY */
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot lock rows in foreign table \"%s\"",
- RelationGetRelationName(rel))));
+ /* Okay only if the FDW supports it */
+ fdwroutine = GetFdwRoutineForRelation(rel, false);
+ if (fdwroutine->RefetchForeignRow == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(rel))));
break;
default:
ereport(ERROR,
@@ -2005,9 +2012,11 @@ ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
/*
* ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
+ *
+ * If no such struct, either return NULL or throw error depending on missing_ok
*/
ExecRowMark *
-ExecFindRowMark(EState *estate, Index rti)
+ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
{
ListCell *lc;
@@ -2018,8 +2027,9 @@ ExecFindRowMark(EState *estate, Index rti)
if (erm->rti == rti)
return erm;
}
- elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
- return NULL; /* keep compiler quiet */
+ if (!missing_ok)
+ elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
+ return NULL;
}
/*
@@ -2530,7 +2540,7 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
if (erm->markType == ROW_MARK_REFERENCE)
{
- Buffer buffer;
+ HeapTuple copyTuple;
Assert(erm->relation != NULL);
@@ -2541,17 +2551,50 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
/* non-locked rels could be on the inside of outer joins */
if (isNull)
continue;
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
- /* okay, fetch the tuple */
- if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
- false, NULL))
- elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+ /* fetch requests on foreign tables must be passed to their FDW */
+ if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ FdwRoutine *fdwroutine;
+ bool updated = false;
- /* successful, copy and store tuple */
- EvalPlanQualSetTuple(epqstate, erm->rti,
- heap_copytuple(&tuple));
- ReleaseBuffer(buffer);
+ fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
+ /* this should have been checked already, but let's be safe */
+ if (fdwroutine->RefetchForeignRow == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(erm->relation))));
+ copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
+ erm,
+ datum,
+ &updated);
+ if (copyTuple == NULL)
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /*
+ * Ideally we'd insist on updated == false here, but that
+ * assumes that FDWs can track that exactly, which they might
+ * not be able to. So just ignore the flag.
+ */
+ }
+ else
+ {
+ /* ordinary table, fetch the tuple */
+ Buffer buffer;
+
+ tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+ if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
+ false, NULL))
+ elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
+
+ /* successful, copy tuple */
+ copyTuple = heap_copytuple(&tuple);
+ ReleaseBuffer(buffer);
+ }
+
+ /* store tuple */
+ EvalPlanQualSetTuple(epqstate, erm->rti, copyTuple);
}
else
{
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 88ba16bc6d..0da8e53e81 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -805,20 +805,11 @@ ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags)
lockmode = NoLock;
else
{
- ListCell *l;
+ /* Keep this check in sync with InitPlan! */
+ ExecRowMark *erm = ExecFindRowMark(estate, scanrelid, true);
- foreach(l, estate->es_rowMarks)
- {
- ExecRowMark *erm = lfirst(l);
-
- /* Keep this check in sync with InitPlan! */
- if (erm->rti == scanrelid &&
- erm->relation != NULL)
- {
- lockmode = NoLock;
- break;
- }
- }
+ if (erm != NULL && erm->relation != NULL)
+ lockmode = NoLock;
}
/* Open the relation and acquire lock as needed */
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 5ae106c06a..7bcf99f488 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -25,6 +25,7 @@
#include "access/xact.h"
#include "executor/executor.h"
#include "executor/nodeLockRows.h"
+#include "foreign/fdwapi.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/tqual.h"
@@ -40,7 +41,7 @@ ExecLockRows(LockRowsState *node)
TupleTableSlot *slot;
EState *estate;
PlanState *outerPlan;
- bool epq_started;
+ bool epq_needed;
ListCell *lc;
/*
@@ -58,15 +59,18 @@ lnext:
if (TupIsNull(slot))
return NULL;
+ /* We don't need EvalPlanQual unless we get updated tuple version(s) */
+ epq_needed = false;
+
/*
* Attempt to lock the source tuple(s). (Note we only have locking
* rowmarks in lr_arowMarks.)
*/
- epq_started = false;
foreach(lc, node->lr_arowMarks)
{
ExecAuxRowMark *aerm = (ExecAuxRowMark *) lfirst(lc);
ExecRowMark *erm = aerm->rowmark;
+ HeapTuple *testTuple;
Datum datum;
bool isNull;
HeapTupleData tuple;
@@ -77,8 +81,10 @@ lnext:
HeapTuple copyTuple;
/* clear any leftover test tuple for this rel */
- if (node->lr_epqstate.estate != NULL)
- EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, NULL);
+ testTuple = &(node->lr_curtuples[erm->rti - 1]);
+ if (*testTuple != NULL)
+ heap_freetuple(*testTuple);
+ *testTuple = NULL;
/* if child rel, must check whether it produced this row */
if (erm->rti != erm->prti)
@@ -97,10 +103,12 @@ lnext:
if (tableoid != erm->relid)
{
/* this child is inactive right now */
+ erm->ermActive = false;
ItemPointerSetInvalid(&(erm->curCtid));
continue;
}
}
+ erm->ermActive = true;
/* fetch the tuple's ctid */
datum = ExecGetJunkAttribute(slot,
@@ -109,9 +117,45 @@ lnext:
/* shouldn't ever get a null result... */
if (isNull)
elog(ERROR, "ctid is NULL");
- tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
+
+ /* requests for foreign tables must be passed to their FDW */
+ if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ FdwRoutine *fdwroutine;
+ bool updated = false;
+
+ fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
+ /* this should have been checked already, but let's be safe */
+ if (fdwroutine->RefetchForeignRow == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(erm->relation))));
+ copyTuple = fdwroutine->RefetchForeignRow(estate,
+ erm,
+ datum,
+ &updated);
+ if (copyTuple == NULL)
+ {
+ /* couldn't get the lock, so skip this row */
+ goto lnext;
+ }
+
+ /* save locked tuple for possible EvalPlanQual testing below */
+ *testTuple = copyTuple;
+
+ /*
+ * if FDW says tuple was updated before getting locked, we need to
+ * perform EPQ testing to see if quals are still satisfied
+ */
+ if (updated)
+ epq_needed = true;
+
+ continue;
+ }
/* okay, try to lock the tuple */
+ tuple.t_self = *((ItemPointer) DatumGetPointer(datum));
switch (erm->markType)
{
case ROW_MARK_EXCLUSIVE:
@@ -191,40 +235,11 @@ lnext:
/* remember the actually locked tuple's TID */
tuple.t_self = copyTuple->t_self;
- /*
- * Need to run a recheck subquery. Initialize EPQ state if we
- * didn't do so already.
- */
- if (!epq_started)
- {
- ListCell *lc2;
+ /* Save locked tuple for EvalPlanQual testing below */
+ *testTuple = copyTuple;
- EvalPlanQualBegin(&node->lr_epqstate, estate);
-
- /*
- * Ensure that rels with already-visited rowmarks are told
- * not to return tuples during the first EPQ test. We can
- * exit this loop once it reaches the current rowmark;
- * rels appearing later in the list will be set up
- * correctly by the EvalPlanQualSetTuple call at the top
- * of the loop.
- */
- foreach(lc2, node->lr_arowMarks)
- {
- ExecAuxRowMark *aerm2 = (ExecAuxRowMark *) lfirst(lc2);
-
- if (lc2 == lc)
- break;
- EvalPlanQualSetTuple(&node->lr_epqstate,
- aerm2->rowmark->rti,
- NULL);
- }
-
- epq_started = true;
- }
-
- /* Store target tuple for relation's scan node */
- EvalPlanQualSetTuple(&node->lr_epqstate, erm->rti, copyTuple);
+ /* Remember we need to do EPQ testing */
+ epq_needed = true;
/* Continue loop until we have all target tuples */
break;
@@ -237,17 +252,35 @@ lnext:
test);
}
- /* Remember locked tuple's TID for WHERE CURRENT OF */
+ /* Remember locked tuple's TID for EPQ testing and WHERE CURRENT OF */
erm->curCtid = tuple.t_self;
}
/*
* If we need to do EvalPlanQual testing, do so.
*/
- if (epq_started)
+ if (epq_needed)
{
+ int i;
+
+ /* Initialize EPQ machinery */
+ EvalPlanQualBegin(&node->lr_epqstate, estate);
+
+ /*
+ * Transfer already-fetched tuples into the EPQ state, and make sure
+ * its test tuples for other tables are reset to NULL.
+ */
+ for (i = 0; i < node->lr_ntables; i++)
+ {
+ EvalPlanQualSetTuple(&node->lr_epqstate,
+ i + 1,
+ node->lr_curtuples[i]);
+ /* freeing this tuple is now the responsibility of EPQ */
+ node->lr_curtuples[i] = NULL;
+ }
+
/*
- * First, fetch a copy of any rows that were successfully locked
+ * Next, fetch a copy of any rows that were successfully locked
* without any update having occurred. (We do this in a separate pass
* so as to avoid overhead in the common case where there are no
* concurrent updates.)
@@ -260,7 +293,7 @@ lnext:
Buffer buffer;
/* ignore non-active child tables */
- if (!ItemPointerIsValid(&(erm->curCtid)))
+ if (!erm->ermActive)
{
Assert(erm->rti != erm->prti); /* check it's child table */
continue;
@@ -269,6 +302,10 @@ lnext:
if (EvalPlanQualGetTuple(&node->lr_epqstate, erm->rti) != NULL)
continue; /* it was updated and fetched above */
+ /* foreign tables should have been fetched above */
+ Assert(erm->relation->rd_rel->relkind != RELKIND_FOREIGN_TABLE);
+ Assert(ItemPointerIsValid(&(erm->curCtid)));
+
/* okay, fetch the tuple */
tuple.t_self = erm->curCtid;
if (!heap_fetch(erm->relation, SnapshotAny, &tuple, &buffer,
@@ -351,6 +388,13 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
ExecAssignResultTypeFromTL(&lrstate->ps);
lrstate->ps.ps_ProjInfo = NULL;
+ /*
+ * Create workspace in which we can remember per-RTE locked tuples
+ */
+ lrstate->lr_ntables = list_length(estate->es_range_table);
+ lrstate->lr_curtuples = (HeapTuple *)
+ palloc0(lrstate->lr_ntables * sizeof(HeapTuple));
+
/*
* Locate the ExecRowMark(s) that this node is responsible for, and
* construct ExecAuxRowMarks for them. (InitPlan should already have
@@ -370,8 +414,11 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
if (rc->isParent)
continue;
+ /* safety check on size of lr_curtuples array */
+ Assert(rc->rti > 0 && rc->rti <= lrstate->lr_ntables);
+
/* find ExecRowMark and build ExecAuxRowMark */
- erm = ExecFindRowMark(estate, rc->rti);
+ erm = ExecFindRowMark(estate, rc->rti, false);
aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist);
/*
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 34435c7e50..aec4151094 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1720,7 +1720,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
continue;
/* find ExecRowMark (same for all subplans) */
- erm = ExecFindRowMark(estate, rc->rti);
+ erm = ExecFindRowMark(estate, rc->rti, false);
/* build ExecAuxRowMark for each subplan */
for (i = 0; i < nplans; i++)
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index c80d45acaa..8de57c8e6b 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -20,6 +20,7 @@
#include "access/htup_details.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
+#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#ifdef OPTIMIZER_DEBUG
@@ -2324,7 +2325,12 @@ select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
}
else if (rte->relkind == RELKIND_FOREIGN_TABLE)
{
- /* For now, we force all foreign tables to use ROW_MARK_COPY */
+ /* Let the FDW select the rowmark type, if it wants to */
+ FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
+
+ if (fdwroutine->GetForeignRowMarkType != NULL)
+ return fdwroutine->GetForeignRowMarkType(rte, strength);
+ /* Otherwise, use ROW_MARK_COPY by default */
return ROW_MARK_COPY;
}
else
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 6c64609197..e60ab9fd96 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -196,7 +196,7 @@ extern void ExecConstraints(ResultRelInfo *resultRelInfo,
extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
TupleTableSlot *slot, EState *estate);
extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo);
-extern ExecRowMark *ExecFindRowMark(EState *estate, Index rti);
+extern ExecRowMark *ExecFindRowMark(EState *estate, Index rti, bool missing_ok);
extern ExecAuxRowMark *ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist);
extern TupleTableSlot *EvalPlanQual(EState *estate, EPQState *epqstate,
Relation relation, Index rti, int lockmode,
diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h
index 511c96b093..69b48b4677 100644
--- a/src/include/foreign/fdwapi.h
+++ b/src/include/foreign/fdwapi.h
@@ -89,6 +89,14 @@ typedef void (*EndForeignModify_function) (EState *estate,
typedef int (*IsForeignRelUpdatable_function) (Relation rel);
+typedef RowMarkType (*GetForeignRowMarkType_function) (RangeTblEntry *rte,
+ LockClauseStrength strength);
+
+typedef HeapTuple (*RefetchForeignRow_function) (EState *estate,
+ ExecRowMark *erm,
+ Datum rowid,
+ bool *updated);
+
typedef void (*ExplainForeignScan_function) (ForeignScanState *node,
struct ExplainState *es);
@@ -151,6 +159,10 @@ typedef struct FdwRoutine
EndForeignModify_function EndForeignModify;
IsForeignRelUpdatable_function IsForeignRelUpdatable;
+ /* Functions for SELECT FOR UPDATE/SHARE row locking */
+ GetForeignRowMarkType_function GetForeignRowMarkType;
+ RefetchForeignRow_function RefetchForeignRow;
+
/* Support functions for EXPLAIN */
ExplainForeignScan_function ExplainForeignScan;
ExplainForeignModify_function ExplainForeignModify;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 9de6d1484e..5ad2cc2358 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -429,8 +429,11 @@ typedef struct EState
* parent RTEs, which can be ignored at runtime). Virtual relations such as
* subqueries-in-FROM will have an ExecRowMark with relation == NULL. See
* PlanRowMark for details about most of the fields. In addition to fields
- * directly derived from PlanRowMark, we store curCtid, which is used by the
- * WHERE CURRENT OF code.
+ * directly derived from PlanRowMark, we store an activity flag (to denote
+ * inactive children of inheritance trees), curCtid, which is used by the
+ * WHERE CURRENT OF code, and ermExtra, which is available for use by the plan
+ * node that sources the relation (e.g., for a foreign table the FDW can use
+ * ermExtra to hold information).
*
* EState->es_rowMarks is a list of these structs.
*/
@@ -442,8 +445,11 @@ typedef struct ExecRowMark
Index prti; /* parent range table index, if child */
Index rowmarkId; /* unique identifier for resjunk columns */
RowMarkType markType; /* see enum in nodes/plannodes.h */
+ LockClauseStrength strength; /* LockingClause's strength, or LCS_NONE */
LockWaitPolicy waitPolicy; /* NOWAIT and SKIP LOCKED */
+ bool ermActive; /* is this mark relevant for current tuple? */
ItemPointerData curCtid; /* ctid of currently locked tuple, if any */
+ void *ermExtra; /* available for use by relation source node */
} ExecRowMark;
/*
@@ -1921,6 +1927,8 @@ typedef struct LockRowsState
PlanState ps; /* its first field is NodeTag */
List *lr_arowMarks; /* List of ExecAuxRowMarks */
EPQState lr_epqstate; /* for evaluating EvalPlanQual rechecks */
+ HeapTuple *lr_curtuples; /* locked tuples (one entry per RT entry) */
+ int lr_ntables; /* length of lr_curtuples[] array */
} LockRowsState;
/* ----------------
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 9313292222..1494b336c2 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -822,16 +822,16 @@ typedef struct Limit
*
* The first four of these values represent different lock strengths that
* we can take on tuples according to SELECT FOR [KEY] UPDATE/SHARE requests.
- * We only support these on regular tables. For foreign tables, any locking
- * that might be done for these requests must happen during the initial row
- * fetch; there is no mechanism for going back to lock a row later (and thus
- * no need for EvalPlanQual machinery during updates of foreign tables).
+ * We support these on regular tables, as well as on foreign tables whose FDWs
+ * report support for late locking. For other foreign tables, any locking
+ * that might be done for such requests must happen during the initial row
+ * fetch; their FDWs provide no mechanism for going back to lock a row later.
* This means that the semantics will be a bit different than for a local
* table; in particular we are likely to lock more rows than would be locked
* locally, since remote rows will be locked even if they then fail
- * locally-checked restriction or join quals. However, the alternative of
- * doing a separate remote query to lock each selected row is extremely
- * unappealing, so let's do it like this for now.
+ * locally-checked restriction or join quals. However, the prospect of
+ * doing a separate remote query to lock each selected row is usually pretty
+ * unappealing, so early locking remains a credible design choice for FDWs.
*
* When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we have to uniquely
* identify all the source rows, not only those from the target relations, so
@@ -840,12 +840,11 @@ typedef struct Limit
* represented by ROW_MARK_REFERENCE. Otherwise (for example for VALUES or
* FUNCTION scans) we have to copy the whole row value. ROW_MARK_COPY is
* pretty inefficient, since most of the time we'll never need the data; but
- * fortunately the case is not performance-critical in practice. Note that
- * we use ROW_MARK_COPY for non-target foreign tables, even if the FDW has a
- * concept of rowid and so could theoretically support some form of
- * ROW_MARK_REFERENCE. Although copying the whole row value is inefficient,
- * it's probably still faster than doing a second remote fetch, so it doesn't
- * seem worth the extra complexity to permit ROW_MARK_REFERENCE.
+ * fortunately the overhead is usually not performance-critical in practice.
+ * By default we use ROW_MARK_COPY for foreign tables, but if the FDW has
+ * a concept of rowid it can request to use ROW_MARK_REFERENCE instead.
+ * (Again, this probably doesn't make sense if a physical remote fetch is
+ * needed, but for FDWs that map to local storage it might be credible.)
*/
typedef enum RowMarkType
{
@@ -866,7 +865,7 @@ typedef enum RowMarkType
* When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
* PlanRowMark node for each non-target relation in the query. Relations that
* are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
- * regular tables) or ROW_MARK_COPY (if not).
+ * regular tables or supported foreign tables) or ROW_MARK_COPY (if not).
*
* Initially all PlanRowMarks have rti == prti and isParent == false.
* When the planner discovers that a relation is the root of an inheritance
@@ -879,8 +878,8 @@ typedef enum RowMarkType
* to use different markTypes).
*
* The planner also adds resjunk output columns to the plan that carry
- * information sufficient to identify the locked or fetched rows. For
- * regular tables (markType != ROW_MARK_COPY), these columns are named
+ * information sufficient to identify the locked or fetched rows. When
+ * markType != ROW_MARK_COPY, these columns are named
* tableoid%u OID of table
* ctid%u TID of row
* The tableoid column is only present for an inheritance hierarchy.
--
cgit v1.2.3
From 4af6e61a363246cf7fff3368a76603b0ce9945dd Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Wed, 13 May 2015 00:13:22 +0200
Subject: Fix ON CONFLICT bugs that manifest when used in rules.
Specifically the tlist and rti of the pseudo "excluded" relation weren't
properly treated by expression_tree_walker, which led to errors when
excluded was referenced inside a rule because the varnos were not
properly adjusted. Similar omissions in OffsetVarNodes and
expression_tree_mutator had less impact, but should obviously be fixed
nonetheless.
A couple of tests for ON CONFLICT UPDATE into INSERT-rule-bearing
relations have been added.
In passing I updated a couple comments.
---
src/backend/executor/nodeModifyTable.c | 1 +
src/backend/nodes/nodeFuncs.c | 3 +
src/backend/optimizer/plan/setrefs.c | 9 ++-
src/backend/rewrite/rewriteManip.c | 15 ++++-
src/test/regress/expected/rules.out | 105 ++++++++++++++++++++++++++++++---
src/test/regress/sql/rules.sql | 35 ++++++++++-
6 files changed, 151 insertions(+), 17 deletions(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index aec4151094..89f1f57ae3 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1675,6 +1675,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
ExecSetSlotDescriptor(mtstate->mt_existing,
resultRelInfo->ri_RelationDesc->rd_att);
+ /* carried forward solely for the benefit of explain */
mtstate->mt_excludedtlist = node->exclRelTlist;
/* create target slot for UPDATE SET projection */
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 4135f9c3cf..eac0215923 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -1922,6 +1922,8 @@ expression_tree_walker(Node *node,
return true;
if (walker(onconflict->onConflictWhere, context))
return true;
+ if (walker(onconflict->exclRelTlist, context))
+ return true;
}
break;
case T_JoinExpr:
@@ -2642,6 +2644,7 @@ expression_tree_mutator(Node *node,
MUTATE(newnode->arbiterWhere, oc->arbiterWhere, Node *);
MUTATE(newnode->onConflictSet, oc->onConflictSet, List *);
MUTATE(newnode->onConflictWhere, oc->onConflictWhere, Node *);
+ MUTATE(newnode->exclRelTlist, oc->exclRelTlist, List *);
return (Node *) newnode;
}
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index fac51c9147..517409d28a 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -740,9 +740,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* We treat ModifyTable with ON CONFLICT as a form of 'pseudo
- * join', where the inner side is the EXLUDED tuple. Therefore
- * use fix_join_expr to setup the relevant variables to
- * INNER_VAR. We explicitly don't create any OUTER_VARs as
+ * join', where the inner side is the EXCLUDED tuple.
+ * Therefore use fix_join_expr to setup the relevant variables
+ * to INNER_VAR. We explicitly don't create any OUTER_VARs as
* those are already used by RETURNING and it seems better to
* be non-conflicting.
*/
@@ -763,6 +763,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
NULL, itlist,
linitial_int(splan->resultRelations),
rtoffset);
+
+ splan->exclRelTlist =
+ fix_scan_list(root, splan->exclRelTlist, rtoffset);
}
splan->nominalRelation += rtoffset;
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index df457080fe..a9c6e626ba 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -426,9 +426,9 @@ OffsetVarNodes(Node *node, int offset, int sublevels_up)
/*
* If we are starting at a Query, and sublevels_up is zero, then we
* must also fix rangetable indexes in the Query itself --- namely
- * resultRelation and rowMarks entries. sublevels_up cannot be zero
- * when recursing into a subquery, so there's no need to have the same
- * logic inside OffsetVarNodes_walker.
+ * resultRelation, exclRelIndex and rowMarks entries. sublevels_up
+ * cannot be zero when recursing into a subquery, so there's no need
+ * to have the same logic inside OffsetVarNodes_walker.
*/
if (sublevels_up == 0)
{
@@ -436,6 +436,10 @@ OffsetVarNodes(Node *node, int offset, int sublevels_up)
if (qry->resultRelation)
qry->resultRelation += offset;
+
+ if (qry->onConflict && qry->onConflict->exclRelIndex)
+ qry->onConflict->exclRelIndex += offset;
+
foreach(l, qry->rowMarks)
{
RowMarkClause *rc = (RowMarkClause *) lfirst(l);
@@ -617,6 +621,11 @@ ChangeVarNodes(Node *node, int rt_index, int new_index, int sublevels_up)
if (qry->resultRelation == rt_index)
qry->resultRelation = new_index;
+
+ /* this is unlikely to ever be used, but ... */
+ if (qry->onConflict && qry->onConflict->exclRelIndex == rt_index)
+ qry->onConflict->exclRelIndex = new_index;
+
foreach(l, qry->rowMarks)
{
RowMarkClause *rc = (RowMarkClause *) lfirst(l);
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index a379a7279c..cb18bb931a 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -2817,25 +2817,112 @@ CREATE RULE hat_upsert AS ON INSERT TO hats
INSERT INTO hat_data VALUES (
NEW.hat_name,
NEW.hat_color)
- ON CONFLICT (hat_name) DO UPDATE SET hat_color = 'Orange' RETURNING *;
+ ON CONFLICT (hat_name)
+ DO UPDATE
+ SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color
+ WHERE excluded.hat_color <> 'forbidden'
+ RETURNING *;
-- Works (does upsert)
-INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+INSERT INTO hats VALUES ('h8', 'black') RETURNING *;
+ hat_name | hat_color
+------------+------------
+ h8 | black
+(1 row)
+
+SELECT * FROM hat_data WHERE hat_name = 'h8';
+ hat_name | hat_color
+------------+------------
+ h8 | black
+(1 row)
+
+INSERT INTO hats VALUES ('h8', 'white') RETURNING *;
+ hat_name | hat_color
+------------+------------
+ h8 | white
+(1 row)
+
+SELECT * FROM hat_data WHERE hat_name = 'h8';
+ hat_name | hat_color
+------------+------------
+ h8 | white
+(1 row)
+
+INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *;
+ hat_name | hat_color
+----------+-----------
+(0 rows)
+
+SELECT * FROM hat_data WHERE hat_name = 'h8';
hat_name | hat_color
------------+------------
- h7 | Orange
+ h8 | white
(1 row)
SELECT tablename, rulename, definition FROM pg_rules
WHERE tablename = 'hats';
- tablename | rulename | definition
------------+------------+-----------------------------------------------------------------------------------------------
- hats | hat_upsert | CREATE RULE hat_upsert AS +
- | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) +
- | | VALUES (new.hat_name, new.hat_color) ON CONFLICT DO UPDATE SET hat_color = 'Orange'::bpchar+
- | | RETURNING hat_data.hat_name, +
+ tablename | rulename | definition
+-----------+------------+-------------------------------------------------------------------------------------------------------------------------------
+ hats | hat_upsert | CREATE RULE hat_upsert AS +
+ | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) +
+ | | VALUES (new.hat_name, new.hat_color) ON CONFLICT DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+
+ | | WHERE (excluded.hat_color <> 'forbidden'::bpchar) +
+ | | RETURNING hat_data.hat_name, +
| | hat_data.hat_color;
(1 row)
+-- ensure explain works for on insert conflict rules
+explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *;
+ QUERY PLAN
+----------------------------------------------------------------
+ Insert on hat_data
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: hat_data_pkey
+ Conflict Filter: (excluded.hat_color <> 'forbidden'::bpchar)
+ -> Result
+(5 rows)
+
+-- ensure upserting into a rule, with a CTE (different offsets!) works
+WITH data(hat_name, hat_color) AS (
+ VALUES ('h8', 'green'),
+ ('h9', 'blue'),
+ ('h7', 'forbidden')
+)
+INSERT INTO hats
+ SELECT * FROM data
+RETURNING *;
+ hat_name | hat_color
+------------+------------
+ h8 | green
+ h9 | blue
+(2 rows)
+
+EXPLAIN (costs off) WITH data(hat_name, hat_color) AS (
+ VALUES ('h8', 'green'),
+ ('h9', 'blue'),
+ ('h7', 'forbidden')
+)
+INSERT INTO hats
+ SELECT * FROM data
+RETURNING *;
+ QUERY PLAN
+----------------------------------------------------------------
+ Insert on hat_data
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: hat_data_pkey
+ Conflict Filter: (excluded.hat_color <> 'forbidden'::bpchar)
+ CTE data
+ -> Values Scan on "*VALUES*"
+ -> CTE Scan on data
+(7 rows)
+
+SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name;
+ hat_name | hat_color
+------------+------------
+ h7 | black
+ h8 | green
+ h9 | blue
+(3 rows)
+
DROP RULE hat_upsert ON hats;
drop table hats;
drop table hat_data;
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index 6f1a1b84e7..1a81155bf1 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -1074,12 +1074,43 @@ CREATE RULE hat_upsert AS ON INSERT TO hats
INSERT INTO hat_data VALUES (
NEW.hat_name,
NEW.hat_color)
- ON CONFLICT (hat_name) DO UPDATE SET hat_color = 'Orange' RETURNING *;
+ ON CONFLICT (hat_name)
+ DO UPDATE
+ SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color
+ WHERE excluded.hat_color <> 'forbidden'
+ RETURNING *;
-- Works (does upsert)
-INSERT INTO hats VALUES ('h7', 'black') RETURNING *;
+INSERT INTO hats VALUES ('h8', 'black') RETURNING *;
+SELECT * FROM hat_data WHERE hat_name = 'h8';
+INSERT INTO hats VALUES ('h8', 'white') RETURNING *;
+SELECT * FROM hat_data WHERE hat_name = 'h8';
+INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *;
+SELECT * FROM hat_data WHERE hat_name = 'h8';
SELECT tablename, rulename, definition FROM pg_rules
WHERE tablename = 'hats';
+-- ensure explain works for on insert conflict rules
+explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *;
+
+-- ensure upserting into a rule, with a CTE (different offsets!) works
+WITH data(hat_name, hat_color) AS (
+ VALUES ('h8', 'green'),
+ ('h9', 'blue'),
+ ('h7', 'forbidden')
+)
+INSERT INTO hats
+ SELECT * FROM data
+RETURNING *;
+EXPLAIN (costs off) WITH data(hat_name, hat_color) AS (
+ VALUES ('h8', 'green'),
+ ('h9', 'blue'),
+ ('h7', 'forbidden')
+)
+INSERT INTO hats
+ SELECT * FROM data
+RETURNING *;
+SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name;
+
DROP RULE hat_upsert ON hats;
drop table hats;
--
cgit v1.2.3
From e4942f7a56efcfaabed5db7bde29ee21bef2f6e2 Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Tue, 19 May 2015 01:55:10 +0200
Subject: Attach ON CONFLICT SET ... WHERE to the correct planstate.
The previous coding was a leftover from attempting to hang all the on
conflict logic onto modify table's child nodes. It appears to not have
actually caused problems except for EXPLAIN.
Add test exercising the broken and some other code paths.
Author: Peter Geoghegan and Andres Freund
---
src/backend/executor/nodeModifyTable.c | 2 +-
src/test/regress/expected/insert_conflict.out | 16 ++++++++++++++++
src/test/regress/sql/insert_conflict.sql | 2 ++
3 files changed, 19 insertions(+), 1 deletion(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 89f1f57ae3..8112fb45b8 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1697,7 +1697,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
ExprState *qualexpr;
qualexpr = ExecInitExpr((Expr *) node->onConflictWhere,
- mtstate->mt_plans[0]);
+ &mtstate->ps);
resultRelInfo->ri_onConflictSetWhere = (List *) qualexpr;
}
diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out
index 3273d98793..8e4e33e6e6 100644
--- a/src/test/regress/expected/insert_conflict.out
+++ b/src/test/regress/expected/insert_conflict.out
@@ -43,6 +43,22 @@ explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on con
-> Result
(4 rows)
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do update set fruit = excluded.fruit
+ where exists (select 1 from insertconflicttest ii where ii.key = excluded.key);
+ QUERY PLAN
+-------------------------------------------------------------------------------
+ Insert on insertconflicttest
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key
+ Conflict Filter: (alternatives: SubPlan 1 or hashed SubPlan 2)
+ -> Result
+ SubPlan 1
+ -> Index Only Scan using both_index_expr_key on insertconflicttest ii
+ Index Cond: (key = excluded.key)
+ SubPlan 2
+ -> Seq Scan on insertconflicttest ii_1
+(10 rows)
+
-- Neither collation nor operator class specifications are required --
-- supplying them merely *limits* matches to indexes with matching opclasses
-- used for relevant indexes
diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql
index ba2b66bdb6..a0bdd7f536 100644
--- a/src/test/regress/sql/insert_conflict.sql
+++ b/src/test/regress/sql/insert_conflict.sql
@@ -20,6 +20,8 @@ explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on con
explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do nothing;
explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit, key) do nothing;
explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit), key, lower(fruit), key) do nothing;
+explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do update set fruit = excluded.fruit
+ where exists (select 1 from insertconflicttest ii where ii.key = excluded.key);
-- Neither collation nor operator class specifications are required --
-- supplying them merely *limits* matches to indexes with matching opclasses
-- used for relevant indexes
--
cgit v1.2.3
From 807b9e0dff663c5da875af7907a5106c0ff90673 Mon Sep 17 00:00:00 2001
From: Bruce Momjian
Date: Sat, 23 May 2015 21:35:49 -0400
Subject: pgindent run for 9.5
---
contrib/btree_gin/btree_gin.c | 35 +-
contrib/btree_gist/btree_utils_num.c | 2 +-
contrib/btree_gist/btree_utils_var.c | 2 +-
contrib/fuzzystrmatch/dmetaphone.c | 2 +-
contrib/hstore/hstore_gist.c | 2 +-
contrib/hstore_plperl/hstore_plperl.c | 12 +-
contrib/hstore_plpython/hstore_plpython.c | 14 +-
contrib/ltree/crc32.c | 5 +-
contrib/ltree_plpython/ltree_plpython.c | 2 +-
contrib/pageinspect/brinfuncs.c | 38 +-
contrib/pageinspect/ginfuncs.c | 2 +-
contrib/pg_audit/pg_audit.c | 487 +++++++++++----------
contrib/pg_buffercache/pg_buffercache_pages.c | 1 +
contrib/pg_stat_statements/pg_stat_statements.c | 25 +-
contrib/pgcrypto/pgp-armor.c | 2 +-
contrib/pgcrypto/pgp-pgsql.c | 33 +-
contrib/pgcrypto/pgp.h | 8 +-
contrib/pgstattuple/pgstatapprox.c | 27 +-
contrib/postgres_fdw/postgres_fdw.c | 2 +-
contrib/test_decoding/test_decoding.c | 8 +-
contrib/tsm_system_rows/tsm_system_rows.c | 73 +--
contrib/tsm_system_time/tsm_system_time.c | 98 +++--
src/backend/access/brin/brin.c | 17 +-
src/backend/access/brin/brin_inclusion.c | 50 +--
src/backend/access/brin/brin_minmax.c | 14 +-
src/backend/access/brin/brin_revmap.c | 18 +-
src/backend/access/brin/brin_tuple.c | 2 +-
src/backend/access/gin/ginget.c | 3 +-
src/backend/access/gin/ginutil.c | 2 +-
src/backend/access/gist/gist.c | 2 +-
src/backend/access/gist/gistscan.c | 12 +-
src/backend/access/gist/gistutil.c | 2 +-
src/backend/access/heap/heapam.c | 102 ++---
src/backend/access/heap/hio.c | 6 +-
src/backend/access/index/genam.c | 20 +-
src/backend/access/nbtree/nbtinsert.c | 13 +-
src/backend/access/nbtree/nbtpage.c | 11 +-
src/backend/access/nbtree/nbtree.c | 5 +-
src/backend/access/nbtree/nbtsearch.c | 8 +-
src/backend/access/nbtree/nbtsort.c | 2 +-
src/backend/access/nbtree/nbtutils.c | 6 +-
src/backend/access/rmgrdesc/committsdesc.c | 8 +-
src/backend/access/rmgrdesc/replorigindesc.c | 6 +-
src/backend/access/rmgrdesc/xactdesc.c | 14 +-
src/backend/access/spgist/spgscan.c | 1 +
src/backend/access/tablesample/bernoulli.c | 69 +--
src/backend/access/tablesample/system.c | 48 +-
src/backend/access/tablesample/tablesample.c | 94 ++--
src/backend/access/transam/commit_ts.c | 74 ++--
src/backend/access/transam/multixact.c | 59 ++-
src/backend/access/transam/parallel.c | 188 ++++----
src/backend/access/transam/twophase.c | 41 +-
src/backend/access/transam/xact.c | 141 +++---
src/backend/access/transam/xlog.c | 242 +++++-----
src/backend/access/transam/xloginsert.c | 22 +-
src/backend/access/transam/xlogreader.c | 27 +-
src/backend/bootstrap/bootstrap.c | 1 +
src/backend/catalog/Catalog.pm | 3 +-
src/backend/catalog/aclchk.c | 16 +-
src/backend/catalog/dependency.c | 12 +-
src/backend/catalog/genbki.pl | 15 +-
src/backend/catalog/index.c | 6 +-
src/backend/catalog/objectaddress.c | 359 +++++++++------
src/backend/catalog/pg_aggregate.c | 2 +-
src/backend/catalog/pg_enum.c | 2 +-
src/backend/catalog/pg_proc.c | 10 +-
src/backend/catalog/pg_type.c | 2 +-
src/backend/catalog/toasting.c | 12 +-
src/backend/commands/analyze.c | 1 +
src/backend/commands/copy.c | 29 +-
src/backend/commands/createas.c | 2 +-
src/backend/commands/dbcommands.c | 8 +-
src/backend/commands/dropcmds.c | 4 +-
src/backend/commands/event_trigger.c | 70 +--
src/backend/commands/explain.c | 23 +-
src/backend/commands/functioncmds.c | 37 +-
src/backend/commands/matview.c | 2 +-
src/backend/commands/policy.c | 260 +++++------
src/backend/commands/schemacmds.c | 4 +-
src/backend/commands/sequence.c | 16 +-
src/backend/commands/tablecmds.c | 61 +--
src/backend/commands/trigger.c | 4 +-
src/backend/commands/typecmds.c | 40 +-
src/backend/commands/user.c | 27 +-
src/backend/commands/vacuum.c | 11 +-
src/backend/commands/vacuumlazy.c | 11 +-
src/backend/executor/execAmi.c | 8 +-
src/backend/executor/execIndexing.c | 26 +-
src/backend/executor/execMain.c | 94 ++--
src/backend/executor/execQual.c | 14 +-
src/backend/executor/execUtils.c | 2 +-
src/backend/executor/nodeAgg.c | 132 +++---
src/backend/executor/nodeBitmapHeapscan.c | 2 +-
src/backend/executor/nodeGroup.c | 2 +-
src/backend/executor/nodeHash.c | 56 +--
src/backend/executor/nodeIndexonlyscan.c | 4 +-
src/backend/executor/nodeIndexscan.c | 6 +-
src/backend/executor/nodeLockRows.c | 11 +-
src/backend/executor/nodeMaterial.c | 2 +-
src/backend/executor/nodeMergeAppend.c | 8 +-
src/backend/executor/nodeMergejoin.c | 4 +-
src/backend/executor/nodeModifyTable.c | 55 ++-
src/backend/executor/nodeSamplescan.c | 13 +-
src/backend/executor/nodeSort.c | 2 +-
src/backend/executor/nodeWindowAgg.c | 2 +-
src/backend/executor/spi.c | 14 +-
src/backend/lib/bipartite_match.c | 16 +-
src/backend/lib/hyperloglog.c | 6 +-
src/backend/lib/pairingheap.c | 4 +-
src/backend/libpq/auth.c | 4 +-
src/backend/libpq/be-secure-openssl.c | 33 +-
src/backend/libpq/be-secure.c | 14 +-
src/backend/libpq/hba.c | 4 +-
src/backend/libpq/pqcomm.c | 4 +-
src/backend/libpq/pqmq.c | 31 +-
src/backend/nodes/copyfuncs.c | 6 +-
src/backend/nodes/makefuncs.c | 2 +-
src/backend/nodes/nodeFuncs.c | 10 +-
src/backend/optimizer/path/allpaths.c | 8 +-
src/backend/optimizer/path/costsize.c | 6 +-
src/backend/optimizer/plan/analyzejoins.c | 14 +-
src/backend/optimizer/plan/createplan.c | 6 +-
src/backend/optimizer/plan/planner.c | 150 ++++---
src/backend/optimizer/plan/setrefs.c | 2 +-
src/backend/optimizer/util/clauses.c | 4 +-
src/backend/optimizer/util/pathnode.c | 2 +-
src/backend/optimizer/util/plancat.c | 25 +-
src/backend/optimizer/util/var.c | 11 +-
src/backend/parser/analyze.c | 16 +-
src/backend/parser/parse_agg.c | 112 ++---
src/backend/parser/parse_clause.c | 169 +++----
src/backend/parser/parse_func.c | 55 +--
src/backend/parser/parse_relation.c | 56 +--
src/backend/parser/parse_type.c | 2 +-
src/backend/parser/parse_utilcmd.c | 6 +-
src/backend/port/atomics.c | 15 +-
src/backend/port/sysv_shmem.c | 2 +-
src/backend/port/win32_latch.c | 2 +-
src/backend/port/win32_sema.c | 1 +
src/backend/postmaster/autovacuum.c | 10 +-
src/backend/postmaster/bgworker.c | 21 +-
src/backend/postmaster/pgstat.c | 2 +-
src/backend/postmaster/postmaster.c | 8 +-
src/backend/replication/basebackup.c | 21 +-
.../libpqwalreceiver/libpqwalreceiver.c | 14 +-
src/backend/replication/logical/decode.c | 4 +-
src/backend/replication/logical/logical.c | 4 +-
src/backend/replication/logical/logicalfuncs.c | 2 +-
src/backend/replication/logical/origin.c | 156 +++----
src/backend/replication/logical/reorderbuffer.c | 29 +-
src/backend/replication/logical/snapbuild.c | 27 +-
src/backend/replication/slot.c | 6 +-
src/backend/replication/slotfuncs.c | 6 +-
src/backend/replication/walreceiverfuncs.c | 2 +-
src/backend/replication/walsender.c | 11 +-
src/backend/rewrite/rewriteHandler.c | 32 +-
src/backend/rewrite/rewriteManip.c | 2 +-
src/backend/rewrite/rowsecurity.c | 196 ++++-----
src/backend/storage/buffer/buf_init.c | 6 +-
src/backend/storage/buffer/bufmgr.c | 42 +-
src/backend/storage/buffer/freelist.c | 13 +-
src/backend/storage/file/fd.c | 4 +-
src/backend/storage/file/reinit.c | 10 +-
src/backend/storage/ipc/dsm_impl.c | 8 +-
src/backend/storage/ipc/procarray.c | 10 +-
src/backend/storage/ipc/shm_mq.c | 20 +-
src/backend/storage/ipc/sinval.c | 4 +-
src/backend/storage/lmgr/lwlock.c | 74 ++--
src/backend/storage/lmgr/proc.c | 1 +
src/backend/storage/page/bufpage.c | 8 +-
src/backend/storage/smgr/md.c | 4 +-
src/backend/tcop/postgres.c | 20 +-
src/backend/tcop/utility.c | 65 +--
src/backend/tsearch/spell.c | 4 +-
src/backend/utils/adt/acl.c | 4 +-
src/backend/utils/adt/array_userfuncs.c | 19 +-
src/backend/utils/adt/formatting.c | 54 +--
src/backend/utils/adt/json.c | 6 +-
src/backend/utils/adt/jsonb.c | 115 ++---
src/backend/utils/adt/jsonb_util.c | 11 +-
src/backend/utils/adt/jsonfuncs.c | 103 ++---
src/backend/utils/adt/levenshtein.c | 4 +-
src/backend/utils/adt/lockfuncs.c | 2 +-
src/backend/utils/adt/misc.c | 2 +-
src/backend/utils/adt/network_gist.c | 8 +-
src/backend/utils/adt/numeric.c | 59 +--
src/backend/utils/adt/pg_locale.c | 2 +-
src/backend/utils/adt/pg_upgrade_support.c | 26 +-
src/backend/utils/adt/pgstatfuncs.c | 20 +-
src/backend/utils/adt/rangetypes_spgist.c | 30 +-
src/backend/utils/adt/regexp.c | 5 +-
src/backend/utils/adt/regproc.c | 8 +-
src/backend/utils/adt/ri_triggers.c | 16 +-
src/backend/utils/adt/ruleutils.c | 73 +--
src/backend/utils/adt/tsquery_op.c | 5 +-
src/backend/utils/adt/txid.c | 6 +-
src/backend/utils/adt/varlena.c | 184 ++++----
src/backend/utils/adt/xml.c | 4 +-
src/backend/utils/cache/inval.c | 11 +-
src/backend/utils/cache/lsyscache.c | 4 +-
src/backend/utils/cache/plancache.c | 4 +-
src/backend/utils/cache/relcache.c | 14 +-
src/backend/utils/cache/syscache.c | 38 +-
src/backend/utils/error/elog.c | 4 +-
src/backend/utils/fmgr/dfmgr.c | 4 +-
src/backend/utils/fmgr/funcapi.c | 9 +-
src/backend/utils/init/miscinit.c | 1 +
src/backend/utils/init/postinit.c | 2 +-
src/backend/utils/mb/Unicode/UCS_to_GB18030.pl | 4 +-
.../utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl | 4 +-
.../mb/conversion_procs/euc_tw_and_big5/big5.c | 8 +-
src/backend/utils/misc/guc.c | 115 +++--
src/backend/utils/misc/rls.c | 35 +-
src/backend/utils/misc/sampling.c | 4 +-
src/backend/utils/sort/sortsupport.c | 4 +-
src/backend/utils/sort/tuplesort.c | 116 ++---
src/backend/utils/time/combocid.c | 7 +-
src/backend/utils/time/snapmgr.c | 33 +-
src/backend/utils/time/tqual.c | 11 +-
src/bin/pg_basebackup/pg_basebackup.c | 18 +-
src/bin/pg_basebackup/pg_receivexlog.c | 23 +-
src/bin/pg_basebackup/receivelog.c | 115 ++---
src/bin/pg_basebackup/receivelog.h | 2 +-
src/bin/pg_basebackup/streamutil.c | 14 +-
src/bin/pg_basebackup/streamutil.h | 12 +-
src/bin/pg_basebackup/t/010_pg_basebackup.pl | 24 +-
src/bin/pg_ctl/pg_ctl.c | 13 +-
src/bin/pg_ctl/t/001_start_stop.pl | 4 +-
src/bin/pg_ctl/t/002_status.pl | 2 +-
src/bin/pg_dump/pg_dump.c | 126 +++---
src/bin/pg_dump/pg_dump.h | 2 +-
src/bin/pg_dump/pg_dumpall.c | 2 +-
src/bin/pg_resetxlog/pg_resetxlog.c | 6 +-
src/bin/pg_rewind/RewindTest.pm | 148 ++++---
src/bin/pg_rewind/filemap.c | 19 +-
src/bin/pg_rewind/filemap.h | 28 +-
src/bin/pg_rewind/parsexlog.c | 10 +-
src/bin/pg_rewind/pg_rewind.c | 8 +-
src/bin/pg_rewind/t/001_basic.pl | 20 +-
src/bin/pg_rewind/t/002_databases.pl | 8 +-
src/bin/pg_rewind/t/003_extrafiles.pl | 56 ++-
src/bin/pg_upgrade/check.c | 29 +-
src/bin/pg_upgrade/dump.c | 8 +-
src/bin/pg_upgrade/info.c | 160 +++----
src/bin/pg_upgrade/option.c | 12 +-
src/bin/pg_upgrade/pg_upgrade.c | 6 +-
src/bin/pg_upgrade/pg_upgrade.h | 6 +-
src/bin/pg_upgrade/relfilenode.c | 8 +-
src/bin/pg_upgrade/server.c | 11 +-
src/bin/pg_upgrade/version.c | 4 +-
src/bin/pg_xlogdump/pg_xlogdump.c | 30 +-
src/bin/pgbench/pgbench.c | 119 ++---
src/bin/pgbench/pgbench.h | 34 +-
src/bin/psql/command.c | 31 +-
src/bin/psql/common.c | 9 +-
src/bin/psql/common.h | 2 +-
src/bin/psql/copy.c | 2 +
src/bin/psql/describe.c | 36 +-
src/bin/psql/help.c | 40 +-
src/bin/psql/print.c | 60 ++-
src/bin/psql/print.h | 8 +-
src/bin/psql/startup.c | 2 +-
src/bin/psql/tab-complete.c | 54 ++-
src/bin/scripts/common.c | 2 +-
src/bin/scripts/reindexdb.c | 20 +-
src/bin/scripts/t/102_vacuumdb_stages.pl | 2 +-
src/bin/scripts/vacuumdb.c | 4 +-
src/common/restricted_token.c | 2 +-
src/include/access/brin_page.h | 4 +-
src/include/access/commit_ts.h | 8 +-
src/include/access/gin.h | 2 +-
src/include/access/gist_private.h | 4 +-
src/include/access/hash.h | 2 +-
src/include/access/heapam.h | 2 +-
src/include/access/htup_details.h | 2 +-
src/include/access/multixact.h | 2 +-
src/include/access/parallel.h | 18 +-
src/include/access/relscan.h | 4 +-
src/include/access/stratnum.h | 2 +-
src/include/access/tablesample.h | 27 +-
src/include/access/xact.h | 52 +--
src/include/access/xlog.h | 6 +-
src/include/access/xloginsert.h | 15 +-
src/include/access/xlogreader.h | 2 +-
src/include/access/xlogrecord.h | 18 +-
src/include/access/xlogutils.h | 10 +-
src/include/bootstrap/bootstrap.h | 2 +-
src/include/catalog/binary_upgrade.h | 2 +-
src/include/catalog/index.h | 4 +-
src/include/catalog/indexing.h | 2 +-
src/include/catalog/objectaddress.h | 2 +-
src/include/catalog/opfam_internal.h | 2 +-
src/include/catalog/pg_aggregate.h | 30 +-
src/include/catalog/pg_amop.h | 476 ++++++++++----------
src/include/catalog/pg_amproc.h | 368 ++++++++--------
src/include/catalog/pg_attribute.h | 12 +-
src/include/catalog/pg_cast.h | 2 +-
src/include/catalog/pg_class.h | 2 +-
src/include/catalog/pg_control.h | 6 +-
src/include/catalog/pg_description.h | 2 +-
src/include/catalog/pg_extension.h | 2 +-
src/include/catalog/pg_largeobject.h | 2 +-
src/include/catalog/pg_opclass.h | 18 +-
src/include/catalog/pg_operator.h | 4 +-
src/include/catalog/pg_pltemplate.h | 5 +-
src/include/catalog/pg_policy.h | 20 +-
src/include/catalog/pg_proc.h | 142 +++---
src/include/catalog/pg_replication_origin.h | 6 +-
src/include/catalog/pg_seclabel.h | 4 +-
src/include/catalog/pg_shdescription.h | 2 +-
src/include/catalog/pg_shseclabel.h | 4 +-
src/include/catalog/pg_tablesample_method.h | 21 +-
src/include/catalog/pg_transform.h | 2 +-
src/include/catalog/pg_trigger.h | 2 +-
src/include/catalog/pg_type.h | 6 +-
src/include/commands/defrem.h | 6 +-
src/include/commands/event_trigger.h | 2 +-
src/include/commands/explain.h | 2 +-
src/include/commands/vacuum.h | 20 +-
src/include/common/fe_memutils.h | 4 +-
src/include/common/pg_lzcompress.h | 2 +-
src/include/common/restricted_token.h | 8 +-
src/include/common/string.h | 4 +-
src/include/executor/executor.h | 2 +-
src/include/executor/hashjoin.h | 18 +-
src/include/fmgr.h | 2 +-
src/include/funcapi.h | 2 +-
src/include/lib/bipartite_match.h | 2 +-
src/include/lib/hyperloglog.h | 2 +-
src/include/lib/pairingheap.h | 14 +-
src/include/libpq/libpq-be.h | 4 +-
src/include/libpq/libpq.h | 22 +-
src/include/libpq/pqmq.h | 2 +-
src/include/nodes/execnodes.h | 22 +-
src/include/nodes/nodes.h | 6 +-
src/include/nodes/parsenodes.h | 57 +--
src/include/nodes/plannodes.h | 6 +-
src/include/nodes/primnodes.h | 11 +-
src/include/optimizer/pathnode.h | 2 +-
src/include/optimizer/prep.h | 2 +-
src/include/optimizer/tlist.h | 2 +-
src/include/parser/parse_clause.h | 2 +-
src/include/parser/parse_func.h | 6 +-
src/include/parser/parse_relation.h | 12 +-
src/include/pgstat.h | 32 +-
src/include/port/atomics.h | 39 +-
src/include/port/atomics/arch-ia64.h | 6 +-
src/include/port/atomics/arch-x86.h | 131 +++---
src/include/port/atomics/fallback.h | 25 +-
src/include/port/atomics/generic-acc.h | 45 +-
src/include/port/atomics/generic-gcc.h | 49 ++-
src/include/port/atomics/generic-msvc.h | 25 +-
src/include/port/atomics/generic-sunpro.h | 31 +-
src/include/port/atomics/generic-xlc.h | 35 +-
src/include/port/atomics/generic.h | 52 ++-
src/include/port/pg_crc32c.h | 2 +-
src/include/postmaster/bgworker.h | 2 +-
src/include/replication/origin.h | 24 +-
src/include/replication/output_plugin.h | 2 +-
src/include/replication/reorderbuffer.h | 4 +-
src/include/replication/walsender.h | 2 +-
src/include/rewrite/rowsecurity.h | 28 +-
src/include/storage/lmgr.h | 6 +-
src/include/storage/lock.h | 2 +-
src/include/storage/shm_mq.h | 6 +-
src/include/tcop/deparse_utility.h | 40 +-
src/include/tcop/fastpath.h | 2 +-
src/include/utils/acl.h | 2 +-
src/include/utils/aclchk_internal.h | 2 +-
src/include/utils/builtins.h | 6 +-
src/include/utils/guc.h | 3 +-
src/include/utils/guc_tables.h | 2 +-
src/include/utils/jsonapi.h | 2 +-
src/include/utils/jsonb.h | 10 +-
src/include/utils/lsyscache.h | 4 +-
src/include/utils/palloc.h | 2 +-
src/include/utils/pg_crc.h | 2 +-
src/include/utils/plancache.h | 2 +-
src/include/utils/rls.h | 18 +-
src/include/utils/ruleutils.h | 2 +-
src/include/utils/sampling.h | 10 +-
src/include/utils/selfuncs.h | 2 +-
src/include/utils/snapshot.h | 2 +-
src/include/utils/sortsupport.h | 83 ++--
src/interfaces/ecpg/ecpglib/data.c | 3 +-
src/interfaces/ecpg/ecpglib/execute.c | 14 +-
src/interfaces/ecpg/ecpglib/memory.c | 2 +-
src/interfaces/ecpg/preproc/parse.pl | 21 +-
src/interfaces/libpq/fe-connect.c | 11 +-
src/interfaces/libpq/fe-misc.c | 14 +-
src/interfaces/libpq/fe-secure-openssl.c | 86 ++--
src/interfaces/libpq/fe-secure.c | 18 +-
src/pl/plperl/plperl.c | 2 +-
src/pl/plpython/plpy_procedure.c | 5 +-
src/pl/plpython/plpy_typeio.c | 90 ++--
src/port/gettimeofday.c | 22 +-
src/port/pg_crc32c_choose.c | 4 +-
src/port/pg_crc32c_sse42.c | 3 +-
src/port/win32setlocale.c | 8 +-
.../modules/test_ddl_deparse/test_ddl_deparse.c | 8 +-
src/test/modules/test_rls_hooks/test_rls_hooks.c | 84 ++--
src/test/perl/TestLib.pm | 13 +-
src/test/regress/pg_regress.c | 19 +-
src/test/regress/regress.c | 22 +-
src/test/ssl/ServerSetup.pm | 105 ++---
src/test/ssl/t/001_ssltests.pl | 110 +++--
src/tools/msvc/Install.pm | 24 +-
src/tools/msvc/Mkvcbuild.pm | 126 +++---
src/tools/msvc/Project.pm | 1 +
src/tools/msvc/Solution.pm | 19 +-
src/tools/msvc/VCBuildProject.pm | 2 +-
src/tools/msvc/VSObjectFactory.pm | 5 +-
src/tools/msvc/config_default.pl | 34 +-
src/tools/msvc/vcregress.pl | 42 +-
414 files changed, 5830 insertions(+), 5328 deletions(-)
(limited to 'src/backend/executor/nodeModifyTable.c')
diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 6e3bf172e5..f74e912ed7 100644
--- a/contrib/btree_gin/btree_gin.c
+++ b/contrib/btree_gin/btree_gin.c
@@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
cmp;
cmp = DatumGetInt32(DirectFunctionCall2Coll(
- data->typecmp,
- PG_GET_COLLATION(),
- (data->strategy == BTLessStrategyNumber ||
- data->strategy == BTLessEqualStrategyNumber)
- ? data->datum : a,
- b));
+ data->typecmp,
+ PG_GET_COLLATION(),
+ (data->strategy == BTLessStrategyNumber ||
+ data->strategy == BTLessEqualStrategyNumber)
+ ? data->datum : a,
+ b));
switch (data->strategy)
{
@@ -186,14 +186,14 @@ Datum \
gin_extract_value_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_value(fcinfo, is_varlena); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
Datum \
gin_extract_query_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_query(fcinfo, \
is_varlena, leftmostvalue, typecmp); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
Datum \
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
@@ -209,6 +209,7 @@ leftmostvalue_int2(void)
{
return Int16GetDatum(SHRT_MIN);
}
+
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
static Datum
@@ -216,6 +217,7 @@ leftmostvalue_int4(void)
{
return Int32GetDatum(INT_MIN);
}
+
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
static Datum
@@ -226,6 +228,7 @@ leftmostvalue_int8(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
static Datum
@@ -233,6 +236,7 @@ leftmostvalue_float4(void)
{
return Float4GetDatum(-get_float4_infinity());
}
+
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
static Datum
@@ -240,6 +244,7 @@ leftmostvalue_float8(void)
{
return Float8GetDatum(-get_float8_infinity());
}
+
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
static Datum
@@ -250,6 +255,7 @@ leftmostvalue_money(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
static Datum
@@ -257,6 +263,7 @@ leftmostvalue_oid(void)
{
return ObjectIdGetDatum(0);
}
+
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
static Datum
@@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
{
return TimestampGetDatum(DT_NOBEGIN);
}
+
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
@@ -273,6 +281,7 @@ leftmostvalue_time(void)
{
return TimeADTGetDatum(0);
}
+
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
static Datum
@@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
return TimeTzADTPGetDatum(v);
}
+
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
static Datum
@@ -292,6 +302,7 @@ leftmostvalue_date(void)
{
return DateADTGetDatum(DATEVAL_NOBEGIN);
}
+
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
static Datum
@@ -304,6 +315,7 @@ leftmostvalue_interval(void)
v->month = 0;
return IntervalPGetDatum(v);
}
+
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
static Datum
@@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
return MacaddrPGetDatum(v);
}
+
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
static Datum
@@ -320,6 +333,7 @@ leftmostvalue_inet(void)
{
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
}
+
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
@@ -329,6 +343,7 @@ leftmostvalue_text(void)
{
return PointerGetDatum(cstring_to_text_with_len("", 0));
}
+
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
static Datum
@@ -336,6 +351,7 @@ leftmostvalue_char(void)
{
return CharGetDatum(SCHAR_MIN);
}
+
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
@@ -348,6 +364,7 @@ leftmostvalue_bit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
static Datum
@@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
/*
@@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
{
return PointerGetDatum(NULL);
}
+
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 5bfe659f91..99cb41f5f5 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -13,7 +13,7 @@
GISTENTRY *
gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
- GISTENTRY *retval;
+ GISTENTRY *retval;
if (entry->leafkey)
{
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index 78e8662add..8105a3b035 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
* Create a leaf-entry to store in the index, from a single Datum.
*/
static GBT_VARKEY *
-gbt_var_key_from_datum(const struct varlena *u)
+gbt_var_key_from_datum(const struct varlena * u)
{
int32 lowersize = VARSIZE(u);
GBT_VARKEY *r;
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index 7c8457e734..147c8501ee 100644
--- a/contrib/fuzzystrmatch/dmetaphone.c
+++ b/contrib/fuzzystrmatch/dmetaphone.c
@@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
* in a case like this.
*/
-#define META_FREE(x) ((void)true) /* pfree((x)) */
+#define META_FREE(x) ((void)true) /* pfree((x)) */
#else /* not defined DMETAPHONE_MAIN */
/* use the standard malloc library when not running in PostgreSQL */
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index dde37fb6e6..0fb769de7d 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -72,7 +72,7 @@ typedef struct
static pg_crc32
crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
INIT_TRADITIONAL_CRC32(crc);
COMP_TRADITIONAL_CRC32(crc, buf, size);
diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c
index cdc224c30e..dcc74b12e8 100644
--- a/contrib/hstore_plperl/hstore_plperl.c
+++ b/contrib/hstore_plperl/hstore_plperl.c
@@ -9,7 +9,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(hstore_to_plperl);
-Datum hstore_to_plperl(PG_FUNCTION_ARGS);
+Datum hstore_to_plperl(PG_FUNCTION_ARGS);
Datum
hstore_to_plperl(PG_FUNCTION_ARGS)
@@ -26,10 +26,10 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
for (i = 0; i < count; i++)
{
const char *key;
- SV *value;
+ SV *value;
key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
- value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
+ value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
(void) hv_store(hv, key, strlen(key), value, 0);
}
@@ -39,7 +39,7 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(plperl_to_hstore);
-Datum plperl_to_hstore(PG_FUNCTION_ARGS);
+Datum plperl_to_hstore(PG_FUNCTION_ARGS);
Datum
plperl_to_hstore(PG_FUNCTION_ARGS)
@@ -61,8 +61,8 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
i = 0;
while ((he = hv_iternext(hv)))
{
- char *key = sv2cstr(HeSVKEY_force(he));
- SV *value = HeVAL(he);
+ char *key = sv2cstr(HeSVKEY_force(he));
+ SV *value = HeVAL(he);
pairs[i].key = pstrdup(key);
pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
index 92cd4f800f..94404a5061 100644
--- a/contrib/hstore_plpython/hstore_plpython.c
+++ b/contrib/hstore_plpython/hstore_plpython.c
@@ -8,7 +8,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(hstore_to_plpython);
-Datum hstore_to_plpython(PG_FUNCTION_ARGS);
+Datum hstore_to_plpython(PG_FUNCTION_ARGS);
Datum
hstore_to_plpython(PG_FUNCTION_ARGS)
@@ -31,9 +31,9 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PyDict_SetItem(dict, key, Py_None);
else
{
- PyObject *value;
+ PyObject *value;
- value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
+ value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
PyDict_SetItem(dict, key, value);
Py_XDECREF(value);
}
@@ -45,7 +45,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(plpython_to_hstore);
-Datum plpython_to_hstore(PG_FUNCTION_ARGS);
+Datum plpython_to_hstore(PG_FUNCTION_ARGS);
Datum
plpython_to_hstore(PG_FUNCTION_ARGS)
@@ -75,9 +75,9 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
for (i = 0; i < pcount; i++)
{
- PyObject *tuple;
- PyObject *key;
- PyObject *value;
+ PyObject *tuple;
+ PyObject *key;
+ PyObject *value;
tuple = PyList_GetItem(items, i);
key = PyTuple_GetItem(tuple, 0);
diff --git a/contrib/ltree/crc32.c b/contrib/ltree/crc32.c
index 1c08d264f7..403dae0d7d 100644
--- a/contrib/ltree/crc32.c
+++ b/contrib/ltree/crc32.c
@@ -26,13 +26,14 @@
unsigned int
ltree_crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
char *p = buf;
INIT_TRADITIONAL_CRC32(crc);
while (size > 0)
{
- char c = (char) TOLOWER(*p);
+ char c = (char) TOLOWER(*p);
+
COMP_TRADITIONAL_CRC32(crc, &c, 1);
size--;
p++;
diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c
index 111e3e356e..af166a720f 100644
--- a/contrib/ltree_plpython/ltree_plpython.c
+++ b/contrib/ltree_plpython/ltree_plpython.c
@@ -7,7 +7,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(ltree_to_plpython);
-Datum ltree_to_plpython(PG_FUNCTION_ARGS);
+Datum ltree_to_plpython(PG_FUNCTION_ARGS);
Datum
ltree_to_plpython(PG_FUNCTION_ARGS)
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index bd3191d5d2..7adcfa8937 100644
--- a/contrib/pageinspect/brinfuncs.c
+++ b/contrib/pageinspect/brinfuncs.c
@@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
{
bytea *raw_page = PG_GETARG_BYTEA_P(0);
Page page = VARDATA(raw_page);
- char *type;
+ char *type;
switch (BrinPageType(page))
{
@@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
static Page
verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
{
- Page page;
- int raw_page_size;
+ Page page;
+ int raw_page_size;
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
@@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small"),
- errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
+ errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
page = VARDATA(raw_page);
@@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
indexRel = index_open(indexRelid, AccessShareLock);
state = palloc(offsetof(brin_page_state, columns) +
- sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
+ sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
state->bdesc = brin_build_desc(indexRel);
state->page = page;
@@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
{
- Oid output;
- bool isVarlena;
+ Oid output;
+ bool isVarlena;
BrinOpcInfo *opcinfo;
- int i;
+ int i;
brin_column_state *column;
opcinfo = state->bdesc->bd_info[attno - 1];
@@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
if (state->dtup == NULL)
{
- BrinTuple *tup;
+ BrinTuple *tup;
MemoryContext mctx;
ItemId itemId;
@@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
if (ItemIdIsUsed(itemId))
{
tup = (BrinTuple *) PageGetItem(state->page,
- PageGetItemId(state->page,
- state->offset));
+ PageGetItemId(state->page,
+ state->offset));
state->dtup = brin_deform_tuple(state->bdesc, tup);
state->attno = 1;
state->unusedItem = false;
@@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
}
else
{
- int att = state->attno - 1;
+ int att = state->attno - 1;
values[0] = UInt16GetDatum(state->offset);
values[1] = UInt32GetDatum(state->dtup->bt_blkno);
@@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
values[5] = BoolGetDatum(state->dtup->bt_placeholder);
if (!state->dtup->bt_columns[att].bv_allnulls)
{
- BrinValues *bvalues = &state->dtup->bt_columns[att];
- StringInfoData s;
+ BrinValues *bvalues = &state->dtup->bt_columns[att];
+ StringInfoData s;
bool first;
int i;
@@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
first = true;
for (i = 0; i < state->columns[att]->nstored; i++)
{
- char *val;
+ char *val;
if (!first)
appendStringInfoString(&s, " .. ");
@@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
}
/*
- * If we're beyond the end of the page, set flag to end the function in
- * the following iteration.
+ * If we're beyond the end of the page, set flag to end the function
+ * in the following iteration.
*/
if (state->offset > PageGetMaxOffsetNumber(state->page))
state->done = true;
@@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
struct
{
ItemPointerData *tids;
- int idx;
- } *state;
+ int idx;
+ } *state;
FuncCallContext *fctx;
if (!superuser())
diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c
index 701b2ca763..c0de3be8df 100644
--- a/contrib/pageinspect/ginfuncs.c
+++ b/contrib/pageinspect/ginfuncs.c
@@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
TupleDesc tupd;
GinPostingList *seg;
GinPostingList *lastseg;
-} gin_leafpage_items_state;
+} gin_leafpage_items_state;
Datum
gin_leafpage_items(PG_FUNCTION_ARGS)
diff --git a/contrib/pg_audit/pg_audit.c b/contrib/pg_audit/pg_audit.c
index 4b75fefc34..a4b05a6df1 100644
--- a/contrib/pg_audit/pg_audit.c
+++ b/contrib/pg_audit/pg_audit.c
@@ -40,11 +40,11 @@
PG_MODULE_MAGIC;
-void _PG_init(void);
+void _PG_init(void);
/* Prototypes for functions used with event triggers */
-Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
-Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
+Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
+Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(pg_audit_ddl_command_end);
PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
@@ -67,14 +67,14 @@ PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
#define LOG_ROLE (1 << 4) /* GRANT/REVOKE, CREATE/ALTER/DROP ROLE */
#define LOG_WRITE (1 << 5) /* INSERT, UPDATE, DELETE, TRUNCATE */
-#define LOG_NONE 0 /* nothing */
+#define LOG_NONE 0 /* nothing */
#define LOG_ALL (0xFFFFFFFF) /* All */
/* GUC variable for pg_audit.log, which defines the classes to log. */
-char *auditLog = NULL;
+char *auditLog = NULL;
/* Bitmap of classes selected */
-static int auditLogBitmap = LOG_NONE;
+static int auditLogBitmap = LOG_NONE;
/*
* String constants for log classes - used when processing tokens in the
@@ -97,7 +97,7 @@ static int auditLogBitmap = LOG_NONE;
* the query are in pg_catalog. Interactive sessions (eg: psql) can cause
* a lot of noise in the logs which might be uninteresting.
*/
-bool auditLogCatalog = true;
+bool auditLogCatalog = true;
/*
* GUC variable for pg_audit.log_level
@@ -106,8 +106,8 @@ bool auditLogCatalog = true;
* at. The default level is LOG, which goes into the server log but does
* not go to the client. Set to NOTICE in the regression tests.
*/
-char *auditLogLevelString = NULL;
-int auditLogLevel = LOG;
+char *auditLogLevelString = NULL;
+int auditLogLevel = LOG;
/*
* GUC variable for pg_audit.log_parameter
@@ -115,7 +115,7 @@ int auditLogLevel = LOG;
* Administrators can choose if parameters passed into a statement are
* included in the audit log.
*/
-bool auditLogParameter = false;
+bool auditLogParameter = false;
/*
* GUC variable for pg_audit.log_relation
@@ -124,7 +124,7 @@ bool auditLogParameter = false;
* in READ/WRITE class queries. By default, SESSION logs include the query but
* do not have a log entry for each relation.
*/
-bool auditLogRelation = false;
+bool auditLogRelation = false;
/*
* GUC variable for pg_audit.log_statement_once
@@ -134,7 +134,7 @@ bool auditLogRelation = false;
* the audit log to facilitate searching, but this can cause the log to be
* unnecessairly bloated in some environments.
*/
-bool auditLogStatementOnce = false;
+bool auditLogStatementOnce = false;
/*
* GUC variable for pg_audit.role
@@ -143,7 +143,7 @@ bool auditLogStatementOnce = false;
* Object-level auditing uses the privileges which are granted to this role to
* determine if a statement should be logged.
*/
-char *auditRole = NULL;
+char *auditRole = NULL;
/*
* String constants for the audit log fields.
@@ -213,23 +213,23 @@ char *auditRole = NULL;
*/
typedef struct
{
- int64 statementId; /* Simple counter */
- int64 substatementId; /* Simple counter */
+ int64 statementId; /* Simple counter */
+ int64 substatementId; /* Simple counter */
LogStmtLevel logStmtLevel; /* From GetCommandLogLevel when possible, */
- /* generated when not. */
- NodeTag commandTag; /* same here */
+ /* generated when not. */
+ NodeTag commandTag; /* same here */
const char *command; /* same here */
const char *objectType; /* From event trigger when possible */
- /* generated when not. */
- char *objectName; /* Fully qualified object identification */
+ /* generated when not. */
+ char *objectName; /* Fully qualified object identification */
const char *commandText; /* sourceText / queryString */
ParamListInfo paramList; /* QueryDesc/ProcessUtility parameters */
- bool granted; /* Audit role has object permissions? */
- bool logged; /* Track if we have logged this event, used */
- /* post-ProcessUtility to make sure we log */
- bool statementLogged; /* Track if we have logged the statement */
+ bool granted; /* Audit role has object permissions? */
+ bool logged; /* Track if we have logged this event, used */
+ /* post-ProcessUtility to make sure we log */
+ bool statementLogged; /* Track if we have logged the statement */
} AuditEvent;
/*
@@ -239,9 +239,9 @@ typedef struct AuditEventStackItem
{
struct AuditEventStackItem *next;
- AuditEvent auditEvent;
+ AuditEvent auditEvent;
- int64 stackId;
+ int64 stackId;
MemoryContext contextAudit;
MemoryContextCallback contextCallback;
@@ -288,7 +288,7 @@ stack_free(void *stackFree)
while (nextItem != NULL)
{
/* Check if this item matches the item to be freed */
- if (nextItem == (AuditEventStackItem *)stackFree)
+ if (nextItem == (AuditEventStackItem *) stackFree)
{
/* Move top of stack to the item after the freed item */
auditEventStack = nextItem->next;
@@ -309,7 +309,8 @@ stack_free(void *stackFree)
substatementTotal = 0;
/*
- * Reset statement logged so that next statement will be logged.
+ * Reset statement logged so that next statement will be
+ * logged.
*/
statementLogged = false;
}
@@ -356,7 +357,7 @@ stack_push()
* the stack at this item.
*/
stackItem->contextCallback.func = stack_free;
- stackItem->contextCallback.arg = (void *)stackItem;
+ stackItem->contextCallback.arg = (void *) stackItem;
MemoryContextRegisterResetCallback(contextAudit,
&stackItem->contextCallback);
@@ -431,7 +432,7 @@ append_valid_csv(StringInfoData *buffer, const char *appendStr)
for (pChar = appendStr; *pChar; pChar++)
{
- if (*pChar == '"') /* double single quotes */
+ if (*pChar == '"') /* double single quotes */
appendStringInfoCharMacro(buffer, *pChar);
appendStringInfoCharMacro(buffer, *pChar);
@@ -461,23 +462,23 @@ static void
log_audit_event(AuditEventStackItem *stackItem)
{
/* By default, put everything in the MISC class. */
- int class = LOG_MISC;
- const char *className = CLASS_MISC;
- MemoryContext contextOld;
- StringInfoData auditStr;
+ int class = LOG_MISC;
+ const char *className = CLASS_MISC;
+ MemoryContext contextOld;
+ StringInfoData auditStr;
/* Classify the statement using log stmt level and the command tag */
switch (stackItem->auditEvent.logStmtLevel)
{
- /* All mods go in WRITE class, execpt EXECUTE */
+ /* All mods go in WRITE class, execpt EXECUTE */
case LOGSTMT_MOD:
className = CLASS_WRITE;
class = LOG_WRITE;
switch (stackItem->auditEvent.commandTag)
{
- /* Currently, only EXECUTE is different */
+ /* Currently, only EXECUTE is different */
case T_ExecuteStmt:
className = CLASS_MISC;
class = LOG_MISC;
@@ -487,7 +488,7 @@ log_audit_event(AuditEventStackItem *stackItem)
}
break;
- /* These are DDL, unless they are ROLE */
+ /* These are DDL, unless they are ROLE */
case LOGSTMT_DDL:
className = CLASS_DDL;
class = LOG_DDL;
@@ -495,7 +496,7 @@ log_audit_event(AuditEventStackItem *stackItem)
/* Identify role statements */
switch (stackItem->auditEvent.commandTag)
{
- /* We know these are all role statements */
+ /* We know these are all role statements */
case T_GrantStmt:
case T_GrantRoleStmt:
case T_CreateRoleStmt:
@@ -505,11 +506,12 @@ log_audit_event(AuditEventStackItem *stackItem)
className = CLASS_ROLE;
class = LOG_ROLE;
break;
- /*
- * Rename and Drop are general and therefore we have to do an
- * additional check against the command string to see if they
- * are role or regular DDL.
- */
+
+ /*
+ * Rename and Drop are general and therefore we have to do
+ * an additional check against the command string to see
+ * if they are role or regular DDL.
+ */
case T_RenameStmt:
case T_DropStmt:
if (pg_strcasecmp(stackItem->auditEvent.command,
@@ -527,11 +529,11 @@ log_audit_event(AuditEventStackItem *stackItem)
}
break;
- /* Classify the rest */
+ /* Classify the rest */
case LOGSTMT_ALL:
switch (stackItem->auditEvent.commandTag)
{
- /* READ statements */
+ /* READ statements */
case T_CopyStmt:
case T_SelectStmt:
case T_PrepareStmt:
@@ -540,7 +542,7 @@ log_audit_event(AuditEventStackItem *stackItem)
class = LOG_READ;
break;
- /* FUNCTION statements */
+ /* FUNCTION statements */
case T_DoStmt:
className = CLASS_FUNCTION;
class = LOG_FUNCTION;
@@ -558,8 +560,8 @@ log_audit_event(AuditEventStackItem *stackItem)
/*
* Only log the statement if:
*
- * 1. If object was selected for audit logging (granted)
- * 2. The statement belongs to a class that is being logged
+ * 1. If object was selected for audit logging (granted) 2. The statement
+ * belongs to a class that is being logged
*
* If neither of these is true, return.
*/
@@ -615,10 +617,10 @@ log_audit_event(AuditEventStackItem *stackItem)
/* Handle parameter logging, if enabled. */
if (auditLogParameter)
{
- int paramIdx;
- int numParams;
- StringInfoData paramStrResult;
- ParamListInfo paramList = stackItem->auditEvent.paramList;
+ int paramIdx;
+ int numParams;
+ StringInfoData paramStrResult;
+ ParamListInfo paramList = stackItem->auditEvent.paramList;
numParams = paramList == NULL ? 0 : paramList->numParams;
@@ -630,9 +632,9 @@ log_audit_event(AuditEventStackItem *stackItem)
paramIdx++)
{
ParamExternData *prm = ¶mList->params[paramIdx];
- Oid typeOutput;
- bool typeIsVarLena;
- char *paramStr;
+ Oid typeOutput;
+ bool typeIsVarLena;
+ char *paramStr;
/* Add a comma for each param */
if (paramIdx != 0)
@@ -663,7 +665,7 @@ log_audit_event(AuditEventStackItem *stackItem)
else
/* we were asked to not log it */
appendStringInfoString(&auditStr,
- ",");
+ ",");
/*
* Log the audit entry. Note: use of INT64_FORMAT here is bad for
@@ -696,7 +698,7 @@ audit_on_acl(Datum aclDatum,
{
bool result = false;
Acl *acl;
- AclItem *aclItemData;
+ AclItem *aclItemData;
int aclIndex;
int aclTotal;
@@ -710,7 +712,7 @@ audit_on_acl(Datum aclDatum,
/* Check privileges granted directly to auditOid */
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
{
- AclItem *aclItem = &aclItemData[aclIndex];
+ AclItem *aclItem = &aclItemData[aclIndex];
if (aclItem->ai_grantee == auditOid &&
aclItem->ai_privs & mask)
@@ -731,7 +733,7 @@ audit_on_acl(Datum aclDatum,
{
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
{
- AclItem *aclItem = &aclItemData[aclIndex];
+ AclItem *aclItem = &aclItemData[aclIndex];
/* Don't test public or auditOid (it has been tested already) */
if (aclItem->ai_grantee == ACL_ID_PUBLIC ||
@@ -838,9 +840,9 @@ audit_on_any_attribute(Oid relOid,
Bitmapset *attributeSet,
AclMode mode)
{
- bool result = false;
- AttrNumber col;
- Bitmapset *tmpSet;
+ bool result = false;
+ AttrNumber col;
+ Bitmapset *tmpSet;
/* If bms is empty then check for any column match */
if (bms_is_empty(attributeSet))
@@ -891,9 +893,9 @@ audit_on_any_attribute(Oid relOid,
static void
log_select_dml(Oid auditOid, List *rangeTabls)
{
- ListCell *lr;
- bool first = true;
- bool found = false;
+ ListCell *lr;
+ bool first = true;
+ bool found = false;
/* Do not log if this is an internal statement */
if (internalStatement)
@@ -901,8 +903,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
foreach(lr, rangeTabls)
{
- Oid relOid;
- Relation rel;
+ Oid relOid;
+ Relation rel;
RangeTblEntry *rte = lfirst(lr);
/* We only care about tables, and can ignore subqueries etc. */
@@ -912,8 +914,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
found = true;
/*
- * If we are not logging all-catalog queries (auditLogCatalog is false)
- * then filter out any system relations here.
+ * If we are not logging all-catalog queries (auditLogCatalog is
+ * false) then filter out any system relations here.
*/
relOid = rte->relid;
rel = relation_open(relOid, NoLock);
@@ -982,63 +984,72 @@ log_select_dml(Oid auditOid, List *rangeTabls)
{
case RELKIND_RELATION:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_TABLE;
+ OBJECT_TYPE_TABLE;
+
break;
case RELKIND_INDEX:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_INDEX;
+ OBJECT_TYPE_INDEX;
+
break;
case RELKIND_SEQUENCE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_SEQUENCE;
+ OBJECT_TYPE_SEQUENCE;
+
break;
case RELKIND_TOASTVALUE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_TOASTVALUE;
+ OBJECT_TYPE_TOASTVALUE;
+
break;
case RELKIND_VIEW:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_VIEW;
+ OBJECT_TYPE_VIEW;
+
break;
case RELKIND_COMPOSITE_TYPE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_COMPOSITE_TYPE;
+ OBJECT_TYPE_COMPOSITE_TYPE;
+
break;
case RELKIND_FOREIGN_TABLE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_FOREIGN_TABLE;
+ OBJECT_TYPE_FOREIGN_TABLE;
+
break;
case RELKIND_MATVIEW:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_MATVIEW;
+ OBJECT_TYPE_MATVIEW;
+
break;
default:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_UNKNOWN;
+ OBJECT_TYPE_UNKNOWN;
+
break;
}
/* Get a copy of the relation name and assign it to object name */
auditEventStack->auditEvent.objectName =
quote_qualified_identifier(get_namespace_name(
- RelationGetNamespace(rel)),
+ RelationGetNamespace(rel)),
RelationGetRelationName(rel));
relation_close(rel, NoLock);
/* Perform object auditing only if the audit role is valid */
if (auditOid != InvalidOid)
{
- AclMode auditPerms =
- (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
- rte->requiredPerms;
+ AclMode auditPerms =
+ (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
+ rte->requiredPerms;
/*
* If any of the required permissions for the relation are granted
@@ -1104,8 +1115,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
/*
* If no tables were found that means that RangeTbls was empty or all
- * relations were in the system schema. In that case still log a
- * session record.
+ * relations were in the system schema. In that case still log a session
+ * record.
*/
if (!found)
{
@@ -1123,7 +1134,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
static void
log_function_execute(Oid objectId)
{
- HeapTuple proctup;
+ HeapTuple proctup;
Form_pg_proc proc;
AuditEventStackItem *stackItem;
@@ -1159,6 +1170,7 @@ log_function_execute(Oid objectId)
stackItem->auditEvent.commandTag = T_DoStmt;
stackItem->auditEvent.command = COMMAND_EXECUTE;
stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
+
stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
log_audit_event(stackItem);
@@ -1236,9 +1248,9 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
standard_ExecutorStart(queryDesc, eflags);
/*
- * Move the stack memory context to the query memory context. This needs to
- * be done here because the query context does not exist before the call
- * to standard_ExecutorStart() but the stack item is required by
+ * Move the stack memory context to the query memory context. This needs
+ * to be done here because the query context does not exist before the
+ * call to standard_ExecutorStart() but the stack item is required by
* pg_audit_ExecutorCheckPerms_hook() which is called during
* standard_ExecutorStart().
*/
@@ -1253,7 +1265,7 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
static bool
pg_audit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort)
{
- Oid auditOid;
+ Oid auditOid;
/* Get the audit oid if the role exists */
auditOid = get_role_oid(auditRole, true);
@@ -1283,7 +1295,7 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
char *completionTag)
{
AuditEventStackItem *stackItem = NULL;
- int64 stackId = 0;
+ int64 stackId = 0;
/*
* Don't audit substatements. All the substatements we care about should
@@ -1328,19 +1340,22 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
params, dest, completionTag);
/*
- * Process the audit event if there is one. Also check that this event was
- * not popped off the stack by a memory context being free'd elsewhere.
+ * Process the audit event if there is one. Also check that this event
+ * was not popped off the stack by a memory context being free'd
+ * elsewhere.
*/
if (stackItem && !IsAbortedTransactionBlockState())
{
/*
- * Make sure the item we want to log is still on the stack - if not then
- * something has gone wrong and an error will be raised.
+ * Make sure the item we want to log is still on the stack - if not
+ * then something has gone wrong and an error will be raised.
*/
stack_valid(stackId);
- /* Log the utility command if logging is on, the command has not already
- * been logged by another hook, and the transaction is not aborted.
+ /*
+ * Log the utility command if logging is on, the command has not
+ * already been logged by another hook, and the transaction is not
+ * aborted.
*/
if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
log_audit_event(stackItem);
@@ -1380,11 +1395,12 @@ Datum
pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
{
EventTriggerData *eventData;
- int result, row;
- TupleDesc spiTupDesc;
- const char *query;
- MemoryContext contextQuery;
- MemoryContext contextOld;
+ int result,
+ row;
+ TupleDesc spiTupDesc;
+ const char *query;
+ MemoryContext contextQuery;
+ MemoryContext contextOld;
/* Continue only if session DDL logging is enabled */
if (~auditLogBitmap & LOG_DDL)
@@ -1393,7 +1409,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
/* Be sure the module was loaded */
if (!auditEventStack)
elog(ERROR, "pg_audit not loaded before call to "
- "pg_audit_ddl_command_end()");
+ "pg_audit_ddl_command_end()");
/* This is an internal statement - do not log it */
internalStatement = true;
@@ -1404,11 +1420,11 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
/* Switch memory context for query */
contextQuery = AllocSetContextCreate(
- CurrentMemoryContext,
- "pg_audit_func_ddl_command_end temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ CurrentMemoryContext,
+ "pg_audit_func_ddl_command_end temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
contextOld = MemoryContextSwitchTo(contextQuery);
/* Get information about triggered events */
@@ -1423,31 +1439,32 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
/* Return objects affected by the (non drop) DDL statement */
query = "SELECT UPPER(object_type), object_identity\n"
- " FROM pg_event_trigger_ddl_commands()";
+ " FROM pg_event_trigger_ddl_commands()";
/* Attempt to connect */
result = SPI_connect();
if (result < 0)
elog(ERROR, "pg_audit_ddl_command_end: SPI_connect returned %d",
- result);
+ result);
/* Execute the query */
result = SPI_execute(query, true, 0);
if (result != SPI_OK_SELECT)
elog(ERROR, "pg_audit_ddl_command_end: SPI_execute returned %d",
- result);
+ result);
/* Iterate returned rows */
spiTupDesc = SPI_tuptable->tupdesc;
for (row = 0; row < SPI_processed; row++)
{
- HeapTuple spiTuple;
+ HeapTuple spiTuple;
spiTuple = SPI_tuptable->vals[row];
/* Supply object name and type for audit event */
auditEventStack->auditEvent.objectType =
- SPI_getvalue(spiTuple, spiTupDesc, 1);
+ SPI_getvalue(spiTuple, spiTupDesc, 1);
+
auditEventStack->auditEvent.objectName =
SPI_getvalue(spiTuple, spiTupDesc, 2);
@@ -1473,11 +1490,12 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
Datum
pg_audit_sql_drop(PG_FUNCTION_ARGS)
{
- int result, row;
- TupleDesc spiTupDesc;
- const char *query;
- MemoryContext contextQuery;
- MemoryContext contextOld;
+ int result,
+ row;
+ TupleDesc spiTupDesc;
+ const char *query;
+ MemoryContext contextQuery;
+ MemoryContext contextOld;
if (~auditLogBitmap & LOG_DDL)
PG_RETURN_NULL();
@@ -1485,7 +1503,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
/* Be sure the module was loaded */
if (!auditEventStack)
elog(ERROR, "pg_audit not loaded before call to "
- "pg_audit_sql_drop()");
+ "pg_audit_sql_drop()");
/* This is an internal statement - do not log it */
internalStatement = true;
@@ -1496,44 +1514,45 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
/* Switch memory context for the query */
contextQuery = AllocSetContextCreate(
- CurrentMemoryContext,
- "pg_audit_func_ddl_command_end temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ CurrentMemoryContext,
+ "pg_audit_func_ddl_command_end temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
contextOld = MemoryContextSwitchTo(contextQuery);
/* Return objects affected by the drop statement */
query = "SELECT UPPER(object_type),\n"
- " object_identity\n"
- " FROM pg_event_trigger_dropped_objects()\n"
- " WHERE lower(object_type) <> 'type'\n"
- " AND schema_name <> 'pg_toast'";
+ " object_identity\n"
+ " FROM pg_event_trigger_dropped_objects()\n"
+ " WHERE lower(object_type) <> 'type'\n"
+ " AND schema_name <> 'pg_toast'";
/* Attempt to connect */
result = SPI_connect();
if (result < 0)
elog(ERROR, "pg_audit_ddl_drop: SPI_connect returned %d",
- result);
+ result);
/* Execute the query */
result = SPI_execute(query, true, 0);
if (result != SPI_OK_SELECT)
elog(ERROR, "pg_audit_ddl_drop: SPI_execute returned %d",
- result);
+ result);
/* Iterate returned rows */
spiTupDesc = SPI_tuptable->tupdesc;
for (row = 0; row < SPI_processed; row++)
{
- HeapTuple spiTuple;
+ HeapTuple spiTuple;
spiTuple = SPI_tuptable->vals[row];
auditEventStack->auditEvent.objectType =
- SPI_getvalue(spiTuple, spiTupDesc, 1);
+ SPI_getvalue(spiTuple, spiTupDesc, 1);
+
auditEventStack->auditEvent.objectName =
- SPI_getvalue(spiTuple, spiTupDesc, 2);
+ SPI_getvalue(spiTuple, spiTupDesc, 2);
log_audit_event(auditEventStack);
}
@@ -1562,10 +1581,10 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
static bool
check_pg_audit_log(char **newVal, void **extra, GucSource source)
{
- List *flagRawList;
- char *rawVal;
- ListCell *lt;
- int *flags;
+ List *flagRawList;
+ char *rawVal;
+ ListCell *lt;
+ int *flags;
/* Make sure newval is a comma-separated list of tokens. */
rawVal = pstrdup(*newVal);
@@ -1581,18 +1600,18 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
* Check that we recognise each token, and add it to the bitmap we're
* building up in a newly-allocated int *f.
*/
- if (!(flags = (int *)malloc(sizeof(int))))
+ if (!(flags = (int *) malloc(sizeof(int))))
return false;
*flags = 0;
foreach(lt, flagRawList)
{
- bool subtract = false;
- int class;
+ bool subtract = false;
+ int class;
/* Retrieve a token */
- char *token = (char *)lfirst(lt);
+ char *token = (char *) lfirst(lt);
/* If token is preceded by -, then the token is subtractive */
if (strstr(token, "-") == token)
@@ -1651,7 +1670,7 @@ static void
assign_pg_audit_log(const char *newVal, void *extra)
{
if (extra)
- auditLogBitmap = *(int *)extra;
+ auditLogBitmap = *(int *) extra;
}
/*
@@ -1662,10 +1681,10 @@ assign_pg_audit_log(const char *newVal, void *extra)
static bool
check_pg_audit_log_level(char **newVal, void **extra, GucSource source)
{
- int *logLevel;
+ int *logLevel;
/* Allocate memory to store the log level */
- if (!(logLevel = (int *)malloc(sizeof(int))))
+ if (!(logLevel = (int *) malloc(sizeof(int))))
return false;
/* Find the log level enum */
@@ -1718,7 +1737,7 @@ static void
assign_pg_audit_log_level(const char *newVal, void *extra)
{
if (extra)
- auditLogLevel = *(int *)extra;
+ auditLogLevel = *(int *) extra;
}
/*
@@ -1729,126 +1748,126 @@ _PG_init(void)
{
/* Define pg_audit.log */
DefineCustomStringVariable(
- "pg_audit.log",
-
- "Specifies which classes of statements will be logged by session audit "
- "logging. Multiple classes can be provided using a comma-separated "
- "list and classes can be subtracted by prefacing the class with a "
- "- sign.",
-
- NULL,
- &auditLog,
- "none",
- PGC_SUSET,
- GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
- check_pg_audit_log,
- assign_pg_audit_log,
- NULL);
+ "pg_audit.log",
+
+ "Specifies which classes of statements will be logged by session audit "
+ "logging. Multiple classes can be provided using a comma-separated "
+ "list and classes can be subtracted by prefacing the class with a "
+ "- sign.",
+
+ NULL,
+ &auditLog,
+ "none",
+ PGC_SUSET,
+ GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+ check_pg_audit_log,
+ assign_pg_audit_log,
+ NULL);
/* Define pg_audit.log_catalog */
DefineCustomBoolVariable(
- "pg_audit.log_catalog",
+ "pg_audit.log_catalog",
"Specifies that session logging should be enabled in the case where "
- "all relations in a statement are in pg_catalog. Disabling this "
- "setting will reduce noise in the log from tools like psql and PgAdmin "
- "that query the catalog heavily.",
+ "all relations in a statement are in pg_catalog. Disabling this "
+ "setting will reduce noise in the log from tools like psql and PgAdmin "
+ "that query the catalog heavily.",
- NULL,
- &auditLogCatalog,
- true,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogCatalog,
+ true,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_level */
DefineCustomStringVariable(
- "pg_audit.log_level",
-
- "Specifies the log level that will be used for log entries. This "
- "setting is used for regression testing and may also be useful to end "
- "users for testing or other purposes. It is not intended to be used "
- "in a production environment as it may leak which statements are being "
- "logged to the user.",
-
- NULL,
- &auditLogLevelString,
- "log",
- PGC_SUSET,
- GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
- check_pg_audit_log_level,
- assign_pg_audit_log_level,
- NULL);
+ "pg_audit.log_level",
+
+ "Specifies the log level that will be used for log entries. This "
+ "setting is used for regression testing and may also be useful to end "
+ "users for testing or other purposes. It is not intended to be used "
+ "in a production environment as it may leak which statements are being "
+ "logged to the user.",
+
+ NULL,
+ &auditLogLevelString,
+ "log",
+ PGC_SUSET,
+ GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+ check_pg_audit_log_level,
+ assign_pg_audit_log_level,
+ NULL);
/* Define pg_audit.log_parameter */
DefineCustomBoolVariable(
- "pg_audit.log_parameter",
+ "pg_audit.log_parameter",
- "Specifies that audit logging should include the parameters that were "
- "passed with the statement. When parameters are present they will be "
- "be included in CSV format after the statement text.",
+ "Specifies that audit logging should include the parameters that were "
+ "passed with the statement. When parameters are present they will be "
+ "be included in CSV format after the statement text.",
- NULL,
- &auditLogParameter,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogParameter,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_relation */
DefineCustomBoolVariable(
- "pg_audit.log_relation",
+ "pg_audit.log_relation",
- "Specifies whether session audit logging should create a separate log "
- "entry for each relation referenced in a SELECT or DML statement. "
- "This is a useful shortcut for exhaustive logging without using object "
- "audit logging.",
+ "Specifies whether session audit logging should create a separate log "
+ "entry for each relation referenced in a SELECT or DML statement. "
+ "This is a useful shortcut for exhaustive logging without using object "
+ "audit logging.",
- NULL,
- &auditLogRelation,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogRelation,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_statement_once */
DefineCustomBoolVariable(
- "pg_audit.log_statement_once",
-
- "Specifies whether logging will include the statement text and "
- "parameters with the first log entry for a statement/substatement "
- "combination or with every entry. Disabling this setting will result "
- "in less verbose logging but may make it more difficult to determine "
- "the statement that generated a log entry, though the "
- "statement/substatement pair along with the process id should suffice "
- "to identify the statement text logged with a previous entry.",
-
- NULL,
- &auditLogStatementOnce,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ "pg_audit.log_statement_once",
+
+ "Specifies whether logging will include the statement text and "
+ "parameters with the first log entry for a statement/substatement "
+ "combination or with every entry. Disabling this setting will result "
+ "in less verbose logging but may make it more difficult to determine "
+ "the statement that generated a log entry, though the "
+ "statement/substatement pair along with the process id should suffice "
+ "to identify the statement text logged with a previous entry.",
+
+ NULL,
+ &auditLogStatementOnce,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.role */
DefineCustomStringVariable(
- "pg_audit.role",
+ "pg_audit.role",
- "Specifies the master role to use for object audit logging. Muliple "
- "audit roles can be defined by granting them to the master role. This "
- "allows multiple groups to be in charge of different aspects of audit "
- "logging.",
+ "Specifies the master role to use for object audit logging. Muliple "
+ "audit roles can be defined by granting them to the master role. This "
+ "allows multiple groups to be in charge of different aspects of audit "
+ "logging.",
- NULL,
- &auditRole,
- "",
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditRole,
+ "",
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/*
- * Install our hook functions after saving the existing pointers to preserve
- * the chains.
+ * Install our hook functions after saving the existing pointers to
+ * preserve the chains.
*/
next_ExecutorStart_hook = ExecutorStart_hook;
ExecutorStart_hook = pg_audit_ExecutorStart_hook;
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index 761c277c63..6622d22f5f 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -34,6 +34,7 @@ typedef struct
bool isvalid;
bool isdirty;
uint16 usagecount;
+
/*
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
* being pinned by too many backends and each backend will only pin once
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 3cc687bdb7..0eb991cdf0 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -138,10 +138,10 @@ typedef struct Counters
{
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
- double min_time; /* minimim execution time in msec */
- double max_time; /* maximum execution time in msec */
- double mean_time; /* mean execution time in msec */
- double sum_var_time; /* sum of variances in execution time in msec */
+ double min_time; /* minimim execution time in msec */
+ double max_time; /* maximum execution time in msec */
+ double mean_time; /* mean execution time in msec */
+ double sum_var_time; /* sum of variances in execution time in msec */
int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
@@ -1231,10 +1231,10 @@ pgss_store(const char *query, uint32 queryId,
else
{
/*
- * Welford's method for accurately computing variance.
- * See
+ * Welford's method for accurately computing variance. See
+ *
*/
- double old_mean = e->counters.mean_time;
+ double old_mean = e->counters.mean_time;
e->counters.mean_time +=
(total_time - old_mean) / e->counters.calls;
@@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
values[i++] = Float8GetDatumFast(tmp.min_time);
values[i++] = Float8GetDatumFast(tmp.max_time);
values[i++] = Float8GetDatumFast(tmp.mean_time);
+
/*
* Note we are calculating the population variance here, not the
- * sample variance, as we have data for the whole population,
- * so Bessel's correction is not used, and we don't divide by
+ * sample variance, as we have data for the whole population, so
+ * Bessel's correction is not used, and we don't divide by
* tmp.calls - 1.
*/
if (tmp.calls > 1)
@@ -2687,16 +2688,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
break;
case T_OnConflictExpr:
{
- OnConflictExpr *conf = (OnConflictExpr *) node;
+ OnConflictExpr *conf = (OnConflictExpr *) node;
APP_JUMB(conf->action);
JumbleExpr(jstate, (Node *) conf->arbiterElems);
JumbleExpr(jstate, conf->arbiterWhere);
- JumbleExpr(jstate, (Node *) conf->onConflictSet);
+ JumbleExpr(jstate, (Node *) conf->onConflictSet);
JumbleExpr(jstate, conf->onConflictWhere);
APP_JUMB(conf->constraint);
APP_JUMB(conf->exclRelIndex);
- JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+ JumbleExpr(jstate, (Node *) conf->exclRelTlist);
}
break;
case T_List:
diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c
index 24eb42fa89..5c8355808a 100644
--- a/contrib/pgcrypto/pgp-armor.c
+++ b/contrib/pgcrypto/pgp-armor.c
@@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
char *line;
char *nextline;
char *eol,
- *colon;
+ *colon;
int hlen;
char *buf;
int hdrlines;
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index d0da05cd13..1842985e53 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
res = pgp_set_convert_crlf(ctx, atoi(val));
else if (strcmp(key, "unicode-mode") == 0)
res = pgp_set_unicode_mode(ctx, atoi(val));
+
/*
* The remaining options are for debugging/testing and are therefore not
* documented in the user-facing docs.
@@ -834,22 +835,22 @@ static int
parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
char ***p_keys, char ***p_values)
{
- int nkdims = ARR_NDIM(key_array);
- int nvdims = ARR_NDIM(val_array);
- char **keys,
- **values;
- Datum *key_datums,
- *val_datums;
- bool *key_nulls,
- *val_nulls;
- int key_count,
- val_count;
- int i;
+ int nkdims = ARR_NDIM(key_array);
+ int nvdims = ARR_NDIM(val_array);
+ char **keys,
+ **values;
+ Datum *key_datums,
+ *val_datums;
+ bool *key_nulls,
+ *val_nulls;
+ int key_count,
+ val_count;
+ int i;
if (nkdims > 1 || nkdims != nvdims)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("wrong number of array subscripts")));
+ errmsg("wrong number of array subscripts")));
if (nkdims == 0)
return 0;
@@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
for (i = 0; i < key_count; i++)
{
- char *v;
+ char *v;
/* Check that the key doesn't contain anything funny */
if (key_nulls[i])
@@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header key must not contain non-ASCII characters")));
+ errmsg("header key must not contain non-ASCII characters")));
if (strstr(v, ": "))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header value must not contain non-ASCII characters")));
+ errmsg("header value must not contain non-ASCII characters")));
if (strchr(v, '\n'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
SRF_RETURN_DONE(funcctx);
else
{
- char *values[2];
+ char *values[2];
/* we assume that the keys (and values) are in UTF-8. */
utf8key = state->keys[funcctx->call_cntr];
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 2ce429d1b2..62b8517c27 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -278,11 +278,11 @@ void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
-void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
- int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+ int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int pgp_extract_armor_headers(const uint8 *src, unsigned len,
- int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+ int *nheaders, char ***keys, char ***values);
int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c
index ae5ed56f98..22c5f7a9ee 100644
--- a/contrib/pgstattuple/pgstatapprox.c
+++ b/contrib/pgstattuple/pgstatapprox.c
@@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
CHECK_FOR_INTERRUPTS();
/*
- * If the page has only visible tuples, then we can find out the
- * free space from the FSM and move on.
+ * If the page has only visible tuples, then we can find out the free
+ * space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
{
@@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
page = BufferGetPage(buf);
/*
- * It's not safe to call PageGetHeapFreeSpace() on new pages, so
- * we treat them as being free space for our purposes.
+ * It's not safe to call PageGetHeapFreeSpace() on new pages, so we
+ * treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);
@@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
scanned++;
/*
- * Look at each tuple on the page and decide whether it's live
- * or dead, then count it and its size. Unlike lazy_scan_heap,
- * we can afford to ignore problems and special cases.
+ * Look at each tuple on the page and decide whether it's live or
+ * dead, then count it and its size. Unlike lazy_scan_heap, we can
+ * afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);
@@ -179,9 +179,10 @@ statapprox_heap(Relation rel, output_type *stat)
UnlockReleaseBuffer(buf);
}
- stat->table_len = (uint64) nblocks * BLCKSZ;
+ stat->table_len = (uint64) nblocks *BLCKSZ;
+
stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
- stat->tuple_count+misc_count);
+ stat->tuple_count + misc_count);
/*
* Calculate percentages if the relation has one or more pages.
@@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
errmsg("cannot access temporary tables of other sessions")));
/*
- * We support only ordinary relations and materialised views,
- * because we depend on the visibility map and free space map
- * for our estimates about unscanned pages.
+ * We support only ordinary relations and materialised views, because we
+ * depend on the visibility map and free space map for our estimates about
+ * unscanned pages.
*/
if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_MATVIEW))
@@ -268,6 +269,6 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
values[i++] = Int64GetDatum(stat.free_space);
values[i++] = Float8GetDatum(stat.free_percent);
- ret = heap_form_tuple(tupdesc, values, nulls);
+ ret = heap_form_tuple(tupdesc, values, nulls);
return HeapTupleGetDatum(ret);
}
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index d420cb2d0c..6da01e1d6f 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -203,7 +203,7 @@ typedef struct PgFdwAnalyzeState
/* for random sampling */
double samplerows; /* # of rows fetched */
double rowstoskip; /* # of rows to skip before next sample */
- ReservoirStateData rstate; /* state for reservoir sampling*/
+ ReservoirStateData rstate; /* state for reservoir sampling */
/* working memory contexts */
MemoryContext anl_cxt; /* context for per-analyze lifespan data */
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index ae2aca8a8d..32d5743018 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -53,16 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
- TestDecodingData *data,
- ReorderBufferTXN *txn,
- bool last_write);
+ TestDecodingData *data,
+ ReorderBufferTXN *txn,
+ bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
- RepOriginId origin_id);
+ RepOriginId origin_id);
void
_PG_init(void)
diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c
index 14efb27f0d..e325eaff49 100644
--- a/contrib/tsm_system_rows/tsm_system_rows.c
+++ b/contrib/tsm_system_rows/tsm_system_rows.c
@@ -33,14 +33,14 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
- uint32 seed; /* random seed */
- BlockNumber nblocks; /* number of block in relation */
- int32 ntuples; /* number of tuples to return */
- int32 donetuples; /* tuples already returned */
- OffsetNumber lt; /* last tuple returned from current block */
- BlockNumber step; /* step size */
- BlockNumber lb; /* last block visited */
- BlockNumber doneblocks; /* number of already returned blocks */
+ uint32 seed; /* random seed */
+ BlockNumber nblocks; /* number of block in relation */
+ int32 ntuples; /* number of tuples to return */
+ int32 donetuples; /* tuples already returned */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
@@ -60,11 +60,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_rows_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (ntuples < 1)
ereport(ERROR,
@@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
@@ -106,8 +107,8 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
@@ -127,10 +128,10 @@ tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -152,9 +153,9 @@ tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- bool visible = PG_GETARG_BOOL(3);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ bool visible = PG_GETARG_BOOL(3);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
if (!visible)
PG_RETURN_BOOL(false);
@@ -183,8 +184,8 @@ tsm_system_rows_end(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->donetuples = 0;
@@ -203,14 +204,14 @@ tsm_system_rows_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *limitnode;
- int32 ntuples;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 ntuples;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
@@ -235,9 +236,9 @@ tsm_system_rows_cost(PG_FUNCTION_ARGS)
static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
{
- uint32 c;
+ uint32 c;
while (a != 0)
{
@@ -253,8 +254,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
- uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
- t;
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c
index 9af9e74921..7708fc0761 100644
--- a/contrib/tsm_system_time/tsm_system_time.c
+++ b/contrib/tsm_system_time/tsm_system_time.c
@@ -35,16 +35,17 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
- uint32 seed; /* random seed */
- BlockNumber nblocks; /* number of block in relation */
- int32 time; /* time limit for sampling */
- TimestampTz start_time; /* start time of sampling */
- TimestampTz end_time; /* end time of sampling */
- OffsetNumber lt; /* last tuple returned from current block */
- BlockNumber step; /* step size */
- BlockNumber lb; /* last block visited */
- BlockNumber estblocks; /* estimated number of returned blocks (moving) */
- BlockNumber doneblocks; /* number of already returned blocks */
+ uint32 seed; /* random seed */
+ BlockNumber nblocks; /* number of block in relation */
+ int32 time; /* time limit for sampling */
+ TimestampTz start_time; /* start time of sampling */
+ TimestampTz end_time; /* end time of sampling */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber estblocks; /* estimated number of returned blocks
+ * (moving) */
+ BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
@@ -63,11 +64,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_time_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (time < 1)
ereport(ERROR,
@@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
@@ -111,8 +113,8 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
@@ -125,16 +127,16 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
* Update the estimations for time limit at least 10 times per estimated
* number of returned blocks to handle variations in block read speed.
*/
- if (sampler->doneblocks % Max(sampler->estblocks/10, 1) == 0)
+ if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
{
- TimestampTz now = GetCurrentTimestamp();
- long secs;
- int usecs;
+ TimestampTz now = GetCurrentTimestamp();
+ long secs;
+ int usecs;
int usecs_remaining;
int time_per_block;
TimestampDifference(sampler->start_time, now, &secs, &usecs);
- usecs += (int) secs * 1000000;
+ usecs += (int) secs *1000000;
time_per_block = usecs / sampler->doneblocks;
@@ -144,7 +146,7 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
PG_RETURN_UINT32(InvalidBlockNumber);
/* Remaining microseconds */
- usecs_remaining = usecs + (int) secs * 1000000;
+ usecs_remaining = usecs + (int) secs *1000000;
/* Recalculate estimated returned number of blocks */
if (time_per_block < usecs_remaining && time_per_block > 0)
@@ -161,10 +163,10 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -198,8 +200,8 @@ tsm_system_time_end(PG_FUNCTION_ARGS)
Datum
tsm_system_time_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->start_time = GetCurrentTimestamp();
@@ -221,18 +223,18 @@ tsm_system_time_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *limitnode;
- int32 time;
- BlockNumber relpages;
- double reltuples;
- double density;
- double spc_random_page_cost;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 time;
+ BlockNumber relpages;
+ double reltuples;
+ double density;
+ double spc_random_page_cost;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
@@ -269,10 +271,10 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
/*
* Assumption here is that we'll never read less than 1% of table pages,
* this is here mainly because it is much less bad to overestimate than
- * underestimate and using just spc_random_page_cost will probably lead
- * to underestimations in general.
+ * underestimate and using just spc_random_page_cost will probably lead to
+ * underestimations in general.
*/
- *pages = Min(baserel->pages, Max(time/spc_random_page_cost, baserel->pages/100));
+ *pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
path->rows = *tuples;
@@ -280,9 +282,9 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
}
static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
{
- uint32 c;
+ uint32 c;
while (a != 0)
{
@@ -298,8 +300,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
- uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
- t;
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 199512551e..ff18b220c2 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
*/
Assert((key->sk_flags & SK_ISNULL) ||
(key->sk_collation ==
- bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
+ bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
/* First time this column? look up consistent function */
if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
@@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
thisblock = ItemPointerGetBlockNumber(&htup->t_self);
/*
- * If we're in a block that belongs to a future range, summarize what we've
- * got and start afresh. Note the scan might have skipped many pages,
- * if they were devoid of live tuples; make sure to insert index tuples
- * for those too.
+ * If we're in a block that belongs to a future range, summarize what
+ * we've got and start afresh. Note the scan might have skipped many
+ * pages, if they were devoid of live tuples; make sure to insert index
+ * tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
@@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
-
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;
@@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
{
/* other arguments are not currently used */
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
@@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation heapRel;
/* No-op in ANALYZE ONLY mode */
@@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)
page = BufferGetPage(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel,
- BufferGetBlockNumber(state->bs_currentInsertBuf),
+ BufferGetBlockNumber(state->bs_currentInsertBuf),
PageGetFreeSpace(page));
ReleaseBuffer(state->bs_currentInsertBuf);
}
diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c
index 1f0bc7fdb1..803b07f10a 100644
--- a/src/backend/access/brin/brin_inclusion.c
+++ b/src/backend/access/brin/brin_inclusion.c
@@ -61,11 +61,11 @@
* 0 - the union of the values in the block range
* 1 - whether an empty value is present in any tuple in the block range
* 2 - whether the values in the block range cannot be merged (e.g. an IPv6
- * address amidst IPv4 addresses).
+ * address amidst IPv4 addresses).
*/
-#define INCLUSION_UNION 0
-#define INCLUSION_UNMERGEABLE 1
-#define INCLUSION_CONTAINS_EMPTY 2
+#define INCLUSION_UNION 0
+#define INCLUSION_UNMERGEABLE 1
+#define INCLUSION_CONTAINS_EMPTY 2
typedef struct InclusionOpaque
@@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
unionval = column->bv_values[INCLUSION_UNION];
switch (key->sk_strategy)
{
- /*
- * Placement strategies
- *
- * These are implemented by logically negating the result of the
- * converse placement operator; for this to work, the converse operator
- * must be part of the opclass. An error will be thrown by
- * inclusion_get_strategy_procinfo() if the required strategy is not
- * part of the opclass.
- *
- * These all return false if either argument is empty, so there is
- * no need to check for empty elements.
- */
+ /*
+ * Placement strategies
+ *
+ * These are implemented by logically negating the result of the
+ * converse placement operator; for this to work, the converse
+ * operator must be part of the opclass. An error will be thrown
+ * by inclusion_get_strategy_procinfo() if the required strategy
+ * is not part of the opclass.
+ *
+ * These all return false if either argument is empty, so there is
+ * no need to check for empty elements.
+ */
case RTLeftStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverRightStrategyNumber);
+ RTOverRightStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTBelowStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverAboveStrategyNumber);
+ RTOverAboveStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTAboveStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverBelowStrategyNumber);
+ RTOverBelowStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
@@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
- * We check for empty elements separately as they are not merged to
- * the union but contained by everything.
+ * We check for empty elements separately as they are not merged
+ * to the union but contained by everything.
*/
case RTContainedByStrategyNumber:
@@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
/*
* Adjacent strategy
*
- * We test for overlap first but to be safe we need to call
- * the actual adjacent operator also.
+ * We test for overlap first but to be safe we need to call the
+ * actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.
@@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
* inclusion BRIN family of opclasses, but is is possible to
- * implement them with logical negation of the left-of and right-of
- * operators.
+ * implement them with logical negation of the left-of and
+ * right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"
diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c
index b105f980ec..7cd98887c0 100644
--- a/src/backend/access/brin/brin_minmax.c
+++ b/src/backend/access/brin/brin_minmax.c
@@ -33,7 +33,7 @@ Datum brin_minmax_add_value(PG_FUNCTION_ARGS);
Datum brin_minmax_consistent(PG_FUNCTION_ARGS);
Datum brin_minmax_union(PG_FUNCTION_ARGS);
static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
- Oid subtype, uint16 strategynum);
+ Oid subtype, uint16 strategynum);
Datum
@@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
break;
/* max() >= scankey */
finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
- BTGreaterEqualStrategyNumber);
+ BTGreaterEqualStrategyNumber);
matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
value);
break;
@@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
attr = bdesc->bd_tupdesc->attrs[attno - 1];
/*
- * Adjust "allnulls". If A doesn't have values, just copy the values
- * from B into A, and we're done. We cannot run the operators in this
- * case, because values in A might contain garbage. Note we already
- * established that B contains values.
+ * Adjust "allnulls". If A doesn't have values, just copy the values from
+ * B into A, and we're done. We cannot run the operators in this case,
+ * because values in A might contain garbage. Note we already established
+ * that B contains values.
*/
if (col_a->bv_allnulls)
{
@@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
strategynum, attr->atttypid, subtype, opfamily);
oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
- Anum_pg_amop_amopopr, &isNull));
+ Anum_pg_amop_amopopr, &isNull));
ReleaseSysCache(tuple);
Assert(!isNull && RegProcedureIsValid(oprid));
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 80795eca65..62d440f76b 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -48,7 +48,7 @@ struct BrinRevmap
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
- BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+ BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};
@@ -57,7 +57,7 @@ struct BrinRevmap
static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
- BlockNumber heapBlk);
+ BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
@@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
+ BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
@@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg_internal("corrupted BRIN index: inconsistent range map")));
+ errmsg_internal("corrupted BRIN index: inconsistent range map")));
previptr = *iptr;
blk = ItemPointerGetBlockNumber(iptr);
@@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber targetblk;
+ BlockNumber targetblk;
/* obtain revmap block number, skip 1 for metapage block */
targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
@@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
- BrinPageType(page),
- RelationGetRelationName(irel),
- BufferGetBlockNumber(buf))));
+ errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
+ BrinPageType(page),
+ RelationGetRelationName(irel),
+ BufferGetBlockNumber(buf))));
/* If the page is in use, evacuate it and restart */
if (brin_start_evacuating_page(irel, buf))
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 22ce74a4f4..72356c066c 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
{
for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
TupleDescInitEntry(tupdesc, attno++, NULL,
- brdesc->bd_info[i]->oi_typcache[j]->type_id,
+ brdesc->bd_info[i]->oi_typcache[j]->type_id,
-1, 0);
}
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 3e2b8b5fed..54b2db88a6 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
- ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
+ ginFreeScanKeys(so); /* there should be no keys yet, but just to be
+ * sure */
ginNewScanKey(scan);
if (GinIsVoidRes(scan))
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 445466b447..cb4e32fe66 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
static const relopt_parse_elt tab[] = {
{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
- pendingListCleanupSize)}
+ pendingListCleanupSize)}
};
options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 96b7701633..0e499598a4 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
/* opclasses are not required to provide a Fetch method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
fmgr_info_copy(&(giststate->fetchFn[i]),
- index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
+ index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
scanCxt);
else
giststate->fetchFn[i].fn_oid = InvalidOid;
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index beb402357c..ad39294875 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
}
/*
- * If we're doing an index-only scan, on the first call, also initialize
- * a tuple descriptor to represent the returned index tuples and create a
+ * If we're doing an index-only scan, on the first call, also initialize a
+ * tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)
@@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
- natts = RelationGetNumberOfAttributes(scan->indexRelation);
+ natts = RelationGetNumberOfAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
for (attno = 1; attno <= natts; attno++)
{
@@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/*
- * Look up the datatype returned by the original ordering operator.
- * GiST always uses a float8 for the distance function, but the
- * ordering operator could be anything else.
+ * Look up the datatype returned by the original ordering
+ * operator. GiST always uses a float8 for the distance function,
+ * but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index bf9fbf30a8..7d596a3e2e 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
isleaf);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
- giststate->supportCollation[i],
+ giststate->supportCollation[i],
PointerGetDatum(¢ry)));
compatt[i] = cep->key;
}
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index cb86a4fa3e..caacc105d2 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -80,7 +80,7 @@ bool synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
bool is_bitmapscan, bool is_samplescan,
bool temp_snap);
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
@@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
- bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool is_bitmapscan, bool is_samplescan, bool temp_snap)
{
HeapScanDesc scan;
@@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
{
/*
* For now, parallel operations are required to be strictly read-only.
- * Unlike heap_update() and heap_delete(), an insert should never create
- * a combo CID, so it might be possible to relax this restriction, but
- * not without more thought and testing.
+ * Unlike heap_update() and heap_delete(), an insert should never create a
+ * combo CID, so it might be possible to relax this restriction, but not
+ * without more thought and testing.
*/
if (IsInParallelMode())
ereport(ERROR,
@@ -2768,8 +2768,8 @@ l1:
infomask = tp.t_data->t_infomask;
/*
- * Sleep until concurrent transaction ends -- except when there's a single
- * locker and it's our own transaction. Note we don't care
+ * Sleep until concurrent transaction ends -- except when there's a
+ * single locker and it's our own transaction. Note we don't care
* which lock mode the locker has, because we need the strongest one.
*
* Before sleeping, we need to acquire tuple lock to establish our
@@ -2822,8 +2822,8 @@ l1:
else if (!TransactionIdIsCurrentTransactionId(xwait))
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@@ -3336,8 +3336,8 @@ l2:
*
* Before sleeping, we need to acquire tuple lock to establish our
* priority for the tuple (see heap_lock_tuple). LockTuple will
- * release us when we are next-in-line for the tuple. Note we must not
- * acquire the tuple lock until we're sure we're going to sleep;
+ * release us when we are next-in-line for the tuple. Note we must
+ * not acquire the tuple lock until we're sure we're going to sleep;
* otherwise we're open for race conditions with other transactions
* holding the tuple lock which sleep on us.
*
@@ -3374,8 +3374,8 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask,
infomask) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
}
@@ -3425,9 +3425,9 @@ l2:
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
checked_lockers = true;
locker_remains = true;
@@ -3436,8 +3436,8 @@ l2:
else
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@@ -3454,7 +3454,7 @@ l2:
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
!TransactionIdEquals(xwait,
- HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+ HeapTupleHeaderGetRawXmax(oldtup.t_data)))
goto l2;
/* Otherwise check if it committed or aborted */
@@ -3779,7 +3779,7 @@ l2:
HeapTupleClearHeapOnly(newtup);
}
- RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
if (!already_marked)
{
@@ -4477,7 +4477,7 @@ l3:
if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
TransactionIdIsCurrentTransactionId(xwait))
{
- /* ... but if the xmax changed in the meantime, start over */
+ /* ... but if the xmax changed in the meantime, start over */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4501,8 +4501,8 @@ l3:
* for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while rechecking
- * tuple state.
+ * this arranges that we stay at the head of the line while
+ * rechecking tuple state.
*/
if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
@@ -4530,11 +4530,11 @@ l3:
{
case LockWaitBlock:
MultiXactIdWait((MultiXactId) xwait, status, infomask,
- relation, &tuple->t_self, XLTW_Lock, NULL);
+ relation, &tuple->t_self, XLTW_Lock, NULL);
break;
case LockWaitSkip:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
{
result = HeapTupleWouldBlock;
@@ -4545,12 +4545,12 @@ l3:
break;
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
@@ -4588,7 +4588,7 @@ l3:
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
}
@@ -4613,9 +4613,9 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then
- * some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4628,9 +4628,9 @@ l3:
* Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that would have been handled above. So
- * that transaction must necessarily be gone by now. But don't
- * check for this in the multixact case, because some locker
- * transactions might still be running.
+ * that transaction must necessarily be gone by now. But
+ * don't check for this in the multixact case, because some
+ * locker transactions might still be running.
*/
UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}
@@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
break;
}
*have_tuple_lock = true;
@@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
MarkBufferDirty(buffer);
/*
- * Replace the speculative insertion token with a real t_ctid,
- * pointing to itself like it does on regular tuples.
+ * Replace the speculative insertion token with a real t_ctid, pointing to
+ * itself like it does on regular tuples.
*/
htup->t_ctid = tuple->t_self;
@@ -6447,23 +6447,23 @@ static bool
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
LockTupleMode lockmode)
{
- bool allow_old;
- int nmembers;
+ bool allow_old;
+ int nmembers;
MultiXactMember *members;
- bool result = false;
- LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+ bool result = false;
+ LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
HEAP_XMAX_IS_LOCKED_ONLY(infomask));
if (nmembers >= 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
- TransactionId memxid;
- LOCKMODE memlockmode;
+ TransactionId memxid;
+ LOCKMODE memlockmode;
memlockmode = LOCKMODE_from_mxstatus(members[i].status);
@@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
{
XLogRegisterBufData(0,
((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+ newtup->t_len - SizeofHeapTupleHeader - suffixlen);
}
else
{
@@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
{
XLogRegisterBufData(0,
- ((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+ ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+ newtup->t_data->t_hoff - SizeofHeapTupleHeader);
}
/* data after common prefix */
@@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
{
/*
* The OID column can appear in an index definition, but that's
- * OK, because we always copy the OID if present (see below). Other
- * system columns may not.
+ * OK, because we always copy the OID if present (see below).
+ * Other system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index a9f0ca35e4..6db73bf9d0 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
/*
- * Insert the correct position into CTID of the stored tuple, too
- * (unless this is a speculative insertion, in which case the token is
- * held in CTID field instead)
+ * Insert the correct position into CTID of the stored tuple, too (unless
+ * this is a speculative insertion, in which case the token is held in
+ * CTID field instead)
*/
if (!token)
{
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index e6e4d28b74..1043362f91 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
* Check permissions- if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
- * First check if RLS is enabled for the relation. If so, return NULL
- * to avoid leaking data.
+ * First check if RLS is enabled for the relation. If so, return NULL to
+ * avoid leaking data.
*
- * Next we need to check table-level SELECT access and then, if
- * there is no access there, check column-level permissions.
+ * Next we need to check table-level SELECT access and then, if there is
+ * no access there, check column-level permissions.
*/
/*
@@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
if (aclresult != ACLCHECK_OK)
{
/*
- * No table-level access, so step through the columns in the
- * index and make sure the user has SELECT rights on all of them.
+ * No table-level access, so step through the columns in the index and
+ * make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
/*
- * Note that if attnum == InvalidAttrNumber, then this is an
- * index based on an expression and we return no detail rather
- * than try to figure out what column(s) the expression includes
- * and if the user has SELECT rights on them.
+ * Note that if attnum == InvalidAttrNumber, then this is an index
+ * based on an expression and we return no detail rather than try
+ * to figure out what column(s) the expression includes and if the
+ * user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 4a60c5fa2c..77c2fdf90b 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -160,8 +160,8 @@ top:
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
- TransactionId xwait;
- uint32 speculativeToken;
+ TransactionId xwait;
+ uint32 speculativeToken;
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
@@ -171,9 +171,10 @@ top:
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
+
/*
- * If it's a speculative insertion, wait for it to finish (ie.
- * to go ahead with the insertion, or kill the tuple). Otherwise
+ * If it's a speculative insertion, wait for it to finish (ie. to
+ * go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)
@@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel)),
- key_desc ? errdetail("Key %s already exists.",
- key_desc) : 0,
+ key_desc ? errdetail("Key %s already exists.",
+ key_desc) : 0,
errtableconstraint(heapRel,
RelationGetRelationName(rel))));
}
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 0f4128253f..6e65db91eb 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first
@@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
leafrightsib = opaque->btpo_next;
/*
- * Before attempting to lock the parent page, check that the right
- * sibling is not in half-dead state. A half-dead right sibling would
- * have no downlink in the parent, which would be highly confusing later
- * when we delete the downlink that follows the current page's downlink.
- * (I believe the deletion would work correctly, but it would fail the
+ * Before attempting to lock the parent page, check that the right sibling
+ * is not in half-dead state. A half-dead right sibling would have no
+ * downlink in the parent, which would be highly confusing later when we
+ * delete the downlink that follows the current page's downlink. (I
+ * believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index c2d52faa96..9431ab5d04 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -40,9 +40,8 @@ typedef struct
BTSpool *spool;
/*
- * spool2 is needed only when the index is a unique index. Dead tuples
- * are put into spool2 instead of spool in order to avoid uniqueness
- * check.
+ * spool2 is needed only when the index is a unique index. Dead tuples are
+ * put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index cfb1d64f86..d69a0577a8 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
offnum = OffsetNumberPrev(offnum);
/*
- * By here the scan position is now set for the first key. If all
- * further tuples are expected to match we set the SK_BT_MATCHED flag
- * to avoid re-checking the scan key later. This is a big win for
- * slow key matches though is still significant even for fast datatypes.
+ * By here the scan position is now set for the first key. If all further
+ * tuples are expected to match we set the SK_BT_MATCHED flag to avoid
+ * re-checking the scan key later. This is a big win for slow key matches
+ * though is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 625f490af8..f95f67ad4b 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
for (i = 1; i <= keysz; i++)
{
- SortSupport entry;
+ SortSupport entry;
Datum attrDatum1,
attrDatum2;
bool isNull1,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index d1589f05ef..91331bad65 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
Datum test;
/*
- * If the scan key has already matched we can skip this key, as
- * long as the index tuple does not contain NULL values.
+ * If the scan key has already matched we can skip this key, as long
+ * as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;
@@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* any items from the page, and so there is no need to search left from the
* recorded offset. (This observation also guarantees that the item is still
* the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
+ * TIDs can get recycled.) This holds true even if the page has been modified
* by inserts and page splits, so there is no need to consult the LSN.
*
* If the pin was released after reading the page, then we re-read it. If it
diff --git a/src/backend/access/rmgrdesc/committsdesc.c b/src/backend/access/rmgrdesc/committsdesc.c
index 088fd1bc8b..59975eae9a 100644
--- a/src/backend/access/rmgrdesc/committsdesc.c
+++ b/src/backend/access/rmgrdesc/committsdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* committsdesc.c
- * rmgr descriptor routines for access/transam/commit_ts.c
+ * rmgr descriptor routines for access/transam/commit_ts.c
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/committsdesc.c
+ * src/backend/access/rmgrdesc/committsdesc.c
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
- int nsubxids;
+ int nsubxids;
appendStringInfo(buf, "set %s/%d for: %u",
timestamptz_to_str(xlrec->timestamp),
@@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
sizeof(TransactionId));
if (nsubxids > 0)
{
- int i;
+ int i;
TransactionId *subxids;
subxids = palloc(sizeof(TransactionId) * nsubxids);
diff --git a/src/backend/access/rmgrdesc/replorigindesc.c b/src/backend/access/rmgrdesc/replorigindesc.c
index 19bae9a0f8..60cf0f679d 100644
--- a/src/backend/access/rmgrdesc/replorigindesc.c
+++ b/src/backend/access/rmgrdesc/replorigindesc.c
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* replorigindesc.c
- * rmgr descriptor routines for replication/logical/replication_origin.c
+ * rmgr descriptor routines for replication/logical/replication_origin.c
*
* Portions Copyright (c) 2015, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/replorigindesc.c
+ * src/backend/access/rmgrdesc/replorigindesc.c
*
*-------------------------------------------------------------------------
*/
@@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;
+
xlrec = (xl_replorigin_set *) rec;
appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
@@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
+
xlrec = (xl_replorigin_drop *) rec;
appendStringInfo(buf, "drop %u", xlrec->node_id);
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 793f9bb51f..7b5f983050 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
@@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
{
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
origin_id,
- (uint32)(parsed.origin_lsn >> 32),
- (uint32)parsed.origin_lsn,
+ (uint32) (parsed.origin_lsn >> 32),
+ (uint32) parsed.origin_lsn,
timestamptz_to_str(parsed.origin_timestamp));
}
}
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 06c6944fc7..8a0d9098c5 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -658,6 +658,7 @@ Datum
spgcanreturn(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
+
/* int i = PG_GETARG_INT32(1); */
SpGistCache *cache;
diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c
index c91f3f593e..563a9168f0 100644
--- a/src/backend/access/tablesample/bernoulli.c
+++ b/src/backend/access/tablesample/bernoulli.c
@@ -27,13 +27,15 @@
/* tsdesc */
typedef struct
{
- uint32 seed; /* random seed */
- BlockNumber startblock; /* starting block, we use ths for syncscan support */
+ uint32 seed; /* random seed */
+ BlockNumber startblock; /* starting block, we use ths for syncscan
+ * support */
BlockNumber nblocks; /* number of blocks */
BlockNumber blockno; /* current block */
- float4 probability; /* probabilty that tuple will be returned (0.0-1.0) */
+ float4 probability; /* probabilty that tuple will be returned
+ * (0.0-1.0) */
OffsetNumber lt; /* last tuple returned from current block */
- SamplerRandomState randstate; /* random generator tsdesc */
+ SamplerRandomState randstate; /* random generator tsdesc */
} BernoulliSamplerData;
/*
@@ -42,10 +44,10 @@ typedef struct
Datum
tsm_bernoulli_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
BernoulliSamplerData *sampler;
if (percent < 0 || percent > 100)
@@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
/*
- * Bernoulli sampling scans all blocks on the table and supports
- * syncscan so loop from startblock to startblock instead of
- * from 0 to nblocks.
+ * Bernoulli sampling scans all blocks on the table and supports syncscan
+ * so loop from startblock to startblock instead of from 0 to nblocks.
*/
if (sampler->blockno == InvalidBlockNumber)
sampler->blockno = sampler->startblock;
@@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
* tuples have same probability of being returned the visible and invisible
* tuples will be returned in same ratio as they have in the actual table.
* This means that there is no skew towards either visible or invisible tuples
- * and the number returned visible tuples to from the executor node is the
+ * and the number returned visible tuples to from the executor node is the
* fraction of visible tuples which was specified in input.
*
* This is faster than doing the coinflip in the examinetuple because we don't
@@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
- float4 probability = sampler->probability;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
+ float4 probability = sampler->probability;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
/*
* Loop over tuple offsets until the random generator returns value that
- * is within the probability of returning the tuple or until we reach
- * end of the block.
+ * is within the probability of returning the tuple or until we reach end
+ * of the block.
*
* (This is our implementation of bernoulli trial)
*/
@@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
sampler->blockno = InvalidBlockNumber;
sampler->lt = InvalidOffsetNumber;
@@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
*pages = baserel->pages;
diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c
index 1412e511fa..1d834369a4 100644
--- a/src/backend/access/tablesample/system.c
+++ b/src/backend/access/tablesample/system.c
@@ -31,9 +31,9 @@
typedef struct
{
BlockSamplerData bs;
- uint32 seed; /* random seed */
+ uint32 seed; /* random seed */
BlockNumber nblocks; /* number of block in relation */
- int samplesize; /* number of blocks to return */
+ int samplesize; /* number of blocks to return */
OffsetNumber lt; /* last tuple returned from current block */
} SystemSamplerData;
@@ -44,11 +44,11 @@ typedef struct
Datum
tsm_system_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (percent < 0 || percent > 100)
ereport(ERROR,
@@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
Datum
tsm_system_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- BlockNumber blockno;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ BlockNumber blockno;
if (!BlockSampler_HasMore(&sampler->bs))
PG_RETURN_UINT32(InvalidBlockNumber);
@@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
@@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
Datum
tsm_system_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
@@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
pctnode = linitial(args);
pctnode = estimate_expression_value(root, pctnode);
diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c
index ef55d062e7..3398d02f85 100644
--- a/src/backend/access/tablesample/tablesample.c
+++ b/src/backend/access/tablesample/tablesample.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* tablesample.c
- * TABLESAMPLE internal API
+ * TABLESAMPLE internal API
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/tablesample/tablesample.c
+ * src/backend/access/tablesample/tablesample.c
*
* TABLESAMPLE is the SQL standard clause for sampling the relations.
*
@@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
List *args = tablesample->args;
ListCell *arg;
ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
- TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
+ TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
/* Load functions */
fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
@@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
fcinfo.argnull[0] = false;
/*
- * Second arg for init function is always REPEATABLE
- * When tablesample->repeatable is NULL then REPEATABLE clause was not
- * specified.
- * When specified, the expression cannot evaluate to NULL.
+ * Second arg for init function is always REPEATABLE When
+ * tablesample->repeatable is NULL then REPEATABLE clause was not
+ * specified. When specified, the expression cannot evaluate to NULL.
*/
if (tablesample->repeatable)
{
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
(PlanState *) scanstate);
+
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
&fcinfo.argnull[1], NULL);
if (fcinfo.argnull[1])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("REPEATABLE clause must be NOT NULL numeric value")));
+ errmsg("REPEATABLE clause must be NOT NULL numeric value")));
}
else
{
@@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
HeapTuple
tablesample_getnext(TableSampleDesc *desc)
{
- HeapScanDesc scan = desc->heapScan;
- HeapTuple tuple = &(scan->rs_ctup);
- bool pagemode = scan->rs_pageatatime;
- BlockNumber blockno;
- Page page;
- bool page_all_visible;
- ItemId itemid;
- OffsetNumber tupoffset,
- maxoffset;
+ HeapScanDesc scan = desc->heapScan;
+ HeapTuple tuple = &(scan->rs_ctup);
+ bool pagemode = scan->rs_pageatatime;
+ BlockNumber blockno;
+ Page page;
+ bool page_all_visible;
+ ItemId itemid;
+ OffsetNumber tupoffset,
+ maxoffset;
if (!scan->rs_inited)
{
@@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
return NULL;
}
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
if (!BlockNumberIsValid(blockno))
{
tuple->t_data = NULL;
@@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
CHECK_FOR_INTERRUPTS();
tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- UInt16GetDatum(maxoffset)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ UInt16GetDatum(maxoffset)));
if (OffsetNumberIsValid(tupoffset))
{
- bool visible;
- bool found;
+ bool visible;
+ bool found;
/* Skip invalid tuple pointers. */
itemid = PageGetItemId(page, tupoffset);
@@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
visible = SampleTupleVisible(tuple, tupoffset, scan);
/*
- * Let the sampling method examine the actual tuple and decide if we
- * should return it.
+ * Let the sampling method examine the actual tuple and decide if
+ * we should return it.
*
* Note that we let it examine even invisible tuples for
* statistical purposes, but not return them since user should
@@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
if (OidIsValid(desc->tsmexaminetuple.fn_oid))
{
found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- PointerGetDatum(tuple),
- BoolGetDatum(visible)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ PointerGetDatum(tuple),
+ BoolGetDatum(visible)));
/* Should not happen if sampling method is well written. */
if (found && !visible)
elog(ERROR, "Sampling method wanted to return invisible tuple");
@@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
/*
- * Report our new scan position for synchronization purposes. We
- * don't do that when moving backwards, however. That would just
- * mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We don't
+ * do that when moving backwards, however. That would just mess up any
+ * other forward-moving scanners.
*
- * Note: we do this before checking for end of scan so that the
- * final state of the position hint is back at the start of the
- * rel. That's not strictly necessary, but otherwise when you run
- * the same query multiple times the starting position would shift
- * a little bit backwards on every invocation, which is confusing.
- * We don't guarantee any specific ordering in general, though.
+ * Note: we do this before checking for end of scan so that the final
+ * state of the position hint is back at the start of the rel. That's
+ * not strictly necessary, but otherwise when you run the same query
+ * multiple times the starting position would shift a little bit
+ * backwards on every invocation, which is confusing. We don't
+ * guarantee any specific ordering in general, though.
*/
if (scan->rs_syncscan)
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
@@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
{
/*
* If this scan is reading whole pages at a time, there is already
- * visibility info present in rs_vistuples so we can just search it
- * for the tupoffset.
+ * visibility info present in rs_vistuples so we can just search it for
+ * the tupoffset.
*/
if (scan->rs_pageatatime)
{
- int start = 0,
- end = scan->rs_ntuples - 1;
+ int start = 0,
+ end = scan->rs_ntuples - 1;
/*
* Do the binary search over rs_vistuples, it's already sorted by
* OffsetNumber so we don't need to do any sorting ourselves here.
*
- * We could use bsearch() here but it's slower for integers because
- * of the function call overhead and because it needs boiler plate code
+ * We could use bsearch() here but it's slower for integers because of
+ * the function call overhead and because it needs boiler plate code
* it would not save us anything code-wise anyway.
*/
while (start <= end)
{
- int mid = start + (end - start) / 2;
+ int mid = start + (end - start) / 2;
OffsetNumber curoffset = scan->rs_vistuples[mid];
if (curoffset == tupoffset)
@@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
Snapshot snapshot = scan->rs_snapshot;
Buffer buffer = scan->rs_cbuf;
- bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+ bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
snapshot);
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 63344327e3..5ad35c0d7f 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -55,8 +55,8 @@
*/
typedef struct CommitTimestampEntry
{
- TimestampTz time;
- RepOriginId nodeid;
+ TimestampTz time;
+ RepOriginId nodeid;
} CommitTimestampEntry;
#define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
#define COMMIT_TS_XACTS_PER_PAGE \
(BLCKSZ / SizeOfCommitTimestampEntry)
-#define TransactionIdToCTsPage(xid) \
+#define TransactionIdToCTsPage(xid) \
((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
#define TransactionIdToCTsEntry(xid) \
((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
@@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
*/
typedef struct CommitTimestampShared
{
- TransactionId xidLastCommit;
+ TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
} CommitTimestampShared;
-CommitTimestampShared *commitTsShared;
+CommitTimestampShared *commitTsShared;
/* GUC variable */
-bool track_commit_timestamp;
+bool track_commit_timestamp;
static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
TransactionId *subxids, TimestampTz ts,
RepOriginId nodeid, int pageno);
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
- RepOriginId nodeid, int slotno);
+ RepOriginId nodeid, int slotno);
static int ZeroCommitTsPage(int pageno, bool writeXlog);
static bool CommitTsPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
@@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
return;
/*
- * Comply with the WAL-before-data rule: if caller specified it wants
- * this value to be recorded in WAL, do so before touching the data.
+ * Comply with the WAL-before-data rule: if caller specified it wants this
+ * value to be recorded in WAL, do so before touching the data.
*/
if (do_xlog)
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
/*
* We split the xids to set the timestamp to in groups belonging to the
* same SLRU page; the first element in each such set is its head. The
- * first group has the main XID as the head; subsequent sets use the
- * first subxid not on the previous page as head. This way, we only have
- * to lock/modify each SLRU page once.
+ * first group has the main XID as the head; subsequent sets use the first
+ * subxid not on the previous page as head. This way, we only have to
+ * lock/modify each SLRU page once.
*/
for (i = 0, headxid = xid;;)
{
@@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
break;
/*
- * Set the new head and skip over it, as well as over the subxids
- * we just wrote.
+ * Set the new head and skip over it, as well as over the subxids we
+ * just wrote.
*/
headxid = subxids[j];
i += j - i + 1;
@@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
/* error if the given Xid doesn't normally commit */
if (!TransactionIdIsNormal(xid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
+ errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
/*
* Return empty if the requested value is outside our valid range.
@@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
TransactionId
GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
{
- TransactionId xid;
+ TransactionId xid;
/* Error if module not enabled */
if (!track_commit_timestamp)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
LWLockAcquire(CommitTsLock, LW_SHARED);
xid = commitTsShared->xidLastCommit;
@@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
Datum
pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
{
- TransactionId xid = PG_GETARG_UINT32(0);
- TimestampTz ts;
- bool found;
+ TransactionId xid = PG_GETARG_UINT32(0);
+ TimestampTz ts;
+ bool found;
found = TransactionIdGetCommitTsData(xid, &ts, NULL);
@@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
Datum
pg_last_committed_xact(PG_FUNCTION_ARGS)
{
- TransactionId xid;
- TimestampTz ts;
- Datum values[2];
- bool nulls[2];
- TupleDesc tupdesc;
+ TransactionId xid;
+ TimestampTz ts;
+ Datum values[2];
+ bool nulls[2];
+ TupleDesc tupdesc;
HeapTuple htup;
/* and construct a tuple with our data */
@@ -462,7 +462,7 @@ CommitTsShmemSize(void)
void
CommitTsShmemInit(void)
{
- bool found;
+ bool found;
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
@@ -495,8 +495,8 @@ BootStrapCommitTs(void)
{
/*
* Nothing to do here at present, unlike most other SLRU modules; segments
- * are created when the server is started with this module enabled.
- * See StartupCommitTs.
+ * are created when the server is started with this module enabled. See
+ * StartupCommitTs.
*/
}
@@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
/*
* Activate this module whenever necessary.
- * This must happen during postmaster or standalong-backend startup,
- * or during WAL replay anytime the track_commit_timestamp setting is
- * changed in the master.
+ * This must happen during postmaster or standalong-backend startup,
+ * or during WAL replay anytime the track_commit_timestamp setting is
+ * changed in the master.
*
* The reason why this SLRU needs separate activation/deactivation functions is
* that it can be enabled/disabled during start and the activation/deactivation
@@ -612,7 +612,7 @@ ActivateCommitTs(void)
/* Finally, create the current segment file, if necessary */
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
{
- int slotno;
+ int slotno;
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
slotno = ZeroCommitTsPage(pageno, false);
@@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
TransactionId *subxids, TimestampTz timestamp,
RepOriginId nodeid)
{
- xl_commit_ts_set record;
+ xl_commit_ts_set record;
record.timestamp = timestamp;
record.nodeid = nodeid;
@@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
subxids = NULL;
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
- setts->timestamp, setts->nodeid, false);
+ setts->timestamp, setts->nodeid, false);
if (subxids)
pfree(subxids);
}
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 0218378ccb..9568ff1ddb 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*/
if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
(MultiXactState->nextOffset - MultiXactState->oldestOffset
- > MULTIXACT_MEMBER_SAFE_THRESHOLD))
+ > MULTIXACT_MEMBER_SAFE_THRESHOLD))
{
/*
* For safety's sake, we release MultiXactGenLock while sending
@@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
MultiXactIdSetOldestVisible();
/*
- * If we know the multi is used only for locking and not for updates,
- * then we can skip checking if the value is older than our oldest
- * visible multi. It cannot possibly still be running.
+ * If we know the multi is used only for locking and not for updates, then
+ * we can skip checking if the value is older than our oldest visible
+ * multi. It cannot possibly still be running.
*/
if (onlyLock &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
@@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it has already been removed, or will be removed shortly, by
- * truncation. Returning the wrong values could lead
- * to an incorrect visibility result. However, to support pg_upgrade we
- * need to allow an empty set to be returned regardless, if the caller is
- * willing to accept it; the caller is expected to check that it's an
- * allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
- * expecting this to be called only on recently created multis, then we
- * raise an error.
+ * truncation. Returning the wrong values could lead to an incorrect
+ * visibility result. However, to support pg_upgrade we need to allow an
+ * empty set to be returned regardless, if the caller is willing to accept
+ * it; the caller is expected to check that it's an allowed condition
+ * (such as ensuring that the infomask bits set on the tuple are
+ * consistent with the pg_upgrade scenario). If the caller is expecting
+ * this to be called only on recently created multis, then we raise an
+ * error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises a
@@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
* enough to contain the next value that would be created.
*
* We need to do this pretty early during the first startup in binary
- * upgrade mode: before StartupMultiXact() in fact, because this routine is
- * called even before that by StartupXLOG(). And we can't do it earlier
- * than at this point, because during that first call of this routine we
- * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
- * needs.
+ * upgrade mode: before StartupMultiXact() in fact, because this routine
+ * is called even before that by StartupXLOG(). And we can't do it
+ * earlier than at this point, because during that first call of this
+ * routine we determine the MultiXactState->nextMXact value that
+ * MaybeExtendOffsetSlru needs.
*/
if (IsBinaryUpgrade)
MaybeExtendOffsetSlru();
@@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* Determine the offset of the oldest multixact that might still be
- * referenced. Normally, we can read the offset from the multixact itself,
- * but there's an important special case: if there are no multixacts in
- * existence at all, oldest_datminmxid obviously can't point to one. It
- * will instead point to the multixact ID that will be assigned the next
- * time one is needed.
+ * referenced. Normally, we can read the offset from the multixact
+ * itself, but there's an important special case: if there are no
+ * multixacts in existence at all, oldest_datminmxid obviously can't point
+ * to one. It will instead point to the multixact ID that will be
+ * assigned the next time one is needed.
*
* NB: oldest_dataminmxid is the oldest multixact that might still be
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
@@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
* obviously can't point to one. It will instead point to the multixact
* ID that will be assigned the next time one is needed.
*
- * NB: oldestMXact should be the oldest multixact that still exists in
- * the SLRU, unlike in SetMultiXactIdLimit, where we do this same
- * computation based on the oldest value that might be referenced in a
- * table.
+ * NB: oldestMXact should be the oldest multixact that still exists in the
+ * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
+ * based on the oldest value that might be referenced in a table.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
if (MultiXactState->nextMXact == oldestMXact)
@@ -2679,9 +2678,9 @@ int
MultiXactMemberFreezeThreshold(void)
{
MultiXactOffset members;
- uint32 multixacts;
- uint32 victim_multixacts;
- double fraction;
+ uint32 multixacts;
+ uint32 victim_multixacts;
+ double fraction;
ReadMultiXactCounts(&multixacts, &members);
@@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(void)
{
- MultiXactId oldestMXact;
+ MultiXactId oldestMXact;
MultiXactOffset oldestOffset;
MultiXactOffset nextOffset;
mxtruncinfo trunc;
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 8d6a360679..f4ba8518b1 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -39,7 +39,7 @@
* without blocking. That way, a worker that errors out can write the whole
* message into the queue and terminate without waiting for the user backend.
*/
-#define PARALLEL_ERROR_QUEUE_SIZE 16384
+#define PARALLEL_ERROR_QUEUE_SIZE 16384
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC 0x50477c7c
@@ -71,7 +71,7 @@ typedef struct FixedParallelState
BackendId parallel_master_backend_id;
/* Entrypoint for parallel workers. */
- parallel_worker_main_type entrypoint;
+ parallel_worker_main_type entrypoint;
/* Mutex protects remaining fields. */
slock_t mutex;
@@ -90,10 +90,10 @@ typedef struct FixedParallelState
* and < the number of workers before any user code is invoked; each parallel
* worker will get a different parallel worker number.
*/
-int ParallelWorkerNumber = -1;
+int ParallelWorkerNumber = -1;
/* Is there a parallel message pending which we need to receive? */
-bool ParallelMessagePending = false;
+bool ParallelMessagePending = false;
/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;
@@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg);
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
- MemoryContext oldcontext;
- ParallelContext *pcxt;
+ MemoryContext oldcontext;
+ ParallelContext *pcxt;
/* It is unsafe to create a parallel context if not in parallel mode. */
Assert(IsInParallelMode());
@@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name,
char *function_name,
int nworkers)
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
ParallelContext *pcxt;
/* We might be running in a very short-lived memory context. */
@@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name,
void
InitializeParallelDSM(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- Size library_len = 0;
- Size guc_len = 0;
- Size combocidlen = 0;
- Size tsnaplen = 0;
- Size asnaplen = 0;
- Size tstatelen = 0;
- Size segsize = 0;
- int i;
+ MemoryContext oldcontext;
+ Size library_len = 0;
+ Size guc_len = 0;
+ Size combocidlen = 0;
+ Size tsnaplen = 0;
+ Size asnaplen = 0;
+ Size tstatelen = 0;
+ Size segsize = 0;
+ int i;
FixedParallelState *fps;
Snapshot transaction_snapshot = GetTransactionSnapshot();
Snapshot active_snapshot = GetActiveSnapshot();
@@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
- * Normally, the user will have requested at least one worker process,
- * but if by chance they have not, we can skip a bunch of things here.
+ * Normally, the user will have requested at least one worker process, but
+ * if by chance they have not, we can skip a bunch of things here.
*/
if (pcxt->nworkers > 0)
{
@@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* Estimate space need for error queues. */
StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
- PARALLEL_ERROR_QUEUE_SIZE,
- "parallel error queue size not buffer-aligned");
+ PARALLEL_ERROR_QUEUE_SIZE,
+ "parallel error queue size not buffer-aligned");
shm_toc_estimate_chunk(&pcxt->estimator,
PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
shm_toc_estimate_keys(&pcxt->estimator, 1);
@@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
* memory segment; instead, just use backend-private memory.
*
* Also, if we can't create a dynamic shared memory segment because the
- * maximum number of segments have already been created, then fall back
- * to backend-private memory, and plan not to use any workers. We hope
- * this won't happen very often, but it's better to abandon the use of
+ * maximum number of segments have already been created, then fall back to
+ * backend-private memory, and plan not to use any workers. We hope this
+ * won't happen very often, but it's better to abandon the use of
* parallelism than to fail outright.
*/
segsize = shm_toc_estimate(&pcxt->estimator);
@@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* We can skip the rest of this if we're not budgeting for any workers. */
if (pcxt->nworkers > 0)
{
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- char *error_queue_space;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ char *error_queue_space;
/* Serialize shared libraries we have loaded. */
libraryspace = shm_toc_allocate(pcxt->toc, library_len);
@@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt)
* should be transmitted via separate (possibly larger?) queues.
*/
error_queue_space =
- shm_toc_allocate(pcxt->toc,
- PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+ shm_toc_allocate(pcxt->toc,
+ PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
for (i = 0; i < pcxt->nworkers; ++i)
{
- char *start;
- shm_mq *mq;
+ char *start;
+ shm_mq *mq;
start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
@@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
/* Serialize extension entrypoint information. */
if (pcxt->library_name != NULL)
{
- Size lnamelen = strlen(pcxt->library_name);
- char *extensionstate;
+ Size lnamelen = strlen(pcxt->library_name);
+ char *extensionstate;
extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
+ strlen(pcxt->function_name) + 2);
@@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt)
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- BackgroundWorker worker;
- int i;
- bool any_registrations_failed = false;
+ MemoryContext oldcontext;
+ BackgroundWorker worker;
+ int i;
+ bool any_registrations_failed = false;
/* Skip this if we have no workers. */
if (pcxt->nworkers == 0)
@@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
*
* The caller must be able to tolerate ending up with fewer workers than
* expected, so there is no need to throw an error here if registration
- * fails. It wouldn't help much anyway, because registering the worker
- * in no way guarantees that it will start up and initialize successfully.
+ * fails. It wouldn't help much anyway, because registering the worker in
+ * no way guarantees that it will start up and initialize successfully.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
@@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
else
{
/*
- * If we weren't able to register the worker, then we've bumped
- * up against the max_worker_processes limit, and future
+ * If we weren't able to register the worker, then we've bumped up
+ * against the max_worker_processes limit, and future
* registrations will probably fail too, so arrange to skip them.
* But we still have to execute this code for the remaining slots
* to make sure that we forget about the error queues we budgeted
@@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
{
for (;;)
{
- bool anyone_alive = false;
- int i;
+ bool anyone_alive = false;
+ int i;
/*
- * This will process any parallel messages that are pending, which
- * may change the outcome of the loop that follows. It may also
- * throw an error propagated from a worker.
+ * This will process any parallel messages that are pending, which may
+ * change the outcome of the loop that follows. It may also throw an
+ * error propagated from a worker.
*/
CHECK_FOR_INTERRUPTS();
@@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
void
DestroyParallelContext(ParallelContext *pcxt)
{
- int i;
+ int i;
/*
* Be careful about order of operations here! We remove the parallel
@@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt)
/* Wait until the workers actually die. */
for (i = 0; i < pcxt->nworkers; ++i)
{
- BgwHandleStatus status;
+ BgwHandleStatus status;
if (pcxt->worker[i].bgwhandle == NULL)
continue;
@@ -626,9 +626,9 @@ HandleParallelMessages(void)
dlist_foreach(iter, &pcxt_list)
{
ParallelContext *pcxt;
- int i;
- Size nbytes;
- void *data;
+ int i;
+ Size nbytes;
+ void *data;
pcxt = dlist_container(ParallelContext, node, iter.cur);
if (pcxt->worker == NULL)
@@ -637,14 +637,14 @@ HandleParallelMessages(void)
for (i = 0; i < pcxt->nworkers; ++i)
{
/*
- * Read as many messages as we can from each worker, but stop
- * when either (1) the error queue goes away, which can happen if
- * we receive a Terminate message from the worker; or (2) no more
+ * Read as many messages as we can from each worker, but stop when
+ * either (1) the error queue goes away, which can happen if we
+ * receive a Terminate message from the worker; or (2) no more
* messages can be read from the worker without blocking.
*/
while (pcxt->worker[i].error_mqh != NULL)
{
- shm_mq_result res;
+ shm_mq_result res;
res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
&data, true);
@@ -652,7 +652,7 @@ HandleParallelMessages(void)
break;
else if (res == SHM_MQ_SUCCESS)
{
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendBinaryStringInfo(&msg, data, nbytes);
@@ -661,7 +661,7 @@ HandleParallelMessages(void)
}
else
ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
+ (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
errmsg("lost connection to parallel worker")));
/* This might make the error queue go away. */
@@ -677,23 +677,24 @@ HandleParallelMessages(void)
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
- char msgtype;
+ char msgtype;
msgtype = pq_getmsgbyte(msg);
switch (msgtype)
{
- case 'K': /* BackendKeyData */
+ case 'K': /* BackendKeyData */
{
- int32 pid = pq_getmsgint(msg, 4);
+ int32 pid = pq_getmsgint(msg, 4);
+
(void) pq_getmsgint(msg, 4); /* discard cancel key */
(void) pq_getmsgend(msg);
pcxt->worker[i].pid = pid;
break;
}
- case 'E': /* ErrorResponse */
- case 'N': /* NoticeResponse */
+ case 'E': /* ErrorResponse */
+ case 'N': /* NoticeResponse */
{
ErrorData edata;
ErrorContextCallback errctx;
@@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
break;
}
- case 'A': /* NotifyResponse */
+ case 'A': /* NotifyResponse */
{
/* Propagate NotifyResponse. */
pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
break;
}
- case 'X': /* Terminate, indicating clean exit */
+ case 'X': /* Terminate, indicating clean exit */
{
pfree(pcxt->worker[i].bgwhandle);
pfree(pcxt->worker[i].error_mqh);
@@ -797,18 +798,18 @@ static void
ParallelWorkerMain(Datum main_arg)
{
dsm_segment *seg;
- shm_toc *toc;
+ shm_toc *toc;
FixedParallelState *fps;
- char *error_queue_space;
- shm_mq *mq;
+ char *error_queue_space;
+ shm_mq *mq;
shm_mq_handle *mqh;
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- StringInfoData msgbuf;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ StringInfoData msgbuf;
/* Establish signal handlers. */
pqsignal(SIGTERM, die);
@@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Now that we have a resource owner, we can attach to the dynamic
- * shared memory segment and read the table of contents.
+ * Now that we have a resource owner, we can attach to the dynamic shared
+ * memory segment and read the table of contents.
*/
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
@@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg)
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("bad magic number in dynamic shared memory segment")));
+ errmsg("bad magic number in dynamic shared memory segment")));
/* Determine and set our worker number. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg)
*/
error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
mq = (shm_mq *) (error_queue_space +
- ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
+ ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_sender(mq, MyProc);
mqh = shm_mq_attach(mq, seg, NULL);
pq_redirect_to_shm_mq(mq, mqh);
@@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
/*
* Send a BackendKeyData message to the process that initiated parallelism
* so that it has access to our PID before it receives any other messages
- * from us. Our cancel key is sent, too, since that's the way the protocol
- * message is defined, but it won't actually be used for anything in this
- * case.
+ * from us. Our cancel key is sent, too, since that's the way the
+ * protocol message is defined, but it won't actually be used for anything
+ * in this case.
*/
pq_beginmessage(&msgbuf, 'K');
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
@@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
pq_endmessage(&msgbuf);
/*
- * Hooray! Primary initialization is complete. Now, we need to set up
- * our backend-local state to match the original backend.
+ * Hooray! Primary initialization is complete. Now, we need to set up our
+ * backend-local state to match the original backend.
*/
/*
- * Load libraries that were loaded by original backend. We want to do this
- * before restoring GUCs, because the libraries might define custom
+ * Load libraries that were loaded by original backend. We want to do
+ * this before restoring GUCs, because the libraries might define custom
* variables.
*/
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
@@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
/*
- * We've initialized all of our state now; nothing should change hereafter.
+ * We've initialized all of our state now; nothing should change
+ * hereafter.
*/
EnterParallelMode();
@@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg)
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
- char *extensionstate;
- char *library_name;
- char *function_name;
+ char *extensionstate;
+ char *library_name;
+ char *function_name;
parallel_worker_main_type entrypt;
extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
@@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
static void
ParallelErrorContext(void *arg)
{
- errcontext("parallel worker, pid %d", * (int32 *) arg);
+ errcontext("parallel worker, pid %d", *(int32 *) arg);
}
/*
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 4743cacefe..177d1e1432 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
- BackendId locking_backend; /* backend currently working on the xact */
+ BackendId locking_backend; /* backend currently working on the xact */
bool valid; /* TRUE if PGPROC entry is in proc array */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;
@@ -256,24 +256,24 @@ AtAbort_Twophase(void)
return;
/*
- * What to do with the locked global transaction entry? If we were in
- * the process of preparing the transaction, but haven't written the WAL
+ * What to do with the locked global transaction entry? If we were in the
+ * process of preparing the transaction, but haven't written the WAL
* record and state file yet, the transaction must not be considered as
* prepared. Likewise, if we are in the process of finishing an
- * already-prepared transaction, and fail after having already written
- * the 2nd phase commit or rollback record to the WAL, the transaction
- * should not be considered as prepared anymore. In those cases, just
- * remove the entry from shared memory.
+ * already-prepared transaction, and fail after having already written the
+ * 2nd phase commit or rollback record to the WAL, the transaction should
+ * not be considered as prepared anymore. In those cases, just remove the
+ * entry from shared memory.
*
- * Otherwise, the entry must be left in place so that the transaction
- * can be finished later, so just unlock it.
+ * Otherwise, the entry must be left in place so that the transaction can
+ * be finished later, so just unlock it.
*
* If we abort during prepare, after having written the WAL record, we
* might not have transferred all locks and other state to the prepared
* transaction yet. Likewise, if we abort during commit or rollback,
- * after having written the WAL record, we might not have released
- * all the resources held by the transaction yet. In those cases, the
- * in-memory state can be wrong, but it's too late to back out.
+ * after having written the WAL record, we might not have released all the
+ * resources held by the transaction yet. In those cases, the in-memory
+ * state can be wrong, but it's too late to back out.
*/
if (!MyLockedGxact->valid)
{
@@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
/*
- * Remember that we have this GlobalTransaction entry locked for us.
- * If we abort after this, we must release it.
+ * Remember that we have this GlobalTransaction entry locked for us. If we
+ * abort after this, we must release it.
*/
MyLockedGxact = gxact;
@@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
if (gxact->locking_backend != InvalidBackendId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("prepared transaction with identifier \"%s\" is busy",
- gid)));
+ errmsg("prepared transaction with identifier \"%s\" is busy",
+ gid)));
if (user != gxact->owner && !superuser_arg(user))
ereport(ERROR,
@@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
- * no one else will try to commit/rollback, and so it will be recycled
- * if we fail after this point. It is still locked by our backend so it
+ * no one else will try to commit/rollback, and so it will be recycled if
+ * we fail after this point. It is still locked by our backend so it
* won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
@@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
/*
- * We're done with recovering this transaction. Clear MyLockedGxact,
- * like we do in PrepareTransaction() during normal operation.
+ * We're done with recovering this transaction. Clear
+ * MyLockedGxact, like we do in PrepareTransaction() during normal
+ * operation.
*/
PostPrepare_Twophase();
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 23401057e2..b53d95faf8 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -102,9 +102,9 @@ int synchronous_commit = SYNCHRONOUS_COMMIT_ON;
* The XIDs are stored sorted in numerical order (not logical order) to make
* lookups as fast as possible.
*/
-TransactionId XactTopTransactionId = InvalidTransactionId;
-int nParallelCurrentXids = 0;
-TransactionId *ParallelCurrentXids;
+TransactionId XactTopTransactionId = InvalidTransactionId;
+int nParallelCurrentXids = 0;
+TransactionId *ParallelCurrentXids;
/*
* MyXactAccessedTempRel is set when a temporary relation is accessed.
@@ -142,7 +142,7 @@ typedef enum TBlockState
/* transaction block states */
TBLOCK_BEGIN, /* starting transaction block */
TBLOCK_INPROGRESS, /* live transaction */
- TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
+ TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
TBLOCK_END, /* COMMIT received */
TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */
TBLOCK_ABORT_END, /* failed xact, ROLLBACK received */
@@ -184,7 +184,7 @@ typedef struct TransactionStateData
bool prevXactReadOnly; /* entry-time xact r/o state */
bool startedInRecovery; /* did we start in recovery? */
bool didLogXid; /* has xid been included in WAL record? */
- int parallelModeLevel; /* Enter/ExitParallelMode counter */
+ int parallelModeLevel; /* Enter/ExitParallelMode counter */
struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;
@@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
Assert(s->state == TRANS_INPROGRESS);
/*
- * Workers synchronize transaction state at the beginning of each
- * parallel operation, so we can't account for new XIDs at this point.
+ * Workers synchronize transaction state at the beginning of each parallel
+ * operation, so we can't account for new XIDs at this point.
*/
if (IsInParallelMode())
elog(ERROR, "cannot assign XIDs during a parallel operation");
@@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
return false;
/*
- * In parallel workers, the XIDs we must consider as current are stored
- * in ParallelCurrentXids rather than the transaction-state stack. Note
- * that the XIDs in this array are sorted numerically rather than
- * according to transactionIdPrecedes order.
+ * In parallel workers, the XIDs we must consider as current are stored in
+ * ParallelCurrentXids rather than the transaction-state stack. Note that
+ * the XIDs in this array are sorted numerically rather than according to
+ * transactionIdPrecedes order.
*/
if (nParallelCurrentXids > 0)
{
@@ -1204,7 +1204,7 @@ RecordTransactionCommit(void)
nchildren, children, nrels, rels,
nmsgs, invalMessages,
RelcacheInitFileInval, forceSyncCommit,
- InvalidTransactionId /* plain commit */);
+ InvalidTransactionId /* plain commit */ );
/*
* Record plain commit ts if not replaying remote actions, or if no
@@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
- TimestampTz xact_time;
+ TimestampTz xact_time;
/*
* If we haven't been assigned an XID, nobody will care whether we aborted
@@ -2316,8 +2316,8 @@ PrepareTransaction(void)
/*
* In normal commit-processing, this is all non-critical post-transaction
- * cleanup. When the transaction is prepared, however, it's important that
- * the locks and other per-backend resources are transferred to the
+ * cleanup. When the transaction is prepared, however, it's important
+ * that the locks and other per-backend resources are transferred to the
* prepared transaction's PGPROC entry. Note that if an error is raised
* here, it's too late to abort the transaction. XXX: This probably should
* be in a critical section, to force a PANIC if any of this fails, but
@@ -2358,9 +2358,8 @@ PrepareTransaction(void)
/*
* Allow another backend to finish the transaction. After
- * PostPrepare_Twophase(), the transaction is completely detached from
- * our backend. The rest is just non-critical cleanup of backend-local
- * state.
+ * PostPrepare_Twophase(), the transaction is completely detached from our
+ * backend. The rest is just non-critical cleanup of backend-local state.
*/
PostPrepare_Twophase();
@@ -2417,7 +2416,7 @@ AbortTransaction(void)
{
TransactionState s = CurrentTransactionState;
TransactionId latestXid;
- bool is_parallel_worker;
+ bool is_parallel_worker;
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@@ -2520,9 +2519,9 @@ AbortTransaction(void)
latestXid = InvalidTransactionId;
/*
- * Since the parallel master won't get our value of XactLastRecEnd in this
- * case, we nudge WAL-writer ourselves in this case. See related comments in
- * RecordTransactionAbort for why this matters.
+ * Since the parallel master won't get our value of XactLastRecEnd in
+ * this case, we nudge WAL-writer ourselves in this case. See related
+ * comments in RecordTransactionAbort for why this matters.
*/
XLogSetAsyncXactLSN(XactLastRecEnd);
}
@@ -3720,7 +3719,7 @@ DefineSavepoint(char *name)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot define savepoints during a parallel operation")));
+ errmsg("cannot define savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot release savepoints during a parallel operation")));
+ errmsg("cannot release savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot rollback to savepoints during a parallel operation")));
+ errmsg("cannot rollback to savepoints during a parallel operation")));
switch (s->blockState)
{
@@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name)
/*
* Workers synchronize transaction state at the beginning of each parallel
- * operation, so we can't account for new subtransactions after that point.
- * We might be able to make an exception for the type of subtransaction
- * established by this function, which is typically used in contexts where
- * we're going to release or roll back the subtransaction before proceeding
- * further, so that no enduring change to the transaction state occurs.
- * For now, however, we prohibit this case along with all the others.
+ * operation, so we can't account for new subtransactions after that
+ * point. We might be able to make an exception for the type of
+ * subtransaction established by this function, which is typically used in
+ * contexts where we're going to release or roll back the subtransaction
+ * before proceeding further, so that no enduring change to the
+ * transaction state occurs. For now, however, we prohibit this case along
+ * with all the others.
*/
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot start subtransactions during a parallel operation")));
+ errmsg("cannot start subtransactions during a parallel operation")));
switch (s->blockState)
{
@@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot commit subtransactions during a parallel operation")));
+ errmsg("cannot commit subtransactions during a parallel operation")));
if (s->blockState != TBLOCK_SUBINPROGRESS)
elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s",
@@ -4773,7 +4773,8 @@ Size
EstimateTransactionStateSpace(void)
{
TransactionState s;
- Size nxids = 5; /* iso level, deferrable, top & current XID, XID count */
+ Size nxids = 5; /* iso level, deferrable, top & current XID,
+ * XID count */
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@@ -4804,8 +4805,8 @@ void
SerializeTransactionState(Size maxsize, char *start_address)
{
TransactionState s;
- Size nxids = 0;
- Size i = 0;
+ Size nxids = 0;
+ Size i = 0;
TransactionId *workspace;
TransactionId *result = (TransactionId *) start_address;
@@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
}
/*
- * OK, we need to generate a sorted list of XIDs that our workers
- * should view as current. First, figure out how many there are.
+ * OK, we need to generate a sorted list of XIDs that our workers should
+ * view as current. First, figure out how many there are.
*/
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
@@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr)
*/
XLogRecPtr
XactLogCommitRecord(TimestampTz commit_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- int nmsgs, SharedInvalidationMessage *msgs,
- bool relcacheInval, bool forceSync,
- TransactionId twophase_xid)
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ int nmsgs, SharedInvalidationMessage *msgs,
+ bool relcacheInval, bool forceSync,
+ TransactionId twophase_xid)
{
- xl_xact_commit xlrec;
- xl_xact_xinfo xl_xinfo;
- xl_xact_dbinfo xl_dbinfo;
- xl_xact_subxacts xl_subxacts;
+ xl_xact_commit xlrec;
+ xl_xact_xinfo xl_xinfo;
+ xl_xact_dbinfo xl_dbinfo;
+ xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
- xl_xact_invals xl_invals;
- xl_xact_twophase xl_twophase;
- xl_xact_origin xl_origin;
+ xl_xact_invals xl_invals;
+ xl_xact_twophase xl_twophase;
+ xl_xact_origin xl_origin;
- uint8 info;
+ uint8 info;
Assert(CritSectionCount > 0);
@@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time,
*/
XLogRecPtr
XactLogAbortRecord(TimestampTz abort_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- TransactionId twophase_xid)
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ TransactionId twophase_xid)
{
- xl_xact_abort xlrec;
- xl_xact_xinfo xl_xinfo;
- xl_xact_subxacts xl_subxacts;
+ xl_xact_abort xlrec;
+ xl_xact_xinfo xl_xinfo;
+ xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
- xl_xact_twophase xl_twophase;
+ xl_xact_twophase xl_twophase;
- uint8 info;
+ uint8 info;
Assert(CritSectionCount > 0);
@@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
{
TransactionId max_xid;
int i;
- TimestampTz commit_time;
+ TimestampTz commit_time;
max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
@@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
* recovered. It's unlikely but it's good to be safe.
*/
TransactionIdAsyncCommitTree(
- xid, parsed->nsubxacts, parsed->subxacts, lsn);
+ xid, parsed->nsubxacts, parsed->subxacts, lsn);
/*
* We must mark clog before we update the ProcArray.
*/
ExpireTreeKnownAssignedTransactionIds(
- xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+ xid, parsed->nsubxacts, parsed->subxacts, max_xid);
/*
* Send any cache invalidations attached to the commit. We must
@@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
* occurs in CommitTransaction().
*/
ProcessCommittedInvalidationMessages(
- parsed->msgs, parsed->nmsgs,
- XactCompletionRelcacheInitFileInval(parsed->xinfo),
- parsed->dbId, parsed->tsId);
+ parsed->msgs, parsed->nmsgs,
+ XactCompletionRelcacheInitFileInval(parsed->xinfo),
+ parsed->dbId, parsed->tsId);
/*
* Release locks, if any. We do this for both two phase and normal one
@@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
{
/* recover apply progress */
replorigin_advance(origin_id, parsed->origin_lsn, lsn,
- false /* backward */, false /* WAL */);
+ false /* backward */ , false /* WAL */ );
}
/* Make sure files supposed to be dropped are dropped */
@@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
static void
xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
{
- int i;
- TransactionId max_xid;
+ int i;
+ TransactionId max_xid;
/*
* Make sure nextXid is beyond any XID mentioned in the record.
@@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
* We must update the ProcArray after we have marked clog.
*/
ExpireTreeKnownAssignedTransactionIds(
- xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+ xid, parsed->nsubxacts, parsed->subxacts, max_xid);
/*
* There are no flat files that need updating, nor invalidation
@@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record)
xl_xact_parsed_abort parsed;
ParseAbortRecord(XLogRecGetInfo(record), xlrec,
- &parsed);
+ &parsed);
if (info == XLOG_XACT_ABORT)
{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b913bf3ebc..087b6be084 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version;
/* User-settable parameters */
-int max_wal_size = 64; /* 1 GB */
-int min_wal_size = 5; /* 80 MB */
+int max_wal_size = 64; /* 1 GB */
+int min_wal_size = 5; /* 80 MB */
int wal_keep_segments = 0;
int XLOGbuffers = -1;
int XLogArchiveTimeout = 0;
@@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
/*
* Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
* If so, may have to go back and have the caller recompute everything.
- * This can only happen just after a checkpoint, so it's better to be
- * slow in this case and fast otherwise.
+ * This can only happen just after a checkpoint, so it's better to be slow
+ * in this case and fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
* affect the contents of the XLOG record, so we'll update our local copy
* but not force a recomputation. (If doPageWrites was just turned off,
- * we could recompute the record without full pages, but we choose not
- * to bother.)
+ * we could recompute the record without full pages, but we choose not to
+ * bother.)
*/
if (RedoRecPtr != Insert->RedoRecPtr)
{
@@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
{
/*
- * Oops, some buffer now needs to be backed up that the caller
- * didn't back up. Start over.
+ * Oops, some buffer now needs to be backed up that the caller didn't
+ * back up. Start over.
*/
WALInsertLockRelease();
END_CRIT_SECTION();
@@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
{
appendStringInfo(&buf, "error decoding record: out of memory");
}
- else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
- &errormsg))
+ else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
+ &errormsg))
{
appendStringInfo(&buf, "error decoding record: %s",
errormsg ? errormsg : "no error message");
@@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
/*
* Fill the new page's header
*/
- NewPage ->xlp_magic = XLOG_PAGE_MAGIC;
+ NewPage->xlp_magic = XLOG_PAGE_MAGIC;
/* NewPage->xlp_info = 0; */ /* done by memset */
- NewPage ->xlp_tli = ThisTimeLineID;
- NewPage ->xlp_pageaddr = NewPageBeginPtr;
+ NewPage->xlp_tli = ThisTimeLineID;
+ NewPage->xlp_pageaddr = NewPageBeginPtr;
/* NewPage->xlp_rem_len = 0; */ /* done by memset */
@@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
* compress a few records.
*/
if (!Insert->forcePageWrites)
- NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
+ NewPage->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
@@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
NewLongPage->xlp_sysid = ControlFile->system_identifier;
NewLongPage->xlp_seg_size = XLogSegSize;
NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
- NewPage ->xlp_info |= XLP_LONG_HEADER;
+ NewPage->xlp_info |= XLP_LONG_HEADER;
}
/*
@@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void)
*
* a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint.
* b) during checkpoint, we consume checkpoint_completion_target *
- * number of segments consumed between checkpoints.
+ * number of segments consumed between checkpoints.
*-------
*/
- target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget);
+ target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget);
/* round down */
CheckPointSegments = (int) target;
@@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
* remove enough segments to stay below the maximum.
*/
minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1;
- maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
+ maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
/*
* Between those limits, recycle enough segments to get us through to the
* estimated end of next checkpoint.
*
* To estimate where the next checkpoint will finish, assume that the
- * system runs steadily consuming CheckPointDistanceEstimate
- * bytes between every checkpoint.
+ * system runs steadily consuming CheckPointDistanceEstimate bytes between
+ * every checkpoint.
*
* The reason this calculation is done from the prior checkpoint, not the
* one that just finished, is that this behaves better if some checkpoint
@@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
/*
* XXX: What should we use as max_segno? We used to use XLOGfileslop when
* that was a constant, but that was always a bit dubious: normally, at a
- * checkpoint, XLOGfileslop was the offset from the checkpoint record,
- * but here, it was the offset from the insert location. We can't do the
+ * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
+ * here, it was the offset from the insert location. We can't do the
* normal XLOGfileslop calculation here because we don't have access to
- * the prior checkpoint's redo location. So somewhat arbitrarily, just
- * use CheckPointSegments.
+ * the prior checkpoint's redo location. So somewhat arbitrarily, just use
+ * CheckPointSegments.
*/
max_segno = logsegno + CheckPointSegments;
if (!InstallXLogFileSegment(&installed_segno, tmppath,
@@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
nread = upto - nbytes;
/*
- * The part that is not read from the source file is filled with zeros.
+ * The part that is not read from the source file is filled with
+ * zeros.
*/
if (nread < sizeof(buffer))
memset(buffer, 0, sizeof(buffer));
@@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
/*
* Now move the segment into place with its final name. (Or just return
- * the path to the file we created, if the caller wants to handle the
- * rest on its own.)
+ * the path to the file we created, if the caller wants to handle the rest
+ * on its own.)
*/
if (dstfname)
{
@@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
/*
* Remove files that are on a timeline older than the new one we're
- * switching to, but with a segment number >= the first segment on
- * the new timeline.
+ * switching to, but with a segment number >= the first segment on the
+ * new timeline.
*/
if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
strcmp(xlde->d_name + 8, switchseg + 8) > 0)
@@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
segname)));
#ifdef WIN32
+
/*
* On Windows, if another process (e.g another backend) holds the file
* open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
* will still show up in directory listing until the last handle is
- * closed. To avoid confusing the lingering deleted file for a live WAL
- * file that needs to be archived, rename it before deleting it.
+ * closed. To avoid confusing the lingering deleted file for a live
+ * WAL file that needs to be archived, rename it before deleting it.
*
* If another process holds the file open without FILE_SHARE_DELETE
* flag, rename will fail. We'll try again at the next checkpoint.
@@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not rename old transaction log file \"%s\": %m",
- path)));
+ errmsg("could not rename old transaction log file \"%s\": %m",
+ path)));
return;
}
rc = unlink(newpath);
@@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not remove old transaction log file \"%s\": %m",
- path)));
+ errmsg("could not remove old transaction log file \"%s\": %m",
+ path)));
return;
}
CheckpointStats.ckpt_segs_removed++;
@@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
int i;
#ifdef WAL_DEBUG
+
/*
- * Create a memory context for WAL debugging that's exempt from the
- * normal "no pallocs in critical section" rule. Yes, that can lead to a
- * PANIC if an allocation fails, but wal_debug is not for production use
- * anyway.
+ * Create a memory context for WAL debugging that's exempt from the normal
+ * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
+ * an allocation fails, but wal_debug is not for production use anyway.
*/
if (walDebugCxt == NULL)
{
@@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"recovery_target\""),
- errhint("The only allowed value is \"immediate\".")));
+ errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
item->value)));
@@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
}
/*
- * Override any inconsistent requests. Not that this is a change
- * of behaviour in 9.5; prior to this we simply ignored a request
- * to pause if hot_standby = off, which was surprising behaviour.
+ * Override any inconsistent requests. Note that this is a change of
+ * behaviour in 9.5; prior to this we simply ignored a request to pause if
+ * hot_standby = off, which was surprising behaviour.
*/
if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
recoveryTargetActionSet &&
@@ -6043,7 +6045,7 @@ StartupXLOG(void)
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
- List *tablespaces = NIL;
+ List *tablespaces = NIL;
/*
* Archive recovery was requested, and thanks to the backup label
@@ -6099,7 +6101,7 @@ StartupXLOG(void)
foreach(lc, tablespaces)
{
tablespaceinfo *ti = lfirst(lc);
- char *linkloc;
+ char *linkloc;
linkloc = psprintf("pg_tblspc/%s", ti->oid);
@@ -6112,26 +6114,26 @@ StartupXLOG(void)
*/
if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode))
{
- if (!rmtree(linkloc,true))
+ if (!rmtree(linkloc, true))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not remove directory \"%s\": %m",
- linkloc)));
+ errmsg("could not remove directory \"%s\": %m",
+ linkloc)));
}
else
{
if (unlink(linkloc) < 0 && errno != ENOENT)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not remove symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not remove symbolic link \"%s\": %m",
+ linkloc)));
}
if (symlink(ti->path, linkloc) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create symbolic link \"%s\": %m",
- linkloc)));
+ errmsg("could not create symbolic link \"%s\": %m",
+ linkloc)));
pfree(ti->oid);
pfree(ti->path);
@@ -6222,9 +6224,9 @@ StartupXLOG(void)
* in place if the database had been cleanly shut down, but it seems
* safest to just remove them always and let them be rebuilt during the
* first backend startup. These files needs to be removed from all
- * directories including pg_tblspc, however the symlinks are created
- * only after reading tablesapce_map file in case of archive recovery
- * from backup, so needs to clear old relcache files here after creating
+ * directories including pg_tblspc, however the symlinks are created only
+ * after reading tablespace_map file in case of archive recovery from
+ * backup, so needs to clear old relcache files here after creating
* symlinks.
*/
RelationCacheInitFileRemove();
@@ -6442,9 +6444,9 @@ StartupXLOG(void)
* Also set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from a standby. In this case, the database system status in
- * pg_control must indicate that the database was already in
- * recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
- * be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
+ * pg_control must indicate that the database was already in recovery.
+ * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
+ * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
* before reaching this point; e.g. because restore_command or
* primary_conninfo were faulty.
*
@@ -6500,10 +6502,10 @@ StartupXLOG(void)
/*
* If there was a tablespace_map file, it's done its job and the
- * symlinks have been created. We must get rid of the map file
- * so that if we crash during recovery, we don't create symlinks
- * again. It seems prudent though to just rename the file out of
- * the way rather than delete it completely.
+ * symlinks have been created. We must get rid of the map file so
+ * that if we crash during recovery, we don't create symlinks again.
+ * It seems prudent though to just rename the file out of the way
+ * rather than delete it completely.
*/
if (haveTblspcMap)
{
@@ -6859,7 +6861,8 @@ StartupXLOG(void)
{
/*
* Before we continue on the new timeline, clean up any
- * (possibly bogus) future WAL segments on the old timeline.
+ * (possibly bogus) future WAL segments on the old
+ * timeline.
*/
RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
@@ -6890,32 +6893,33 @@ StartupXLOG(void)
{
if (!reachedConsistency)
ereport(FATAL,
- (errmsg("requested recovery stop point is before consistent recovery point")));
+ (errmsg("requested recovery stop point is before consistent recovery point")));
/*
* This is the last point where we can restart recovery with a
* new recovery target, if we shutdown and begin again. After
- * this, Resource Managers may choose to do permanent corrective
- * actions at end of recovery.
+ * this, Resource Managers may choose to do permanent
+ * corrective actions at end of recovery.
*/
switch (recoveryTargetAction)
{
case RECOVERY_TARGET_ACTION_SHUTDOWN:
- /*
- * exit with special return code to request shutdown
- * of postmaster. Log messages issued from
- * postmaster.
- */
- proc_exit(3);
+
+ /*
+ * exit with special return code to request shutdown
+ * of postmaster. Log messages issued from
+ * postmaster.
+ */
+ proc_exit(3);
case RECOVERY_TARGET_ACTION_PAUSE:
- SetRecoveryPause(true);
- recoveryPausesHere();
+ SetRecoveryPause(true);
+ recoveryPausesHere();
- /* drop into promote */
+ /* drop into promote */
case RECOVERY_TARGET_ACTION_PROMOTE:
- break;
+ break;
}
}
@@ -7259,8 +7263,8 @@ StartupXLOG(void)
* too.
*
* If a .done or .ready file already exists for the old timeline,
- * however, we had already determined that the segment is complete,
- * so we can let it be archived normally. (In particular, if it was
+ * however, we had already determined that the segment is complete, so
+ * we can let it be archived normally. (In particular, if it was
* restored from the archive to begin with, it's expected to have a
* .done file).
*/
@@ -7291,8 +7295,8 @@ StartupXLOG(void)
if (rename(origpath, partialpath) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rename file \"%s\" to \"%s\": %m",
- origpath, partialpath)));
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ origpath, partialpath)));
XLogArchiveNotify(partialfname);
}
}
@@ -7366,8 +7370,8 @@ StartupXLOG(void)
XLogReportParameters();
/*
- * Local WAL inserts enabled, so it's time to finish initialization
- * of commit timestamp.
+ * Local WAL inserts enabled, so it's time to finish initialization of
+ * commit timestamp.
*/
CompleteCommitTsInitialization();
@@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint)
(flags & CHECKPOINT_WAIT) ? " wait" : "",
(flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
(flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
- (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :"");
+ (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
}
/*
@@ -8056,8 +8060,8 @@ static void
UpdateCheckPointDistanceEstimate(uint64 nbytes)
{
/*
- * To estimate the number of segments consumed between checkpoints, keep
- * a moving average of the amount of WAL generated in previous checkpoint
+ * To estimate the number of segments consumed between checkpoints, keep a
+ * moving average of the amount of WAL generated in previous checkpoint
* cycles. However, if the load is bursty, with quiet periods and busy
* periods, we want to cater for the peak load. So instead of a plain
* moving average, let the average decline slowly if the previous cycle
@@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
}
/*
- * Update the commit timestamp tracking. If there was a change
- * it needs to be activated or deactivated accordingly.
+ * Update the commit timestamp tracking. If there was a change it
+ * needs to be activated or deactivated accordingly.
*/
if (track_commit_timestamp != xlrec.track_commit_timestamp)
{
@@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
if (track_commit_timestamp)
ActivateCommitTs();
else
+
/*
* We can't create a new WAL record here, but that's OK as
* master did the WAL logging already and we will replay the
@@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
char *relpath = NULL;
int rllen;
StringInfoData buflinkpath;
- char *s = linkpath;
+ char *s = linkpath;
/* Skip special stuff */
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
@@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
linkpath[rllen] = '\0';
/*
- * Add the escape character '\\' before newline in a string
- * to ensure that we can distinguish between the newline in
- * the tablespace path and end of line while reading
- * tablespace_map file during archive recovery.
+ * Add the escape character '\\' before newline in a string to
+ * ensure that we can distinguish between the newline in the
+ * tablespace path and end of line while reading tablespace_map
+ * file during archive recovery.
*/
initStringInfo(&buflinkpath);
@@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
ti->rpath = relpath ? pstrdup(relpath) : NULL;
ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
- if(tablespaces)
- *tablespaces = lappend(*tablespaces, ti);
+ if (tablespaces)
+ *tablespaces = lappend(*tablespaces, ti);
appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path);
@@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
}
else
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("a backup is already in progress"),
- errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
- TABLESPACE_MAP)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("a backup is already in progress"),
+ errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
+ TABLESPACE_MAP)));
fp = AllocateFile(TABLESPACE_MAP, "w");
@@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
BACKUP_LABEL_FILE)));
/*
- * Remove tablespace_map file if present, it is created
- * only if there are tablespaces.
+ * Remove tablespace_map file if present, it is created only if there
+ * are tablespaces.
*/
unlink(TABLESPACE_MAP);
}
@@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces)
tablespaceinfo *ti;
FILE *lfp;
char tbsoid[MAXPGPATH];
- char *tbslinkpath;
+ char *tbslinkpath;
char str[MAXPGPATH];
- int ch, prev_ch = -1,
- i = 0, n;
+ int ch,
+ prev_ch = -1,
+ i = 0,
+ n;
/*
* See if tablespace_map file is present
@@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
/*
* Read and parse the link name and path lines from tablespace_map file
- * (this code is pretty crude, but we are not expecting any variability
- * in the file format). While taking backup we embed escape character
- * '\\' before newline in tablespace path, so that during reading of
+ * (this code is pretty crude, but we are not expecting any variability in
+ * the file format). While taking backup we embed escape character '\\'
+ * before newline in tablespace path, so that during reading of
* tablespace_map file, we could distinguish newline in tablespace path
* and end of line. Now while reading tablespace_map file, remove the
* escape character that has been added in tablespace path during backup.
@@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces)
str[i] = '\0';
if (sscanf(str, "%s %n", tbsoid, &n) != 1)
ereport(FATAL,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
tbslinkpath = str + n;
i = 0;
@@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces)
continue;
}
else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
- str[i-1] = ch;
+ str[i - 1] = ch;
else
str[i++] = ch;
prev_ch = ch;
@@ -10868,7 +10875,7 @@ BackupInProgress(void)
/*
* CancelBackup: rename the "backup_label" and "tablespace_map"
- * files to cancel backup mode
+ * files to cancel backup mode
*
* If the "backup_label" file exists, it will be renamed to "backup_label.old".
* Similarly, if the "tablespace_map" file exists, it will be renamed to
@@ -11115,8 +11122,8 @@ static bool
WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
- static TimestampTz last_fail_time = 0;
- TimestampTz now;
+ static TimestampTz last_fail_time = 0;
+ TimestampTz now;
/*-------
* Standby mode is implemented by a state machine:
@@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
now = GetCurrentTimestamp();
if (!TimestampDifferenceExceeds(last_fail_time, now,
- wal_retrieve_retry_interval))
+ wal_retrieve_retry_interval))
{
- long secs, wait_time;
+ long secs,
+ wait_time;
int usecs;
TimestampDifference(last_fail_time, now, &secs, &usecs);
@@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
(secs * 1000 + usecs / 1000);
WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
wait_time);
ResetLatch(&XLogCtl->recoveryWakeupLatch);
now = GetCurrentTimestamp();
@@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
return;
/*
- * If possible, hint to the kernel that we're soon going to fsync
- * the data directory and its contents.
+ * If possible, hint to the kernel that we're soon going to fsync the data
+ * directory and its contents.
*/
#if defined(HAVE_SYNC_FILE_RANGE) || \
(defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 419736da31..b96c39ac65 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -33,7 +33,7 @@
#include "pg_trace.h"
/* Buffer size required to store a compressed version of backup block image */
-#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
+#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
/*
* For each block reference registered with XLogRegisterBuffer, we fill in
@@ -58,7 +58,7 @@ typedef struct
/* buffer to store a compressed version of backup block image */
char compressed_page[PGLZ_MAX_BLCKSZ];
-} registered_buffer;
+} registered_buffer;
static registered_buffer *registered_buffers;
static int max_registered_buffers; /* allocated size */
@@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
XLogRecPtr RedoRecPtr, bool doPageWrites,
XLogRecPtr *fpw_lsn);
static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
- uint16 hole_length, char *dest, uint16 *dlen);
+ uint16 hole_length, char *dest, uint16 *dlen);
/*
* Begin constructing a WAL record. This must be called before the
@@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
&compressed_len);
}
- /* Fill in the remaining fields in the XLogRecordBlockHeader struct */
+ /*
+ * Fill in the remaining fields in the XLogRecordBlockHeader
+ * struct
+ */
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
/*
@@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
* the length of compressed block image.
*/
static bool
-XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
+XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
char *dest, uint16 *dlen)
{
int32 orig_len = BLCKSZ - hole_length;
@@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
source = page;
/*
- * We recheck the actual size even if pglz_compress() reports success
- * and see if the number of bytes saved by compression is larger than
- * the length of extra data needed for the compressed version of block
- * image.
+ * We recheck the actual size even if pglz_compress() reports success and
+ * see if the number of bytes saved by compression is larger than the
+ * length of extra data needed for the compressed version of block image.
*/
len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
if (len >= 0 &&
len + extra_bytes < orig_len)
{
- *dlen = (uint16) len; /* successful compression */
+ *dlen = (uint16) len; /* successful compression */
return true;
}
return false;
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 3661e7229a..a9e926c5a2 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
blk->bimg_len == BLCKSZ))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
(unsigned int) blk->bimg_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that hole_offset == 0 and hole_length == 0
- * if the HAS_HOLE flag is not set.
+ * cross-check that hole_offset == 0 and hole_length == 0 if
+ * the HAS_HOLE flag is not set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
(blk->hole_offset != 0 || blk->hole_length != 0))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that bimg_len < BLCKSZ
- * if the IS_COMPRESSED flag is set.
+ * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
+ * flag is set.
*/
if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
blk->bimg_len == BLCKSZ)
{
report_invalid_record(state,
- "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
+ "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
(unsigned int) blk->bimg_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
}
+
/*
- * cross-check that bimg_len = BLCKSZ if neither
- * HAS_HOLE nor IS_COMPRESSED flag is set.
+ * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
+ * IS_COMPRESSED flag is set.
*/
if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
!(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
blk->bimg_len != BLCKSZ)
{
report_invalid_record(state,
- "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
+ "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
(unsigned int) blk->data_len,
(uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
goto err;
@@ -1294,8 +1297,8 @@ bool
RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
{
DecodedBkpBlock *bkpb;
- char *ptr;
- char tmp[BLCKSZ];
+ char *ptr;
+ char tmp[BLCKSZ];
if (!record->blocks[block_id].in_use)
return false;
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index e42187a7d5..95d6c146fa 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
proc_exit(1); /* should never return */
case BootstrapProcess:
+
/*
* There was a brief instant during which mode was Normal; this is
* okay. We need to be in bootstrap mode during BootStrapXLOG for
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index ac1dafb041..5e704181ec 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -189,7 +189,8 @@ sub Catalogs
}
else
{
- die "unknown column option $attopt on column $attname"
+ die
+"unknown column option $attopt on column $attname";
}
}
push @{ $catalog{columns} }, \%row;
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 943909c822..50a00cf8c8 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -397,14 +397,14 @@ ExecuteGrantStmt(GrantStmt *stmt)
istmt.behavior = stmt->behavior;
/*
- * Convert the RoleSpec list into an Oid list. Note that at this point
- * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+ * Convert the RoleSpec list into an Oid list. Note that at this point we
+ * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* there shouldn't be any additional work needed to support this case.
*/
foreach(cell, stmt->grantees)
{
- RoleSpec *grantee = (RoleSpec *) lfirst(cell);
- Oid grantee_uid;
+ RoleSpec *grantee = (RoleSpec *) lfirst(cell);
+ Oid grantee_uid;
switch (grantee->roletype)
{
@@ -892,14 +892,14 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
iacls.behavior = action->behavior;
/*
- * Convert the RoleSpec list into an Oid list. Note that at this point
- * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+ * Convert the RoleSpec list into an Oid list. Note that at this point we
+ * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
* there shouldn't be any additional work needed to support this case.
*/
foreach(cell, action->grantees)
{
- RoleSpec *grantee = (RoleSpec *) lfirst(cell);
- Oid grantee_uid;
+ RoleSpec *grantee = (RoleSpec *) lfirst(cell);
+ Oid grantee_uid;
switch (grantee->roletype)
{
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index ec4ba397c7..c1212e9075 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -213,8 +213,8 @@ deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
{
const ObjectAddress *thisobj = &targetObjects->refs[i];
const ObjectAddressExtra *extra = &targetObjects->extras[i];
- bool original = false;
- bool normal = false;
+ bool original = false;
+ bool normal = false;
if (extra->flags & DEPFLAG_ORIGINAL)
original = true;
@@ -1611,10 +1611,10 @@ find_expr_references_walker(Node *node,
context->addrs);
break;
- /*
- * Dependencies for regrole should be shared among all
- * databases, so explicitly inhibit to have dependencies.
- */
+ /*
+ * Dependencies for regrole should be shared among all
+ * databases, so explicitly inhibit to have dependencies.
+ */
case REGROLEOID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index a5c78eed49..d06eae019a 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -147,7 +147,7 @@ foreach my $catname (@{ $catalogs->{names} })
}
print BKI "\n )\n";
- # open it, unless bootstrap case (create bootstrap does this automatically)
+ # open it, unless bootstrap case (create bootstrap does this automatically)
if ($catalog->{bootstrap} eq '')
{
print BKI "open $catname\n";
@@ -242,12 +242,12 @@ foreach my $catname (@{ $catalogs->{names} })
{
$attnum = 0;
my @SYS_ATTRS = (
- { name => 'ctid', type => 'tid' },
- { name => 'oid', type => 'oid' },
- { name => 'xmin', type => 'xid' },
- { name => 'cmin', type=> 'cid' },
- { name => 'xmax', type=> 'xid' },
- { name => 'cmax', type => 'cid' },
+ { name => 'ctid', type => 'tid' },
+ { name => 'oid', type => 'oid' },
+ { name => 'xmin', type => 'xid' },
+ { name => 'cmin', type => 'cid' },
+ { name => 'xmax', type => 'xid' },
+ { name => 'cmax', type => 'cid' },
{ name => 'tableoid', type => 'oid' });
foreach my $attr (@SYS_ATTRS)
{
@@ -384,6 +384,7 @@ sub emit_pgattr_row
}
elsif ($priornotnull)
{
+
# attnotnull will automatically be set if the type is
# fixed-width and prior columns are all NOT NULL ---
# compare DefineAttr in bootstrap.c. oidvector and
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index bac9fbe7eb..4246554d19 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1709,8 +1709,8 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
/*
- * We have to look up the operator's strategy number. This
- * provides a cross-check that the operator does match the index.
+ * We have to look up the operator's strategy number. This provides a
+ * cross-check that the operator does match the index.
*/
/* We need the func OIDs and strategy numbers too */
for (i = 0; i < ncols; i++)
@@ -3186,7 +3186,7 @@ IndexGetRelation(Oid indexId, bool missing_ok)
*/
void
reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
- int options)
+ int options)
{
Relation iRel,
heapRelation;
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 8d98c5d9a6..c37e38fa3b 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -453,89 +453,188 @@ static const struct object_type_map
const char *tm_name;
ObjectType tm_type;
}
-ObjectTypeMap[] =
+
+ ObjectTypeMap[] =
{
/* OCLASS_CLASS, all kinds of relations */
- { "table", OBJECT_TABLE },
- { "index", OBJECT_INDEX },
- { "sequence", OBJECT_SEQUENCE },
- { "toast table", -1 }, /* unmapped */
- { "view", OBJECT_VIEW },
- { "materialized view", OBJECT_MATVIEW },
- { "composite type", -1 }, /* unmapped */
- { "foreign table", OBJECT_FOREIGN_TABLE },
- { "table column", OBJECT_COLUMN },
- { "index column", -1 }, /* unmapped */
- { "sequence column", -1 }, /* unmapped */
- { "toast table column", -1 }, /* unmapped */
- { "view column", -1 }, /* unmapped */
- { "materialized view column", -1 }, /* unmapped */
- { "composite type column", -1 }, /* unmapped */
- { "foreign table column", OBJECT_COLUMN },
+ {
+ "table", OBJECT_TABLE
+ },
+ {
+ "index", OBJECT_INDEX
+ },
+ {
+ "sequence", OBJECT_SEQUENCE
+ },
+ {
+ "toast table", -1
+ }, /* unmapped */
+ {
+ "view", OBJECT_VIEW
+ },
+ {
+ "materialized view", OBJECT_MATVIEW
+ },
+ {
+ "composite type", -1
+ }, /* unmapped */
+ {
+ "foreign table", OBJECT_FOREIGN_TABLE
+ },
+ {
+ "table column", OBJECT_COLUMN
+ },
+ {
+ "index column", -1
+ }, /* unmapped */
+ {
+ "sequence column", -1
+ }, /* unmapped */
+ {
+ "toast table column", -1
+ }, /* unmapped */
+ {
+ "view column", -1
+ }, /* unmapped */
+ {
+ "materialized view column", -1
+ }, /* unmapped */
+ {
+ "composite type column", -1
+ }, /* unmapped */
+ {
+ "foreign table column", OBJECT_COLUMN
+ },
/* OCLASS_PROC */
- { "aggregate", OBJECT_AGGREGATE },
- { "function", OBJECT_FUNCTION },
+ {
+ "aggregate", OBJECT_AGGREGATE
+ },
+ {
+ "function", OBJECT_FUNCTION
+ },
/* OCLASS_TYPE */
- { "type", OBJECT_TYPE },
+ {
+ "type", OBJECT_TYPE
+ },
/* OCLASS_CAST */
- { "cast", OBJECT_CAST },
+ {
+ "cast", OBJECT_CAST
+ },
/* OCLASS_COLLATION */
- { "collation", OBJECT_COLLATION },
+ {
+ "collation", OBJECT_COLLATION
+ },
/* OCLASS_CONSTRAINT */
- { "table constraint", OBJECT_TABCONSTRAINT },
- { "domain constraint", OBJECT_DOMCONSTRAINT },
+ {
+ "table constraint", OBJECT_TABCONSTRAINT
+ },
+ {
+ "domain constraint", OBJECT_DOMCONSTRAINT
+ },
/* OCLASS_CONVERSION */
- { "conversion", OBJECT_CONVERSION },
+ {
+ "conversion", OBJECT_CONVERSION
+ },
/* OCLASS_DEFAULT */
- { "default value", OBJECT_DEFAULT },
+ {
+ "default value", OBJECT_DEFAULT
+ },
/* OCLASS_LANGUAGE */
- { "language", OBJECT_LANGUAGE },
+ {
+ "language", OBJECT_LANGUAGE
+ },
/* OCLASS_LARGEOBJECT */
- { "large object", OBJECT_LARGEOBJECT },
+ {
+ "large object", OBJECT_LARGEOBJECT
+ },
/* OCLASS_OPERATOR */
- { "operator", OBJECT_OPERATOR },
+ {
+ "operator", OBJECT_OPERATOR
+ },
/* OCLASS_OPCLASS */
- { "operator class", OBJECT_OPCLASS },
+ {
+ "operator class", OBJECT_OPCLASS
+ },
/* OCLASS_OPFAMILY */
- { "operator family", OBJECT_OPFAMILY },
+ {
+ "operator family", OBJECT_OPFAMILY
+ },
/* OCLASS_AMOP */
- { "operator of access method", OBJECT_AMOP },
+ {
+ "operator of access method", OBJECT_AMOP
+ },
/* OCLASS_AMPROC */
- { "function of access method", OBJECT_AMPROC },
+ {
+ "function of access method", OBJECT_AMPROC
+ },
/* OCLASS_REWRITE */
- { "rule", OBJECT_RULE },
+ {
+ "rule", OBJECT_RULE
+ },
/* OCLASS_TRIGGER */
- { "trigger", OBJECT_TRIGGER },
+ {
+ "trigger", OBJECT_TRIGGER
+ },
/* OCLASS_SCHEMA */
- { "schema", OBJECT_SCHEMA },
+ {
+ "schema", OBJECT_SCHEMA
+ },
/* OCLASS_TSPARSER */
- { "text search parser", OBJECT_TSPARSER },
+ {
+ "text search parser", OBJECT_TSPARSER
+ },
/* OCLASS_TSDICT */
- { "text search dictionary", OBJECT_TSDICTIONARY },
+ {
+ "text search dictionary", OBJECT_TSDICTIONARY
+ },
/* OCLASS_TSTEMPLATE */
- { "text search template", OBJECT_TSTEMPLATE },
+ {
+ "text search template", OBJECT_TSTEMPLATE
+ },
/* OCLASS_TSCONFIG */
- { "text search configuration", OBJECT_TSCONFIGURATION },
+ {
+ "text search configuration", OBJECT_TSCONFIGURATION
+ },
/* OCLASS_ROLE */
- { "role", OBJECT_ROLE },
+ {
+ "role", OBJECT_ROLE
+ },
/* OCLASS_DATABASE */
- { "database", OBJECT_DATABASE },
+ {
+ "database", OBJECT_DATABASE
+ },
/* OCLASS_TBLSPACE */
- { "tablespace", OBJECT_TABLESPACE },
+ {
+ "tablespace", OBJECT_TABLESPACE
+ },
/* OCLASS_FDW */
- { "foreign-data wrapper", OBJECT_FDW },
+ {
+ "foreign-data wrapper", OBJECT_FDW
+ },
/* OCLASS_FOREIGN_SERVER */
- { "server", OBJECT_FOREIGN_SERVER },
+ {
+ "server", OBJECT_FOREIGN_SERVER
+ },
/* OCLASS_USER_MAPPING */
- { "user mapping", OBJECT_USER_MAPPING },
+ {
+ "user mapping", OBJECT_USER_MAPPING
+ },
/* OCLASS_DEFACL */
- { "default acl", OBJECT_DEFACL },
+ {
+ "default acl", OBJECT_DEFACL
+ },
/* OCLASS_EXTENSION */
- { "extension", OBJECT_EXTENSION },
+ {
+ "extension", OBJECT_EXTENSION
+ },
/* OCLASS_EVENT_TRIGGER */
- { "event trigger", OBJECT_EVENT_TRIGGER },
+ {
+ "event trigger", OBJECT_EVENT_TRIGGER
+ },
/* OCLASS_POLICY */
- { "policy", OBJECT_POLICY }
+ {
+ "policy", OBJECT_POLICY
+ }
};
const ObjectAddress InvalidObjectAddress =
@@ -667,16 +766,16 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
break;
case OBJECT_DOMCONSTRAINT:
{
- ObjectAddress domaddr;
- char *constrname;
+ ObjectAddress domaddr;
+ char *constrname;
domaddr = get_object_address_type(OBJECT_DOMAIN,
- list_head(objname), missing_ok);
+ list_head(objname), missing_ok);
constrname = strVal(linitial(objargs));
address.classId = ConstraintRelationId;
address.objectId = get_domain_constraint_oid(domaddr.objectId,
- constrname, missing_ok);
+ constrname, missing_ok);
address.objectSubId = 0;
}
@@ -1286,8 +1385,8 @@ get_object_address_attrdef(ObjectType objtype, List *objname,
if (attnum != InvalidAttrNumber && tupdesc->constr != NULL)
{
Relation attrdef;
- ScanKeyData keys[2];
- SysScanDesc scan;
+ ScanKeyData keys[2];
+ SysScanDesc scan;
HeapTuple tup;
attrdef = relation_open(AttrDefaultRelationId, AccessShareLock);
@@ -1419,14 +1518,14 @@ static ObjectAddress
get_object_address_opf_member(ObjectType objtype,
List *objname, List *objargs, bool missing_ok)
{
- ObjectAddress famaddr;
- ObjectAddress address;
- ListCell *cell;
- List *copy;
- char *typenames[2];
- Oid typeoids[2];
- int membernum;
- int i;
+ ObjectAddress famaddr;
+ ObjectAddress address;
+ ListCell *cell;
+ List *copy;
+ char *typenames[2];
+ Oid typeoids[2];
+ int membernum;
+ int i;
/*
* The last element of the objname list contains the strategy or procedure
@@ -1441,9 +1540,9 @@ get_object_address_opf_member(ObjectType objtype,
/* find out left/right type names and OIDs */
i = 0;
- foreach (cell, objargs)
+ foreach(cell, objargs)
{
- ObjectAddress typaddr;
+ ObjectAddress typaddr;
typenames[i] = strVal(lfirst(cell));
typaddr = get_object_address_type(OBJECT_TYPE, cell, missing_ok);
@@ -1471,9 +1570,9 @@ get_object_address_opf_member(ObjectType objtype,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("operator %d (%s, %s) of %s does not exist",
- membernum, typenames[0], typenames[1],
- getObjectDescription(&famaddr))));
+ errmsg("operator %d (%s, %s) of %s does not exist",
+ membernum, typenames[0], typenames[1],
+ getObjectDescription(&famaddr))));
}
else
{
@@ -1500,9 +1599,9 @@ get_object_address_opf_member(ObjectType objtype,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("function %d (%s, %s) of %s does not exist",
- membernum, typenames[0], typenames[1],
- getObjectDescription(&famaddr))));
+ errmsg("function %d (%s, %s) of %s does not exist",
+ membernum, typenames[0], typenames[1],
+ getObjectDescription(&famaddr))));
}
else
{
@@ -1636,8 +1735,8 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized default ACL object type %c", objtype),
- errhint("Valid object types are 'r', 'S', 'f', and 'T'.")));
+ errmsg("unrecognized default ACL object type %c", objtype),
+ errhint("Valid object types are 'r', 'S', 'f', and 'T'.")));
}
/*
@@ -1688,8 +1787,8 @@ not_found:
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("default ACL for user \"%s\" on %s does not exist",
- username, objtype_str)));
+ errmsg("default ACL for user \"%s\" on %s does not exist",
+ username, objtype_str)));
}
return address;
}
@@ -1701,11 +1800,11 @@ not_found:
static List *
textarray_to_strvaluelist(ArrayType *arr)
{
- Datum *elems;
- bool *nulls;
- int nelems;
- List *list = NIL;
- int i;
+ Datum *elems;
+ bool *nulls;
+ int nelems;
+ List *list = NIL;
+ int i;
deconstruct_array(arr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);
@@ -1728,18 +1827,18 @@ textarray_to_strvaluelist(ArrayType *arr)
Datum
pg_get_object_address(PG_FUNCTION_ARGS)
{
- char *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0));
- ArrayType *namearr = PG_GETARG_ARRAYTYPE_P(1);
- ArrayType *argsarr = PG_GETARG_ARRAYTYPE_P(2);
- int itype;
- ObjectType type;
- List *name;
- List *args;
+ char *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0));
+ ArrayType *namearr = PG_GETARG_ARRAYTYPE_P(1);
+ ArrayType *argsarr = PG_GETARG_ARRAYTYPE_P(2);
+ int itype;
+ ObjectType type;
+ List *name;
+ List *args;
ObjectAddress addr;
- TupleDesc tupdesc;
- Datum values[3];
- bool nulls[3];
- HeapTuple htup;
+ TupleDesc tupdesc;
+ Datum values[3];
+ bool nulls[3];
+ HeapTuple htup;
Relation relation;
/* Decode object type, raise error if unknown */
@@ -1751,16 +1850,16 @@ pg_get_object_address(PG_FUNCTION_ARGS)
type = (ObjectType) itype;
/*
- * Convert the text array to the representation appropriate for the
- * given object type. Most use a simple string Values list, but there
- * are some exceptions.
+ * Convert the text array to the representation appropriate for the given
+ * object type. Most use a simple string Values list, but there are some
+ * exceptions.
*/
if (type == OBJECT_TYPE || type == OBJECT_DOMAIN || type == OBJECT_CAST ||
type == OBJECT_DOMCONSTRAINT)
{
- Datum *elems;
- bool *nulls;
- int nelems;
+ Datum *elems;
+ bool *nulls;
+ int nelems;
deconstruct_array(namearr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);
@@ -1812,10 +1911,10 @@ pg_get_object_address(PG_FUNCTION_ARGS)
type == OBJECT_AMPROC)
{
/* in these cases, the args list must be of TypeName */
- Datum *elems;
- bool *nulls;
- int nelems;
- int i;
+ Datum *elems;
+ bool *nulls;
+ int nelems;
+ int i;
deconstruct_array(argsarr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);
@@ -1826,9 +1925,9 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (nulls[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("name or argument lists may not contain nulls")));
+ errmsg("name or argument lists may not contain nulls")));
args = lappend(args,
- typeStringToTypeName(TextDatumGetCString(elems[i])));
+ typeStringToTypeName(TextDatumGetCString(elems[i])));
}
}
else
@@ -1850,7 +1949,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (list_length(args) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument list length must be exactly %d", 1)));
+ errmsg("argument list length must be exactly %d", 1)));
break;
case OBJECT_OPFAMILY:
case OBJECT_OPCLASS:
@@ -1870,7 +1969,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
if (list_length(args) != 2)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument list length must be exactly %d", 2)));
+ errmsg("argument list length must be exactly %d", 2)));
break;
default:
break;
@@ -2146,8 +2245,8 @@ read_objtype_from_string(const char *objtype)
}
if (i >= lengthof(ObjectTypeMap))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized object type \"%s\"", objtype)));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("unrecognized object type \"%s\"", objtype)));
return type;
}
@@ -2693,7 +2792,7 @@ getObjectDescription(const ObjectAddress *object)
Form_pg_transform trfForm;
trfTup = SearchSysCache1(TRFOID,
- ObjectIdGetDatum(object->objectId));
+ ObjectIdGetDatum(object->objectId));
if (!HeapTupleIsValid(trfTup))
elog(ERROR, "could not find tuple for transform %u",
object->objectId);
@@ -2924,28 +3023,28 @@ getObjectDescription(const ObjectAddress *object)
case DEFACLOBJ_RELATION:
appendStringInfo(&buffer,
_("default privileges on new relations belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole, false));
+ GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_SEQUENCE:
appendStringInfo(&buffer,
_("default privileges on new sequences belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole, false));
+ GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_FUNCTION:
appendStringInfo(&buffer,
_("default privileges on new functions belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole, false));
+ GetUserNameFromId(defacl->defaclrole, false));
break;
case DEFACLOBJ_TYPE:
appendStringInfo(&buffer,
_("default privileges on new types belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole, false));
+ GetUserNameFromId(defacl->defaclrole, false));
break;
default:
/* shouldn't get here */
appendStringInfo(&buffer,
_("default privileges belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole, false));
+ GetUserNameFromId(defacl->defaclrole, false));
break;
}
@@ -2991,8 +3090,8 @@ getObjectDescription(const ObjectAddress *object)
case OCLASS_POLICY:
{
Relation policy_rel;
- ScanKeyData skey[1];
- SysScanDesc sscan;
+ ScanKeyData skey[1];
+ SysScanDesc sscan;
HeapTuple tuple;
Form_pg_policy form_policy;
@@ -3677,7 +3776,7 @@ getObjectIdentityParts(const ObjectAddress *object,
case OCLASS_TYPE:
{
- char *typeout;
+ char *typeout;
typeout = format_type_be_qualified(object->objectId);
appendStringInfoString(&buffer, typeout);
@@ -3770,7 +3869,7 @@ getObjectIdentityParts(const ObjectAddress *object,
appendStringInfo(&buffer, "%s on %s",
quote_identifier(NameStr(con->conname)),
- getObjectIdentityParts(&domain, objname, objargs));
+ getObjectIdentityParts(&domain, objname, objargs));
if (objname)
*objargs = lappend(*objargs, pstrdup(NameStr(con->conname)));
@@ -3794,8 +3893,8 @@ getObjectIdentityParts(const ObjectAddress *object,
conForm = (Form_pg_conversion) GETSTRUCT(conTup);
schema = get_namespace_name_or_temp(conForm->connamespace);
appendStringInfoString(&buffer,
- quote_qualified_identifier(schema,
- NameStr(conForm->conname)));
+ quote_qualified_identifier(schema,
+ NameStr(conForm->conname)));
if (objname)
*objname = list_make2(schema,
pstrdup(NameStr(conForm->conname)));
@@ -3901,7 +4000,7 @@ getObjectIdentityParts(const ObjectAddress *object,
appendStringInfo(&buffer, "%s USING %s",
quote_qualified_identifier(schema,
- NameStr(opcForm->opcname)),
+ NameStr(opcForm->opcname)),
quote_identifier(NameStr(amForm->amname)));
if (objname)
*objname = list_make3(pstrdup(NameStr(amForm->amname)),
@@ -3956,7 +4055,7 @@ getObjectIdentityParts(const ObjectAddress *object,
if (objname)
{
*objname = lappend(*objname,
- psprintf("%d", amopForm->amopstrategy));
+ psprintf("%d", amopForm->amopstrategy));
*objargs = list_make2(ltype, rtype);
}
@@ -4136,7 +4235,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formParser->prsname)));
if (objname)
*objname = list_make2(schema,
- pstrdup(NameStr(formParser->prsname)));
+ pstrdup(NameStr(formParser->prsname)));
ReleaseSysCache(tup);
break;
}
@@ -4159,7 +4258,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formDict->dictname)));
if (objname)
*objname = list_make2(schema,
- pstrdup(NameStr(formDict->dictname)));
+ pstrdup(NameStr(formDict->dictname)));
ReleaseSysCache(tup);
break;
}
@@ -4182,7 +4281,7 @@ getObjectIdentityParts(const ObjectAddress *object,
NameStr(formTmpl->tmplname)));
if (objname)
*objname = list_make2(schema,
- pstrdup(NameStr(formTmpl->tmplname)));
+ pstrdup(NameStr(formTmpl->tmplname)));
ReleaseSysCache(tup);
break;
}
@@ -4510,10 +4609,10 @@ getRelationIdentity(StringInfo buffer, Oid relid, List **objname)
ArrayType *
strlist_to_textarray(List *list)
{
- ArrayType *arr;
- Datum *datums;
- int j = 0;
- ListCell *cell;
+ ArrayType *arr;
+ Datum *datums;
+ int j = 0;
+ ListCell *cell;
MemoryContext memcxt;
MemoryContext oldcxt;
@@ -4527,7 +4626,7 @@ strlist_to_textarray(List *list)
datums = palloc(sizeof(text *) * list_length(list));
foreach(cell, list)
{
- char *name = lfirst(cell);
+ char *name = lfirst(cell);
datums[j++] = CStringGetTextDatum(name);
}
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 5f211dacde..009ac398ee 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -545,7 +545,7 @@ AggregateCreate(const char *aggName,
parameterModes, /* parameterModes */
parameterNames, /* parameterNames */
parameterDefaults, /* parameterDefaults */
- PointerGetDatum(NULL), /* trftypes */
+ PointerGetDatum(NULL), /* trftypes */
PointerGetDatum(NULL), /* proconfig */
1, /* procost */
0); /* prorows */
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index c880486c4b..902b0a7297 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -346,7 +346,7 @@ restart:
if (!OidIsValid(binary_upgrade_next_pg_enum_oid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("pg_enum OID value not set when in binary upgrade mode")));
+ errmsg("pg_enum OID value not set when in binary upgrade mode")));
/*
* Use binary-upgrade override for pg_enum.oid, if supplied. During
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 122982951e..7765be4be4 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -1158,11 +1158,11 @@ fail:
List *
oid_array_to_list(Datum datum)
{
- ArrayType *array = DatumGetArrayTypeP(datum);
- Datum *values;
- int nelems;
- int i;
- List *result = NIL;
+ ArrayType *array = DatumGetArrayTypeP(datum);
+ Datum *values;
+ int nelems;
+ int i;
+ List *result = NIL;
deconstruct_array(array,
OIDOID,
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 32453c3bb8..c4161b7b3f 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -133,7 +133,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
if (!OidIsValid(binary_upgrade_next_pg_type_oid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("pg_type OID value not set when in binary upgrade mode")));
+ errmsg("pg_type OID value not set when in binary upgrade mode")));
HeapTupleSetOid(tup, binary_upgrade_next_pg_type_oid);
binary_upgrade_next_pg_type_oid = InvalidOid;
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index c99d3534ce..3652d7bf51 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -175,9 +175,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
/*
* Check to see whether the table needs a TOAST table.
*
- * If an update-in-place TOAST relfilenode is specified, force TOAST file
- * creation even if it seems not to need one. This handles the case
- * where the old cluster needed a TOAST table but the new cluster
+ * If an update-in-place TOAST relfilenode is specified, force TOAST
+ * file creation even if it seems not to need one. This handles the
+ * case where the old cluster needed a TOAST table but the new cluster
* would not normally create one.
*/
@@ -260,9 +260,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
namespaceid = PG_TOAST_NAMESPACE;
/*
- * Use binary-upgrade override for pg_type.oid, if supplied. We might
- * be in the post-schema-restore phase where we are doing ALTER TABLE
- * to create TOAST tables that didn't exist in the old cluster.
+ * Use binary-upgrade override for pg_type.oid, if supplied. We might be
+ * in the post-schema-restore phase where we are doing ALTER TABLE to
+ * create TOAST tables that didn't exist in the old cluster.
*/
if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid))
{
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 65e329eab0..861048f213 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -2150,6 +2150,7 @@ compute_scalar_stats(VacAttrStatsP stats,
/* We always use the default collation for statistics */
ssup.ssup_collation = DEFAULT_COLLATION_OID;
ssup.ssup_nulls_first = false;
+
/*
* For now, don't perform abbreviated key conversion, because full values
* are required for MCV slot generation. Supporting that optimization
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 3e14c536e2..8904676609 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -861,8 +861,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
* RLS (returns RLS_ENABLED) or not for this COPY statement.
*
* If the relation has a row security policy and we are to apply it
- * then perform a "query" copy and allow the normal query processing to
- * handle the policies.
+ * then perform a "query" copy and allow the normal query processing
+ * to handle the policies.
*
* If RLS is not enabled for this, then just fall through to the
* normal non-filtering relation handling.
@@ -877,7 +877,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
if (is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY FROM not supported with row level security."),
+ errmsg("COPY FROM not supported with row level security."),
errhint("Use direct INSERT statements instead.")));
/* Build target list */
@@ -904,7 +904,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
select->targetList = list_make1(target);
select->fromClause = list_make1(from);
- query = (Node*) select;
+ query = (Node *) select;
/* Close the handle to the relation as it is no longer needed. */
heap_close(rel, (is_from ? RowExclusiveLock : AccessShareLock));
@@ -1408,26 +1408,27 @@ BeginCopy(bool is_from,
/*
* If we were passed in a relid, make sure we got the same one back
- * after planning out the query. It's possible that it changed between
- * when we checked the policies on the table and decided to use a query
- * and now.
+ * after planning out the query. It's possible that it changed
+ * between when we checked the policies on the table and decided to
+ * use a query and now.
*/
if (queryRelId != InvalidOid)
{
- Oid relid = linitial_oid(plan->relationOids);
+ Oid relid = linitial_oid(plan->relationOids);
/*
- * There should only be one relationOid in this case, since we will
- * only get here when we have changed the command for the user from
- * a "COPY relation TO" to "COPY (SELECT * FROM relation) TO", to
- * allow row level security policies to be applied.
+ * There should only be one relationOid in this case, since we
+ * will only get here when we have changed the command for the
+ * user from a "COPY relation TO" to "COPY (SELECT * FROM
+ * relation) TO", to allow row level security policies to be
+ * applied.
*/
Assert(list_length(plan->relationOids) == 1);
if (relid != queryRelId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("relation referenced by COPY statement has changed")));
+ errmsg("relation referenced by COPY statement has changed")));
}
/*
@@ -2439,7 +2440,7 @@ CopyFrom(CopyState cstate)
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, false, NULL,
+ estate, false, NULL,
NIL);
/* AFTER ROW INSERT Triggers */
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index e8f0d793b6..41183f6ff5 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -89,7 +89,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
if (stmt->if_not_exists)
{
- Oid nspid;
+ Oid nspid;
nspid = RangeVarGetCreationNamespace(stmt->into->rel);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index a699ce3fd2..6cbe65e88a 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -554,8 +554,8 @@ createdb(const CreatedbStmt *stmt)
* Force a checkpoint before starting the copy. This will force all dirty
* buffers, including those of unlogged tables, out to disk, to ensure
* source database is up-to-date on disk for the copy.
- * FlushDatabaseBuffers() would suffice for that, but we also want
- * to process any pending unlink requests. Otherwise, if a checkpoint
+ * FlushDatabaseBuffers() would suffice for that, but we also want to
+ * process any pending unlink requests. Otherwise, if a checkpoint
* happened while we're copying files, a file might be deleted just when
* we're about to copy it, causing the lstat() call in copydir() to fail
* with ENOENT.
@@ -841,8 +841,8 @@ dropdb(const char *dbname, bool missing_ok)
if (ReplicationSlotsCountDBSlots(db_id, &nslots, &nslots_active))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is used by a logical replication slot",
- dbname),
+ errmsg("database \"%s\" is used by a logical replication slot",
+ dbname),
errdetail_plural("There is %d slot, %d of them active.",
"There are %d slots, %d of them active.",
nslots,
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index 78a1bf334d..f04f4f5f31 100644
--- a/src/backend/commands/dropcmds.c
+++ b/src/backend/commands/dropcmds.c
@@ -415,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_OPCLASS:
{
- List *opcname = list_copy_tail(objname, 1);
+ List *opcname = list_copy_tail(objname, 1);
if (!schema_does_not_exist_skipping(opcname, &msg, &name))
{
@@ -427,7 +427,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_OPFAMILY:
{
- List *opfname = list_copy_tail(objname, 1);
+ List *opfname = list_copy_tail(objname, 1);
if (!schema_does_not_exist_skipping(opfname, &msg, &name))
{
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index d786c7d606..cc10c5eb1d 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -57,13 +57,15 @@ typedef struct EventTriggerQueryState
bool in_sql_drop;
/* table_rewrite */
- Oid table_rewrite_oid; /* InvalidOid, or set for table_rewrite event */
+ Oid table_rewrite_oid; /* InvalidOid, or set for
+ * table_rewrite event */
int table_rewrite_reason; /* AT_REWRITE reason */
/* Support for command collection */
bool commandCollectionInhibited;
CollectedCommand *currentCommand;
- List *commandList; /* list of CollectedCommand; see deparse_utility.h */
+ List *commandList; /* list of CollectedCommand; see
+ * deparse_utility.h */
struct EventTriggerQueryState *previous;
} EventTriggerQueryState;
@@ -143,7 +145,7 @@ static void AlterEventTriggerOwner_internal(Relation rel,
Oid newOwnerId);
static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
static event_trigger_command_tag_check_result check_table_rewrite_ddl_tag(
- const char *tag);
+ const char *tag);
static void error_duplicate_filter_variable(const char *defname);
static Datum filter_list_to_array(List *filterlist);
static Oid insert_event_trigger_tuple(char *trigname, char *eventname,
@@ -714,7 +716,7 @@ EventTriggerCommonSetup(Node *parsetree,
dbgtag = CreateCommandTag(parsetree);
if (event == EVT_DDLCommandStart ||
- event == EVT_DDLCommandEnd ||
+ event == EVT_DDLCommandEnd ||
event == EVT_SQLDrop)
{
if (check_ddl_tag(dbgtag) != EVENT_TRIGGER_COMMAND_TAG_OK)
@@ -1562,8 +1564,8 @@ pg_event_trigger_table_rewrite_oid(PG_FUNCTION_ARGS)
currentEventTriggerState->table_rewrite_oid == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("%s can only be called in a table_rewrite event trigger function",
- "pg_event_trigger_table_rewrite_oid()")));
+ errmsg("%s can only be called in a table_rewrite event trigger function",
+ "pg_event_trigger_table_rewrite_oid()")));
PG_RETURN_OID(currentEventTriggerState->table_rewrite_oid);
}
@@ -1583,8 +1585,8 @@ pg_event_trigger_table_rewrite_reason(PG_FUNCTION_ARGS)
currentEventTriggerState->table_rewrite_reason == 0)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("%s can only be called in a table_rewrite event trigger function",
- "pg_event_trigger_table_rewrite_reason()")));
+ errmsg("%s can only be called in a table_rewrite event trigger function",
+ "pg_event_trigger_table_rewrite_reason()")));
PG_RETURN_INT32(currentEventTriggerState->table_rewrite_reason);
}
@@ -1672,7 +1674,7 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
command->parsetree = copyObject(parsetree);
currentEventTriggerState->commandList = lappend(currentEventTriggerState->commandList,
- command);
+ command);
MemoryContextSwitchTo(oldcxt);
}
@@ -1687,13 +1689,13 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
*
* XXX -- this API isn't considering the possibility of an ALTER TABLE command
* being called reentrantly by an event trigger function. Do we need stackable
- * commands at this level? Perhaps at least we should detect the condition and
+ * commands at this level? Perhaps at least we should detect the condition and
* raise an error.
*/
void
EventTriggerAlterTableStart(Node *parsetree)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */
@@ -1744,7 +1746,7 @@ EventTriggerAlterTableRelid(Oid objectId)
void
EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedATSubcmd *newsub;
/* ignore if event trigger context not set, or collection disabled */
@@ -1808,8 +1810,8 @@ EventTriggerCollectGrant(InternalGrant *istmt)
{
MemoryContext oldcxt;
CollectedCommand *command;
- InternalGrant *icopy;
- ListCell *cell;
+ InternalGrant *icopy;
+ ListCell *cell;
/* ignore if event trigger context not set, or collection disabled */
if (!currentEventTriggerState ||
@@ -1849,9 +1851,9 @@ EventTriggerCollectGrant(InternalGrant *istmt)
*/
void
EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
- List *operators, List *procedures)
+ List *operators, List *procedures)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */
@@ -1882,9 +1884,9 @@ EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
*/
void
EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, Oid opcoid,
- List *operators, List *procedures)
+ List *operators, List *procedures)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */
@@ -1918,7 +1920,7 @@ void
EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
Oid *dictIds, int ndicts)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */
@@ -1952,7 +1954,7 @@ EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
void
EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
CollectedCommand *command;
/* ignore if event trigger context not set, or collection disabled */
@@ -2034,10 +2036,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
* object, the returned OID is Invalid. Don't return anything.
*
* One might think that a viable alternative would be to look up the
- * Oid of the existing object and run the deparse with that. But since
- * the parse tree might be different from the one that created the
- * object in the first place, we might not end up in a consistent state
- * anyway.
+ * Oid of the existing object and run the deparse with that. But
+ * since the parse tree might be different from the one that created
+ * the object in the first place, we might not end up in a consistent
+ * state anyway.
*/
if (cmd->type == SCT_Simple &&
!OidIsValid(cmd->d.simple.address.objectId))
@@ -2074,10 +2076,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
identity = getObjectIdentity(&addr);
/*
- * Obtain schema name, if any ("pg_temp" if a temp object).
- * If the object class is not in the supported list here,
- * we assume it's a schema-less object type, and thus
- * "schema" remains set to NULL.
+ * Obtain schema name, if any ("pg_temp" if a temp
+ * object). If the object class is not in the supported
+ * list here, we assume it's a schema-less object type,
+ * and thus "schema" remains set to NULL.
*/
if (is_objectclass_supported(addr.classId))
{
@@ -2099,10 +2101,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
addr.classId, addr.objectId);
schema_oid =
heap_getattr(objtup, nspAttnum,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR,
- "invalid null namespace in object %u/%u/%d",
+ "invalid null namespace in object %u/%u/%d",
addr.classId, addr.objectId, addr.objectSubId);
/* XXX not quite get_namespace_name_or_temp */
if (isAnyTempNamespace(schema_oid))
@@ -2149,7 +2151,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
values[i++] = CStringGetTextDatum(CreateCommandTag(cmd->parsetree));
/* object_type */
values[i++] = CStringGetTextDatum(stringify_adefprivs_objtype(
- cmd->d.defprivs.objtype));
+ cmd->d.defprivs.objtype));
/* schema */
nulls[i++] = true;
/* identity */
@@ -2172,7 +2174,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
"GRANT" : "REVOKE");
/* object_type */
values[i++] = CStringGetTextDatum(stringify_grantobjtype(
- cmd->d.grant.istmt->objtype));
+ cmd->d.grant.istmt->objtype));
/* schema */
nulls[i++] = true;
/* identity */
@@ -2230,7 +2232,7 @@ stringify_grantobjtype(GrantObjectType objtype)
return "TYPE";
default:
elog(ERROR, "unrecognized type %d", objtype);
- return "???"; /* keep compiler quiet */
+ return "???"; /* keep compiler quiet */
}
}
@@ -2257,6 +2259,6 @@ stringify_adefprivs_objtype(GrantObjectType objtype)
break;
default:
elog(ERROR, "unrecognized type %d", objtype);
- return "???"; /* keep compiler quiet */
+ return "???"; /* keep compiler quiet */
}
}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 232f41df65..a82c6ff7b4 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -83,11 +83,11 @@ static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
static void show_agg_keys(AggState *astate, List *ancestors,
ExplainState *es);
static void show_grouping_sets(PlanState *planstate, Agg *agg,
- List *ancestors, ExplainState *es);
+ List *ancestors, ExplainState *es);
static void show_grouping_set_keys(PlanState *planstate,
- Agg *aggnode, Sort *sortnode,
- List *context, bool useprefix,
- List *ancestors, ExplainState *es);
+ Agg *aggnode, Sort *sortnode,
+ List *context, bool useprefix,
+ List *ancestors, ExplainState *es);
static void show_group_keys(GroupState *gstate, List *ancestors,
ExplainState *es);
static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
@@ -754,7 +754,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
((ModifyTable *) plan)->nominalRelation);
if (((ModifyTable *) plan)->exclRelRTI)
*rels_used = bms_add_member(*rels_used,
- ((ModifyTable *) plan)->exclRelRTI);
+ ((ModifyTable *) plan)->exclRelRTI);
break;
default:
break;
@@ -984,6 +984,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
* quite messy.
*/
RangeTblEntry *rte;
+
rte = rt_fetch(((SampleScan *) plan)->scanrelid, es->rtable);
custom_name = get_tablesample_method_name(rte->tablesample->tsmid);
pname = psprintf("Sample Scan (%s)", custom_name);
@@ -1895,8 +1896,8 @@ show_grouping_sets(PlanState *planstate, Agg *agg,
foreach(lc, agg->chain)
{
- Agg *aggnode = lfirst(lc);
- Sort *sortnode = (Sort *) aggnode->plan.lefttree;
+ Agg *aggnode = lfirst(lc);
+ Sort *sortnode = (Sort *) aggnode->plan.lefttree;
show_grouping_set_keys(planstate, aggnode, sortnode,
context, useprefix, ancestors, es);
@@ -2561,7 +2562,7 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
{
ExplainProperty("Conflict Resolution",
node->onConflictAction == ONCONFLICT_NOTHING ?
- "NOTHING" : "UPDATE",
+ "NOTHING" : "UPDATE",
false, es);
/*
@@ -2582,9 +2583,9 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
/* EXPLAIN ANALYZE display of actual outcome for each tuple proposed */
if (es->analyze && mtstate->ps.instrument)
{
- double total;
- double insert_path;
- double other_path;
+ double total;
+ double insert_path;
+ double other_path;
InstrEndLoop(mtstate->mt_plans[0]->instrument);
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index c1426dc939..3d220e9c7e 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -921,9 +921,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
- * Only superuser is allowed to create leakproof functions because leakproof
- * functions can see tuples which have not yet been filtered out by security
- * barrier views or row level security policies.
+ * Only superuser is allowed to create leakproof functions because
+ * leakproof functions can see tuples which have not yet been filtered out
+ * by security barrier views or row level security policies.
*/
if (isLeakProof && !superuser())
ereport(ERROR,
@@ -932,14 +932,15 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
if (transformDefElem)
{
- ListCell *lc;
+ ListCell *lc;
Assert(IsA(transformDefElem, List));
- foreach (lc, (List *) transformDefElem)
+ foreach(lc, (List *) transformDefElem)
{
- Oid typeid = typenameTypeId(NULL, lfirst(lc));
- Oid elt = get_base_element_type(typeid);
+ Oid typeid = typenameTypeId(NULL, lfirst(lc));
+ Oid elt = get_base_element_type(typeid);
+
typeid = elt ? elt : typeid;
get_transform_oid(typeid, languageOid, false);
@@ -992,13 +993,13 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
if (list_length(trftypes_list) > 0)
{
- ListCell *lc;
- Datum *arr;
- int i;
+ ListCell *lc;
+ Datum *arr;
+ int i;
arr = palloc(list_length(trftypes_list) * sizeof(Datum));
i = 0;
- foreach (lc, trftypes_list)
+ foreach(lc, trftypes_list)
arr[i++] = ObjectIdGetDatum(lfirst_oid(lc));
trftypes = construct_array(arr, list_length(trftypes_list),
OIDOID, sizeof(Oid), true, 'i');
@@ -1716,7 +1717,7 @@ check_transform_function(Form_pg_proc procstruct)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("transform function must not be an aggregate function")));
+ errmsg("transform function must not be an aggregate function")));
if (procstruct->proiswindow)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -1867,9 +1868,9 @@ CreateTransform(CreateTransformStmt *stmt)
if (!stmt->replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("transform for type %s language \"%s\" already exists",
- format_type_be(typeid),
- stmt->lang)));
+ errmsg("transform for type %s language \"%s\" already exists",
+ format_type_be(typeid),
+ stmt->lang)));
MemSet(replaces, false, sizeof(replaces));
replaces[Anum_pg_transform_trffromsql - 1] = true;
@@ -1958,9 +1959,9 @@ get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok)
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("transform for type %s language \"%s\" does not exist",
- format_type_be(type_id),
- get_language_name(lang_id, false))));
+ errmsg("transform for type %s language \"%s\" does not exist",
+ format_type_be(type_id),
+ get_language_name(lang_id, false))));
return oid;
}
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index eb16bb31ff..5492e5985b 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -66,7 +66,7 @@ static char *make_temptable_name_n(char *tempname, int n);
static void mv_GenerateOper(StringInfo buf, Oid opoid);
static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
- int save_sec_context);
+ int save_sec_context);
static void refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence);
static void OpenMatViewIncrementalMaintenance(void);
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index a3d840da5c..6e95ba28b9 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -45,27 +45,27 @@
#include "utils/syscache.h"
static void RangeVarCallbackForPolicy(const RangeVar *rv,
- Oid relid, Oid oldrelid, void *arg);
+ Oid relid, Oid oldrelid, void *arg);
static char parse_policy_command(const char *cmd_name);
-static ArrayType* policy_role_list_to_array(List *roles);
+static ArrayType *policy_role_list_to_array(List *roles);
/*
* Callback to RangeVarGetRelidExtended().
*
* Checks the following:
- * - the relation specified is a table.
- * - current user owns the table.
- * - the table is not a system table.
+ * - the relation specified is a table.
+ * - current user owns the table.
+ * - the table is not a system table.
*
* If any of these checks fails then an error is raised.
*/
static void
RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
- void *arg)
+ void *arg)
{
- HeapTuple tuple;
- Form_pg_class classform;
- char relkind;
+ HeapTuple tuple;
+ Form_pg_class classform;
+ char relkind;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
@@ -96,8 +96,8 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
/*
* parse_policy_command -
- * helper function to convert full command strings to their char
- * representation.
+ * helper function to convert full command strings to their char
+ * representation.
*
* cmd_name - full string command name. Valid values are 'all', 'select',
* 'insert', 'update' and 'delete'.
@@ -106,7 +106,7 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
static char
parse_policy_command(const char *cmd_name)
{
- char cmd;
+ char cmd;
if (!cmd_name)
elog(ERROR, "unrecognized policy command");
@@ -129,7 +129,7 @@ parse_policy_command(const char *cmd_name)
/*
* policy_role_list_to_array
- * helper function to convert a list of RoleSpecs to an array of role ids.
+ * helper function to convert a list of RoleSpecs to an array of role ids.
*/
static ArrayType *
policy_role_list_to_array(List *roles)
@@ -156,7 +156,7 @@ policy_role_list_to_array(List *roles)
foreach(cell, roles)
{
- RoleSpec *spec = lfirst(cell);
+ RoleSpec *spec = lfirst(cell);
/*
* PUBLIC covers all roles, so it only makes sense alone.
@@ -167,7 +167,7 @@ policy_role_list_to_array(List *roles)
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ignoring roles specified other than public"),
- errhint("All roles are members of the public role.")));
+ errhint("All roles are members of the public role.")));
temp_array[0] = ObjectIdGetDatum(ACL_ID_PUBLIC);
num_roles = 1;
break;
@@ -193,14 +193,14 @@ policy_role_list_to_array(List *roles)
void
RelationBuildRowSecurity(Relation relation)
{
- MemoryContext rscxt;
- MemoryContext oldcxt = CurrentMemoryContext;
- RowSecurityDesc * volatile rsdesc = NULL;
+ MemoryContext rscxt;
+ MemoryContext oldcxt = CurrentMemoryContext;
+ RowSecurityDesc *volatile rsdesc = NULL;
/*
* Create a memory context to hold everything associated with this
- * relation's row security policy. This makes it easy to clean up
- * during a relcache flush.
+ * relation's row security policy. This makes it easy to clean up during
+ * a relcache flush.
*/
rscxt = AllocSetContextCreate(CacheMemoryContext,
"row security descriptor",
@@ -209,15 +209,15 @@ RelationBuildRowSecurity(Relation relation)
ALLOCSET_SMALL_MAXSIZE);
/*
- * Since rscxt lives under CacheMemoryContext, it is long-lived. Use
- * a PG_TRY block to ensure it'll get freed if we fail partway through.
+ * Since rscxt lives under CacheMemoryContext, it is long-lived. Use a
+ * PG_TRY block to ensure it'll get freed if we fail partway through.
*/
PG_TRY();
{
- Relation catalog;
- ScanKeyData skey;
- SysScanDesc sscan;
- HeapTuple tuple;
+ Relation catalog;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple tuple;
rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc));
rsdesc->rscxt = rscxt;
@@ -238,17 +238,17 @@ RelationBuildRowSecurity(Relation relation)
*/
while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
{
- Datum value_datum;
- char cmd_value;
- Datum roles_datum;
- char *qual_value;
- Expr *qual_expr;
- char *with_check_value;
- Expr *with_check_qual;
- char *policy_name_value;
- Oid policy_id;
- bool isnull;
- RowSecurityPolicy *policy;
+ Datum value_datum;
+ char cmd_value;
+ Datum roles_datum;
+ char *qual_value;
+ Expr *qual_expr;
+ char *with_check_value;
+ Expr *with_check_qual;
+ char *policy_name_value;
+ Oid policy_id;
+ bool isnull;
+ RowSecurityPolicy *policy;
/*
* Note: all the pass-by-reference data we collect here is either
@@ -259,26 +259,26 @@ RelationBuildRowSecurity(Relation relation)
/* Get policy command */
value_datum = heap_getattr(tuple, Anum_pg_policy_polcmd,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
Assert(!isnull);
cmd_value = DatumGetChar(value_datum);
/* Get policy name */
value_datum = heap_getattr(tuple, Anum_pg_policy_polname,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
Assert(!isnull);
policy_name_value = NameStr(*(DatumGetName(value_datum)));
/* Get policy roles */
roles_datum = heap_getattr(tuple, Anum_pg_policy_polroles,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
/* shouldn't be null, but initdb doesn't mark it so, so check */
if (isnull)
elog(ERROR, "unexpected null value in pg_policy.polroles");
/* Get policy qual */
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
if (!isnull)
{
qual_value = TextDatumGetCString(value_datum);
@@ -289,7 +289,7 @@ RelationBuildRowSecurity(Relation relation)
/* Get WITH CHECK qual */
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
if (!isnull)
{
with_check_value = TextDatumGetCString(value_datum);
@@ -311,7 +311,7 @@ RelationBuildRowSecurity(Relation relation)
policy->qual = copyObject(qual_expr);
policy->with_check_qual = copyObject(with_check_qual);
policy->hassublinks = checkExprHasSubLink((Node *) qual_expr) ||
- checkExprHasSubLink((Node *) with_check_qual);
+ checkExprHasSubLink((Node *) with_check_qual);
rsdesc->policies = lcons(policy, rsdesc->policies);
@@ -330,15 +330,15 @@ RelationBuildRowSecurity(Relation relation)
/*
* Check if no policies were added
*
- * If no policies exist in pg_policy for this relation, then we
- * need to create a single default-deny policy. We use InvalidOid for
- * the Oid to indicate that this is the default-deny policy (we may
- * decide to ignore the default policy if an extension adds policies).
+ * If no policies exist in pg_policy for this relation, then we need
+ * to create a single default-deny policy. We use InvalidOid for the
+ * Oid to indicate that this is the default-deny policy (we may decide
+ * to ignore the default policy if an extension adds policies).
*/
if (rsdesc->policies == NIL)
{
- RowSecurityPolicy *policy;
- Datum role;
+ RowSecurityPolicy *policy;
+ Datum role;
MemoryContextSwitchTo(rscxt);
@@ -351,7 +351,7 @@ RelationBuildRowSecurity(Relation relation)
policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true,
'i');
policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
- sizeof(bool), BoolGetDatum(false),
+ sizeof(bool), BoolGetDatum(false),
false, true);
policy->with_check_qual = copyObject(policy->qual);
policy->hassublinks = false;
@@ -376,15 +376,15 @@ RelationBuildRowSecurity(Relation relation)
/*
* RemovePolicyById -
- * remove a policy by its OID. If a policy does not exist with the provided
- * oid, then an error is raised.
+ * remove a policy by its OID. If a policy does not exist with the provided
+ * oid, then an error is raised.
*
* policy_id - the oid of the policy.
*/
void
RemovePolicyById(Oid policy_id)
{
- Relation pg_policy_rel;
+ Relation pg_policy_rel;
SysScanDesc sscan;
ScanKeyData skey[1];
HeapTuple tuple;
@@ -435,8 +435,8 @@ RemovePolicyById(Oid policy_id)
/*
* Note that, unlike some of the other flags in pg_class, relrowsecurity
- * is not just an indication of if policies exist. When relrowsecurity
- * is set by a user, then all access to the relation must be through a
+ * is not just an indication of if policies exist. When relrowsecurity is
+ * set by a user, then all access to the relation must be through a
* policy. If no policy is defined for the relation then a default-deny
* policy is created and all records are filtered (except for queries from
* the owner).
@@ -450,31 +450,31 @@ RemovePolicyById(Oid policy_id)
/*
* CreatePolicy -
- * handles the execution of the CREATE POLICY command.
+ * handles the execution of the CREATE POLICY command.
*
* stmt - the CreatePolicyStmt that describes the policy to create.
*/
ObjectAddress
CreatePolicy(CreatePolicyStmt *stmt)
{
- Relation pg_policy_rel;
- Oid policy_id;
- Relation target_table;
- Oid table_id;
- char polcmd;
- ArrayType *role_ids;
- ParseState *qual_pstate;
- ParseState *with_check_pstate;
- RangeTblEntry *rte;
- Node *qual;
- Node *with_check_qual;
- ScanKeyData skey[2];
- SysScanDesc sscan;
- HeapTuple policy_tuple;
- Datum values[Natts_pg_policy];
- bool isnull[Natts_pg_policy];
- ObjectAddress target;
- ObjectAddress myself;
+ Relation pg_policy_rel;
+ Oid policy_id;
+ Relation target_table;
+ Oid table_id;
+ char polcmd;
+ ArrayType *role_ids;
+ ParseState *qual_pstate;
+ ParseState *with_check_pstate;
+ RangeTblEntry *rte;
+ Node *qual;
+ Node *with_check_qual;
+ ScanKeyData skey[2];
+ SysScanDesc sscan;
+ HeapTuple policy_tuple;
+ Datum values[Natts_pg_policy];
+ bool isnull[Natts_pg_policy];
+ ObjectAddress target;
+ ObjectAddress myself;
/* Parse command */
polcmd = parse_policy_command(stmt->cmd);
@@ -506,8 +506,8 @@ CreatePolicy(CreatePolicyStmt *stmt)
with_check_pstate = make_parsestate(NULL);
/* zero-clear */
- memset(values, 0, sizeof(values));
- memset(isnull, 0, sizeof(isnull));
+ memset(values, 0, sizeof(values));
+ memset(isnull, 0, sizeof(isnull));
/* Get id of table. Also handles permissions checks. */
table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock,
@@ -515,7 +515,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
RangeVarCallbackForPolicy,
(void *) stmt);
- /* Open target_table to build quals. No lock is necessary.*/
+ /* Open target_table to build quals. No lock is necessary. */
target_table = relation_open(table_id, NoLock);
/* Add for the regular security quals */
@@ -534,9 +534,9 @@ CreatePolicy(CreatePolicyStmt *stmt)
"POLICY");
with_check_qual = transformWhereClause(with_check_pstate,
- copyObject(stmt->with_check),
- EXPR_KIND_WHERE,
- "POLICY");
+ copyObject(stmt->with_check),
+ EXPR_KIND_WHERE,
+ "POLICY");
/* Open pg_policy catalog */
pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
@@ -568,7 +568,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
values[Anum_pg_policy_polrelid - 1] = ObjectIdGetDatum(table_id);
values[Anum_pg_policy_polname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->policy_name));
+ CStringGetDatum(stmt->policy_name));
values[Anum_pg_policy_polcmd - 1] = CharGetDatum(polcmd);
values[Anum_pg_policy_polroles - 1] = PointerGetDatum(role_ids);
@@ -625,34 +625,34 @@ CreatePolicy(CreatePolicyStmt *stmt)
/*
* AlterPolicy -
- * handles the execution of the ALTER POLICY command.
+ * handles the execution of the ALTER POLICY command.
*
* stmt - the AlterPolicyStmt that describes the policy and how to alter it.
*/
ObjectAddress
AlterPolicy(AlterPolicyStmt *stmt)
{
- Relation pg_policy_rel;
- Oid policy_id;
- Relation target_table;
- Oid table_id;
- ArrayType *role_ids = NULL;
- List *qual_parse_rtable = NIL;
- List *with_check_parse_rtable = NIL;
- Node *qual = NULL;
- Node *with_check_qual = NULL;
- ScanKeyData skey[2];
- SysScanDesc sscan;
- HeapTuple policy_tuple;
- HeapTuple new_tuple;
- Datum values[Natts_pg_policy];
- bool isnull[Natts_pg_policy];
- bool replaces[Natts_pg_policy];
- ObjectAddress target;
- ObjectAddress myself;
- Datum cmd_datum;
- char polcmd;
- bool polcmd_isnull;
+ Relation pg_policy_rel;
+ Oid policy_id;
+ Relation target_table;
+ Oid table_id;
+ ArrayType *role_ids = NULL;
+ List *qual_parse_rtable = NIL;
+ List *with_check_parse_rtable = NIL;
+ Node *qual = NULL;
+ Node *with_check_qual = NULL;
+ ScanKeyData skey[2];
+ SysScanDesc sscan;
+ HeapTuple policy_tuple;
+ HeapTuple new_tuple;
+ Datum values[Natts_pg_policy];
+ bool isnull[Natts_pg_policy];
+ bool replaces[Natts_pg_policy];
+ ObjectAddress target;
+ ObjectAddress myself;
+ Datum cmd_datum;
+ char polcmd;
+ bool polcmd_isnull;
/* Parse role_ids */
if (stmt->roles != NULL)
@@ -669,8 +669,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
/* Parse the using policy clause */
if (stmt->qual)
{
- RangeTblEntry *rte;
- ParseState *qual_pstate = make_parsestate(NULL);
+ RangeTblEntry *rte;
+ ParseState *qual_pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(qual_pstate, target_table,
NULL, false, false);
@@ -688,8 +688,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
/* Parse the with-check policy clause */
if (stmt->with_check)
{
- RangeTblEntry *rte;
- ParseState *with_check_pstate = make_parsestate(NULL);
+ RangeTblEntry *rte;
+ ParseState *with_check_pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(with_check_pstate, target_table,
NULL, false, false);
@@ -706,9 +706,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
}
/* zero-clear */
- memset(values, 0, sizeof(values));
+ memset(values, 0, sizeof(values));
memset(replaces, 0, sizeof(replaces));
- memset(isnull, 0, sizeof(isnull));
+ memset(isnull, 0, sizeof(isnull));
/* Find policy to update. */
pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
@@ -756,8 +756,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
errmsg("only USING expression allowed for SELECT, DELETE")));
/*
- * If the command is INSERT then WITH CHECK should be the only
- * expression provided.
+ * If the command is INSERT then WITH CHECK should be the only expression
+ * provided.
*/
if ((polcmd == ACL_INSERT_CHR)
&& stmt->qual != NULL)
@@ -829,19 +829,19 @@ AlterPolicy(AlterPolicyStmt *stmt)
/*
* rename_policy -
- * change the name of a policy on a relation
+ * change the name of a policy on a relation
*/
ObjectAddress
rename_policy(RenameStmt *stmt)
{
- Relation pg_policy_rel;
- Relation target_table;
- Oid table_id;
- Oid opoloid;
- ScanKeyData skey[2];
- SysScanDesc sscan;
- HeapTuple policy_tuple;
- ObjectAddress address;
+ Relation pg_policy_rel;
+ Relation target_table;
+ Oid table_id;
+ Oid opoloid;
+ ScanKeyData skey[2];
+ SysScanDesc sscan;
+ HeapTuple policy_tuple;
+ ObjectAddress address;
/* Get id of table. Also handles permissions checks. */
table_id = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
@@ -875,7 +875,7 @@ rename_policy(RenameStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("policy \"%s\" for table \"%s\" already exists",
- stmt->newname, RelationGetRelationName(target_table))));
+ stmt->newname, RelationGetRelationName(target_table))));
systable_endscan(sscan);
@@ -903,7 +903,7 @@ rename_policy(RenameStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("policy \"%s\" for table \"%s\" does not exist",
- stmt->subname, RelationGetRelationName(target_table))));
+ stmt->subname, RelationGetRelationName(target_table))));
opoloid = HeapTupleGetOid(policy_tuple);
@@ -923,9 +923,9 @@ rename_policy(RenameStmt *stmt)
ObjectAddressSet(address, PolicyRelationId, opoloid);
/*
- * Invalidate relation's relcache entry so that other backends (and
- * this one too!) are sent SI message to make them rebuild relcache
- * entries. (Ideally this should happen automatically...)
+ * Invalidate relation's relcache entry so that other backends (and this
+ * one too!) are sent SI message to make them rebuild relcache entries.
+ * (Ideally this should happen automatically...)
*/
CacheInvalidateRelcache(target_table);
@@ -946,11 +946,11 @@ rename_policy(RenameStmt *stmt)
Oid
get_relation_policy_oid(Oid relid, const char *policy_name, bool missing_ok)
{
- Relation pg_policy_rel;
- ScanKeyData skey[2];
- SysScanDesc sscan;
- HeapTuple policy_tuple;
- Oid policy_oid;
+ Relation pg_policy_rel;
+ ScanKeyData skey[2];
+ SysScanDesc sscan;
+ HeapTuple policy_tuple;
+ Oid policy_oid;
pg_policy_rel = heap_open(PolicyRelationId, AccessShareLock);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5a7beff7d5..01e8612145 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -44,7 +44,7 @@ static void AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerI
Oid
CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
{
- const char *schemaName = stmt->schemaname;
+ const char *schemaName = stmt->schemaname;
Oid namespaceId;
OverrideSearchPath *overridePath;
List *parsetree_list;
@@ -68,7 +68,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/* fill schema name with the user name if not specified */
if (!schemaName)
{
- HeapTuple tuple;
+ HeapTuple tuple;
tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(owner_uid));
if (!HeapTupleIsValid(tuple))
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index bb85cb9f13..9c1037fe53 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -566,8 +566,8 @@ nextval_internal(Oid relid)
PreventCommandIfReadOnly("nextval()");
/*
- * Forbid this during parallel operation because, to make it work,
- * the cooperating backends would need to share the backend-local cached
+ * Forbid this during parallel operation because, to make it work, the
+ * cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("nextval()");
@@ -702,10 +702,10 @@ nextval_internal(Oid relid)
/*
* If something needs to be WAL logged, acquire an xid, so this
- * transaction's commit will trigger a WAL flush and wait for
- * syncrep. It's sufficient to ensure the toplevel transaction has an xid,
- * no need to assign xids subxacts, that'll already trigger an appropriate
- * wait. (Have to do that here, so we're outside the critical section)
+ * transaction's commit will trigger a WAL flush and wait for syncrep.
+ * It's sufficient to ensure the toplevel transaction has an xid, no need
+ * to assign xids subxacts, that'll already trigger an appropriate wait.
+ * (Have to do that here, so we're outside the critical section)
*/
if (logit && RelationNeedsWAL(seqrel))
GetTopTransactionId();
@@ -870,8 +870,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
PreventCommandIfReadOnly("setval()");
/*
- * Forbid this during parallel operation because, to make it work,
- * the cooperating backends would need to share the backend-local cached
+ * Forbid this during parallel operation because, to make it work, the
+ * cooperating backends would need to share the backend-local cached
* sequence information. Currently, we don't support that.
*/
PreventCommandIfParallelMode("setval()");
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 5114e6f1a4..84dbee0c41 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -306,14 +306,14 @@ static void createForeignKeyTriggers(Relation rel, Oid refRelOid,
Constraint *fkconstraint,
Oid constraintOid, Oid indexOid);
static void ATController(AlterTableStmt *parsetree,
- Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
+ Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
bool recurse, bool recursing, LOCKMODE lockmode);
static void ATRewriteCatalogs(List **wqueue, LOCKMODE lockmode);
static void ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
AlterTableCmd *cmd, LOCKMODE lockmode);
static void ATRewriteTables(AlterTableStmt *parsetree,
- List **wqueue, LOCKMODE lockmode);
+ List **wqueue, LOCKMODE lockmode);
static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode);
static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel);
static void ATSimplePermissions(Relation rel, int allowed_targets);
@@ -631,7 +631,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
cooked->contype = CONSTR_DEFAULT;
- cooked->conoid = InvalidOid; /* until created */
+ cooked->conoid = InvalidOid; /* until created */
cooked->name = NULL;
cooked->attnum = attnum;
cooked->expr = colDef->cooked_default;
@@ -1751,7 +1751,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
cooked->contype = CONSTR_CHECK;
- cooked->conoid = InvalidOid; /* until created */
+ cooked->conoid = InvalidOid; /* until created */
cooked->name = pstrdup(name);
cooked->attnum = 0; /* not used for constraints */
cooked->expr = expr;
@@ -1781,7 +1781,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
*/
if (inhSchema != NIL)
{
- int schema_attno = 0;
+ int schema_attno = 0;
foreach(entry, schema)
{
@@ -1809,14 +1809,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
* Yes, try to merge the two column definitions. They must
* have the same type, typmod, and collation.
*/
- if (exist_attno == schema_attno)
+ if (exist_attno == schema_attno)
ereport(NOTICE,
- (errmsg("merging column \"%s\" with inherited definition",
- attributeName)));
+ (errmsg("merging column \"%s\" with inherited definition",
+ attributeName)));
else
ereport(NOTICE,
- (errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
- errdetail("User-specified column moved to the position of the inherited column.")));
+ (errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
+ errdetail("User-specified column moved to the position of the inherited column.")));
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
typenameTypeIdAndMod(NULL, def->typeName, &defTypeId, &deftypmod);
typenameTypeIdAndMod(NULL, newdef->typeName, &newTypeId, &newtypmod);
@@ -3496,7 +3496,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
break;
case AT_ReAddIndex: /* ADD INDEX */
address = ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, true,
- lockmode);
+ lockmode);
break;
case AT_AddConstraint: /* ADD CONSTRAINT */
address =
@@ -3803,7 +3803,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
* And fire it only once.
*/
if (parsetree)
- EventTriggerTableRewrite((Node *)parsetree,
+ EventTriggerTableRewrite((Node *) parsetree,
tab->relid,
tab->rewrite);
@@ -5960,7 +5960,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
true, /* update pg_index */
true, /* remove old dependencies */
allowSystemTableMods,
- false); /* is_internal */
+ false); /* is_internal */
index_close(indexRel, NoLock);
@@ -6906,7 +6906,7 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
HeapTupleGetOid(tuple));
}
else
- address = InvalidObjectAddress; /* already validated */
+ address = InvalidObjectAddress; /* already validated */
systable_endscan(scan);
@@ -7866,11 +7866,12 @@ ATPrepAlterColumnType(List **wqueue,
{
/*
* Set up an expression to transform the old data value to the new
- * type. If a USING option was given, use the expression as transformed
- * by transformAlterTableStmt, else just take the old value and try to
- * coerce it. We do this first so that type incompatibility can be
- * detected before we waste effort, and because we need the expression
- * to be parsed against the original table row type.
+ * type. If a USING option was given, use the expression as
+ * transformed by transformAlterTableStmt, else just take the old
+ * value and try to coerce it. We do this first so that type
+ * incompatibility can be detected before we waste effort, and because
+ * we need the expression to be parsed against the original table row
+ * type.
*/
if (!transform)
{
@@ -8221,8 +8222,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* specified in the policy's USING or WITH CHECK qual
* expressions. It might be possible to rewrite and recheck
* the policy expression, but punt for now. It's certainly
- * easy enough to remove and recreate the policy; still,
- * FIXME someday.
+ * easy enough to remove and recreate the policy; still, FIXME
+ * someday.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -9701,9 +9702,9 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
!ConditionalLockRelationOid(relOid, AccessExclusiveLock))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
- get_namespace_name(relForm->relnamespace),
- NameStr(relForm->relname))));
+ errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
+ get_namespace_name(relForm->relnamespace),
+ NameStr(relForm->relname))));
else
LockRelationOid(relOid, AccessExclusiveLock);
@@ -10923,9 +10924,9 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
static void
ATExecEnableRowSecurity(Relation rel)
{
- Relation pg_class;
- Oid relid;
- HeapTuple tuple;
+ Relation pg_class;
+ Oid relid;
+ HeapTuple tuple;
relid = RelationGetRelid(rel);
@@ -10949,9 +10950,9 @@ ATExecEnableRowSecurity(Relation rel)
static void
ATExecDisableRowSecurity(Relation rel)
{
- Relation pg_class;
- Oid relid;
- HeapTuple tuple;
+ Relation pg_class;
+ Oid relid;
+ HeapTuple tuple;
relid = RelationGetRelid(rel);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index d9b9587f1e..31091ba7f3 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -4329,7 +4329,7 @@ AfterTriggerEndSubXact(bool isCommit)
static void
AfterTriggerEnlargeQueryState(void)
{
- int init_depth = afterTriggers.maxquerydepth;
+ int init_depth = afterTriggers.maxquerydepth;
Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
@@ -4396,7 +4396,7 @@ SetConstraintStateCreate(int numalloc)
state = (SetConstraintState)
MemoryContextAllocZero(TopTransactionContext,
offsetof(SetConstraintStateData, trigstates) +
- numalloc * sizeof(SetConstraintTriggerData));
+ numalloc * sizeof(SetConstraintTriggerData));
state->numalloc = numalloc;
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index ab13be225c..de91353891 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -575,13 +575,13 @@ DefineType(List *names, List *parameters)
if (typmodinOid && func_volatile(typmodinOid) == PROVOLATILE_VOLATILE)
ereport(WARNING,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type modifier input function %s should not be volatile",
- NameListToString(typmodinName))));
+ errmsg("type modifier input function %s should not be volatile",
+ NameListToString(typmodinName))));
if (typmodoutOid && func_volatile(typmodoutOid) == PROVOLATILE_VOLATILE)
ereport(WARNING,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type modifier output function %s should not be volatile",
- NameListToString(typmodoutName))));
+ errmsg("type modifier output function %s should not be volatile",
+ NameListToString(typmodoutName))));
/*
* OK, we're done checking, time to make the type. We must assign the
@@ -643,32 +643,32 @@ DefineType(List *names, List *parameters)
array_type, /* type name */
typeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
- 0, /* relation kind (ditto) */
- GetUserId(), /* owner's ID */
- -1, /* internal size (always varlena) */
+ 0, /* relation kind (ditto) */
+ GetUserId(), /* owner's ID */
+ -1, /* internal size (always varlena) */
TYPTYPE_BASE, /* type-type (base type) */
TYPCATEGORY_ARRAY, /* type-category (array) */
- false, /* array types are never preferred */
+ false, /* array types are never preferred */
delimiter, /* array element delimiter */
F_ARRAY_IN, /* input procedure */
- F_ARRAY_OUT, /* output procedure */
+ F_ARRAY_OUT, /* output procedure */
F_ARRAY_RECV, /* receive procedure */
F_ARRAY_SEND, /* send procedure */
- typmodinOid, /* typmodin procedure */
+ typmodinOid, /* typmodin procedure */
typmodoutOid, /* typmodout procedure */
F_ARRAY_TYPANALYZE, /* analyze procedure */
- typoid, /* element type ID */
- true, /* yes this is an array type */
+ typoid, /* element type ID */
+ true, /* yes this is an array type */
InvalidOid, /* no further array type */
InvalidOid, /* base type ID */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- false, /* never passed by value */
+ NULL, /* never a default type value */
+ NULL, /* binary default isn't sent either */
+ false, /* never passed by value */
alignment, /* see above */
- 'x', /* ARRAY is always toastable */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
- false, /* Type NOT NULL */
+ 'x', /* ARRAY is always toastable */
+ -1, /* typMod (Domains only) */
+ 0, /* Array dimensions of typbasetype */
+ false, /* Type NOT NULL */
collation); /* type's collation */
pfree(array_type);
@@ -1616,7 +1616,7 @@ makeRangeConstructors(const char *name, Oid namespace,
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL), /* parameterNames */
NIL, /* parameterDefaults */
- PointerGetDatum(NULL), /* trftypes */
+ PointerGetDatum(NULL), /* trftypes */
PointerGetDatum(NULL), /* proconfig */
1.0, /* procost */
0.0); /* prorows */
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 456c27ebe0..3b381c5835 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -87,7 +87,8 @@ CreateRole(CreateRoleStmt *stmt)
bool createdb = false; /* Can the user create databases? */
bool canlogin = false; /* Can this user login? */
bool isreplication = false; /* Is this a replication role? */
- bool bypassrls = false; /* Is this a row security enabled role? */
+ bool bypassrls = false; /* Is this a row security enabled
+ * role? */
int connlimit = -1; /* maximum connections allowed */
List *addroleto = NIL; /* roles to make this a member of */
List *rolemembers = NIL; /* roles to be members of this role */
@@ -300,7 +301,7 @@ CreateRole(CreateRoleStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change bypassrls attribute.")));
+ errmsg("must be superuser to change bypassrls attribute.")));
}
else
{
@@ -681,7 +682,7 @@ AlterRole(AlterRoleStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change bypassrls attribute")));
+ errmsg("must be superuser to change bypassrls attribute")));
}
else if (!have_createrole_privilege())
{
@@ -721,11 +722,11 @@ AlterRole(AlterRoleStmt *stmt)
* Call the password checking hook if there is one defined
*/
if (check_password_hook && password)
- (*check_password_hook)(rolename ,
- password,
- isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
- validUntil_datum,
- validUntil_null);
+ (*check_password_hook) (rolename,
+ password,
+ isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+ validUntil_datum,
+ validUntil_null);
/*
* Build an updated tuple, perusing the information just obtained
@@ -1358,8 +1359,8 @@ roleSpecsToIds(List *memberNames)
foreach(l, memberNames)
{
- Node *rolespec = (Node *) lfirst(l);
- Oid roleid;
+ Node *rolespec = (Node *) lfirst(l);
+ Oid roleid;
roleid = get_rolespec_oid(rolespec, false);
result = lappend_oid(result, roleid);
@@ -1455,7 +1456,7 @@ AddRoleMems(const char *rolename, Oid roleid,
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
(errmsg("role \"%s\" is a member of role \"%s\"",
- rolename, get_rolespec_name((Node *) memberRole)))));
+ rolename, get_rolespec_name((Node *) memberRole)))));
/*
* Check if entry for this role/member already exists; if so, give
@@ -1470,7 +1471,7 @@ AddRoleMems(const char *rolename, Oid roleid,
{
ereport(NOTICE,
(errmsg("role \"%s\" is already a member of role \"%s\"",
- get_rolespec_name((Node *) memberRole), rolename)));
+ get_rolespec_name((Node *) memberRole), rolename)));
ReleaseSysCache(authmem_tuple);
continue;
}
@@ -1581,7 +1582,7 @@ DelRoleMems(const char *rolename, Oid roleid,
{
ereport(WARNING,
(errmsg("role \"%s\" is not a member of role \"%s\"",
- get_rolespec_name((Node *) memberRole), rolename)));
+ get_rolespec_name((Node *) memberRole), rolename)));
continue;
}
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 34ca325a9b..baf66f1e6c 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -83,7 +83,7 @@ static bool vacuum_rel(Oid relid, RangeVar *relation, int options,
void
ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel)
{
- VacuumParams params;
+ VacuumParams params;
/* sanity checks on options */
Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE));
@@ -530,8 +530,8 @@ vacuum_set_xid_limits(Relation rel,
/*
* Compute the multixact age for which freezing is urgent. This is
- * normally autovacuum_multixact_freeze_max_age, but may be less if we
- * are short of multixact member space.
+ * normally autovacuum_multixact_freeze_max_age, but may be less if we are
+ * short of multixact member space.
*/
effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
@@ -1134,9 +1134,8 @@ vac_truncate_clog(TransactionId frozenXID,
return;
/*
- * Truncate CLOG and CommitTs to the oldest computed value.
- * Note we don't truncate multixacts; that will be done by the next
- * checkpoint.
+ * Truncate CLOG and CommitTs to the oldest computed value. Note we don't
+ * truncate multixacts; that will be done by the next checkpoint.
*/
TruncateCLOG(frozenXID);
TruncateCommitTs(frozenXID, true);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index c94575c81e..a01cfb4c04 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -105,7 +105,7 @@ typedef struct LVRelStats
BlockNumber old_rel_pages; /* previous value of pg_class.relpages */
BlockNumber rel_pages; /* total number of pages */
BlockNumber scanned_pages; /* number of pages we examined */
- BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
+ BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */
double scanned_tuples; /* counts only tuples on scanned pages */
double old_rel_tuples; /* previous value of pg_class.reltuples */
double new_rel_tuples; /* new estimated total # of tuples */
@@ -336,7 +336,8 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
TimestampDifferenceExceeds(starttime, endtime,
params->log_min_duration))
{
- StringInfoData buf;
+ StringInfoData buf;
+
TimestampDifference(starttime, endtime, &secs, &usecs);
read_rate = 0;
@@ -369,7 +370,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
vacrelstats->new_rel_tuples,
vacrelstats->new_dead_tuples);
appendStringInfo(&buf,
- _("buffer usage: %d hits, %d misses, %d dirtied\n"),
+ _("buffer usage: %d hits, %d misses, %d dirtied\n"),
VacuumPageHit,
VacuumPageMiss,
VacuumPageDirty);
@@ -454,7 +455,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
BlockNumber next_not_all_visible_block;
bool skipping_all_visible_blocks;
xl_heap_freeze_tuple *frozen;
- StringInfoData buf;
+ StringInfoData buf;
pg_rusage_init(&ru0);
@@ -1784,7 +1785,7 @@ static bool
heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
{
Page page = BufferGetPage(buf);
- BlockNumber blockno = BufferGetBlockNumber(buf);
+ BlockNumber blockno = BufferGetBlockNumber(buf);
OffsetNumber offnum,
maxoff;
bool all_visible = true;
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 4948a265cb..04073d3f9f 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -405,10 +405,10 @@ ExecSupportsMarkRestore(Path *pathnode)
* that does, we presently come here only for ResultPath nodes,
* which represent Result plans without a child plan. So there is
* nothing to recurse to and we can just say "false". (This means
- * that Result's support for mark/restore is in fact dead code.
- * We keep it since it's not much code, and someday the planner
- * might be smart enough to use it. That would require making
- * this function smarter too, of course.)
+ * that Result's support for mark/restore is in fact dead code. We
+ * keep it since it's not much code, and someday the planner might
+ * be smart enough to use it. That would require making this
+ * function smarter too, of course.)
*/
Assert(IsA(pathnode, ResultPath));
return false;
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index ee1cd19f96..bf385086c6 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -78,9 +78,9 @@
* another in-progress tuple, it has two options:
*
* 1. back out the speculatively inserted tuple, then wait for the other
- * transaction, and retry. Or,
+ * transaction, and retry. Or,
* 2. wait for the other transaction, with the speculatively inserted tuple
- * still in place.
+ * still in place.
*
* If two backends insert at the same time, and both try to wait for each
* other, they will deadlock. So option 2 is not acceptable. Option 1
@@ -428,7 +428,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
indexRelation, indexInfo,
tupleid, values, isnull,
estate, false,
- waitMode, violationOK, NULL);
+ waitMode, violationOK, NULL);
}
if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@@ -538,7 +538,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters"),
errtableconstraint(heapRelation,
- RelationGetRelationName(indexRelation))));
+ RelationGetRelationName(indexRelation))));
checkedIndex = true;
@@ -578,7 +578,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
satisfiesConstraint =
check_exclusion_or_unique_constraint(heapRelation, indexRelation,
indexInfo, &invalidItemPtr,
- values, isnull, estate, false,
+ values, isnull, estate, false,
CEOUC_WAIT, true,
conflictTid);
if (!satisfiesConstraint)
@@ -814,9 +814,9 @@ retry:
errmsg("could not create exclusion constraint \"%s\"",
RelationGetRelationName(index)),
error_new && error_existing ?
- errdetail("Key %s conflicts with key %s.",
- error_new, error_existing) :
- errdetail("Key conflicts exist."),
+ errdetail("Key %s conflicts with key %s.",
+ error_new, error_existing) :
+ errdetail("Key conflicts exist."),
errtableconstraint(heap,
RelationGetRelationName(index))));
else
@@ -825,9 +825,9 @@ retry:
errmsg("conflicting key value violates exclusion constraint \"%s\"",
RelationGetRelationName(index)),
error_new && error_existing ?
- errdetail("Key %s conflicts with existing key %s.",
- error_new, error_existing) :
- errdetail("Key conflicts with existing key."),
+ errdetail("Key %s conflicts with existing key %s.",
+ error_new, error_existing) :
+ errdetail("Key conflicts with existing key."),
errtableconstraint(heap,
RelationGetRelationName(index))));
}
@@ -838,8 +838,8 @@ retry:
* Ordinarily, at this point the search should have found the originally
* inserted tuple (if any), unless we exited the loop early because of
* conflict. However, it is possible to define exclusion constraints for
- * which that wouldn't be true --- for instance, if the operator is <>.
- * So we no longer complain if found_self is still false.
+ * which that wouldn't be true --- for instance, if the operator is <>. So
+ * we no longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7c29b4b42a..a1561ce0cc 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -153,16 +153,16 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
* If the transaction is read-only, we need to check if any writes are
* planned to non-temporary tables. EXPLAIN is considered read-only.
*
- * Don't allow writes in parallel mode. Supporting UPDATE and DELETE would
- * require (a) storing the combocid hash in shared memory, rather than
- * synchronizing it just once at the start of parallelism, and (b) an
+ * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
+ * would require (a) storing the combocid hash in shared memory, rather
+ * than synchronizing it just once at the start of parallelism, and (b) an
* alternative to heap_update()'s reliance on xmax for mutual exclusion.
* INSERT may have no such troubles, but we forbid it to simplify the
* checks.
*
* We have lower-level defenses in CommandCounterIncrement and elsewhere
- * against performing unsafe operations in parallel mode, but this gives
- * a more user-friendly error message.
+ * against performing unsafe operations in parallel mode, but this gives a
+ * more user-friendly error message.
*/
if ((XactReadOnly || IsInParallelMode()) &&
!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
@@ -670,14 +670,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
*/
if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
userid,
- rte->insertedCols,
- ACL_INSERT))
+ rte->insertedCols,
+ ACL_INSERT))
return false;
if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
userid,
- rte->updatedCols,
- ACL_UPDATE))
+ rte->updatedCols,
+ ACL_UPDATE))
return false;
}
return true;
@@ -695,10 +695,9 @@ ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
int col = -1;
/*
- * When the query doesn't explicitly update any columns, allow the
- * query if we have permission on any column of the rel. This is
- * to handle SELECT FOR UPDATE as well as possible corner cases in
- * UPDATE.
+ * When the query doesn't explicitly update any columns, allow the query
+ * if we have permission on any column of the rel. This is to handle
+ * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
*/
if (bms_is_empty(modifiedCols))
{
@@ -742,8 +741,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
ListCell *l;
/*
- * Fail if write permissions are requested in parallel mode for
- * table (temp or non-temp), otherwise fail for any non-temp table.
+ * Fail if write permissions are requested in parallel mode for table
+ * (temp or non-temp), otherwise fail for any non-temp table.
*/
foreach(l, plannedstmt->rtable)
{
@@ -1665,9 +1664,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
Relation rel = resultRelInfo->ri_RelationDesc;
TupleDesc tupdesc = RelationGetDescr(rel);
TupleConstr *constr = tupdesc->constr;
- Bitmapset *modifiedCols;
- Bitmapset *insertedCols;
- Bitmapset *updatedCols;
+ Bitmapset *modifiedCols;
+ Bitmapset *insertedCols;
+ Bitmapset *updatedCols;
Assert(constr);
@@ -1722,7 +1721,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
RelationGetRelationName(rel), failed),
- val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
+ val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
errtableconstraint(rel, failed)));
}
}
@@ -1773,11 +1772,11 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
/*
* WITH CHECK OPTION checks are intended to ensure that the new tuple
* is visible (in the case of a view) or that it passes the
- * 'with-check' policy (in the case of row security).
- * If the qual evaluates to NULL or FALSE, then the new tuple won't be
- * included in the view or doesn't pass the 'with-check' policy for the
- * table. We need ExecQual to return FALSE for NULL to handle the view
- * case (the opposite of what we do above for CHECK constraints).
+ * 'with-check' policy (in the case of row security). If the qual
+ * evaluates to NULL or FALSE, then the new tuple won't be included in
+ * the view or doesn't pass the 'with-check' policy for the table. We
+ * need ExecQual to return FALSE for NULL to handle the view case (the
+ * opposite of what we do above for CHECK constraints).
*/
if (!ExecQual((List *) wcoExpr, econtext, false))
{
@@ -1788,14 +1787,15 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
switch (wco->kind)
{
- /*
- * For WITH CHECK OPTIONs coming from views, we might be able to
- * provide the details on the row, depending on the permissions
- * on the relation (that is, if the user could view it directly
- * anyway). For RLS violations, we don't include the data since
- * we don't know if the user should be able to view the tuple as
- * as that depends on the USING policy.
- */
+ /*
+ * For WITH CHECK OPTIONs coming from views, we might be
+ * able to provide the details on the row, depending on
+ * the permissions on the relation (that is, if the user
+ * could view it directly anyway). For RLS violations, we
+ * don't include the data since we don't know if the user
+ * should be able to view the tuple as that depends on
+ * the USING policy.
+ */
case WCO_VIEW_CHECK:
insertedCols = GetInsertedColumns(resultRelInfo, estate);
updatedCols = GetUpdatedColumns(resultRelInfo, estate);
@@ -1808,8 +1808,8 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
- errmsg("new row violates WITH CHECK OPTION for \"%s\"",
- wco->relname),
+ errmsg("new row violates WITH CHECK OPTION for \"%s\"",
+ wco->relname),
val_desc ? errdetail("Failing row contains %s.",
val_desc) : 0));
break;
@@ -1817,14 +1817,14 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
case WCO_RLS_UPDATE_CHECK:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row level security policy for \"%s\"",
- wco->relname)));
+ errmsg("new row violates row level security policy for \"%s\"",
+ wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row level security policy (USING expression) for \"%s\"",
- wco->relname)));
+ errmsg("new row violates row level security policy (USING expression) for \"%s\"",
+ wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
@@ -1915,8 +1915,8 @@ ExecBuildSlotValueDescription(Oid reloid,
{
/*
* No table-level SELECT, so need to make sure they either have
- * SELECT rights on the column or that they have provided the
- * data for the column. If not, omit this column from the error
+ * SELECT rights on the column or that they have provided the data
+ * for the column. If not, omit this column from the error
* message.
*/
aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
@@ -2258,14 +2258,14 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
break;
case LockWaitSkip:
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
- return NULL; /* skip instead of waiting */
+ return NULL; /* skip instead of waiting */
break;
case LockWaitError:
if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
continue; /* loop back to repeat heap_fetch */
@@ -2313,9 +2313,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
* doing so would require changing heap_update and
* heap_delete to not complain about updating "invisible"
* tuples, which seems pretty scary (heap_lock_tuple will
- * not complain, but few callers expect HeapTupleInvisible,
- * and we're not one of them). So for now, treat the tuple
- * as deleted and do not process.
+ * not complain, but few callers expect
+ * HeapTupleInvisible, and we're not one of them). So for
+ * now, treat the tuple as deleted and do not process.
*/
ReleaseBuffer(buffer);
return NULL;
@@ -2563,8 +2563,8 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
if (fdwroutine->RefetchForeignRow == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot lock rows in foreign table \"%s\"",
- RelationGetRelationName(erm->relation))));
+ errmsg("cannot lock rows in foreign table \"%s\"",
+ RelationGetRelationName(erm->relation))));
copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
erm,
datum,
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index d414e20f12..0f911f210b 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -182,8 +182,8 @@ static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
/* ----------------------------------------------------------------
@@ -3034,10 +3034,10 @@ ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
bool *isNull,
ExprDoneCond *isDone)
{
- int result = 0;
- int attnum = 0;
- Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
- ListCell *lc;
+ int result = 0;
+ int attnum = 0;
+ Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
+ ListCell *lc;
if (isDone)
*isDone = ExprSingleResult;
@@ -4529,7 +4529,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
GroupingFuncExprState *grp_state = makeNode(GroupingFuncExprState);
Agg *agg = NULL;
- if (!parent || !IsA(parent, AggState) || !IsA(parent->plan, Agg))
+ if (!parent || !IsA(parent, AggState) ||!IsA(parent->plan, Agg))
elog(ERROR, "parent of GROUPING is not Agg node");
grp_state->aggstate = (AggState *) parent;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 3963408b18..7e15b797a7 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -645,7 +645,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
* overall targetlist's econtext. GroupingFunc arguments are never
* evaluated at all.
*/
- if (IsA(node, Aggref) || IsA(node, GroupingFunc))
+ if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
return false;
if (IsA(node, WindowFunc))
return false;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 01a1e67f09..31d74e9477 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -337,11 +337,11 @@ typedef struct AggStatePerPhaseData
{
int numsets; /* number of grouping sets (or 0) */
int *gset_lengths; /* lengths of grouping sets */
- Bitmapset **grouped_cols; /* column groupings for rollup */
+ Bitmapset **grouped_cols; /* column groupings for rollup */
FmgrInfo *eqfunctions; /* per-grouping-field equality fns */
Agg *aggnode; /* Agg node for phase data */
Sort *sortnode; /* Sort node for input ordering for phase */
-} AggStatePerPhaseData;
+} AggStatePerPhaseData;
/*
* To implement hashed aggregation, we need a hashtable that stores a
@@ -380,12 +380,12 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
- TupleTableSlot *slot,
- int currentSet);
+ TupleTableSlot *slot,
+ int currentSet);
static void finalize_aggregates(AggState *aggstate,
- AggStatePerAgg peragg,
- AggStatePerGroup pergroup,
- int currentSet);
+ AggStatePerAgg peragg,
+ AggStatePerGroup pergroup,
+ int currentSet);
static TupleTableSlot *project_aggregates(AggState *aggstate);
static Bitmapset *find_unaggregated_cols(AggState *aggstate);
static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
@@ -441,12 +441,12 @@ initialize_phase(AggState *aggstate, int newphase)
}
/*
- * If this isn't the last phase, we need to sort appropriately for the next
- * phase in sequence.
+ * If this isn't the last phase, we need to sort appropriately for the
+ * next phase in sequence.
*/
if (newphase < aggstate->numphases - 1)
{
- Sort *sortnode = aggstate->phases[newphase+1].sortnode;
+ Sort *sortnode = aggstate->phases[newphase + 1].sortnode;
PlanState *outerNode = outerPlanState(aggstate);
TupleDesc tupDesc = ExecGetResultType(outerNode);
@@ -540,9 +540,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
/*
* (Re)set transValue to the initial value.
*
- * Note that when the initial value is pass-by-ref, we must copy
- * it (into the aggcontext) since we will pfree the transValue
- * later.
+ * Note that when the initial value is pass-by-ref, we must copy it (into
+ * the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@@ -551,7 +550,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(
- aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
+ aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@@ -560,11 +559,11 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
- * If the initial value for the transition state doesn't exist in
- * the pg_aggregate table then we will let the first non-NULL
- * value returned from the outer procNode become the initial
- * value. (This is useful for aggregates like max() and min().)
- * The noTransValue flag signals that we still need to do this.
+ * If the initial value for the transition state doesn't exist in the
+ * pg_aggregate table then we will let the first non-NULL value returned
+ * from the outer procNode become the initial value. (This is useful for
+ * aggregates like max() and min().) The noTransValue flag signals that we
+ * still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@@ -586,8 +585,8 @@ initialize_aggregates(AggState *aggstate,
int numReset)
{
int aggno;
- int numGroupingSets = Max(aggstate->phase->numsets, 1);
- int setno = 0;
+ int numGroupingSets = Max(aggstate->phase->numsets, 1);
+ int setno = 0;
if (numReset < 1)
numReset = numGroupingSets;
@@ -655,7 +654,7 @@ advance_transition_function(AggState *aggstate,
* do not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(
- aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
+ aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
pergroupstate->transValue = datumCopy(fcinfo->arg[1],
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@@ -730,9 +729,9 @@ static void
advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
{
int aggno;
- int setno = 0;
- int numGroupingSets = Max(aggstate->phase->numsets, 1);
- int numAggs = aggstate->numaggs;
+ int setno = 0;
+ int numGroupingSets = Max(aggstate->phase->numsets, 1);
+ int numAggs = aggstate->numaggs;
for (aggno = 0; aggno < numAggs; aggno++)
{
@@ -1134,7 +1133,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
{
if (aggstate->phase->grouped_cols)
{
- Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
+ Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
aggstate->grouped_cols = grouped_cols;
@@ -1156,7 +1155,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
foreach(lc, aggstate->all_grouped_cols)
{
- int attnum = lfirst_int(lc);
+ int attnum = lfirst_int(lc);
if (!bms_is_member(attnum, grouped_cols))
slot->tts_isnull[attnum - 1] = true;
@@ -1225,8 +1224,7 @@ project_aggregates(AggState *aggstate)
ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
/*
- * Check the qual (HAVING clause); if the group does not match, ignore
- * it.
+ * Check the qual (HAVING clause); if the group does not match, ignore it.
*/
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
@@ -1286,7 +1284,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
*colnos = bms_add_member(*colnos, var->varattno);
return false;
}
- if (IsA(node, Aggref) || IsA(node, GroupingFunc))
+ if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
{
/* do not descend into aggregate exprs */
return false;
@@ -1319,7 +1317,7 @@ build_hash_table(AggState *aggstate)
aggstate->hashfunctions,
node->numGroups,
entrysize,
- aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
+ aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
tmpmem);
}
@@ -1521,8 +1519,8 @@ agg_retrieve_direct(AggState *aggstate)
/*
* get state info from node
*
- * econtext is the per-output-tuple expression context
- * tmpcontext is the per-input-tuple expression context
+ * econtext is the per-output-tuple expression context tmpcontext is the
+ * per-input-tuple expression context
*/
econtext = aggstate->ss.ps.ps_ExprContext;
tmpcontext = aggstate->tmpcontext;
@@ -1615,17 +1613,17 @@ agg_retrieve_direct(AggState *aggstate)
* If a subgroup for the current grouping set is present, project it.
*
* We have a new group if:
- * - we're out of input but haven't projected all grouping sets
- * (checked above)
+ * - we're out of input but haven't projected all grouping sets
+ * (checked above)
* OR
- * - we already projected a row that wasn't from the last grouping
- * set
- * AND
- * - the next grouping set has at least one grouping column (since
- * empty grouping sets project only once input is exhausted)
- * AND
- * - the previous and pending rows differ on the grouping columns
- * of the next grouping set
+ * - we already projected a row that wasn't from the last grouping
+ * set
+ * AND
+ * - the next grouping set has at least one grouping column (since
+ * empty grouping sets project only once input is exhausted)
+ * AND
+ * - the previous and pending rows differ on the grouping columns
+ * of the next grouping set
*/
if (aggstate->input_done ||
(node->aggstrategy == AGG_SORTED &&
@@ -1729,7 +1727,8 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
- aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
+ aggstate->grp_firstTuple = NULL; /* don't keep two
+ * pointers */
/* set up for first advance_aggregates call */
tmpcontext->ecxt_outertuple = firstSlot;
@@ -1774,7 +1773,7 @@ agg_retrieve_direct(AggState *aggstate)
node->numCols,
node->grpColIdx,
aggstate->phase->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
{
aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
break;
@@ -1787,8 +1786,8 @@ agg_retrieve_direct(AggState *aggstate)
* Use the representative input tuple for any references to
* non-aggregated input columns in aggregate direct args, the node
* qual, and the tlist. (If we are not grouping, and there are no
- * input rows at all, we will come here with an empty firstSlot ...
- * but if not grouping, there can't be any references to
+ * input rows at all, we will come here with an empty firstSlot
+ * ... but if not grouping, there can't be any references to
* non-aggregated input columns, so no problem.)
*/
econtext->ecxt_outertuple = firstSlot;
@@ -1803,8 +1802,8 @@ agg_retrieve_direct(AggState *aggstate)
finalize_aggregates(aggstate, peragg, pergroup, currentSet);
/*
- * If there's no row to project right now, we must continue rather than
- * returning a null since there might be more groups.
+ * If there's no row to project right now, we must continue rather
+ * than returning a null since there might be more groups.
*/
result = project_aggregates(aggstate);
if (result)
@@ -1996,7 +1995,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
foreach(l, node->chain)
{
- Agg *agg = lfirst(l);
+ Agg *agg = lfirst(l);
numGroupingSets = Max(numGroupingSets,
list_length(agg->groupingSets));
@@ -2074,7 +2073,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
ExecAssignScanTypeFromOuterPlan(&aggstate->ss);
if (node->chain)
ExecSetSlotDescriptor(aggstate->sort_slot,
- aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
+ aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
/*
* Initialize result tuple type and projection info.
@@ -2111,13 +2110,13 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
for (phase = 0; phase < numPhases; ++phase)
{
AggStatePerPhase phasedata = &aggstate->phases[phase];
- Agg *aggnode;
- Sort *sortnode;
- int num_sets;
+ Agg *aggnode;
+ Sort *sortnode;
+ int num_sets;
if (phase > 0)
{
- aggnode = list_nth(node->chain, phase-1);
+ aggnode = list_nth(node->chain, phase - 1);
sortnode = (Sort *) aggnode->plan.lefttree;
Assert(IsA(sortnode, Sort));
}
@@ -2137,8 +2136,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
i = 0;
foreach(l, aggnode->groupingSets)
{
- int current_length = list_length(lfirst(l));
- Bitmapset *cols = NULL;
+ int current_length = list_length(lfirst(l));
+ Bitmapset *cols = NULL;
/* planner forces this to be correct */
for (j = 0; j < current_length; ++j)
@@ -2288,8 +2287,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
/* Begin filling in the peraggstate data */
peraggstate->aggrefstate = aggrefstate;
peraggstate->aggref = aggref;
- peraggstate->sortstates =(Tuplesortstate**)
- palloc0(sizeof(Tuplesortstate*) * numGroupingSets);
+ peraggstate->sortstates = (Tuplesortstate **)
+ palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
for (currentsortno = 0; currentsortno < numGroupingSets; currentsortno++)
peraggstate->sortstates[currentsortno] = NULL;
@@ -2643,11 +2642,11 @@ void
ExecReScanAgg(AggState *node)
{
ExprContext *econtext = node->ss.ps.ps_ExprContext;
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
Agg *aggnode = (Agg *) node->ss.ps.plan;
int aggno;
- int numGroupingSets = Max(node->maxsets, 1);
- int setno;
+ int numGroupingSets = Max(node->maxsets, 1);
+ int setno;
node->agg_done = false;
@@ -2732,7 +2731,7 @@ ExecReScanAgg(AggState *node)
* Reset the per-group state (in particular, mark transvalues null)
*/
MemSet(node->pergroup, 0,
- sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
+ sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
/* reset to phase 0 */
initialize_phase(node, 0);
@@ -2775,8 +2774,9 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
{
if (aggcontext)
{
- AggState *aggstate = ((AggState *) fcinfo->context);
- ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
+ AggState *aggstate = ((AggState *) fcinfo->context);
+ ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
+
*aggcontext = cxt->ecxt_per_tuple_memory;
}
return AGG_CONTEXT_AGGREGATE;
@@ -2862,7 +2862,7 @@ AggRegisterCallback(FunctionCallInfo fcinfo,
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
AggState *aggstate = (AggState *) fcinfo->context;
- ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
+ ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
RegisterExprContextCallback(cxt, func, arg);
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 40a06f163a..4597437178 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -449,7 +449,7 @@ ExecBitmapHeapScan(BitmapHeapScanState *node)
void
ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
/* rescan to release any page pin */
heap_rescan(node->ss.ss_currentScanDesc, NULL);
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index 3f87716b8f..5e4785423e 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -280,7 +280,7 @@ ExecEndGroup(GroupState *node)
void
ExecReScanGroup(GroupState *node)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
node->grp_done = FALSE;
node->ss.ps.ps_TupFromTlist = false;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b1f6c82432..2a04924054 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -500,8 +500,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
/*
- * If there's not enough space to store the projected number of tuples
- * and the required bucket headers, we will need multiple batches.
+ * If there's not enough space to store the projected number of tuples and
+ * the required bucket headers, we will need multiple batches.
*/
if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
{
@@ -512,8 +512,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
long bucket_size;
/*
- * Estimate the number of buckets we'll want to have when work_mem
- * is entirely full. Each bucket will contain a bucket pointer plus
+ * Estimate the number of buckets we'll want to have when work_mem is
+ * entirely full. Each bucket will contain a bucket pointer plus
* NTUP_PER_BUCKET tuples, whose projected size already includes
* overhead for the hash code, pointer to the next tuple, etc.
*/
@@ -527,9 +527,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
* Buckets are simple pointers to hashjoin tuples, while tupsize
* includes the pointer, hash code, and MinimalTupleData. So buckets
* should never really exceed 25% of work_mem (even for
- * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are
- * not 2^N bytes, where we might get more * because of doubling.
- * So let's look for 50% here.
+ * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
+ * 2^N bytes, where we might get more * because of doubling. So let's
+ * look for 50% here.
*/
Assert(bucket_bytes <= hash_table_bytes / 2);
@@ -655,7 +655,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
hashtable->buckets = repalloc(hashtable->buckets,
- sizeof(HashJoinTuple) * hashtable->nbuckets);
+ sizeof(HashJoinTuple) * hashtable->nbuckets);
}
/*
@@ -671,6 +671,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
while (oldchunks != NULL)
{
HashMemoryChunk nextchunk = oldchunks->next;
+
/* position within the buffer (up to oldchunks->used) */
size_t idx = 0;
@@ -691,7 +692,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
{
/* keep tuple in memory - copy it into the new chunk */
HashJoinTuple copyTuple =
- (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+ (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+
memcpy(copyTuple, hashTuple, hashTupleSize);
/* and add it back to the appropriate bucket */
@@ -749,15 +751,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
static void
ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
{
- HashMemoryChunk chunk;
+ HashMemoryChunk chunk;
/* do nothing if not an increase (it's called increase for a reason) */
if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
return;
/*
- * We already know the optimal number of buckets, so let's just
- * compute the log2_nbuckets for it.
+ * We already know the optimal number of buckets, so let's just compute
+ * the log2_nbuckets for it.
*/
hashtable->nbuckets = hashtable->nbuckets_optimal;
hashtable->log2_nbuckets = my_log2(hashtable->nbuckets_optimal);
@@ -771,14 +773,14 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
#endif
/*
- * Just reallocate the proper number of buckets - we don't need to
- * walk through them - we can walk the dense-allocated chunks
- * (just like in ExecHashIncreaseNumBatches, but without all the
- * copying into new chunks)
+ * Just reallocate the proper number of buckets - we don't need to walk
+ * through them - we can walk the dense-allocated chunks (just like in
+ * ExecHashIncreaseNumBatches, but without all the copying into new
+ * chunks)
*/
hashtable->buckets =
(HashJoinTuple *) repalloc(hashtable->buckets,
- hashtable->nbuckets * sizeof(HashJoinTuple));
+ hashtable->nbuckets * sizeof(HashJoinTuple));
memset(hashtable->buckets, 0, sizeof(void *) * hashtable->nbuckets);
@@ -786,12 +788,13 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
{
/* process all tuples stored in this chunk */
- size_t idx = 0;
+ size_t idx = 0;
+
while (idx < chunk->used)
{
HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
- int bucketno;
- int batchno;
+ int bucketno;
+ int batchno;
ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
&bucketno, &batchno);
@@ -869,10 +872,11 @@ ExecHashTableInsert(HashJoinTable hashtable,
/*
* Increase the (optimal) number of buckets if we just exceeded the
- * NTUP_PER_BUCKET threshold, but only when there's still a single batch.
+ * NTUP_PER_BUCKET threshold, but only when there's still a single
+ * batch.
*/
if ((hashtable->nbatch == 1) &&
- (hashtable->nbuckets_optimal <= INT_MAX/2) && /* overflow protection */
+ (hashtable->nbuckets_optimal <= INT_MAX / 2) && /* overflow protection */
(ntuples >= (hashtable->nbuckets_optimal * NTUP_PER_BUCKET)))
{
hashtable->nbuckets_optimal *= 2;
@@ -1636,7 +1640,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
- offsetof(HashMemoryChunkData, data) + size);
+ offsetof(HashMemoryChunkData, data) + size);
newChunk->maxlen = size;
newChunk->used = 0;
newChunk->ntuples = 0;
@@ -1663,15 +1667,15 @@ dense_alloc(HashJoinTable hashtable, Size size)
}
/*
- * See if we have enough space for it in the current chunk (if any).
- * If not, allocate a fresh chunk.
+ * See if we have enough space for it in the current chunk (if any). If
+ * not, allocate a fresh chunk.
*/
if ((hashtable->chunks == NULL) ||
(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
{
/* allocate new chunk and put it at the beginning of the list */
newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
- offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
+ offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
newChunk->maxlen = HASH_CHUNK_SIZE;
newChunk->used = size;
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 976c77b76c..9f54c4633e 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -106,8 +106,8 @@ IndexOnlyNext(IndexOnlyScanState *node)
* away, because the tuple is still visible until the deleting
* transaction commits or the statement ends (if it's our
* transaction). In either case, the lock on the VM buffer will have
- * been released (acting as a write barrier) after clearing the
- * bit. And for us to have a snapshot that includes the deleting
+ * been released (acting as a write barrier) after clearing the bit.
+ * And for us to have a snapshot that includes the deleting
* transaction (making the tuple invisible), we must have acquired
* ProcArrayLock after that time, acting as a read barrier.
*
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 79133e08b6..7fd90415f9 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -288,9 +288,9 @@ next_indextuple:
* Can we return this tuple immediately, or does it need to be pushed
* to the reorder queue? If the ORDER BY expression values returned
* by the index were inaccurate, we can't return it yet, because the
- * next tuple from the index might need to come before this one.
- * Also, we can't return it yet if there are any smaller tuples in the
- * queue already.
+ * next tuple from the index might need to come before this one. Also,
+ * we can't return it yet if there are any smaller tuples in the queue
+ * already.
*/
if (!was_exact || (topmost && cmp_orderbyvals(lastfetched_vals,
lastfetched_nulls,
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 7bcf99f488..b9b0f06882 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -196,11 +196,12 @@ lnext:
* case, so as to avoid the "Halloween problem" of repeated
* update attempts. In the latter case it might be sensible
* to fetch the updated tuple instead, but doing so would
- * require changing heap_update and heap_delete to not complain
- * about updating "invisible" tuples, which seems pretty scary
- * (heap_lock_tuple will not complain, but few callers expect
- * HeapTupleInvisible, and we're not one of them). So for now,
- * treat the tuple as deleted and do not process.
+ * require changing heap_update and heap_delete to not
+ * complain about updating "invisible" tuples, which seems
+ * pretty scary (heap_lock_tuple will not complain, but few
+ * callers expect HeapTupleInvisible, and we're not one of
+ * them). So for now, treat the tuple as deleted and do not
+ * process.
*/
goto lnext;
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 8ff4352a66..b2b5aa7e8e 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -317,7 +317,7 @@ ExecMaterialRestrPos(MaterialState *node)
void
ExecReScanMaterial(MaterialState *node)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index 0c814f0e72..bdf76808a8 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -139,10 +139,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
/*
* It isn't feasible to perform abbreviated key conversion, since
- * tuples are pulled into mergestate's binary heap as needed. It would
- * likely be counter-productive to convert tuples into an abbreviated
- * representation as they're pulled up, so opt out of that additional
- * optimization entirely.
+ * tuples are pulled into mergestate's binary heap as needed. It
+ * would likely be counter-productive to convert tuples into an
+ * abbreviated representation as they're pulled up, so opt out of that
+ * additional optimization entirely.
*/
sortKey->abbreviate = false;
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 15742c574a..34b6cf61e0 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -232,8 +232,8 @@ MJExamineQuals(List *mergeclauses,
/*
* sortsupport routine must know if abbreviation optimization is
* applicable in principle. It is never applicable for merge joins
- * because there is no convenient opportunity to convert to alternative
- * representation.
+ * because there is no convenient opportunity to convert to
+ * alternative representation.
*/
clause->ssup.abbreviate = false;
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 8112fb45b8..874ca6a69b 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -180,7 +180,7 @@ ExecCheckHeapTupleVisible(EState *estate,
if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("could not serialize access due to concurrent update")));
+ errmsg("could not serialize access due to concurrent update")));
}
/*
@@ -321,8 +321,8 @@ ExecInsert(ModifyTableState *mtstate,
/*
* Check any RLS INSERT WITH CHECK policies
*
- * ExecWithCheckOptions() will skip any WCOs which are not of
- * the kind we are looking for at this point.
+ * ExecWithCheckOptions() will skip any WCOs which are not of the kind
+ * we are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
@@ -383,9 +383,9 @@ ExecInsert(ModifyTableState *mtstate,
else
{
/*
- * In case of ON CONFLICT DO NOTHING, do nothing.
- * However, verify that the tuple is visible to the
- * executor's MVCC snapshot at higher isolation levels.
+ * In case of ON CONFLICT DO NOTHING, do nothing. However,
+ * verify that the tuple is visible to the executor's MVCC
+ * snapshot at higher isolation levels.
*/
Assert(onconflict == ONCONFLICT_NOTHING);
ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
@@ -411,7 +411,7 @@ ExecInsert(ModifyTableState *mtstate,
/* insert index entries for tuple */
recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
- estate, true, &specConflict,
+ estate, true, &specConflict,
arbiterIndexes);
/* adjust the tuple's state accordingly */
@@ -475,17 +475,16 @@ ExecInsert(ModifyTableState *mtstate,
list_free(recheckIndexes);
/*
- * Check any WITH CHECK OPTION constraints from parent views. We
- * are required to do this after testing all constraints and
- * uniqueness violations per the SQL spec, so we do it after actually
- * inserting the record into the heap and all indexes.
+ * Check any WITH CHECK OPTION constraints from parent views. We are
+ * required to do this after testing all constraints and uniqueness
+ * violations per the SQL spec, so we do it after actually inserting the
+ * record into the heap and all indexes.
*
- * ExecWithCheckOptions will elog(ERROR) if a violation is found, so
- * the tuple will never be seen, if it violates the WITH CHECK
- * OPTION.
+ * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
+ * tuple will never be seen, if it violates the WITH CHECK OPTION.
*
- * ExecWithCheckOptions() will skip any WCOs which are not of
- * the kind we are looking for at this point.
+ * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
+ * are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@@ -860,8 +859,8 @@ ExecUpdate(ItemPointer tupleid,
* triggers then trigger.c will have done heap_lock_tuple to lock the
* correct tuple, so there's no need to do them again.)
*
- * ExecWithCheckOptions() will skip any WCOs which are not of
- * the kind we are looking for at this point.
+ * ExecWithCheckOptions() will skip any WCOs which are not of the kind
+ * we are looking for at this point.
*/
lreplace:;
if (resultRelInfo->ri_WithCheckOptions != NIL)
@@ -990,13 +989,13 @@ lreplace:;
list_free(recheckIndexes);
/*
- * Check any WITH CHECK OPTION constraints from parent views. We
- * are required to do this after testing all constraints and
- * uniqueness violations per the SQL spec, so we do it after actually
- * updating the record in the heap and all indexes.
+ * Check any WITH CHECK OPTION constraints from parent views. We are
+ * required to do this after testing all constraints and uniqueness
+ * violations per the SQL spec, so we do it after actually updating the
+ * record in the heap and all indexes.
*
- * ExecWithCheckOptions() will skip any WCOs which are not of
- * the kind we are looking for at this point.
+ * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
+ * are looking for at this point.
*/
if (resultRelInfo->ri_WithCheckOptions != NIL)
ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@@ -1143,9 +1142,9 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Make tuple and any needed join variables available to ExecQual and
* ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
- * the target's existing tuple is installed in the scantuple. EXCLUDED has
- * been made to reference INNER_VAR in setrefs.c, but there is no other
- * redirection.
+ * the target's existing tuple is installed in the scantuple. EXCLUDED
+ * has been made to reference INNER_VAR in setrefs.c, but there is no
+ * other redirection.
*/
econtext->ecxt_scantuple = mtstate->mt_existing;
econtext->ecxt_innertuple = excludedSlot;
@@ -1430,7 +1429,7 @@ ExecModifyTable(ModifyTableState *node)
{
case CMD_INSERT:
slot = ExecInsert(node, slot, planSlot,
- node->mt_arbiterindexes, node->mt_onconflict,
+ node->mt_arbiterindexes, node->mt_onconflict,
estate, node->canSetTag);
break;
case CMD_UPDATE:
diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c
index fc89d1dca0..4c1c5237b7 100644
--- a/src/backend/executor/nodeSamplescan.c
+++ b/src/backend/executor/nodeSamplescan.c
@@ -27,7 +27,7 @@
#include "utils/tqual.h"
static void InitScanRelation(SampleScanState *node, EState *estate,
- int eflags, TableSampleClause *tablesample);
+ int eflags, TableSampleClause *tablesample);
static TupleTableSlot *SampleNext(SampleScanState *node);
@@ -45,9 +45,9 @@ static TupleTableSlot *SampleNext(SampleScanState *node);
static TupleTableSlot *
SampleNext(SampleScanState *node)
{
- TupleTableSlot *slot;
- TableSampleDesc *tsdesc;
- HeapTuple tuple;
+ TupleTableSlot *slot;
+ TableSampleDesc *tsdesc;
+ HeapTuple tuple;
/*
* get information from the scan state
@@ -60,7 +60,8 @@ SampleNext(SampleScanState *node)
if (tuple)
ExecStoreTuple(tuple, /* tuple to store */
slot, /* slot to store in */
- tsdesc->heapScan->rs_cbuf, /* buffer associated with this tuple */
+ tsdesc->heapScan->rs_cbuf, /* buffer associated
+ * with this tuple */
false); /* don't pfree this pointer */
else
ExecClearTuple(slot);
@@ -112,7 +113,7 @@ InitScanRelation(SampleScanState *node, EState *estate, int eflags,
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SampleScan *) node->ss.ps.plan)->scanrelid,
+ ((SampleScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index 732f3c38db..af1dccfb31 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -290,7 +290,7 @@ ExecSortRestrPos(SortState *node)
void
ExecReScanSort(SortState *node)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
/*
* If we haven't sorted yet, just return. If outerplan's chgParam is not
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index bf0c98d878..ecf96f8c19 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -2057,7 +2057,7 @@ ExecEndWindowAgg(WindowAggState *node)
void
ExecReScanWindowAgg(WindowAggState *node)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
ExprContext *econtext = node->ss.ps.ps_ExprContext;
node->all_done = false;
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 472de41f9b..d544ad9c10 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1344,11 +1344,11 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
}
/*
- * If told to be read-only, or in parallel mode, verify that this query
- * is in fact read-only. This can't be done earlier because we need to
- * look at the finished, planned queries. (In particular, we don't want
- * to do it between GetCachedPlan and PortalDefineQuery, because throwing
- * an error between those steps would result in leaking our plancache
+ * If told to be read-only, or in parallel mode, verify that this query is
+ * in fact read-only. This can't be done earlier because we need to look
+ * at the finished, planned queries. (In particular, we don't want to do
+ * it between GetCachedPlan and PortalDefineQuery, because throwing an
+ * error between those steps would result in leaking our plancache
* refcount.)
*/
if (read_only || IsInParallelMode())
@@ -1365,8 +1365,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag(pstmt))));
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag(pstmt))));
else
PreventCommandIfParallelMode(CreateCommandTag(pstmt));
}
diff --git a/src/backend/lib/bipartite_match.c b/src/backend/lib/bipartite_match.c
index 1adba78ff3..037dd1de30 100644
--- a/src/backend/lib/bipartite_match.c
+++ b/src/backend/lib/bipartite_match.c
@@ -51,14 +51,14 @@ BipartiteMatch(int u_size, int v_size, short **adjacency)
while (hk_breadth_search(state))
{
- int u;
+ int u;
for (u = 1; u <= u_size; ++u)
if (state->pair_uv[u] == 0)
if (hk_depth_search(state, u, 1))
state->matching++;
- CHECK_FOR_INTERRUPTS(); /* just in case */
+ CHECK_FOR_INTERRUPTS(); /* just in case */
}
return state;
@@ -108,18 +108,18 @@ hk_breadth_search(BipartiteMatchState *state)
if (distance[u] < distance[0])
{
- short *u_adj = state->adjacency[u];
- int i = u_adj ? u_adj[0] : 0;
+ short *u_adj = state->adjacency[u];
+ int i = u_adj ? u_adj[0] : 0;
for (; i > 0; --i)
{
- int u_next = state->pair_vu[u_adj[i]];
+ int u_next = state->pair_vu[u_adj[i]];
if (isinf(distance[u_next]))
{
distance[u_next] = 1 + distance[u];
queue[qhead++] = u_next;
- Assert(qhead <= usize+2);
+ Assert(qhead <= usize + 2);
}
}
}
@@ -145,11 +145,11 @@ hk_depth_search(BipartiteMatchState *state, int u, int depth)
for (; i > 0; --i)
{
- int v = u_adj[i];
+ int v = u_adj[i];
if (distance[pair_vu[v]] == distance[u] + 1)
{
- if (hk_depth_search(state, pair_vu[v], depth+1))
+ if (hk_depth_search(state, pair_vu[v], depth + 1))
{
pair_vu[v] = u;
pair_uv[u] = v;
diff --git a/src/backend/lib/hyperloglog.c b/src/backend/lib/hyperloglog.c
index 4b37048c37..718afb84e0 100644
--- a/src/backend/lib/hyperloglog.c
+++ b/src/backend/lib/hyperloglog.c
@@ -153,7 +153,7 @@ estimateHyperLogLog(hyperLogLogState *cState)
if (result <= (5.0 / 2.0) * cState->nRegisters)
{
/* Small range correction */
- int zero_count = 0;
+ int zero_count = 0;
for (i = 0; i < cState->nRegisters; i++)
{
@@ -183,7 +183,7 @@ estimateHyperLogLog(hyperLogLogState *cState)
void
mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState)
{
- int r;
+ int r;
if (cState->nRegisters != oState->nRegisters)
elog(ERROR, "number of registers mismatch: %zu != %zu",
@@ -216,7 +216,7 @@ mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState)
static inline uint8
rho(uint32 x, uint8 b)
{
- uint8 j = 1;
+ uint8 j = 1;
while (j <= b && !(x & 0x80000000))
{
diff --git a/src/backend/lib/pairingheap.c b/src/backend/lib/pairingheap.c
index 17278fde6e..3d8a5ea561 100644
--- a/src/backend/lib/pairingheap.c
+++ b/src/backend/lib/pairingheap.c
@@ -295,7 +295,7 @@ merge_children(pairingheap *heap, pairingheap_node *children)
static void
pairingheap_dump_recurse(StringInfo buf,
pairingheap_node *node,
- void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+ void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
void *opaque,
int depth,
pairingheap_node *prev_or_parent)
@@ -316,7 +316,7 @@ pairingheap_dump_recurse(StringInfo buf,
char *
pairingheap_dump(pairingheap *heap,
- void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+ void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
void *opaque)
{
StringInfoData buf;
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 40f30229c0..4699efacd0 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -371,7 +371,7 @@ ClientAuthentication(Port *port)
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
hostinfo, port->user_name,
- port->ssl_in_use ? _("SSL on") : _("SSL off"))));
+ port->ssl_in_use ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
@@ -387,7 +387,7 @@ ClientAuthentication(Port *port)
errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, port->user_name,
port->database_name,
- port->ssl_in_use ? _("SSL on") : _("SSL off"))));
+ port->ssl_in_use ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 2646555f14..f0774fe8c9 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -77,10 +77,10 @@
#include "utils/memutils.h"
-static int my_sock_read(BIO *h, char *buf, int size);
-static int my_sock_write(BIO *h, const char *buf, int size);
+static int my_sock_read(BIO *h, char *buf, int size);
+static int my_sock_write(BIO *h, const char *buf, int size);
static BIO_METHOD *my_BIO_s_socket(void);
-static int my_SSL_set_fd(Port *port, int fd);
+static int my_SSL_set_fd(Port *port, int fd);
static DH *load_dh_file(int keylength);
static DH *load_dh_buffer(const char *, size_t);
@@ -571,10 +571,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
int err;
/*
- * If SSL renegotiations are enabled and we're getting close to the
- * limit, start one now; but avoid it if there's one already in
- * progress. Request the renegotiation 1kB before the limit has
- * actually expired.
+ * If SSL renegotiations are enabled and we're getting close to the limit,
+ * start one now; but avoid it if there's one already in progress.
+ * Request the renegotiation 1kB before the limit has actually expired.
*/
if (ssl_renegotiation_limit && !in_ssl_renegotiation &&
port->count > (ssl_renegotiation_limit - 1) * 1024L)
@@ -583,12 +582,12 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
/*
* The way we determine that a renegotiation has completed is by
- * observing OpenSSL's internal renegotiation counter. Make sure
- * we start out at zero, and assume that the renegotiation is
- * complete when the counter advances.
+ * observing OpenSSL's internal renegotiation counter. Make sure we
+ * start out at zero, and assume that the renegotiation is complete
+ * when the counter advances.
*
- * OpenSSL provides SSL_renegotiation_pending(), but this doesn't
- * seem to work in testing.
+ * OpenSSL provides SSL_renegotiation_pending(), but this doesn't seem
+ * to work in testing.
*/
SSL_clear_num_renegotiations(port->ssl);
@@ -658,9 +657,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
}
/*
- * if renegotiation is still ongoing, and we've gone beyond the
- * limit, kill the connection now -- continuing to use it can be
- * considered a security problem.
+ * if renegotiation is still ongoing, and we've gone beyond the limit,
+ * kill the connection now -- continuing to use it can be considered a
+ * security problem.
*/
if (in_ssl_renegotiation &&
port->count > ssl_renegotiation_limit * 1024L)
@@ -700,7 +699,7 @@ my_sock_read(BIO *h, char *buf, int size)
if (buf != NULL)
{
- res = secure_raw_read(((Port *)h->ptr), buf, size);
+ res = secure_raw_read(((Port *) h->ptr), buf, size);
BIO_clear_retry_flags(h);
if (res <= 0)
{
@@ -1044,7 +1043,7 @@ SSLerrmessage(void)
int
be_tls_get_cipher_bits(Port *port)
{
- int bits;
+ int bits;
if (port->ssl)
{
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 4e7acbe080..4a650cc001 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -51,7 +51,7 @@ char *ssl_crl_file;
int ssl_renegotiation_limit;
#ifdef USE_SSL
-bool ssl_loaded_verify_locations = false;
+bool ssl_loaded_verify_locations = false;
#endif
/* GUC variable controlling SSL cipher list */
@@ -146,7 +146,7 @@ retry:
/* In blocking mode, wait until the socket is ready */
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
- int w;
+ int w;
Assert(waitfor);
@@ -162,8 +162,8 @@ retry:
/*
* We'll retry the read. Most likely it will return immediately
- * because there's still no data available, and we'll wait
- * for the socket to become ready again.
+ * because there's still no data available, and we'll wait for the
+ * socket to become ready again.
*/
}
goto retry;
@@ -225,7 +225,7 @@ retry:
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
- int w;
+ int w;
Assert(waitfor);
@@ -241,8 +241,8 @@ retry:
/*
* We'll retry the write. Most likely it will return immediately
- * because there's still no data available, and we'll wait
- * for the socket to become ready again.
+ * because there's still no data available, and we'll wait for the
+ * socket to become ready again.
*/
}
goto retry;
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index c23938580b..7a935f34b5 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -1382,8 +1382,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
* situations and is generally considered bad practice. We keep the
* capability around for backwards compatibility, but we might want to
* remove it at some point in the future. Users who still need to strip
- * the realm off would be better served by using an appropriate regex in
- * a pg_ident.conf mapping.
+ * the realm off would be better served by using an appropriate regex in a
+ * pg_ident.conf mapping.
*/
if (hbaline->auth_method == uaGSS ||
hbaline->auth_method == uaSSPI)
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 6667cf94c6..a4b37ed5a2 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -1125,7 +1125,7 @@ pq_getstring(StringInfo s)
/* --------------------------------
- * pq_startmsgread - begin reading a message from the client.
+ * pq_startmsgread - begin reading a message from the client.
*
* This must be called before any of the pq_get* functions.
* --------------------------------
@@ -1140,7 +1140,7 @@ pq_startmsgread(void)
if (PqCommReadingMsg)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("terminating connection because protocol sync was lost")));
+ errmsg("terminating connection because protocol sync was lost")));
PqCommReadingMsg = true;
}
diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c
index f12f2d582e..9ca6b7ce0d 100644
--- a/src/backend/libpq/pqmq.c
+++ b/src/backend/libpq/pqmq.c
@@ -107,17 +107,16 @@ mq_is_send_pending(void)
static int
mq_putmessage(char msgtype, const char *s, size_t len)
{
- shm_mq_iovec iov[2];
- shm_mq_result result;
+ shm_mq_iovec iov[2];
+ shm_mq_result result;
/*
- * If we're sending a message, and we have to wait because the
- * queue is full, and then we get interrupted, and that interrupt
- * results in trying to send another message, we respond by detaching
- * the queue. There's no way to return to the original context, but
- * even if there were, just queueing the message would amount to
- * indefinitely postponing the response to the interrupt. So we do
- * this instead.
+ * If we're sending a message, and we have to wait because the queue is
+ * full, and then we get interrupted, and that interrupt results in trying
+ * to send another message, we respond by detaching the queue. There's no
+ * way to return to the original context, but even if there were, just
+ * queueing the message would amount to indefinitely postponing the
+ * response to the interrupt. So we do this instead.
*/
if (pq_mq_busy)
{
@@ -166,10 +165,10 @@ mq_putmessage_noblock(char msgtype, const char *s, size_t len)
{
/*
* While the shm_mq machinery does support sending a message in
- * non-blocking mode, there's currently no way to try sending beginning
- * to send the message that doesn't also commit us to completing the
- * transmission. This could be improved in the future, but for now
- * we don't need it.
+ * non-blocking mode, there's currently no way to try sending beginning to
+ * send the message that doesn't also commit us to completing the
+ * transmission. This could be improved in the future, but for now we
+ * don't need it.
*/
elog(ERROR, "not currently supported");
}
@@ -201,7 +200,7 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata)
/* Loop over fields and extract each one. */
for (;;)
{
- char code = pq_getmsgbyte(msg);
+ char code = pq_getmsgbyte(msg);
const char *value;
if (code == '\0')
@@ -215,9 +214,9 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata)
{
case PG_DIAG_SEVERITY:
if (strcmp(value, "DEBUG") == 0)
- edata->elevel = DEBUG1; /* or some other DEBUG level */
+ edata->elevel = DEBUG1; /* or some other DEBUG level */
else if (strcmp(value, "LOG") == 0)
- edata->elevel = LOG; /* can't be COMMERROR */
+ edata->elevel = LOG; /* can't be COMMERROR */
else if (strcmp(value, "INFO") == 0)
edata->elevel = INFO;
else if (strcmp(value, "NOTICE") == 0)
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index cab93725e6..4c363d3d39 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -1216,7 +1216,7 @@ _copyAggref(const Aggref *from)
static GroupingFunc *
_copyGroupingFunc(const GroupingFunc *from)
{
- GroupingFunc *newnode = makeNode(GroupingFunc);
+ GroupingFunc *newnode = makeNode(GroupingFunc);
COPY_NODE_FIELD(args);
COPY_NODE_FIELD(refs);
@@ -1915,7 +1915,7 @@ _copyFromExpr(const FromExpr *from)
static OnConflictExpr *
_copyOnConflictExpr(const OnConflictExpr *from)
{
- OnConflictExpr *newnode = makeNode(OnConflictExpr);
+ OnConflictExpr *newnode = makeNode(OnConflictExpr);
COPY_SCALAR_FIELD(action);
COPY_NODE_FIELD(arbiterElems);
@@ -2173,7 +2173,7 @@ _copySortGroupClause(const SortGroupClause *from)
static GroupingSet *
_copyGroupingSet(const GroupingSet *from)
{
- GroupingSet *newnode = makeNode(GroupingSet);
+ GroupingSet *newnode = makeNode(GroupingSet);
COPY_SCALAR_FIELD(kind);
COPY_NODE_FIELD(content);
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index a9b58eb31f..4be89f63ae 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -562,7 +562,7 @@ makeFuncCall(List *name, List *args, int location)
GroupingSet *
makeGroupingSet(GroupingSetKind kind, List *content, int location)
{
- GroupingSet *n = makeNode(GroupingSet);
+ GroupingSet *n = makeNode(GroupingSet);
n->kind = kind;
n->content = content;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 4176393133..a2bcca5b75 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -1936,7 +1936,7 @@ expression_tree_walker(Node *node,
break;
case T_OnConflictExpr:
{
- OnConflictExpr *onconflict = (OnConflictExpr *) node;
+ OnConflictExpr *onconflict = (OnConflictExpr *) node;
if (walker((Node *) onconflict->arbiterElems, context))
return true;
@@ -2269,8 +2269,8 @@ expression_tree_mutator(Node *node,
break;
case T_GroupingFunc:
{
- GroupingFunc *grouping = (GroupingFunc *) node;
- GroupingFunc *newnode;
+ GroupingFunc *grouping = (GroupingFunc *) node;
+ GroupingFunc *newnode;
FLATCOPY(newnode, grouping, GroupingFunc);
MUTATE(newnode->args, grouping->args, List *);
@@ -2691,8 +2691,8 @@ expression_tree_mutator(Node *node,
break;
case T_OnConflictExpr:
{
- OnConflictExpr *oc = (OnConflictExpr *) node;
- OnConflictExpr *newnode;
+ OnConflictExpr *oc = (OnConflictExpr *) node;
+ OnConflictExpr *newnode;
FLATCOPY(newnode, oc, OnConflictExpr);
MUTATE(newnode->arbiterElems, oc->arbiterElems, List *);
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 1fd8763c96..4e6d90d8d8 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -72,9 +72,9 @@ static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
@@ -451,8 +451,8 @@ set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
static void
set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
- Relids required_outer;
- Path *path;
+ Relids required_outer;
+ Path *path;
/*
* We don't support pushing join clauses into the quals of a seqscan, but
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index c2b2b7622a..ac865be637 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -242,8 +242,8 @@ cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
Cost cpu_per_tuple;
BlockNumber pages;
double tuples;
- RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
- TableSampleClause *tablesample = rte->tablesample;
+ RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
+ TableSampleClause *tablesample = rte->tablesample;
/* Should only be applied to base relations */
Assert(baserel->relid > 0);
@@ -268,7 +268,7 @@ cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
spc_page_cost = tablesample->tsmseqscan ? spc_seq_page_cost :
- spc_random_page_cost;
+ spc_random_page_cost;
/*
* disk costs
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index a6c17534f0..470db87817 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -672,20 +672,20 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
else if (query->groupingSets)
{
/*
- * If we have grouping sets with expressions, we probably
- * don't have uniqueness and analysis would be hard. Punt.
+ * If we have grouping sets with expressions, we probably don't have
+ * uniqueness and analysis would be hard. Punt.
*/
if (query->groupClause)
return false;
/*
- * If we have no groupClause (therefore no grouping expressions),
- * we might have one or many empty grouping sets. If there's just
- * one, then we're returning only one row and are certainly unique.
- * But otherwise, we know we're certainly not unique.
+ * If we have no groupClause (therefore no grouping expressions), we
+ * might have one or many empty grouping sets. If there's just one,
+ * then we're returning only one row and are certainly unique. But
+ * otherwise, we know we're certainly not unique.
*/
if (list_length(query->groupingSets) == 1 &&
- ((GroupingSet *)linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)
+ ((GroupingSet *) linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)
return true;
else
return false;
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index b47ef466dc..a3482def64 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -59,7 +59,7 @@ static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
List *tlist, List *scan_clauses, bool indexonly);
static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
@@ -1153,7 +1153,7 @@ create_seqscan_plan(PlannerInfo *root, Path *best_path,
*/
static SampleScan *
create_samplescan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses)
+ List *tlist, List *scan_clauses)
{
SampleScan *scan_plan;
Index scan_relid = best_path->parent->relid;
@@ -1340,7 +1340,7 @@ create_indexscan_plan(PlannerInfo *root,
Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
{
- PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
+ PathKey *pathkey = (PathKey *) lfirst(pathkeyCell);
Node *expr = (Node *) lfirst(exprCell);
Oid exprtype = exprType(expr);
Oid sortop;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 60340e39ed..920c2b77ff 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -64,7 +64,7 @@ planner_hook_type planner_hook = NULL;
#define EXPRKIND_LIMIT 6
#define EXPRKIND_APPINFO 7
#define EXPRKIND_PHV 8
-#define EXPRKIND_TABLESAMPLE 9
+#define EXPRKIND_TABLESAMPLE 9
/* Passthrough data for standard_qp_callback */
typedef struct
@@ -123,15 +123,15 @@ static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
AttrNumber **ordColIdx,
Oid **ordOperators);
static Plan *build_grouping_chain(PlannerInfo *root,
- Query *parse,
- List *tlist,
- bool need_sort_for_grouping,
- List *rollup_groupclauses,
- List *rollup_lists,
- AttrNumber *groupColIdx,
- AggClauseCosts *agg_costs,
- long numGroups,
- Plan *result_plan);
+ Query *parse,
+ List *tlist,
+ bool need_sort_for_grouping,
+ List *rollup_groupclauses,
+ List *rollup_lists,
+ AttrNumber *groupColIdx,
+ AggClauseCosts *agg_costs,
+ long numGroups,
+ Plan *result_plan);
/*****************************************************************************
*
@@ -865,13 +865,14 @@ inheritance_planner(PlannerInfo *root)
*
* Note that any RTEs with security barrier quals will be turned into
* subqueries during planning, and so we must create copies of them too,
- * except where they are target relations, which will each only be used
- * in a single plan.
+ * except where they are target relations, which will each only be used in
+ * a single plan.
*/
resultRTindexes = bms_add_member(resultRTindexes, parentRTindex);
foreach(lc, root->append_rel_list)
{
AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
+
if (appinfo->parent_relid == parentRTindex)
resultRTindexes = bms_add_member(resultRTindexes,
appinfo->child_relid);
@@ -1299,6 +1300,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, parse->groupClause)
{
SortGroupClause *gc = lfirst(lc);
+
if (gc->tleSortGroupRef > maxref)
maxref = gc->tleSortGroupRef;
}
@@ -1315,12 +1317,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc_set, sets)
{
- List *current_sets = reorder_grouping_sets(lfirst(lc_set),
- (list_length(sets) == 1
- ? parse->sortClause
- : NIL));
- List *groupclause = preprocess_groupclause(root, linitial(current_sets));
- int ref = 0;
+ List *current_sets = reorder_grouping_sets(lfirst(lc_set),
+ (list_length(sets) == 1
+ ? parse->sortClause
+ : NIL));
+ List *groupclause = preprocess_groupclause(root, linitial(current_sets));
+ int ref = 0;
/*
* Now that we've pinned down an order for the groupClause for
@@ -1333,6 +1335,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, groupclause)
{
SortGroupClause *gc = lfirst(lc);
+
tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
}
@@ -1496,7 +1499,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc3, lfirst(lc2))
{
- List *gset = lfirst(lc3);
+ List *gset = lfirst(lc3);
dNumGroups += estimate_num_groups(root,
groupExprs,
@@ -1736,7 +1739,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/* Detect if we'll need an explicit sort for grouping */
if (parse->groupClause && !use_hashed_grouping &&
- !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
+ !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
{
need_sort_for_grouping = true;
@@ -1810,6 +1813,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
foreach(lc, parse->groupClause)
{
SortGroupClause *gc = lfirst(lc);
+
grouping_map[gc->tleSortGroupRef] = groupColIdx[i++];
}
@@ -1832,7 +1836,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
&agg_costs,
numGroupCols,
groupColIdx,
- extract_grouping_ops(parse->groupClause),
+ extract_grouping_ops(parse->groupClause),
NIL,
numGroups,
result_plan);
@@ -1842,9 +1846,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
else if (parse->hasAggs || (parse->groupingSets && parse->groupClause))
{
/*
- * Output is in sorted order by group_pathkeys if, and only if,
- * there is a single rollup operation on a non-empty list of
- * grouping expressions.
+ * Output is in sorted order by group_pathkeys if, and only
+ * if, there is a single rollup operation on a non-empty list
+ * of grouping expressions.
*/
if (list_length(rollup_groupclauses) == 1
&& list_length(linitial(rollup_groupclauses)) > 0)
@@ -1864,8 +1868,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan);
/*
- * these are destroyed by build_grouping_chain, so make sure we
- * don't try and touch them again
+ * these are destroyed by build_grouping_chain, so make sure
+ * we don't try and touch them again
*/
rollup_groupclauses = NIL;
rollup_lists = NIL;
@@ -1901,23 +1905,23 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
else if (root->hasHavingQual || parse->groupingSets)
{
- int nrows = list_length(parse->groupingSets);
+ int nrows = list_length(parse->groupingSets);
/*
- * No aggregates, and no GROUP BY, but we have a HAVING qual or
- * grouping sets (which by elimination of cases above must
+ * No aggregates, and no GROUP BY, but we have a HAVING qual
+ * or grouping sets (which by elimination of cases above must
* consist solely of empty grouping sets, since otherwise
* groupClause will be non-empty).
*
* This is a degenerate case in which we are supposed to emit
- * either 0 or 1 row for each grouping set depending on whether
- * HAVING succeeds. Furthermore, there cannot be any variables
- * in either HAVING or the targetlist, so we actually do not
- * need the FROM table at all! We can just throw away the
- * plan-so-far and generate a Result node. This is a
- * sufficiently unusual corner case that it's not worth
- * contorting the structure of this routine to avoid having to
- * generate the plan in the first place.
+ * either 0 or 1 row for each grouping set depending on
+ * whether HAVING succeeds. Furthermore, there cannot be any
+ * variables in either HAVING or the targetlist, so we
+ * actually do not need the FROM table at all! We can just
+ * throw away the plan-so-far and generate a Result node.
+ * This is a sufficiently unusual corner case that it's not
+ * worth contorting the structure of this routine to avoid
+ * having to generate the plan in the first place.
*/
result_plan = (Plan *) make_result(root,
tlist,
@@ -1931,7 +1935,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*/
if (nrows > 1)
{
- List *plans = list_make1(result_plan);
+ List *plans = list_make1(result_plan);
while (--nrows > 0)
plans = lappend(plans, copyObject(result_plan));
@@ -2279,6 +2283,7 @@ remap_groupColIdx(PlannerInfo *root, List *groupClause)
foreach(lc, groupClause)
{
SortGroupClause *clause = lfirst(lc);
+
new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
}
@@ -2304,15 +2309,15 @@ remap_groupColIdx(PlannerInfo *root, List *groupClause)
*/
static Plan *
build_grouping_chain(PlannerInfo *root,
- Query *parse,
- List *tlist,
- bool need_sort_for_grouping,
- List *rollup_groupclauses,
- List *rollup_lists,
+ Query *parse,
+ List *tlist,
+ bool need_sort_for_grouping,
+ List *rollup_groupclauses,
+ List *rollup_lists,
AttrNumber *groupColIdx,
AggClauseCosts *agg_costs,
- long numGroups,
- Plan *result_plan)
+ long numGroups,
+ Plan *result_plan)
{
AttrNumber *top_grpColIdx = groupColIdx;
List *chain = NIL;
@@ -2366,8 +2371,8 @@ build_grouping_chain(PlannerInfo *root,
/*
* sort_plan includes the cost of result_plan over again, which is not
- * what we want (since it's not actually running that plan). So correct
- * the cost figures.
+ * what we want (since it's not actually running that plan). So
+ * correct the cost figures.
*/
sort_plan->startup_cost -= result_plan->total_cost;
@@ -2412,7 +2417,7 @@ build_grouping_chain(PlannerInfo *root,
result_plan = (Plan *) make_agg(root,
tlist,
(List *) parse->havingQual,
- (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
+ (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
agg_costs,
numGroupCols,
top_grpColIdx,
@@ -2429,7 +2434,7 @@ build_grouping_chain(PlannerInfo *root,
*/
foreach(lc, chain)
{
- Plan *subplan = lfirst(lc);
+ Plan *subplan = lfirst(lc);
result_plan->total_cost += subplan->total_cost;
@@ -2716,6 +2721,7 @@ select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
switch (strength)
{
case LCS_NONE:
+
/*
* We don't need a tuple lock, only the ability to re-fetch
* the row. Regular tables support ROW_MARK_REFERENCE, but if
@@ -3026,7 +3032,7 @@ preprocess_groupclause(PlannerInfo *root, List *force)
{
foreach(sl, force)
{
- Index ref = lfirst_int(sl);
+ Index ref = lfirst_int(sl);
SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
new_groupclause = lappend(new_groupclause, cl);
@@ -3120,7 +3126,7 @@ extract_rollup_sets(List *groupingSets)
{
int num_sets_raw = list_length(groupingSets);
int num_empty = 0;
- int num_sets = 0; /* distinct sets */
+ int num_sets = 0; /* distinct sets */
int num_chains = 0;
List *result = NIL;
List **results;
@@ -3152,23 +3158,23 @@ extract_rollup_sets(List *groupingSets)
return list_make1(groupingSets);
/*
- * We don't strictly need to remove duplicate sets here, but if we
- * don't, they tend to become scattered through the result, which is
- * a bit confusing (and irritating if we ever decide to optimize them
- * out). So we remove them here and add them back after.
+ * We don't strictly need to remove duplicate sets here, but if we don't,
+ * they tend to become scattered through the result, which is a bit
+ * confusing (and irritating if we ever decide to optimize them out). So
+ * we remove them here and add them back after.
*
* For each non-duplicate set, we fill in the following:
*
- * orig_sets[i] = list of the original set lists
- * set_masks[i] = bitmapset for testing inclusion
- * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
+ * orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
+ * for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
+ * adjacency indices
*
* chains[i] will be the result group this set is assigned to.
*
- * We index all of these from 1 rather than 0 because it is convenient
- * to leave 0 free for the NIL node in the graph algorithm.
+ * We index all of these from 1 rather than 0 because it is convenient to
+ * leave 0 free for the NIL node in the graph algorithm.
*/
- orig_sets = palloc0((num_sets_raw + 1) * sizeof(List*));
+ orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
@@ -3192,7 +3198,8 @@ extract_rollup_sets(List *groupingSets)
/* we can only be a dup if we're the same length as a previous set */
if (j_size == list_length(candidate))
{
- int k;
+ int k;
+
for (k = j; k < i; ++k)
{
if (bms_equal(set_masks[k], candidate_set))
@@ -3215,8 +3222,8 @@ extract_rollup_sets(List *groupingSets)
}
else
{
- int k;
- int n_adj = 0;
+ int k;
+ int n_adj = 0;
orig_sets[i] = list_make1(candidate);
set_masks[i] = candidate_set;
@@ -3259,8 +3266,8 @@ extract_rollup_sets(List *groupingSets)
for (i = 1; i <= num_sets; ++i)
{
- int u = state->pair_vu[i];
- int v = state->pair_uv[i];
+ int u = state->pair_vu[i];
+ int v = state->pair_uv[i];
if (u > 0 && u < i)
chains[i] = chains[u];
@@ -3271,11 +3278,11 @@ extract_rollup_sets(List *groupingSets)
}
/* build result lists. */
- results = palloc0((num_chains + 1) * sizeof(List*));
+ results = palloc0((num_chains + 1) * sizeof(List *));
for (i = 1; i <= num_sets; ++i)
{
- int c = chains[i];
+ int c = chains[i];
Assert(c > 0);
@@ -3334,15 +3341,16 @@ reorder_grouping_sets(List *groupingsets, List *sortclause)
foreach(lc, groupingsets)
{
- List *candidate = lfirst(lc);
- List *new_elems = list_difference_int(candidate, previous);
+ List *candidate = lfirst(lc);
+ List *new_elems = list_difference_int(candidate, previous);
if (list_length(new_elems) > 0)
{
while (list_length(sortclause) > list_length(previous))
{
SortGroupClause *sc = list_nth(sortclause, list_length(previous));
- int ref = sc->tleSortGroupRef;
+ int ref = sc->tleSortGroupRef;
+
if (list_member_int(new_elems, ref))
{
previous = lappend_int(previous, ref);
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 90e13e4988..a7f65dd529 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -452,7 +452,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
break;
case T_SampleScan:
{
- SampleScan *splan = (SampleScan *) plan;
+ SampleScan *splan = (SampleScan *) plan;
splan->scanrelid += rtoffset;
splan->plan.targetlist =
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 86585c58ee..d40083d396 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -1475,8 +1475,8 @@ contain_leaked_vars_walker(Node *node, void *context)
ListCell *rarg;
/*
- * Check the comparison function and arguments passed to it for
- * each pair of row elements.
+ * Check the comparison function and arguments passed to it
+ * for each pair of row elements.
*/
forthree(opid, rcexpr->opnos,
larg, rcexpr->largs,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 3fe2712608..7f7aa24bb8 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -712,7 +712,7 @@ create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Path *
create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
{
- Path *pathnode = makeNode(Path);
+ Path *pathnode = makeNode(Path);
pathnode->pathtype = T_SampleScan;
pathnode->parent = rel;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index a857ba3526..b04dc2ed49 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -51,8 +51,8 @@ int constraint_exclusion = CONSTRAINT_EXCLUSION_PARTITION;
get_relation_info_hook_type get_relation_info_hook = NULL;
-static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
- Bitmapset *inferAttrs, List *idxExprs);
+static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+ Bitmapset *inferAttrs, List *idxExprs);
static int32 get_rel_data_width(Relation rel, int32 *attr_widths);
static List *get_relation_constraints(PlannerInfo *root,
Oid relationObjectId, RelOptInfo *rel,
@@ -427,6 +427,7 @@ List *
infer_arbiter_indexes(PlannerInfo *root)
{
OnConflictExpr *onconflict = root->parse->onConflict;
+
/* Iteration state */
Relation relation;
Oid relationObjectId;
@@ -468,9 +469,9 @@ infer_arbiter_indexes(PlannerInfo *root)
*/
foreach(l, onconflict->arbiterElems)
{
- InferenceElem *elem;
- Var *var;
- int attno;
+ InferenceElem *elem;
+ Var *var;
+ int attno;
elem = (InferenceElem *) lfirst(l);
@@ -548,8 +549,8 @@ infer_arbiter_indexes(PlannerInfo *root)
goto next;
/*
- * Note that we do not perform a check against indcheckxmin (like
- * e.g. get_relation_info()) here to eliminate candidates, because
+ * Note that we do not perform a check against indcheckxmin (like e.g.
+ * get_relation_info()) here to eliminate candidates, because
* uniqueness checking only cares about the most recently committed
* tuple versions.
*/
@@ -605,7 +606,7 @@ infer_arbiter_indexes(PlannerInfo *root)
idxExprs = RelationGetIndexExpressions(idxRel);
foreach(el, onconflict->arbiterElems)
{
- InferenceElem *elem = (InferenceElem *) lfirst(el);
+ InferenceElem *elem = (InferenceElem *) lfirst(el);
/*
* Ensure that collation/opclass aspects of inference expression
@@ -710,7 +711,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
{
AttrNumber natt;
Oid inferopfamily = InvalidOid; /* OID of att opfamily */
- Oid inferopcinputtype = InvalidOid; /* OID of att opfamily */
+ Oid inferopcinputtype = InvalidOid; /* OID of att opfamily */
/*
* If inference specification element lacks collation/opclass, then no
@@ -730,9 +731,9 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
for (natt = 1; natt <= idxRel->rd_att->natts; natt++)
{
- Oid opfamily = idxRel->rd_opfamily[natt - 1];
- Oid opcinputtype = idxRel->rd_opcintype[natt - 1];
- Oid collation = idxRel->rd_indcollation[natt - 1];
+ Oid opfamily = idxRel->rd_opfamily[natt - 1];
+ Oid opcinputtype = idxRel->rd_opcintype[natt - 1];
+ Oid collation = idxRel->rd_indcollation[natt - 1];
if (elem->inferopclass != InvalidOid &&
(inferopfamily != opfamily || inferopcinputtype != opcinputtype))
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 0f25539d12..773e7b2be1 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -578,12 +578,13 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
/* we do NOT descend into the contained expression */
return false;
case PVC_RECURSE_AGGREGATES:
+
/*
- * we do NOT descend into the contained expression,
- * even if the caller asked for it, because we never
- * actually evaluate it - the result is driven entirely
- * off the associated GROUP BY clause, so we never need
- * to extract the actual Vars here.
+ * we do NOT descend into the contained expression, even if
+ * the caller asked for it, because we never actually evaluate
+ * it - the result is driven entirely off the associated GROUP
+ * BY clause, so we never need to extract the actual Vars
+ * here.
*/
return false;
}
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 82c9abfa91..fc463faa6b 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -53,7 +53,7 @@ static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt);
static List *transformInsertRow(ParseState *pstate, List *exprlist,
List *stmtcols, List *icolumns, List *attrnos);
static OnConflictExpr *transformOnConflictClause(ParseState *pstate,
- OnConflictClause *onConflictClause);
+ OnConflictClause *onConflictClause);
static int count_rowexpr_columns(ParseState *pstate, Node *expr);
static Query *transformSelectStmt(ParseState *pstate, SelectStmt *stmt);
static Query *transformValuesClause(ParseState *pstate, SelectStmt *stmt);
@@ -65,7 +65,7 @@ static void determineRecursiveColTypes(ParseState *pstate,
static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt);
static List *transformReturningList(ParseState *pstate, List *returningList);
static List *transformUpdateTargetList(ParseState *pstate,
- List *targetList);
+ List *targetList);
static Query *transformDeclareCursorStmt(ParseState *pstate,
DeclareCursorStmt *stmt);
static Query *transformExplainStmt(ParseState *pstate,
@@ -441,7 +441,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
}
isOnConflictUpdate = (stmt->onConflictClause &&
- stmt->onConflictClause->action == ONCONFLICT_UPDATE);
+ stmt->onConflictClause->action == ONCONFLICT_UPDATE);
/*
* We have three cases to deal with: DEFAULT VALUES (selectStmt == NULL),
@@ -882,7 +882,7 @@ transformOnConflictClause(ParseState *pstate,
RangeTblEntry *exclRte = NULL;
int exclRelIndex = 0;
List *exclRelTlist = NIL;
- OnConflictExpr *result;
+ OnConflictExpr *result;
/* Process the arbiter clause, ON CONFLICT ON (...) */
transformOnConflictArbiter(pstate, onConflictClause, &arbiterElems,
@@ -2059,10 +2059,10 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
static List *
transformUpdateTargetList(ParseState *pstate, List *origTlist)
{
- List *tlist = NIL;
- RangeTblEntry *target_rte;
- ListCell *orig_tl;
- ListCell *tl;
+ List *tlist = NIL;
+ RangeTblEntry *target_rte;
+ ListCell *orig_tl;
+ ListCell *tl;
tlist = transformTargetList(pstate, origTlist,
EXPR_KIND_UPDATE_SOURCE);
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 1e3f2e0ffa..478d8ca70b 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -64,11 +64,11 @@ static void check_ungrouped_columns(Node *node, ParseState *pstate, Query *qry,
static bool check_ungrouped_columns_walker(Node *node,
check_ungrouped_columns_context *context);
static void finalize_grouping_exprs(Node *node, ParseState *pstate, Query *qry,
- List *groupClauses, PlannerInfo *root,
- bool have_non_var_grouping);
+ List *groupClauses, PlannerInfo *root,
+ bool have_non_var_grouping);
static bool finalize_grouping_exprs_walker(Node *node,
check_ungrouped_columns_context *context);
-static void check_agglevels_and_constraints(ParseState *pstate,Node *expr);
+static void check_agglevels_and_constraints(ParseState *pstate, Node *expr);
static List *expand_groupingset_node(GroupingSet *gs);
/*
@@ -246,9 +246,9 @@ transformGroupingFunc(ParseState *pstate, GroupingFunc *p)
foreach(lc, args)
{
- Node *current_result;
+ Node *current_result;
- current_result = transformExpr(pstate, (Node*) lfirst(lc), pstate->p_expr_kind);
+ current_result = transformExpr(pstate, (Node *) lfirst(lc), pstate->p_expr_kind);
/* acceptability of expressions is checked later */
@@ -284,7 +284,7 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
if (isAgg)
{
- Aggref *agg = (Aggref *) expr;
+ Aggref *agg = (Aggref *) expr;
directargs = agg->aggdirectargs;
args = agg->args;
@@ -335,7 +335,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
Assert(false); /* can't happen */
break;
case EXPR_KIND_OTHER:
- /* Accept aggregate/grouping here; caller must throw error if wanted */
+
+ /*
+ * Accept aggregate/grouping here; caller must throw error if
+ * wanted
+ */
break;
case EXPR_KIND_JOIN_ON:
case EXPR_KIND_JOIN_USING:
@@ -348,7 +352,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
case EXPR_KIND_FROM_SUBSELECT:
/* Should only be possible in a LATERAL subquery */
Assert(pstate->p_lateral_active);
- /* Aggregate/grouping scope rules make it worth being explicit here */
+
+ /*
+ * Aggregate/grouping scope rules make it worth being explicit
+ * here
+ */
if (isAgg)
err = _("aggregate functions are not allowed in FROM clause of their own query level");
else
@@ -932,7 +940,7 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc,
void
parseCheckAggregates(ParseState *pstate, Query *qry)
{
- List *gset_common = NIL;
+ List *gset_common = NIL;
List *groupClauses = NIL;
List *groupClauseCommonVars = NIL;
bool have_non_var_grouping;
@@ -956,7 +964,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* The limit of 4096 is arbitrary and exists simply to avoid resource
* issues from pathological constructs.
*/
- List *gsets = expand_grouping_sets(qry->groupingSets, 4096);
+ List *gsets = expand_grouping_sets(qry->groupingSets, 4096);
if (!gsets)
ereport(ERROR,
@@ -964,8 +972,8 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
errmsg("too many grouping sets present (max 4096)"),
parser_errposition(pstate,
qry->groupClause
- ? exprLocation((Node *) qry->groupClause)
- : exprLocation((Node *) qry->groupingSets))));
+ ? exprLocation((Node *) qry->groupClause)
+ : exprLocation((Node *) qry->groupingSets))));
/*
* The intersection will often be empty, so help things along by
@@ -985,9 +993,9 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
/*
* If there was only one grouping set in the expansion, AND if the
- * groupClause is non-empty (meaning that the grouping set is not empty
- * either), then we can ditch the grouping set and pretend we just had
- * a normal GROUP BY.
+ * groupClause is non-empty (meaning that the grouping set is not
+ * empty either), then we can ditch the grouping set and pretend we
+ * just had a normal GROUP BY.
*/
if (list_length(gsets) == 1 && qry->groupClause)
qry->groupingSets = NIL;
@@ -1012,13 +1020,13 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* Build a list of the acceptable GROUP BY expressions for use by
* check_ungrouped_columns().
*
- * We get the TLE, not just the expr, because GROUPING wants to know
- * the sortgroupref.
+ * We get the TLE, not just the expr, because GROUPING wants to know the
+ * sortgroupref.
*/
foreach(l, qry->groupClause)
{
SortGroupClause *grpcl = (SortGroupClause *) lfirst(l);
- TargetEntry *expr;
+ TargetEntry *expr;
expr = get_sortgroupclause_tle(grpcl, qry->targetList);
if (expr == NULL)
@@ -1052,13 +1060,14 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
* scans. (Note we have to flatten aliases before this.)
*
* Track Vars that are included in all grouping sets separately in
- * groupClauseCommonVars, since these are the only ones we can use to check
- * for functional dependencies.
+ * groupClauseCommonVars, since these are the only ones we can use to
+ * check for functional dependencies.
*/
have_non_var_grouping = false;
foreach(l, groupClauses)
{
TargetEntry *tle = lfirst(l);
+
if (!IsA(tle->expr, Var))
{
have_non_var_grouping = true;
@@ -1335,7 +1344,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* finalize_grouping_exprs -
* Scan the given expression tree for GROUPING() and related calls,
- * and validate and process their arguments.
+ * and validate and process their arguments.
*
* This is split out from check_ungrouped_columns above because it needs
* to modify the nodes (which it does in-place, not via a mutator) while
@@ -1411,19 +1420,19 @@ finalize_grouping_exprs_walker(Node *node,
GroupingFunc *grp = (GroupingFunc *) node;
/*
- * We only need to check GroupingFunc nodes at the exact level to which
- * they belong, since they cannot mix levels in arguments.
+ * We only need to check GroupingFunc nodes at the exact level to
+ * which they belong, since they cannot mix levels in arguments.
*/
if ((int) grp->agglevelsup == context->sublevels_up)
{
- ListCell *lc;
- List *ref_list = NIL;
+ ListCell *lc;
+ List *ref_list = NIL;
foreach(lc, grp->args)
{
- Node *expr = lfirst(lc);
- Index ref = 0;
+ Node *expr = lfirst(lc);
+ Index ref = 0;
if (context->root)
expr = flatten_join_alias_vars(context->root, expr);
@@ -1436,7 +1445,7 @@ finalize_grouping_exprs_walker(Node *node,
if (IsA(expr, Var))
{
- Var *var = (Var *) expr;
+ Var *var = (Var *) expr;
if (var->varlevelsup == context->sublevels_up)
{
@@ -1517,10 +1526,10 @@ finalize_grouping_exprs_walker(Node *node,
*
* For SET nodes, recursively expand contained CUBE and ROLLUP.
*/
-static List*
+static List *
expand_groupingset_node(GroupingSet *gs)
{
- List * result = NIL;
+ List *result = NIL;
switch (gs->kind)
{
@@ -1540,8 +1549,8 @@ expand_groupingset_node(GroupingSet *gs)
while (curgroup_size > 0)
{
- List *current_result = NIL;
- int i = curgroup_size;
+ List *current_result = NIL;
+ int i = curgroup_size;
foreach(lc, rollup_val)
{
@@ -1568,10 +1577,10 @@ expand_groupingset_node(GroupingSet *gs)
case GROUPING_SET_CUBE:
{
- List *cube_list = gs->content;
- int number_bits = list_length(cube_list);
- uint32 num_sets;
- uint32 i;
+ List *cube_list = gs->content;
+ int number_bits = list_length(cube_list);
+ uint32 num_sets;
+ uint32 i;
/* parser should cap this much lower */
Assert(number_bits < 31);
@@ -1580,9 +1589,9 @@ expand_groupingset_node(GroupingSet *gs)
for (i = 0; i < num_sets; i++)
{
- List *current_result = NIL;
- ListCell *lc;
- uint32 mask = 1U;
+ List *current_result = NIL;
+ ListCell *lc;
+ uint32 mask = 1U;
foreach(lc, cube_list)
{
@@ -1611,7 +1620,7 @@ expand_groupingset_node(GroupingSet *gs)
foreach(lc, gs->content)
{
- List *current_result = expand_groupingset_node(lfirst(lc));
+ List *current_result = expand_groupingset_node(lfirst(lc));
result = list_concat(result, current_result);
}
@@ -1625,8 +1634,9 @@ expand_groupingset_node(GroupingSet *gs)
static int
cmp_list_len_asc(const void *a, const void *b)
{
- int la = list_length(*(List*const*)a);
- int lb = list_length(*(List*const*)b);
+ int la = list_length(*(List *const *) a);
+ int lb = list_length(*(List *const *) b);
+
return (la > lb) ? 1 : (la == lb) ? 0 : -1;
}
@@ -1641,7 +1651,7 @@ List *
expand_grouping_sets(List *groupingSets, int limit)
{
List *expanded_groups = NIL;
- List *result = NIL;
+ List *result = NIL;
double numsets = 1;
ListCell *lc;
@@ -1650,7 +1660,7 @@ expand_grouping_sets(List *groupingSets, int limit)
foreach(lc, groupingSets)
{
- List *current_result = NIL;
+ List *current_result = NIL;
GroupingSet *gs = lfirst(lc);
current_result = expand_groupingset_node(gs);
@@ -1666,9 +1676,9 @@ expand_grouping_sets(List *groupingSets, int limit)
}
/*
- * Do cartesian product between sublists of expanded_groups.
- * While at it, remove any duplicate elements from individual
- * grouping sets (we must NOT change the number of sets though)
+ * Do cartesian product between sublists of expanded_groups. While at it,
+ * remove any duplicate elements from individual grouping sets (we must
+ * NOT change the number of sets though)
*/
foreach(lc, (List *) linitial(expanded_groups))
@@ -1698,16 +1708,16 @@ expand_grouping_sets(List *groupingSets, int limit)
if (list_length(result) > 1)
{
- int result_len = list_length(result);
- List **buf = palloc(sizeof(List*) * result_len);
- List **ptr = buf;
+ int result_len = list_length(result);
+ List **buf = palloc(sizeof(List *) * result_len);
+ List **ptr = buf;
foreach(lc, result)
{
*ptr++ = lfirst(lc);
}
- qsort(buf, result_len, sizeof(List*), cmp_list_len_asc);
+ qsort(buf, result_len, sizeof(List *), cmp_list_len_asc);
result = NIL;
ptr = buf;
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index f8eebfe8c3..e90e1d68e3 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -82,7 +82,7 @@ static TargetEntry *findTargetlistEntrySQL99(ParseState *pstate, Node *node,
List **tlist, ParseExprKind exprKind);
static int get_matching_location(int sortgroupref,
List *sortgrouprefs, List *exprs);
-static List *resolve_unique_index_expr(ParseState *pstate, InferClause * infer,
+static List *resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
Relation heapRel);
static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle,
List *grouplist, List *targetlist, int location,
@@ -426,14 +426,15 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace)
static RangeTblEntry *
transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv)
{
- RangeTblEntry *rte = NULL;
+ RangeTblEntry *rte = NULL;
CommonTableExpr *cte = NULL;
TableSampleClause *tablesample = NULL;
/* if relation has an unqualified name, it might be a CTE reference */
if (!rv->relation->schemaname)
{
- Index levelsup;
+ Index levelsup;
+
cte = scanNameSpaceForCTE(pstate, rv->relation->relname, &levelsup);
}
@@ -443,7 +444,7 @@ transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv)
if (!rte ||
(rte->relkind != RELKIND_RELATION &&
- rte->relkind != RELKIND_MATVIEW))
+ rte->relkind != RELKIND_MATVIEW))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("TABLESAMPLE clause can only be used on tables and materialized views"),
@@ -1167,7 +1168,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
else if (IsA(n, RangeTableSample))
{
/* Tablesample reference */
- RangeTableSample *rv = (RangeTableSample *) n;
+ RangeTableSample *rv = (RangeTableSample *) n;
RangeTblRef *rtr;
RangeTblEntry *rte = NULL;
int rtindex;
@@ -1738,9 +1739,9 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
* CUBE or ROLLUP can be nested inside GROUPING SETS (but not the reverse),
* and we leave that alone if we find it. But if we see GROUPING SETS inside
* GROUPING SETS, we can flatten and normalize as follows:
- * GROUPING SETS (a, (b,c), GROUPING SETS ((c,d),(e)), (f,g))
+ * GROUPING SETS (a, (b,c), GROUPING SETS ((c,d),(e)), (f,g))
* becomes
- * GROUPING SETS ((a), (b,c), (c,d), (e), (f,g))
+ * GROUPING SETS ((a), (b,c), (c,d), (e), (f,g))
*
* This is per the spec's syntax transformations, but these are the only such
* transformations we do in parse analysis, so that queries retain the
@@ -1750,12 +1751,12 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
*
* When we're done, the resulting list should contain only these possible
* elements:
- * - an expression
- * - a CUBE or ROLLUP with a list of expressions nested 2 deep
- * - a GROUPING SET containing any of:
- * - expression lists
- * - empty grouping sets
- * - CUBE or ROLLUP nodes with lists nested 2 deep
+ * - an expression
+ * - a CUBE or ROLLUP with a list of expressions nested 2 deep
+ * - a GROUPING SET containing any of:
+ * - expression lists
+ * - empty grouping sets
+ * - CUBE or ROLLUP nodes with lists nested 2 deep
* The return is a new list, but doesn't deep-copy the old nodes except for
* GroupingSet nodes.
*
@@ -1775,7 +1776,8 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
{
case T_RowExpr:
{
- RowExpr *r = (RowExpr *) expr;
+ RowExpr *r = (RowExpr *) expr;
+
if (r->row_format == COERCE_IMPLICIT_CAST)
return flatten_grouping_sets((Node *) r->args,
false, NULL);
@@ -1792,7 +1794,8 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
/*
* at the top level, we skip over all empty grouping sets; the
- * caller can supply the canonical GROUP BY () if nothing is left.
+ * caller can supply the canonical GROUP BY () if nothing is
+ * left.
*/
if (toplevel && gset->kind == GROUPING_SET_EMPTY)
@@ -1800,15 +1803,15 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
foreach(l2, gset->content)
{
- Node *n2 = flatten_grouping_sets(lfirst(l2), false, NULL);
+ Node *n2 = flatten_grouping_sets(lfirst(l2), false, NULL);
result_set = lappend(result_set, n2);
}
/*
- * At top level, keep the grouping set node; but if we're in a nested
- * grouping set, then we need to concat the flattened result into the
- * outer list if it's simply nested.
+ * At top level, keep the grouping set node; but if we're in a
+ * nested grouping set, then we need to concat the flattened
+ * result into the outer list if it's simply nested.
*/
if (toplevel || (gset->kind != GROUPING_SET_SETS))
@@ -1823,12 +1826,13 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
List *result = NIL;
ListCell *l;
- foreach(l, (List *)expr)
+ foreach(l, (List *) expr)
{
- Node *n = flatten_grouping_sets(lfirst(l), toplevel, hasGroupingSets);
+ Node *n = flatten_grouping_sets(lfirst(l), toplevel, hasGroupingSets);
+
if (n != (Node *) NIL)
{
- if (IsA(n,List))
+ if (IsA(n, List))
result = list_concat(result, (List *) n);
else
result = lappend(result, n);
@@ -1888,15 +1892,15 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
* (Duplicates in grouping sets can affect the number of returned
* rows, so can't be dropped indiscriminately.)
*
- * Since we don't care about anything except the sortgroupref,
- * we can use a bitmapset rather than scanning lists.
+ * Since we don't care about anything except the sortgroupref, we can
+ * use a bitmapset rather than scanning lists.
*/
- if (bms_is_member(tle->ressortgroupref,seen_local))
+ if (bms_is_member(tle->ressortgroupref, seen_local))
return 0;
/*
- * If we're already in the flat clause list, we don't need
- * to consider adding ourselves again.
+ * If we're already in the flat clause list, we don't need to consider
+ * adding ourselves again.
*/
found = targetIsInSortList(tle, InvalidOid, *flatresult);
if (found)
@@ -1928,6 +1932,7 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
if (sc->tleSortGroupRef == tle->ressortgroupref)
{
SortGroupClause *grpc = copyObject(sc);
+
if (!toplevel)
grpc->nulls_first = false;
*flatresult = lappend(*flatresult, grpc);
@@ -1983,17 +1988,18 @@ transformGroupClauseList(List **flatresult,
foreach(gl, list)
{
- Node *gexpr = (Node *) lfirst(gl);
-
- Index ref = transformGroupClauseExpr(flatresult,
- seen_local,
- pstate,
- gexpr,
- targetlist,
- sortClause,
- exprKind,
- useSQL99,
- toplevel);
+ Node *gexpr = (Node *) lfirst(gl);
+
+ Index ref = transformGroupClauseExpr(flatresult,
+ seen_local,
+ pstate,
+ gexpr,
+ targetlist,
+ sortClause,
+ exprKind,
+ useSQL99,
+ toplevel);
+
if (ref > 0)
{
seen_local = bms_add_member(seen_local, ref);
@@ -2036,14 +2042,14 @@ transformGroupingSet(List **flatresult,
foreach(gl, gset->content)
{
- Node *n = lfirst(gl);
+ Node *n = lfirst(gl);
if (IsA(n, List))
{
- List *l = transformGroupClauseList(flatresult,
- pstate, (List *) n,
- targetlist, sortClause,
- exprKind, useSQL99, false);
+ List *l = transformGroupClauseList(flatresult,
+ pstate, (List *) n,
+ targetlist, sortClause,
+ exprKind, useSQL99, false);
content = lappend(content, makeGroupingSet(GROUPING_SET_SIMPLE,
l,
@@ -2055,20 +2061,20 @@ transformGroupingSet(List **flatresult,
content = lappend(content, transformGroupingSet(flatresult,
pstate, gset2,
- targetlist, sortClause,
- exprKind, useSQL99, false));
+ targetlist, sortClause,
+ exprKind, useSQL99, false));
}
else
{
- Index ref = transformGroupClauseExpr(flatresult,
- NULL,
- pstate,
- n,
- targetlist,
- sortClause,
- exprKind,
- useSQL99,
- false);
+ Index ref = transformGroupClauseExpr(flatresult,
+ NULL,
+ pstate,
+ n,
+ targetlist,
+ sortClause,
+ exprKind,
+ useSQL99,
+ false);
content = lappend(content, makeGroupingSet(GROUPING_SET_SIMPLE,
list_make1_int(ref),
@@ -2121,7 +2127,7 @@ transformGroupingSet(List **flatresult,
*
* pstate ParseState
* grouplist clause to transform
- * groupingSets reference to list to contain the grouping set tree
+ * groupingSets reference to list to contain the grouping set tree
* targetlist reference to TargetEntry list
* sortClause ORDER BY clause (SortGroupClause nodes)
* exprKind expression kind
@@ -2136,34 +2142,34 @@ transformGroupClause(ParseState *pstate, List *grouplist, List **groupingSets,
List *flat_grouplist;
List *gsets = NIL;
ListCell *gl;
- bool hasGroupingSets = false;
+ bool hasGroupingSets = false;
Bitmapset *seen_local = NULL;
/*
- * Recursively flatten implicit RowExprs. (Technically this is only
- * needed for GROUP BY, per the syntax rules for grouping sets, but
- * we do it anyway.)
+ * Recursively flatten implicit RowExprs. (Technically this is only needed
+ * for GROUP BY, per the syntax rules for grouping sets, but we do it
+ * anyway.)
*/
flat_grouplist = (List *) flatten_grouping_sets((Node *) grouplist,
true,
&hasGroupingSets);
/*
- * If the list is now empty, but hasGroupingSets is true, it's because
- * we elided redundant empty grouping sets. Restore a single empty
- * grouping set to leave a canonical form: GROUP BY ()
+ * If the list is now empty, but hasGroupingSets is true, it's because we
+ * elided redundant empty grouping sets. Restore a single empty grouping
+ * set to leave a canonical form: GROUP BY ()
*/
if (flat_grouplist == NIL && hasGroupingSets)
{
flat_grouplist = list_make1(makeGroupingSet(GROUPING_SET_EMPTY,
NIL,
- exprLocation((Node *) grouplist)));
+ exprLocation((Node *) grouplist)));
}
foreach(gl, flat_grouplist)
{
- Node *gexpr = (Node *) lfirst(gl);
+ Node *gexpr = (Node *) lfirst(gl);
if (IsA(gexpr, GroupingSet))
{
@@ -2184,17 +2190,17 @@ transformGroupClause(ParseState *pstate, List *grouplist, List **groupingSets,
gsets = lappend(gsets,
transformGroupingSet(&result,
pstate, gset,
- targetlist, sortClause,
- exprKind, useSQL99, true));
+ targetlist, sortClause,
+ exprKind, useSQL99, true));
break;
}
}
else
{
- Index ref = transformGroupClauseExpr(&result, seen_local,
- pstate, gexpr,
- targetlist, sortClause,
- exprKind, useSQL99, true);
+ Index ref = transformGroupClauseExpr(&result, seen_local,
+ pstate, gexpr,
+ targetlist, sortClause,
+ exprKind, useSQL99, true);
if (ref > 0)
{
@@ -2661,9 +2667,9 @@ resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
foreach(l, infer->indexElems)
{
- IndexElem *ielem = (IndexElem *) lfirst(l);
- InferenceElem *pInfer = makeNode(InferenceElem);
- Node *parse;
+ IndexElem *ielem = (IndexElem *) lfirst(l);
+ InferenceElem *pInfer = makeNode(InferenceElem);
+ Node *parse;
/*
* Raw grammar re-uses CREATE INDEX infrastructure for unique index
@@ -2684,7 +2690,7 @@ resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
if (ielem->nulls_ordering != SORTBY_NULLS_DEFAULT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("NULLS FIRST/LAST is not allowed in ON CONFLICT clause"),
+ errmsg("NULLS FIRST/LAST is not allowed in ON CONFLICT clause"),
parser_errposition(pstate,
exprLocation((Node *) infer))));
@@ -2767,7 +2773,7 @@ transformOnConflictArbiter(ParseState *pstate,
errmsg("ON CONFLICT DO UPDATE requires inference specification or constraint name"),
errhint("For example, ON CONFLICT ()."),
parser_errposition(pstate,
- exprLocation((Node *) onConflictClause))));
+ exprLocation((Node *) onConflictClause))));
/*
* To simplify certain aspects of its design, speculative insertion into
@@ -2776,9 +2782,9 @@ transformOnConflictArbiter(ParseState *pstate,
if (IsCatalogRelation(pstate->p_target_relation))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("ON CONFLICT not supported with system catalog tables"),
+ errmsg("ON CONFLICT not supported with system catalog tables"),
parser_errposition(pstate,
- exprLocation((Node *) onConflictClause))));
+ exprLocation((Node *) onConflictClause))));
/* Same applies to table used by logical decoding as catalog table */
if (RelationIsUsedAsCatalogTable(pstate->p_target_relation))
@@ -2787,7 +2793,7 @@ transformOnConflictArbiter(ParseState *pstate,
errmsg("ON CONFLICT not supported on table \"%s\" used as a catalog table",
RelationGetRelationName(pstate->p_target_relation)),
parser_errposition(pstate,
- exprLocation((Node *) onConflictClause))));
+ exprLocation((Node *) onConflictClause))));
/* ON CONFLICT DO NOTHING does not require an inference clause */
if (infer)
@@ -2795,9 +2801,8 @@ transformOnConflictArbiter(ParseState *pstate,
List *save_namespace;
/*
- * While we process the arbiter expressions, accept only
- * non-qualified references to the target table. Hide any other
- * relations.
+ * While we process the arbiter expressions, accept only non-qualified
+ * references to the target table. Hide any other relations.
*/
save_namespace = pstate->p_namespace;
pstate->p_namespace = NIL;
@@ -2806,7 +2811,7 @@ transformOnConflictArbiter(ParseState *pstate,
if (infer->indexElems)
*arbiterExpr = resolve_unique_index_expr(pstate, infer,
- pstate->p_target_relation);
+ pstate->p_target_relation);
/*
* Handling inference WHERE clause (for partial unique index
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index fa50f92d8d..fa9761bac3 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -778,14 +778,15 @@ TableSampleClause *
ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
List *sampleargs, int location)
{
- HeapTuple tuple;
+ HeapTuple tuple;
Form_pg_tablesample_method tsm;
Form_pg_proc procform;
TableSampleClause *tablesample;
- List *fargs;
- ListCell *larg;
- int nargs, initnargs;
- Oid init_arg_types[FUNC_MAX_ARGS];
+ List *fargs;
+ ListCell *larg;
+ int nargs,
+ initnargs;
+ Oid init_arg_types[FUNC_MAX_ARGS];
/* Load the tablesample method */
tuple = SearchSysCache1(TABLESAMPLEMETHODNAME, PointerGetDatum(samplemethod));
@@ -817,7 +818,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
tuple = SearchSysCache1(PROCOID,
ObjectIdGetDatum(tablesample->tsminit));
- if (!HeapTupleIsValid(tuple)) /* should not happen */
+ if (!HeapTupleIsValid(tuple)) /* should not happen */
elog(ERROR, "cache lookup failed for function %u",
tablesample->tsminit);
@@ -826,15 +827,15 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
Assert(initnargs >= 3);
/*
- * First parameter is used to pass the SampleScanState, second is
- * seed (REPEATABLE), skip the processing for them here, just assert
- * that the types are correct.
+ * First parameter is used to pass the SampleScanState, second is seed
+ * (REPEATABLE), skip the processing for them here, just assert that the
+ * types are correct.
*/
Assert(procform->proargtypes.values[0] == INTERNALOID);
Assert(procform->proargtypes.values[1] == INT4OID);
initnargs -= 2;
memcpy(init_arg_types, procform->proargtypes.values + 2,
- initnargs * sizeof(Oid));
+ initnargs * sizeof(Oid));
/* Now we are done with the catalog */
ReleaseSysCache(tuple);
@@ -842,7 +843,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
/* Process repeatable (seed) */
if (repeatable != NULL)
{
- Node *arg = repeatable;
+ Node *arg = repeatable;
if (arg && IsA(arg, A_Const))
{
@@ -851,7 +852,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
if (con->val.type == T_Null)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("REPEATABLE clause must be NOT NULL numeric value"),
+ errmsg("REPEATABLE clause must be NOT NULL numeric value"),
parser_errposition(pstate, con->location)));
}
@@ -867,21 +868,21 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
if (list_length(sampleargs) != initnargs)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg_plural("tablesample method \"%s\" expects %d argument got %d",
- "tablesample method \"%s\" expects %d arguments got %d",
- initnargs,
- samplemethod,
- initnargs, list_length(sampleargs)),
- parser_errposition(pstate, location)));
+ errmsg_plural("tablesample method \"%s\" expects %d argument got %d",
+ "tablesample method \"%s\" expects %d arguments got %d",
+ initnargs,
+ samplemethod,
+ initnargs, list_length(sampleargs)),
+ parser_errposition(pstate, location)));
/* Transform the arguments, typecasting them as needed. */
fargs = NIL;
nargs = 0;
foreach(larg, sampleargs)
{
- Node *inarg = (Node *) lfirst(larg);
- Node *arg = transformExpr(pstate, inarg, EXPR_KIND_FROM_FUNCTION);
- Oid argtype = exprType(arg);
+ Node *inarg = (Node *) lfirst(larg);
+ Node *arg = transformExpr(pstate, inarg, EXPR_KIND_FROM_FUNCTION);
+ Oid argtype = exprType(arg);
if (argtype != init_arg_types[nargs])
{
@@ -889,12 +890,12 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
COERCION_IMPLICIT))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("wrong parameter %d for tablesample method \"%s\"",
- nargs + 1, samplemethod),
- errdetail("Expected type %s got %s.",
- format_type_be(init_arg_types[nargs]),
- format_type_be(argtype)),
- parser_errposition(pstate, exprLocation(inarg))));
+ errmsg("wrong parameter %d for tablesample method \"%s\"",
+ nargs + 1, samplemethod),
+ errdetail("Expected type %s got %s.",
+ format_type_be(init_arg_types[nargs]),
+ format_type_be(argtype)),
+ parser_errposition(pstate, exprLocation(inarg))));
arg = coerce_type(pstate, arg, argtype, init_arg_types[nargs], -1,
COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1);
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 562c2f54f9..0b2dacfd59 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -530,8 +530,8 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
FuzzyAttrMatchState *fuzzystate, RangeTblEntry *rte,
const char *actual, const char *match, int attnum)
{
- int columndistance;
- int matchlen;
+ int columndistance;
+ int matchlen;
/* Bail before computing the Levenshtein distance if there's no hope. */
if (fuzzy_rte_penalty > fuzzystate->distance)
@@ -550,7 +550,7 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
varstr_levenshtein_less_equal(actual, strlen(actual), match, matchlen,
1, 1, 1,
fuzzystate->distance + 1
- - fuzzy_rte_penalty);
+ - fuzzy_rte_penalty);
/*
* If more than half the characters are different, don't treat it as a
@@ -560,8 +560,8 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
return;
/*
- * From this point on, we can ignore the distinction between the
- * RTE-name distance and the column-name distance.
+ * From this point on, we can ignore the distinction between the RTE-name
+ * distance and the column-name distance.
*/
columndistance += fuzzy_rte_penalty;
@@ -581,11 +581,11 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
else if (columndistance == fuzzystate->distance)
{
/*
- * This match distance may equal a prior match within this same
- * range table. When that happens, the prior match may also be
- * given, but only if there is no more than two equally distant
- * matches from the RTE (in turn, our caller will only accept
- * two equally distant matches overall).
+ * This match distance may equal a prior match within this same range
+ * table. When that happens, the prior match may also be given, but
+ * only if there is no more than two equally distant matches from the
+ * RTE (in turn, our caller will only accept two equally distant
+ * matches overall).
*/
if (AttributeNumberIsValid(fuzzystate->second))
{
@@ -606,9 +606,9 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
else if (fuzzystate->distance <= MAX_FUZZY_DISTANCE)
{
/*
- * Record as provisional first match (this can occasionally
- * occur because previous lowest distance was "too low a
- * bar", rather than being associated with a real match)
+ * Record as provisional first match (this can occasionally occur
+ * because previous lowest distance was "too low a bar", rather
+ * than being associated with a real match)
*/
fuzzystate->rfirst = rte;
fuzzystate->first = attnum;
@@ -820,8 +820,8 @@ searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname,
foreach(l, pstate->p_rtable)
{
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
- int fuzzy_rte_penalty = 0;
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+ int fuzzy_rte_penalty = 0;
/*
* Typically, it is not useful to look for matches within join
@@ -851,7 +851,7 @@ searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname,
*/
if (scanRTEForColumn(orig_pstate, rte, colname, location,
fuzzy_rte_penalty, fuzzystate)
- && fuzzy_rte_penalty == 0)
+ && fuzzy_rte_penalty == 0)
{
fuzzystate->rfirst = rte;
fuzzystate->first = InvalidAttrNumber;
@@ -3040,8 +3040,8 @@ void
errorMissingColumn(ParseState *pstate,
char *relname, char *colname, int location)
{
- FuzzyAttrMatchState *state;
- char *closestfirst = NULL;
+ FuzzyAttrMatchState *state;
+ char *closestfirst = NULL;
/*
* Search the entire rtable looking for possible matches. If we find one,
@@ -3056,10 +3056,10 @@ errorMissingColumn(ParseState *pstate,
* Extract closest col string for best match, if any.
*
* Infer an exact match referenced despite not being visible from the fact
- * that an attribute number was not present in state passed back -- this is
- * what is reported when !closestfirst. There might also be an exact match
- * that was qualified with an incorrect alias, in which case closestfirst
- * will be set (so hint is the same as generic fuzzy case).
+ * that an attribute number was not present in state passed back -- this
+ * is what is reported when !closestfirst. There might also be an exact
+ * match that was qualified with an incorrect alias, in which case
+ * closestfirst will be set (so hint is the same as generic fuzzy case).
*/
if (state->rfirst && AttributeNumberIsValid(state->first))
closestfirst = strVal(list_nth(state->rfirst->eref->colnames,
@@ -3074,19 +3074,19 @@ errorMissingColumn(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
relname ?
- errmsg("column %s.%s does not exist", relname, colname):
+ errmsg("column %s.%s does not exist", relname, colname) :
errmsg("column \"%s\" does not exist", colname),
state->rfirst ? closestfirst ?
- errhint("Perhaps you meant to reference the column \"%s\".\"%s\".",
- state->rfirst->eref->aliasname, closestfirst):
+ errhint("Perhaps you meant to reference the column \"%s\".\"%s\".",
+ state->rfirst->eref->aliasname, closestfirst) :
errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.",
- colname, state->rfirst->eref->aliasname): 0,
+ colname, state->rfirst->eref->aliasname) : 0,
parser_errposition(pstate, location)));
}
else
{
/* Handle case where there are two equally useful column hints */
- char *closestsecond;
+ char *closestsecond;
closestsecond = strVal(list_nth(state->rsecond->eref->colnames,
state->second - 1));
@@ -3094,7 +3094,7 @@ errorMissingColumn(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
relname ?
- errmsg("column %s.%s does not exist", relname, colname):
+ errmsg("column %s.%s does not exist", relname, colname) :
errmsg("column \"%s\" does not exist", colname),
errhint("Perhaps you meant to reference the column \"%s\".\"%s\" or the column \"%s\".\"%s\".",
state->rfirst->eref->aliasname, closestfirst,
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 1ba6ca76f4..661663994e 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -797,7 +797,7 @@ fail:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid type name \"%s\"", str)));
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
/*
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 0a55db4a82..16d40c7240 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1804,8 +1804,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("inherited relation \"%s\" is not a table or foreign table",
- inh->relname)));
+ errmsg("inherited relation \"%s\" is not a table or foreign table",
+ inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
@@ -2496,7 +2496,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
case AT_AlterColumnType:
{
- ColumnDef *def = (ColumnDef *) cmd->def;
+ ColumnDef *def = (ColumnDef *) cmd->def;
/*
* For ALTER COLUMN TYPE, transform the USING clause if
diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c
index 263c68f466..3350fb7df3 100644
--- a/src/backend/port/atomics.c
+++ b/src/backend/port/atomics.c
@@ -15,7 +15,7 @@
/*
* We want the functions below to be inline; but if the compiler doesn't
- * support that, fall back on providing them as regular functions. See
+ * support that, fall back on providing them as regular functions. See
* STATIC_IF_INLINE in c.h.
*/
#define ATOMICS_INCLUDE_DEFINITIONS
@@ -50,6 +50,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
"size mismatch of atomic_flag vs slock_t");
#ifndef HAVE_SPINLOCKS
+
/*
* NB: If we're using semaphore based TAS emulation, be careful to use a
* separate set of semaphores. Otherwise we'd get in trouble if an atomic
@@ -73,7 +74,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
S_UNLOCK((slock_t *) &ptr->sema);
}
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
void
@@ -98,7 +99,8 @@ bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
+ bool ret;
+
/*
* Do atomic op under a spinlock. It might look like we could just skip
* the cmpxchg if the lock isn't available, but that'd just emulate a
@@ -109,7 +111,7 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
*/
SpinLockAcquire((slock_t *) &ptr->sema);
- /* perform compare/exchange logic*/
+ /* perform compare/exchange logic */
ret = ptr->value == *expected;
*expected = ptr->value;
if (ret)
@@ -124,7 +126,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
- uint32 oldval;
+ uint32 oldval;
+
SpinLockAcquire((slock_t *) &ptr->sema);
oldval = ptr->value;
ptr->value += add_;
@@ -132,4 +135,4 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
return oldval;
}
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index d95ab9273c..8be5bbe1ab 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -391,7 +391,7 @@ CreateAnonymousSegment(Size *size)
(mmap_errno == ENOMEM) ?
errhint("This error usually means that PostgreSQL's request "
"for a shared memory segment exceeded available memory, "
- "swap space, or huge pages. To reduce the request size "
+ "swap space, or huge pages. To reduce the request size "
"(currently %zu bytes), reduce PostgreSQL's shared "
"memory usage, perhaps by reducing shared_buffers or "
"max_connections.",
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index c7d4bdddc2..ee9526245f 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -151,7 +151,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
{
/* Need an event object to represent events on the socket */
- int flags = FD_CLOSE; /* always check for errors/EOF */
+ int flags = FD_CLOSE; /* always check for errors/EOF */
if (wakeEvents & WL_SOCKET_READABLE)
flags |= FD_READ;
diff --git a/src/backend/port/win32_sema.c b/src/backend/port/win32_sema.c
index 011e2fd4a6..4fd1e2aa13 100644
--- a/src/backend/port/win32_sema.c
+++ b/src/backend/port/win32_sema.c
@@ -153,6 +153,7 @@ PGSemaphoreLock(PGSemaphore sema)
done = true;
break;
case WAIT_IO_COMPLETION:
+
/*
* The system interrupted the wait to execute an I/O
* completion routine or asynchronous procedure call in this
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f4b30ba80e..5b69959444 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -297,8 +297,8 @@ static void do_autovacuum(void);
static void FreeWorkerInfo(int code, Datum arg);
static autovac_table *table_recheck_autovac(Oid relid, HTAB *table_toast_map,
- TupleDesc pg_class_desc,
- int effective_multixact_freeze_max_age);
+ TupleDesc pg_class_desc,
+ int effective_multixact_freeze_max_age);
static void relation_needs_vacanalyze(Oid relid, AutoVacOpts *relopts,
Form_pg_class classForm,
PgStat_StatTabEntry *tabentry,
@@ -1915,8 +1915,8 @@ do_autovacuum(void)
/*
* Compute the multixact age for which freezing is urgent. This is
- * normally autovacuum_multixact_freeze_max_age, but may be less if we
- * are short of multixact member space.
+ * normally autovacuum_multixact_freeze_max_age, but may be less if we are
+ * short of multixact member space.
*/
effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
@@ -2782,7 +2782,7 @@ relation_needs_vacanalyze(Oid relid,
static void
autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy)
{
- RangeVar rangevar;
+ RangeVar rangevar;
/* Set up command parameters --- use local variables instead of palloc */
MemSet(&rangevar, 0, sizeof(rangevar));
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 377733377b..f57224c10f 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -254,15 +254,15 @@ BackgroundWorkerStateChange(void)
}
/*
- * If the worker is marked for termination, we don't need to add it
- * to the registered workers list; we can just free the slot.
- * However, if bgw_notify_pid is set, the process that registered the
- * worker may need to know that we've processed the terminate request,
- * so be sure to signal it.
+ * If the worker is marked for termination, we don't need to add it to
+ * the registered workers list; we can just free the slot. However, if
+ * bgw_notify_pid is set, the process that registered the worker may
+ * need to know that we've processed the terminate request, so be sure
+ * to signal it.
*/
if (slot->terminate)
{
- int notify_pid;
+ int notify_pid;
/*
* We need a memory barrier here to make sure that the load of
@@ -426,7 +426,7 @@ BackgroundWorkerStopNotifications(pid_t pid)
void
ResetBackgroundWorkerCrashTimes(void)
{
- slist_mutable_iter iter;
+ slist_mutable_iter iter;
slist_foreach_modify(iter, &BackgroundWorkerList)
{
@@ -435,8 +435,8 @@ ResetBackgroundWorkerCrashTimes(void)
rw = slist_container(RegisteredBgWorker, rw_lnode, iter.cur);
/*
- * For workers that should not be restarted, we don't want to lose
- * the information that they have crashed; otherwise, they would be
+ * For workers that should not be restarted, we don't want to lose the
+ * information that they have crashed; otherwise, they would be
* restarted, which is wrong.
*/
if (rw->rw_worker.bgw_restart_time != BGW_NEVER_RESTART)
@@ -679,7 +679,8 @@ StartBackgroundWorker(void)
/*
* Early initialization. Some of this could be useful even for
* background workers that aren't using shared memory, but they can
- * call the individual startup routines for those subsystems if needed.
+ * call the individual startup routines for those subsystems if
+ * needed.
*/
BaseInit();
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 1e6073abca..e9fbc381cc 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -2580,7 +2580,7 @@ CreateSharedBackendStatus(void)
buffer = (char *) BackendSslStatusBuffer;
for (i = 0; i < MaxBackends; i++)
{
- BackendStatusArray[i].st_sslstatus = (PgBackendSSLStatus *)buffer;
+ BackendStatusArray[i].st_sslstatus = (PgBackendSSLStatus *) buffer;
buffer += sizeof(PgBackendSSLStatus);
}
}
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 87f543031a..ee0b01820b 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2603,7 +2603,7 @@ reaper(SIGNAL_ARGS)
if (EXIT_STATUS_3(exitstatus))
{
ereport(LOG,
- (errmsg("shutdown at recovery target")));
+ (errmsg("shutdown at recovery target")));
Shutdown = SmartShutdown;
TerminateChildren(SIGTERM);
pmState = PM_WAIT_BACKENDS;
@@ -2930,9 +2930,9 @@ CleanupBackgroundWorker(int pid,
}
/*
- * We must release the postmaster child slot whether this worker
- * is connected to shared memory or not, but we only treat it as
- * a crash if it is in fact connected.
+ * We must release the postmaster child slot whether this worker is
+ * connected to shared memory or not, but we only treat it as a crash
+ * if it is in fact connected.
*/
if (!ReleasePostmasterChildSlot(rw->rw_child_slot) &&
(rw->rw_worker.bgw_flags & BGWORKER_SHMEM_ACCESS) != 0)
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 4c1460cb1c..fa29624667 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -51,7 +51,7 @@ typedef struct
static int64 sendDir(char *path, int basepathlen, bool sizeonly,
- List *tablespaces, bool sendtblspclinks);
+ List *tablespaces, bool sendtblspclinks);
static bool sendFile(char *readfilename, char *tarfilename,
struct stat * statbuf, bool missing_ok);
static void sendFileWithContent(const char *filename, const char *content);
@@ -130,11 +130,12 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
&labelfile, tblspcdir, &tablespaces,
&tblspc_map_file,
opt->progress, opt->sendtblspcmapfile);
+
/*
* Once do_pg_start_backup has been called, ensure that any failure causes
- * us to abort the backup so we don't "leak" a backup counter. For this reason,
- * *all* functionality between do_pg_start_backup() and do_pg_stop_backup()
- * should be inside the error cleanup block!
+ * us to abort the backup so we don't "leak" a backup counter. For this
+ * reason, *all* functionality between do_pg_start_backup() and
+ * do_pg_stop_backup() should be inside the error cleanup block!
*/
PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0);
@@ -145,8 +146,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
SendXlogRecPtrResult(startptr, starttli);
/*
- * Calculate the relative path of temporary statistics directory in order
- * to skip the files which are located in that directory later.
+ * Calculate the relative path of temporary statistics directory in
+ * order to skip the files which are located in that directory later.
*/
if (is_absolute_path(pgstat_stat_directory) &&
strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0)
@@ -900,8 +901,8 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces,
/*
* If there's a backup_label or tablespace_map file, it belongs to a
* backup started by the user with pg_start_backup(). It is *not*
- * correct for this backup, our backup_label/tablespace_map is injected
- * into the tar separately.
+ * correct for this backup, our backup_label/tablespace_map is
+ * injected into the tar separately.
*/
if (strcmp(de->d_name, BACKUP_LABEL_FILE) == 0)
continue;
@@ -1226,8 +1227,8 @@ _tarWriteHeader(const char *filename, const char *linktarget,
enum tarError rc;
rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
- statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
- statbuf->st_mtime);
+ statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+ statbuf->st_mtime);
switch (rc)
{
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 19dc9efedd..b7bbcf6ee7 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -89,15 +89,15 @@ _PG_init(void)
static void
libpqrcv_connect(char *conninfo)
{
- const char *keys[5];
- const char *vals[5];
+ const char *keys[5];
+ const char *vals[5];
/*
- * We use the expand_dbname parameter to process the connection string
- * (or URI), and pass some extra options. The deliberately undocumented
- * parameter "replication=true" makes it a replication connection.
- * The database name is ignored by the server in replication mode, but
- * specify "replication" for .pgpass lookup.
+ * We use the expand_dbname parameter to process the connection string (or
+ * URI), and pass some extra options. The deliberately undocumented
+ * parameter "replication=true" makes it a replication connection. The
+ * database name is ignored by the server in replication mode, but specify
+ * "replication" for .pgpass lookup.
*/
keys[0] = "dbname";
vals[0] = conninfo;
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index ea38818269..c629da317e 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -67,9 +67,9 @@ static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf
static void DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
- xl_xact_parsed_commit *parsed, TransactionId xid);
+ xl_xact_parsed_commit *parsed, TransactionId xid);
static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
- xl_xact_parsed_abort *parsed, TransactionId xid);
+ xl_xact_parsed_abort *parsed, TransactionId xid);
/* common function to decode tuples */
static void DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tup);
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index ed78e36192..824bc915b1 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -234,7 +234,7 @@ CreateInitDecodingContext(char *plugin,
if (slot->data.database == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot use physical replication slot for logical decoding")));
+ errmsg("cannot use physical replication slot for logical decoding")));
if (slot->data.database != MyDatabaseId)
ereport(ERROR,
@@ -726,7 +726,7 @@ filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id)
{
LogicalErrorCallbackState state;
ErrorContextCallback errcallback;
- bool ret;
+ bool ret;
/* Push callback + info on the error context stack */
state.ctx = ctx;
diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c
index 021de837be..a354a3f819 100644
--- a/src/backend/replication/logical/logicalfuncs.c
+++ b/src/backend/replication/logical/logicalfuncs.c
@@ -400,7 +400,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
* what we need.
*/
if (!binary &&
- ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
+ ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("logical decoding output plugin \"%s\" produces binary output, but \"%s\" expects textual data",
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index b4b98a512e..f4ba86e836 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -13,7 +13,7 @@
* This file provides the following:
* * An infrastructure to name nodes in a replication setup
* * A facility to efficiently store and persist replication progress in an
- * efficient and durable manner.
+ * efficient and durable manner.
*
* Replication origin consist out of a descriptive, user defined, external
* name and a short, thus space efficient, internal 2 byte one. This split
@@ -45,22 +45,22 @@
* There are several levels of locking at work:
*
* * To create and drop replication origins an exclusive lock on
- * pg_replication_slot is required for the duration. That allows us to
- * safely and conflict free assign new origins using a dirty snapshot.
+ * pg_replication_slot is required for the duration. That allows us to
+ * safely and conflict free assign new origins using a dirty snapshot.
*
* * When creating an in-memory replication progress slot the ReplicationOirgin
- * LWLock has to be held exclusively; when iterating over the replication
- * progress a shared lock has to be held, the same when advancing the
- * replication progress of an individual backend that has not setup as the
- * session's replication origin.
+ * LWLock has to be held exclusively; when iterating over the replication
+ * progress a shared lock has to be held, the same when advancing the
+ * replication progress of an individual backend that has not setup as the
+ * session's replication origin.
*
* * When manipulating or looking at the remote_lsn and local_lsn fields of a
- * replication progress slot that slot's lwlock has to be held. That's
- * primarily because we do not assume 8 byte writes (the LSN) is atomic on
- * all our platforms, but it also simplifies memory ordering concerns
- * between the remote and local lsn. We use a lwlock instead of a spinlock
- * so it's less harmful to hold the lock over a WAL write
- * (c.f. AdvanceReplicationProgress).
+ * replication progress slot that slot's lwlock has to be held. That's
+ * primarily because we do not assume 8 byte writes (the LSN) is atomic on
+ * all our platforms, but it also simplifies memory ordering concerns
+ * between the remote and local lsn. We use a lwlock instead of a spinlock
+ * so it's less harmful to hold the lock over a WAL write
+ * (c.f. AdvanceReplicationProgress).
*
* ---------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ typedef struct ReplicationState
/*
* Local identifier for the remote node.
*/
- RepOriginId roident;
+ RepOriginId roident;
/*
* Location of the latest commit from the remote side.
@@ -135,22 +135,22 @@ typedef struct ReplicationState
*/
typedef struct ReplicationStateOnDisk
{
- RepOriginId roident;
+ RepOriginId roident;
XLogRecPtr remote_lsn;
} ReplicationStateOnDisk;
typedef struct ReplicationStateCtl
{
- int tranche_id;
- LWLockTranche tranche;
- ReplicationState states[FLEXIBLE_ARRAY_MEMBER];
+ int tranche_id;
+ LWLockTranche tranche;
+ ReplicationState states[FLEXIBLE_ARRAY_MEMBER];
} ReplicationStateCtl;
/* external variables */
-RepOriginId replorigin_sesssion_origin = InvalidRepOriginId; /* assumed identity */
+RepOriginId replorigin_sesssion_origin = InvalidRepOriginId; /* assumed identity */
XLogRecPtr replorigin_sesssion_origin_lsn = InvalidXLogRecPtr;
-TimestampTz replorigin_sesssion_origin_timestamp = 0;
+TimestampTz replorigin_sesssion_origin_timestamp = 0;
/*
* Base address into a shared memory array of replication states of size
@@ -188,7 +188,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
if (!recoveryOK && RecoveryInProgress())
ereport(ERROR,
(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
- errmsg("cannot manipulate replication origins during recovery")));
+ errmsg("cannot manipulate replication origins during recovery")));
}
@@ -207,9 +207,9 @@ RepOriginId
replorigin_by_name(char *roname, bool missing_ok)
{
Form_pg_replication_origin ident;
- Oid roident = InvalidOid;
- HeapTuple tuple;
- Datum roname_d;
+ Oid roident = InvalidOid;
+ HeapTuple tuple;
+ Datum roname_d;
roname_d = CStringGetTextDatum(roname);
@@ -235,10 +235,10 @@ replorigin_by_name(char *roname, bool missing_ok)
RepOriginId
replorigin_create(char *roname)
{
- Oid roident;
- HeapTuple tuple = NULL;
- Relation rel;
- Datum roname_d;
+ Oid roident;
+ HeapTuple tuple = NULL;
+ Relation rel;
+ Datum roname_d;
SnapshotData SnapshotDirty;
SysScanDesc scan;
ScanKeyData key;
@@ -271,6 +271,7 @@ replorigin_create(char *roname)
bool nulls[Natts_pg_replication_origin];
Datum values[Natts_pg_replication_origin];
bool collides;
+
CHECK_FOR_INTERRUPTS();
ScanKeyInit(&key,
@@ -279,7 +280,7 @@ replorigin_create(char *roname)
ObjectIdGetDatum(roident));
scan = systable_beginscan(rel, ReplicationOriginIdentIndex,
- true /* indexOK */,
+ true /* indexOK */ ,
&SnapshotDirty,
1, &key);
@@ -295,7 +296,7 @@ replorigin_create(char *roname)
*/
memset(&nulls, 0, sizeof(nulls));
- values[Anum_pg_replication_origin_roident -1] = ObjectIdGetDatum(roident);
+ values[Anum_pg_replication_origin_roident - 1] = ObjectIdGetDatum(roident);
values[Anum_pg_replication_origin_roname - 1] = roname_d;
tuple = heap_form_tuple(RelationGetDescr(rel), values, nulls);
@@ -306,7 +307,7 @@ replorigin_create(char *roname)
}
}
- /* now release lock again, */
+ /* now release lock again, */
heap_close(rel, ExclusiveLock);
if (tuple == NULL)
@@ -327,8 +328,8 @@ replorigin_create(char *roname)
void
replorigin_drop(RepOriginId roident)
{
- HeapTuple tuple = NULL;
- Relation rel;
+ HeapTuple tuple = NULL;
+ Relation rel;
int i;
Assert(IsTransactionState());
@@ -379,7 +380,7 @@ replorigin_drop(RepOriginId roident)
CommandCounterIncrement();
- /* now release lock again, */
+ /* now release lock again, */
heap_close(rel, ExclusiveLock);
}
@@ -394,7 +395,7 @@ replorigin_drop(RepOriginId roident)
bool
replorigin_by_oid(RepOriginId roident, bool missing_ok, char **roname)
{
- HeapTuple tuple;
+ HeapTuple tuple;
Form_pg_replication_origin ric;
Assert(OidIsValid((Oid) roident));
@@ -446,7 +447,7 @@ ReplicationOriginShmemSize(void)
size = add_size(size, offsetof(ReplicationStateCtl, states));
size = add_size(size,
- mul_size(max_replication_slots, sizeof(ReplicationState)));
+ mul_size(max_replication_slots, sizeof(ReplicationState)));
return size;
}
@@ -462,11 +463,11 @@ ReplicationOriginShmemInit(void)
ShmemInitStruct("ReplicationOriginState",
ReplicationOriginShmemSize(),
&found);
- replication_states = replication_states_ctl->states;
+ replication_states = replication_states_ctl->states;
if (!found)
{
- int i;
+ int i;
replication_states_ctl->tranche_id = LWLockNewTrancheId();
replication_states_ctl->tranche.name = "ReplicationOrigins";
@@ -556,7 +557,7 @@ CheckPointReplicationOrigin(void)
{
ReplicationStateOnDisk disk_state;
ReplicationState *curstate = &replication_states[i];
- XLogRecPtr local_lsn;
+ XLogRecPtr local_lsn;
if (curstate->roident == InvalidRepOriginId)
continue;
@@ -636,16 +637,17 @@ void
StartupReplicationOrigin(void)
{
const char *path = "pg_logical/replorigin_checkpoint";
- int fd;
- int readBytes;
- uint32 magic = REPLICATION_STATE_MAGIC;
- int last_state = 0;
- pg_crc32c file_crc;
- pg_crc32c crc;
+ int fd;
+ int readBytes;
+ uint32 magic = REPLICATION_STATE_MAGIC;
+ int last_state = 0;
+ pg_crc32c file_crc;
+ pg_crc32c crc;
/* don't want to overwrite already existing state */
#ifdef USE_ASSERT_CHECKING
static bool already_started = false;
+
Assert(!already_started);
already_started = true;
#endif
@@ -660,8 +662,8 @@ StartupReplicationOrigin(void)
fd = OpenTransientFile((char *) path, O_RDONLY | PG_BINARY, 0);
/*
- * might have had max_replication_slots == 0 last run, or we just brought up a
- * standby.
+ * might have had max_replication_slots == 0 last run, or we just brought
+ * up a standby.
*/
if (fd < 0 && errno == ENOENT)
return;
@@ -681,8 +683,8 @@ StartupReplicationOrigin(void)
if (magic != REPLICATION_STATE_MAGIC)
ereport(PANIC,
- (errmsg("replication checkpoint has wrong magic %u instead of %u",
- magic, REPLICATION_STATE_MAGIC)));
+ (errmsg("replication checkpoint has wrong magic %u instead of %u",
+ magic, REPLICATION_STATE_MAGIC)));
/* we can skip locking here, no other access is possible */
@@ -697,7 +699,7 @@ StartupReplicationOrigin(void)
if (readBytes == sizeof(crc))
{
/* not pretty, but simple ... */
- file_crc = *(pg_crc32c*) &disk_state;
+ file_crc = *(pg_crc32c *) &disk_state;
break;
}
@@ -731,8 +733,8 @@ StartupReplicationOrigin(void)
elog(LOG, "recovered replication state of node %u to %X/%X",
disk_state.roident,
- (uint32)(disk_state.remote_lsn >> 32),
- (uint32)disk_state.remote_lsn);
+ (uint32) (disk_state.remote_lsn >> 32),
+ (uint32) disk_state.remote_lsn);
}
/* now check checksum */
@@ -756,18 +758,18 @@ replorigin_redo(XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,
- xlrec->force /* backward */,
- false /* WAL log */);
+ xlrec->force /* backward */ ,
+ false /* WAL log */ );
break;
}
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
- int i;
+ int i;
xlrec = (xl_replorigin_drop *) XLogRecGetData(record);
@@ -812,7 +814,7 @@ replorigin_advance(RepOriginId node,
XLogRecPtr remote_commit, XLogRecPtr local_commit,
bool go_backward, bool wal_log)
{
- int i;
+ int i;
ReplicationState *replication_state = NULL;
ReplicationState *free_state = NULL;
@@ -899,6 +901,7 @@ replorigin_advance(RepOriginId node,
if (wal_log)
{
xl_replorigin_set xlrec;
+
xlrec.remote_lsn = remote_commit;
xlrec.node_id = node;
xlrec.force = go_backward;
@@ -911,8 +914,8 @@ replorigin_advance(RepOriginId node,
/*
* Due to - harmless - race conditions during a checkpoint we could see
- * values here that are older than the ones we already have in
- * memory. Don't overwrite those.
+ * values here that are older than the ones we already have in memory.
+ * Don't overwrite those.
*/
if (go_backward || replication_state->remote_lsn < remote_commit)
replication_state->remote_lsn = remote_commit;
@@ -973,7 +976,6 @@ replorigin_get_progress(RepOriginId node, bool flush)
static void
ReplicationOriginExitCleanup(int code, Datum arg)
{
-
LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
if (session_replication_state != NULL &&
@@ -1000,8 +1002,8 @@ void
replorigin_session_setup(RepOriginId node)
{
static bool registered_cleanup;
- int i;
- int free_slot = -1;
+ int i;
+ int free_slot = -1;
if (!registered_cleanup)
{
@@ -1014,7 +1016,7 @@ replorigin_session_setup(RepOriginId node)
if (session_replication_state != NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot setup replication origin when one is already setup")));
+ errmsg("cannot setup replication origin when one is already setup")));
/* Lock exclusively, as we may have to create a new table entry. */
LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
@@ -1043,8 +1045,8 @@ replorigin_session_setup(RepOriginId node)
{
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("replication identiefer %d is already active for pid %d",
- curstate->roident, curstate->acquired_by)));
+ errmsg("replication identiefer %d is already active for pid %d",
+ curstate->roident, curstate->acquired_by)));
}
/* ok, found slot */
@@ -1126,8 +1128,8 @@ replorigin_session_advance(XLogRecPtr remote_commit, XLogRecPtr local_commit)
XLogRecPtr
replorigin_session_get_progress(bool flush)
{
- XLogRecPtr remote_lsn;
- XLogRecPtr local_lsn;
+ XLogRecPtr remote_lsn;
+ XLogRecPtr local_lsn;
Assert(session_replication_state != NULL);
@@ -1158,7 +1160,7 @@ replorigin_session_get_progress(bool flush)
Datum
pg_replication_origin_create(PG_FUNCTION_ARGS)
{
- char *name;
+ char *name;
RepOriginId roident;
replorigin_check_prerequisites(false, false);
@@ -1177,7 +1179,7 @@ pg_replication_origin_create(PG_FUNCTION_ARGS)
Datum
pg_replication_origin_drop(PG_FUNCTION_ARGS)
{
- char *name;
+ char *name;
RepOriginId roident;
replorigin_check_prerequisites(false, false);
@@ -1200,7 +1202,7 @@ pg_replication_origin_drop(PG_FUNCTION_ARGS)
Datum
pg_replication_origin_oid(PG_FUNCTION_ARGS)
{
- char *name;
+ char *name;
RepOriginId roident;
replorigin_check_prerequisites(false, false);
@@ -1221,7 +1223,7 @@ pg_replication_origin_oid(PG_FUNCTION_ARGS)
Datum
pg_replication_origin_session_setup(PG_FUNCTION_ARGS)
{
- char *name;
+ char *name;
RepOriginId origin;
replorigin_check_prerequisites(true, false);
@@ -1329,8 +1331,8 @@ Datum
pg_replication_origin_advance(PG_FUNCTION_ARGS)
{
text *name = PG_GETARG_TEXT_P(0);
- XLogRecPtr remote_commit = PG_GETARG_LSN(1);
- RepOriginId node;
+ XLogRecPtr remote_commit = PG_GETARG_LSN(1);
+ RepOriginId node;
replorigin_check_prerequisites(true, false);
@@ -1345,7 +1347,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
* set up the initial replication state, but not for replay.
*/
replorigin_advance(node, remote_commit, InvalidXLogRecPtr,
- true /* go backward */, true /* wal log */);
+ true /* go backward */ , true /* wal log */ );
UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
@@ -1365,7 +1367,7 @@ pg_replication_origin_progress(PG_FUNCTION_ARGS)
{
char *name;
bool flush;
- RepOriginId roident;
+ RepOriginId roident;
XLogRecPtr remote_lsn = InvalidXLogRecPtr;
replorigin_check_prerequisites(true, true);
@@ -1456,7 +1458,7 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS)
* silently accept that it might be gone.
*/
if (replorigin_by_oid(state->roident, true,
- &roname))
+ &roname))
{
values[1] = CStringGetTextDatum(roname);
nulls[1] = false;
@@ -1464,7 +1466,7 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS)
LWLockAcquire(&state->lock, LW_SHARED);
- values[ 2] = LSNGetDatum(state->remote_lsn);
+ values[2] = LSNGetDatum(state->remote_lsn);
nulls[2] = false;
values[3] = LSNGetDatum(state->local_lsn);
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 2d86323f6f..fa98580302 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1337,6 +1337,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
switch (change->action)
{
case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+
/*
* Confirmation for speculative insertion arrived. Simply
* use as a normal record. It'll be cleaned up at the end
@@ -1380,10 +1381,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
goto change_done;
/*
- * For now ignore sequence changes entirely. Most of
- * the time they don't log changes using records we
- * understand, so it doesn't make sense to handle the
- * few cases we do.
+ * For now ignore sequence changes entirely. Most of the
+ * time they don't log changes using records we
+ * understand, so it doesn't make sense to handle the few
+ * cases we do.
*/
if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
goto change_done;
@@ -1395,9 +1396,9 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
rb->apply_change(rb, txn, relation, change);
/*
- * Only clear reassembled toast chunks if we're
- * sure they're not required anymore. The creator
- * of the tuple tells us.
+ * Only clear reassembled toast chunks if we're sure
+ * they're not required anymore. The creator of the
+ * tuple tells us.
*/
if (change->data.tp.clear_toast_afterwards)
ReorderBufferToastReset(rb, txn);
@@ -1418,7 +1419,8 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
change);
}
- change_done:
+ change_done:
+
/*
* Either speculative insertion was confirmed, or it was
* unsuccessful and the record isn't needed anymore.
@@ -1437,6 +1439,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
break;
case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+
/*
* Speculative insertions are dealt with by delaying the
* processing of the insert until the confirmation record
@@ -1704,9 +1707,9 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
txn->final_lsn = lsn;
/*
- * Process cache invalidation messages if there are any. Even if we're
- * not interested in the transaction's contents, it could have manipulated
- * the catalog and we need to update the caches according to that.
+ * Process cache invalidation messages if there are any. Even if we're not
+ * interested in the transaction's contents, it could have manipulated the
+ * catalog and we need to update the caches according to that.
*/
if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
{
@@ -2068,7 +2071,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
switch (change->action)
{
- /* fall through these, they're all similar enough */
+ /* fall through these, they're all similar enough */
case REORDER_BUFFER_CHANGE_INSERT:
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_DELETE:
@@ -2322,7 +2325,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
/* restore individual stuff */
switch (change->action)
{
- /* fall through these, they're all similar enough */
+ /* fall through these, they're all similar enough */
case REORDER_BUFFER_CHANGE_INSERT:
case REORDER_BUFFER_CHANGE_UPDATE:
case REORDER_BUFFER_CHANGE_DELETE:
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 35e1c06a31..efab4ca0df 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -153,9 +153,8 @@ struct SnapBuild
TransactionId xmax;
/*
- * Don't replay commits from an LSN < this LSN. This can be set
- * externally but it will also be advanced (never retreat) from within
- * snapbuild.c.
+ * Don't replay commits from an LSN < this LSN. This can be set externally
+ * but it will also be advanced (never retreat) from within snapbuild.c.
*/
XLogRecPtr start_decoding_at;
@@ -244,7 +243,7 @@ struct SnapBuild
* removes knowledge about the previously used resowner, so we save it here.
*/
static ResourceOwner SavedResourceOwnerDuringExport = NULL;
-static bool ExportInProgress = false;
+static bool ExportInProgress = false;
/* transaction state manipulation functions */
static void SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid);
@@ -599,7 +598,7 @@ SnapBuildExportSnapshot(SnapBuild *builder)
ereport(LOG,
(errmsg_plural("exported logical decoding snapshot: \"%s\" with %u transaction ID",
- "exported logical decoding snapshot: \"%s\" with %u transaction IDs",
+ "exported logical decoding snapshot: \"%s\" with %u transaction IDs",
snap->xcnt,
snapname, snap->xcnt)));
return snapname;
@@ -904,8 +903,8 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
ereport(LOG,
(errmsg("logical decoding found consistent point at %X/%X",
(uint32) (lsn >> 32), (uint32) lsn),
- errdetail("Transaction ID %u finished; no more running transactions.",
- xid)));
+ errdetail("Transaction ID %u finished; no more running transactions.",
+ xid)));
builder->state = SNAPBUILD_CONSISTENT;
}
}
@@ -1232,8 +1231,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
{
ereport(DEBUG1,
(errmsg_internal("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low",
- (uint32) (lsn >> 32), (uint32) lsn),
- errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
+ (uint32) (lsn >> 32), (uint32) lsn),
+ errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
builder->initial_xmin_horizon, running->oldestRunningXid)));
return true;
}
@@ -1252,8 +1251,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->start_decoding_at = lsn + 1;
/* As no transactions were running xmin/xmax can be trivially set. */
- builder->xmin = running->nextXid; /* < are finished */
- builder->xmax = running->nextXid; /* >= are running */
+ builder->xmin = running->nextXid; /* < are finished */
+ builder->xmax = running->nextXid; /* >= are running */
/* so we can safely use the faster comparisons */
Assert(TransactionIdIsNormal(builder->xmin));
@@ -1302,8 +1301,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
* currently running transactions have finished. We'll update both
* while waiting for the pending transactions to finish.
*/
- builder->xmin = running->nextXid; /* < are finished */
- builder->xmax = running->nextXid; /* >= are running */
+ builder->xmin = running->nextXid; /* < are finished */
+ builder->xmax = running->nextXid; /* >= are running */
/* so we can safely use the faster comparisons */
Assert(TransactionIdIsNormal(builder->xmin));
@@ -1688,7 +1687,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
INIT_CRC32C(checksum);
COMP_CRC32C(checksum,
- ((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
+ ((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
/* read SnapBuild */
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index e02571b8bb..060343f168 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -84,7 +84,7 @@ typedef struct ReplicationSlotOnDisk
sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize
#define SLOT_MAGIC 0x1051CA1 /* format identifier */
-#define SLOT_VERSION 2 /* version for new files */
+#define SLOT_VERSION 2 /* version for new files */
/* Control array for replication slot management */
ReplicationSlotCtlData *ReplicationSlotCtl = NULL;
@@ -349,8 +349,8 @@ ReplicationSlotAcquire(const char *name)
if (active_pid != 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("replication slot \"%s\" is already active for pid %d",
- name, active_pid)));
+ errmsg("replication slot \"%s\" is already active for pid %d",
+ name, active_pid)));
/* We made this slot active, so it's ours now. */
MyReplicationSlot = slot;
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index 3d9aadbd83..9a2793f7ec 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -99,9 +99,9 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
CheckLogicalDecodingRequirements();
/*
- * Acquire a logical decoding slot, this will check for conflicting
- * names. Initially create it as ephemeral - that allows us to nicely
- * handle errors during initialization because it'll get dropped if this
+ * Acquire a logical decoding slot, this will check for conflicting names.
+ * Initially create it as ephemeral - that allows us to nicely handle
+ * errors during initialization because it'll get dropped if this
* transaction fails. We'll make it persistent at the end.
*/
ReplicationSlotCreate(NameStr(*name), true, RS_EPHEMERAL);
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index b26f5fcf63..f77a790fd8 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -329,7 +329,7 @@ GetReplicationApplyDelay(void)
long secs;
int usecs;
- TimestampTz chunckReplayStartTime;
+ TimestampTz chunckReplayStartTime;
SpinLockAcquire(&walrcv->mutex);
receivePtr = walrcv->receivedUpto;
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 4a20569e65..eb1b89b9c3 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -781,6 +781,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
else
{
CheckLogicalDecodingRequirements();
+
/*
* Initially create the slot as ephemeral - that allows us to nicely
* handle errors during initialization because it'll get dropped if
@@ -1266,9 +1267,9 @@ exec_replication_command(const char *cmd_string)
MemoryContext old_context;
/*
- * Log replication command if log_replication_commands is enabled.
- * Even when it's disabled, log the command with DEBUG1 level for
- * backward compatibility.
+ * Log replication command if log_replication_commands is enabled. Even
+ * when it's disabled, log the command with DEBUG1 level for backward
+ * compatibility.
*/
ereport(log_replication_commands ? LOG : DEBUG1,
(errmsg("received replication command: %s", cmd_string)));
@@ -2663,8 +2664,8 @@ WalSndWakeup(void)
for (i = 0; i < max_wal_senders; i++)
{
- Latch *latch;
- WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ Latch *latch;
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
/*
* Get latch pointer with spinlock held, for the unlikely case that
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index e15e23c2e1..bbd6b77c5e 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -682,7 +682,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* order of the original tlist's non-junk entries. This is needed for
* processing VALUES RTEs.
*/
-static List*
+static List *
rewriteTargetListIU(List *targetList,
CmdType commandType,
Relation target_relation,
@@ -1750,8 +1750,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
/*
* Apply any row level security policies. We do this last because it
* requires special recursion detection if the new quals have sublink
- * subqueries, and if we did it in the loop above query_tree_walker
- * would then recurse into those quals a second time.
+ * subqueries, and if we did it in the loop above query_tree_walker would
+ * then recurse into those quals a second time.
*/
rt_index = 0;
foreach(lc, parsetree->rtable)
@@ -1795,11 +1795,11 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
activeRIRs = lcons_oid(RelationGetRelid(rel), activeRIRs);
- expression_tree_walker( (Node*) securityQuals,
- fireRIRonSubLink, (void*)activeRIRs );
+ expression_tree_walker((Node *) securityQuals,
+ fireRIRonSubLink, (void *) activeRIRs);
- expression_tree_walker( (Node*) withCheckOptions,
- fireRIRonSubLink, (void*)activeRIRs );
+ expression_tree_walker((Node *) withCheckOptions,
+ fireRIRonSubLink, (void *) activeRIRs);
activeRIRs = list_delete_first(activeRIRs);
}
@@ -1814,7 +1814,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
rte->securityQuals);
parsetree->withCheckOptions = list_concat(withCheckOptions,
- parsetree->withCheckOptions);
+ parsetree->withCheckOptions);
}
/*
@@ -2662,7 +2662,7 @@ rewriteTargetView(Query *parsetree, Relation view)
if (!tle->resjunk)
modified_cols = bms_add_member(modified_cols,
- tle->resno - FirstLowInvalidHeapAttributeNumber);
+ tle->resno - FirstLowInvalidHeapAttributeNumber);
}
}
@@ -2797,8 +2797,8 @@ rewriteTargetView(Query *parsetree, Relation view)
* happens in ordinary SELECT usage of a view: all referenced columns must
* have read permission, even if optimization finds that some of them can
* be discarded during query transformation. The flattening we're doing
- * here is an optional optimization, too. (If you are unpersuaded and want
- * to change this, note that applying adjust_view_column_set to
+ * here is an optional optimization, too. (If you are unpersuaded and
+ * want to change this, note that applying adjust_view_column_set to
* view_rte->selectedCols is clearly *not* the right answer, since that
* neglects base-rel columns used in the view's WHERE quals.)
*
@@ -3150,9 +3150,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/* Process the main targetlist ... */
parsetree->targetList = rewriteTargetListIU(parsetree->targetList,
- parsetree->commandType,
+ parsetree->commandType,
rt_entry_relation,
- parsetree->resultRelation,
+ parsetree->resultRelation,
&attrnos);
/* ... and the VALUES expression lists */
rewriteValuesRTE(values_rte, rt_entry_relation, attrnos);
@@ -3334,9 +3334,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
if (parsetree->onConflict &&
(product_queries != NIL || hasUpdate) &&
!updatableview)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules")));
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules")));
heap_close(rt_entry_relation, NoLock);
}
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index e3dfdefe55..1da90ff894 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -729,7 +729,7 @@ IncrementVarSublevelsUp_walker(Node *node,
}
if (IsA(node, GroupingFunc))
{
- GroupingFunc *grp = (GroupingFunc *) node;
+ GroupingFunc *grp = (GroupingFunc *) node;
if (grp->agglevelsup >= context->min_sublevels_up)
grp->agglevelsup += context->delta_sublevels_up;
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index 2c095ce88a..5a2f696934 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -1,6 +1,6 @@
/*
* rewrite/rowsecurity.c
- * Routines to support policies for row level security (aka RLS).
+ * Routines to support policies for row level security (aka RLS).
*
* Policies in PostgreSQL provide a mechanism to limit what records are
* returned to a user and what records a user is permitted to add to a table.
@@ -57,12 +57,12 @@
#include "tcop/utility.h"
static List *pull_row_security_policies(CmdType cmd, Relation relation,
- Oid user_id);
-static void process_policies(Query* root, List *policies, int rt_index,
- Expr **final_qual,
- Expr **final_with_check_qual,
- bool *hassublinks,
- BoolExprType boolop);
+ Oid user_id);
+static void process_policies(Query *root, List *policies, int rt_index,
+ Expr **final_qual,
+ Expr **final_with_check_qual,
+ bool *hassublinks,
+ BoolExprType boolop);
static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id);
/*
@@ -77,8 +77,8 @@ static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id);
* See below where the hook is called in prepend_row_security_policies for
* insight into how to use this hook.
*/
-row_security_policy_hook_type row_security_policy_hook_permissive = NULL;
-row_security_policy_hook_type row_security_policy_hook_restrictive = NULL;
+row_security_policy_hook_type row_security_policy_hook_permissive = NULL;
+row_security_policy_hook_type row_security_policy_hook_restrictive = NULL;
/*
* Get any row security quals and check quals that should be applied to the
@@ -89,27 +89,27 @@ row_security_policy_hook_type row_security_policy_hook_restrictive = NULL;
* set to true if any of the quals returned contain sublinks.
*/
void
-get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
+get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte,
int rt_index, List **securityQuals,
List **withCheckOptions, bool *hasRowSecurity,
bool *hasSubLinks)
{
- Expr *rowsec_expr = NULL;
- Expr *rowsec_with_check_expr = NULL;
- Expr *hook_expr_restrictive = NULL;
- Expr *hook_with_check_expr_restrictive = NULL;
- Expr *hook_expr_permissive = NULL;
- Expr *hook_with_check_expr_permissive = NULL;
-
- List *rowsec_policies;
- List *hook_policies_restrictive = NIL;
- List *hook_policies_permissive = NIL;
-
- Relation rel;
- Oid user_id;
- int sec_context;
- int rls_status;
- bool defaultDeny = false;
+ Expr *rowsec_expr = NULL;
+ Expr *rowsec_with_check_expr = NULL;
+ Expr *hook_expr_restrictive = NULL;
+ Expr *hook_with_check_expr_restrictive = NULL;
+ Expr *hook_expr_permissive = NULL;
+ Expr *hook_with_check_expr_permissive = NULL;
+
+ List *rowsec_policies;
+ List *hook_policies_restrictive = NIL;
+ List *hook_policies_permissive = NIL;
+
+ Relation rel;
+ Oid user_id;
+ int sec_context;
+ int rls_status;
+ bool defaultDeny = false;
/* Defaults for the return values */
*securityQuals = NIL;
@@ -124,9 +124,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId();
/*
- * If this is not a normal relation, or we have been told
- * to explicitly skip RLS (perhaps because this is an FK check)
- * then just return immediately.
+ * If this is not a normal relation, or we have been told to explicitly
+ * skip RLS (perhaps because this is an FK check) then just return
+ * immediately.
*/
if (rte->relid < FirstNormalObjectId
|| rte->relkind != RELKIND_RELATION
@@ -148,9 +148,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
if (rls_status == RLS_NONE_ENV)
{
/*
- * Indicate that this query may involve RLS and must therefore
- * be replanned if the environment changes (GUCs, role), but we
- * are not adding anything here.
+ * Indicate that this query may involve RLS and must therefore be
+ * replanned if the environment changes (GUCs, role), but we are not
+ * adding anything here.
*/
*hasRowSecurity = true;
@@ -166,15 +166,14 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
/*
* Check if this is only the default-deny policy.
*
- * Normally, if the table has row security enabled but there are
- * no policies, we use a default-deny policy and not allow anything.
- * However, when an extension uses the hook to add their own
- * policies, we don't want to include the default deny policy or
- * there won't be any way for a user to use an extension exclusively
- * for the policies to be used.
+ * Normally, if the table has row security enabled but there are no
+ * policies, we use a default-deny policy and do not allow anything. However,
+ * when an extension uses the hook to add their own policies, we don't
+ * want to include the default deny policy or there won't be any way for a
+ * user to use an extension exclusively for the policies to be used.
*/
if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id
- == InvalidOid)
+ == InvalidOid)
defaultDeny = true;
/* Now that we have our policies, build the expressions from them. */
@@ -187,8 +186,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
* extensions can add either permissive or restrictive policies.
*
* Note that, as with the internal policies, if multiple policies are
- * returned then they will be combined into a single expression with
- * all of them OR'd (for permissive) or AND'd (for restrictive) together.
+ * returned then they will be combined into a single expression with all
+ * of them OR'd (for permissive) or AND'd (for restrictive) together.
*
* If only a USING policy is returned by the extension then it will be
* used for WITH CHECK as well, similar to how internal policies are
@@ -202,7 +201,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
*/
if (row_security_policy_hook_restrictive)
{
- hook_policies_restrictive = (*row_security_policy_hook_restrictive)(commandType, rel);
+ hook_policies_restrictive = (*row_security_policy_hook_restrictive) (commandType, rel);
/* Build the expression from any policies returned. */
if (hook_policies_restrictive != NIL)
@@ -215,7 +214,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
if (row_security_policy_hook_permissive)
{
- hook_policies_permissive = (*row_security_policy_hook_permissive)(commandType, rel);
+ hook_policies_permissive = (*row_security_policy_hook_permissive) (commandType, rel);
/* Build the expression from any policies returned. */
if (hook_policies_permissive != NIL)
@@ -226,9 +225,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
- * If the only built-in policy is the default-deny one, and hook
- * policies exist, then use the hook policies only and do not apply
- * the default-deny policy. Otherwise, we will apply both sets below.
+ * If the only built-in policy is the default-deny one, and hook policies
+ * exist, then use the hook policies only and do not apply the
+ * default-deny policy. Otherwise, we will apply both sets below.
*/
if (defaultDeny &&
(hook_policies_restrictive != NIL || hook_policies_permissive != NIL))
@@ -238,10 +237,10 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
- * For INSERT or UPDATE, we need to add the WITH CHECK quals to
- * Query's withCheckOptions to verify that any new records pass the
- * WITH CHECK policy (this will be a copy of the USING policy, if no
- * explicit WITH CHECK policy exists).
+ * For INSERT or UPDATE, we need to add the WITH CHECK quals to Query's
+ * withCheckOptions to verify that any new records pass the WITH CHECK
+ * policy (this will be a copy of the USING policy, if no explicit WITH
+ * CHECK policy exists).
*/
if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
{
@@ -257,11 +256,11 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
*/
if (hook_with_check_expr_restrictive)
{
- WithCheckOption *wco;
+ WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
- WCO_RLS_UPDATE_CHECK;
+ WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) hook_with_check_expr_restrictive;
wco->cascaded = false;
@@ -269,16 +268,16 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
}
/*
- * Handle built-in policies, if there are no permissive
- * policies from the hook.
+ * Handle built-in policies, if there are no permissive policies from
+ * the hook.
*/
if (rowsec_with_check_expr && !hook_with_check_expr_permissive)
{
- WithCheckOption *wco;
+ WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
- WCO_RLS_UPDATE_CHECK;
+ WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) rowsec_with_check_expr;
wco->cascaded = false;
@@ -287,11 +286,11 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
/* Handle the hook policies, if there are no built-in ones. */
else if (!rowsec_with_check_expr && hook_with_check_expr_permissive)
{
- WithCheckOption *wco;
+ WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
- WCO_RLS_UPDATE_CHECK;
+ WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) hook_with_check_expr_permissive;
wco->cascaded = false;
@@ -300,9 +299,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
/* Handle the case where there are both. */
else if (rowsec_with_check_expr && hook_with_check_expr_permissive)
{
- WithCheckOption *wco;
- List *combined_quals = NIL;
- Expr *combined_qual_eval;
+ WithCheckOption *wco;
+ List *combined_quals = NIL;
+ Expr *combined_qual_eval;
combined_quals = lcons(copyObject(rowsec_with_check_expr),
combined_quals);
@@ -314,7 +313,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
wco = (WithCheckOption *) makeNode(WithCheckOption);
wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
- WCO_RLS_UPDATE_CHECK;
+ WCO_RLS_UPDATE_CHECK;
wco->relname = pstrdup(RelationGetRelationName(rel));
wco->qual = (Node *) combined_qual_eval;
wco->cascaded = false;
@@ -361,8 +360,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
foreach(item, conflictSecurityQuals)
{
- Expr *conflict_rowsec_expr = (Expr *) lfirst(item);
- WithCheckOption *wco;
+ Expr *conflict_rowsec_expr = (Expr *) lfirst(item);
+ WithCheckOption *wco;
wco = (WithCheckOption *) makeNode(WithCheckOption);
@@ -393,8 +392,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
/* if we have both, we have to combine them with an OR */
else if (rowsec_expr && hook_expr_permissive)
{
- List *combined_quals = NIL;
- Expr *combined_qual_eval;
+ List *combined_quals = NIL;
+ Expr *combined_qual_eval;
combined_quals = lcons(copyObject(rowsec_expr), combined_quals);
combined_quals = lcons(copyObject(hook_expr_permissive),
@@ -409,8 +408,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
heap_close(rel, NoLock);
/*
- * Mark this query as having row security, so plancache can invalidate
- * it when necessary (eg: role changes)
+ * Mark this query as having row security, so plancache can invalidate it
+ * when necessary (eg: role changes)
*/
*hasRowSecurity = true;
@@ -427,26 +426,27 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
static List *
pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
{
- List *policies = NIL;
- ListCell *item;
+ List *policies = NIL;
+ ListCell *item;
/*
* Row security is enabled for the relation and the row security GUC is
- * either 'on' or 'force' here, so find the policies to apply to the table.
- * There must always be at least one policy defined (may be the simple
- * 'default-deny' policy, if none are explicitly defined on the table).
+ * either 'on' or 'force' here, so find the policies to apply to the
+ * table. There must always be at least one policy defined (may be the
+ * simple 'default-deny' policy, if none are explicitly defined on the
+ * table).
*/
foreach(item, relation->rd_rsdesc->policies)
{
- RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
+ RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
/* Always add ALL policies, if they exist. */
if (policy->polcmd == '*' &&
- check_role_for_policy(policy->roles, user_id))
+ check_role_for_policy(policy->roles, user_id))
policies = lcons(policy, policies);
/* Add relevant command-specific policies to the list. */
- switch(cmd)
+ switch (cmd)
{
case CMD_SELECT:
if (policy->polcmd == ACL_SELECT_CHR
@@ -482,8 +482,8 @@ pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
*/
if (policies == NIL)
{
- RowSecurityPolicy *policy = NULL;
- Datum role;
+ RowSecurityPolicy *policy = NULL;
+ Datum role;
role = ObjectIdGetDatum(ACL_ID_PUBLIC);
@@ -519,18 +519,18 @@ pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
* qual_eval, with_check_eval, and hassublinks are output variables
*/
static void
-process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
+process_policies(Query *root, List *policies, int rt_index, Expr **qual_eval,
Expr **with_check_eval, bool *hassublinks,
BoolExprType boolop)
{
- ListCell *item;
- List *quals = NIL;
- List *with_check_quals = NIL;
+ ListCell *item;
+ List *quals = NIL;
+ List *with_check_quals = NIL;
/*
- * Extract the USING and WITH CHECK quals from each of the policies
- * and add them to our lists. We only want WITH CHECK quals if this
- * RTE is the query's result relation.
+ * Extract the USING and WITH CHECK quals from each of the policies and
+ * add them to our lists. We only want WITH CHECK quals if this RTE is
+ * the query's result relation.
*/
foreach(item, policies)
{
@@ -545,8 +545,8 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
with_check_quals);
/*
- * For each policy, if there is only a USING clause then copy/use it for
- * the WITH CHECK policy also, if this RTE is the query's result
+ * For each policy, if there is only a USING clause then copy/use it
+ * for the WITH CHECK policy also, if this RTE is the query's result
* relation.
*/
if (policy->qual != NULL && policy->with_check_qual == NULL &&
@@ -568,16 +568,16 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
BoolGetDatum(false), false, true), quals);
/*
- * Row security quals always have the target table as varno 1, as no
- * joins are permitted in row security expressions. We must walk the
- * expression, updating any references to varno 1 to the varno
- * the table has in the outer query.
+ * Row security quals always have the target table as varno 1, as no joins
+ * are permitted in row security expressions. We must walk the expression,
+ * updating any references to varno 1 to the varno the table has in the
+ * outer query.
*
* We rewrite the expression in-place.
*
* We must have some quals at this point; the default-deny policy, if
- * nothing else. Note that we might not have any WITH CHECK quals-
- * that's fine, as this might not be the resultRelation.
+ * nothing else. Note that we might not have any WITH CHECK quals - that's
+ * fine, as this might not be the resultRelation.
*/
Assert(quals != NIL);
@@ -593,11 +593,11 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
if (list_length(quals) > 1)
*qual_eval = makeBoolExpr(boolop, quals, -1);
else
- *qual_eval = (Expr*) linitial(quals);
+ *qual_eval = (Expr *) linitial(quals);
/*
- * Similairly, if more than one WITH CHECK qual is returned, then
- * they need to be combined together.
+ * Similarly, if more than one WITH CHECK qual is returned, then they
+ * need to be combined together.
*
* with_check_quals is allowed to be NIL here since this might not be the
* resultRelation (see above).
@@ -605,7 +605,7 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
if (list_length(with_check_quals) > 1)
*with_check_eval = makeBoolExpr(boolop, with_check_quals, -1);
else if (with_check_quals != NIL)
- *with_check_eval = (Expr*) linitial(with_check_quals);
+ *with_check_eval = (Expr *) linitial(with_check_quals);
else
*with_check_eval = NULL;
@@ -614,7 +614,7 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
/*
* check_role_for_policy -
- * determines if the policy should be applied for the current role
+ * determines if the policy should be applied for the current role
*/
static bool
check_role_for_policy(ArrayType *policy_roles, Oid user_id)
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index ef1f9a6900..3ae2848da0 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -69,9 +69,9 @@ InitBufferPool(void)
/* Align descriptors to a cacheline boundary. */
BufferDescriptors = (BufferDescPadded *) CACHELINEALIGN(
- ShmemInitStruct("Buffer Descriptors",
- NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
- &foundDescs));
+ ShmemInitStruct("Buffer Descriptors",
+ NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
+ &foundDescs));
BufferBlocks = (char *)
ShmemInitStruct("Buffer Blocks",
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 861ec3ed49..cc973b53a9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -68,8 +68,8 @@
typedef struct PrivateRefCountEntry
{
- Buffer buffer;
- int32 refcount;
+ Buffer buffer;
+ int32 refcount;
} PrivateRefCountEntry;
/* 64 bytes, about the size of a cache line on common systems */
@@ -132,8 +132,8 @@ static uint32 PrivateRefCountClock = 0;
static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
static void ReservePrivateRefCountEntry(void);
-static PrivateRefCountEntry* NewPrivateRefCountEntry(Buffer buffer);
-static PrivateRefCountEntry* GetPrivateRefCountEntry(Buffer buffer, bool do_move);
+static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
+static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
static inline int32 GetPrivateRefCount(Buffer buffer);
static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
@@ -154,7 +154,7 @@ ReservePrivateRefCountEntry(void)
* majority of cases.
*/
{
- int i;
+ int i;
for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
{
@@ -180,10 +180,10 @@ ReservePrivateRefCountEntry(void)
* hashtable. Use that slot.
*/
PrivateRefCountEntry *hashent;
- bool found;
+ bool found;
/* select victim slot */
- ReservedRefCountEntry =
+ ReservedRefCountEntry =
&PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
/* Better be used, otherwise we shouldn't get here. */
@@ -208,7 +208,7 @@ ReservePrivateRefCountEntry(void)
/*
* Fill a previously reserved refcount entry.
*/
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
NewPrivateRefCountEntry(Buffer buffer)
{
PrivateRefCountEntry *res;
@@ -234,7 +234,7 @@ NewPrivateRefCountEntry(Buffer buffer)
* do_move is true, and the entry resides in the hashtable the entry is
* optimized for frequent access by moving it to the array.
*/
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
GetPrivateRefCountEntry(Buffer buffer, bool do_move)
{
PrivateRefCountEntry *res;
@@ -280,7 +280,7 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
else
{
/* move buffer from hashtable into the free array slot */
- bool found;
+ bool found;
PrivateRefCountEntry *free;
/* Ensure there's a free array slot */
@@ -346,6 +346,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
{
ref->buffer = InvalidBuffer;
+
/*
* Mark the just used entry as reserved - in many scenarios that
* allows us to avoid ever having to search the array/hash for free
@@ -355,8 +356,9 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
}
else
{
- bool found;
- Buffer buffer = ref->buffer;
+ bool found;
+ Buffer buffer = ref->buffer;
+
hash_search(PrivateRefCountHash,
(void *) &buffer,
HASH_REMOVE,
@@ -669,8 +671,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
found);
/*
- * In RBM_ZERO_AND_LOCK mode the caller expects the page to
- * be locked on return.
+ * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
+ * locked on return.
*/
if (!isLocalBuf)
{
@@ -809,9 +811,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* page before the caller has had a chance to initialize it.
*
* Since no-one else can be looking at the page contents yet, there is no
- * difference between an exclusive lock and a cleanup-strength lock.
- * (Note that we cannot use LockBuffer() of LockBufferForCleanup() here,
- * because they assert that the buffer is already valid.)
+ * difference between an exclusive lock and a cleanup-strength lock. (Note
+ * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
+ * they assert that the buffer is already valid.)
*/
if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
!isLocalBuf)
@@ -939,8 +941,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
for (;;)
{
/*
- * Ensure, while the spinlock's not yet held, that there's a free refcount
- * entry.
+ * Ensure, while the spinlock's not yet held, that there's a free
+ * refcount entry.
*/
ReservePrivateRefCountEntry();
@@ -2169,6 +2171,7 @@ CheckForBufferLeaks(void)
if (PrivateRefCountOverflowed)
{
HASH_SEQ_STATUS hstat;
+
hash_seq_init(&hstat, PrivateRefCountHash);
while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
{
@@ -2974,6 +2977,7 @@ IncrBufferRefCount(Buffer buffer)
else
{
PrivateRefCountEntry *ref;
+
ref = GetPrivateRefCountEntry(buffer, true);
Assert(ref != NULL);
ref->refcount++;
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index c36e80af23..bc2c773000 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -50,7 +50,7 @@ typedef struct
* Statistics. These counters should be wide enough that they can't
* overflow during a single bgwriter cycle.
*/
- uint32 completePasses; /* Complete cycles of the clock sweep */
+ uint32 completePasses; /* Complete cycles of the clock sweep */
pg_atomic_uint32 numBufferAllocs; /* Buffers allocated since last reset */
/*
@@ -111,7 +111,7 @@ static void AddBufferToRing(BufferAccessStrategy strategy,
static inline uint32
ClockSweepTick(void)
{
- uint32 victim;
+ uint32 victim;
/*
* Atomically move hand ahead one buffer - if there's several processes
@@ -123,7 +123,7 @@ ClockSweepTick(void)
if (victim >= NBuffers)
{
- uint32 originalVictim = victim;
+ uint32 originalVictim = victim;
/* always wrap what we look up in BufferDescriptors */
victim = victim % NBuffers;
@@ -136,9 +136,9 @@ ClockSweepTick(void)
*/
if (victim == 0)
{
- uint32 expected;
- uint32 wrapped;
- bool success = false;
+ uint32 expected;
+ uint32 wrapped;
+ bool success = false;
expected = originalVictim + 1;
@@ -381,6 +381,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
if (complete_passes)
{
*complete_passes = StrategyControl->completePasses;
+
/*
* Additionally add the number of wraparounds that happened before
* completePasses could be incremented. C.f. ClockSweepTick().
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index bed8478dd1..68d43c66b6 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -2517,14 +2517,14 @@ walkdir(char *path, void (*action) (char *fname, bool isdir))
int len;
struct stat lst;
- len = readlink(subpath, linkpath, sizeof(linkpath)-1);
+ len = readlink(subpath, linkpath, sizeof(linkpath) - 1);
if (len < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read symbolic link \"%s\": %m",
subpath)));
- if (len >= sizeof(linkpath)-1)
+ if (len >= sizeof(linkpath) - 1)
ereport(ERROR,
(errmsg("symbolic link \"%s\" target is too long",
subpath)));
diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c
index afd92554fb..429a99bc78 100644
--- a/src/backend/storage/file/reinit.c
+++ b/src/backend/storage/file/reinit.c
@@ -341,11 +341,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
FreeDir(dbspace_dir);
/*
- * copy_file() above has already called pg_flush_data() on the
- * files it created. Now we need to fsync those files, because
- * a checkpoint won't do it for us while we're in recovery. We
- * do this in a separate pass to allow the kernel to perform
- * all the flushes (especially the metadata ones) at once.
+ * copy_file() above has already called pg_flush_data() on the files
+ * it created. Now we need to fsync those files, because a checkpoint
+ * won't do it for us while we're in recovery. We do this in a
+ * separate pass to allow the kernel to perform all the flushes
+ * (especially the metadata ones) at once.
*/
dbspace_dir = AllocateDir(dbspacedirname);
if (dbspace_dir == NULL)
diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c
index 0859fbfc93..0b10dac729 100644
--- a/src/backend/storage/ipc/dsm_impl.c
+++ b/src/backend/storage/ipc/dsm_impl.c
@@ -332,8 +332,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
- name, request_size)));
+ errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
+ name, request_size)));
return false;
}
@@ -875,8 +875,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
ereport(elevel,
(errcode_for_dynamic_shared_memory(),
- errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
- name, request_size)));
+ errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
+ name, request_size)));
return false;
}
else if (*mapped_size < request_size)
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 0b3ad7294a..4f3c5c9dec 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1707,12 +1707,12 @@ ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc)
pgxact = &allPgXact[proc->pgprocno];
/*
- * Be certain that the referenced PGPROC has an advertised xmin which
- * is no later than the one we're installing, so that the system-wide
- * xmin can't go backwards. Also, make sure it's running in the same
- * database, so that the per-database xmin cannot go backwards.
+ * Be certain that the referenced PGPROC has an advertised xmin which is
+ * no later than the one we're installing, so that the system-wide xmin
+ * can't go backwards. Also, make sure it's running in the same database,
+ * so that the per-database xmin cannot go backwards.
*/
- xid = pgxact->xmin; /* fetch just once */
+ xid = pgxact->xmin; /* fetch just once */
if (proc->databaseId == MyDatabaseId &&
TransactionIdIsNormal(xid) &&
TransactionIdPrecedesOrEquals(xid, xmin))
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index daca634a55..126cb0751b 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -317,7 +317,7 @@ shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
shm_mq_result
shm_mq_send(shm_mq_handle *mqh, Size nbytes, const void *data, bool nowait)
{
- shm_mq_iovec iov;
+ shm_mq_iovec iov;
iov.data = data;
iov.len = nbytes;
@@ -385,7 +385,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
offset = mqh->mqh_partial_bytes;
do
{
- Size chunksize;
+ Size chunksize;
/* Figure out which bytes need to be sent next. */
if (offset >= iov[which_iov].len)
@@ -399,18 +399,18 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
/*
* We want to avoid copying the data if at all possible, but every
- * chunk of bytes we write into the queue has to be MAXALIGN'd,
- * except the last. Thus, if a chunk other than the last one ends
- * on a non-MAXALIGN'd boundary, we have to combine the tail end of
- * its data with data from one or more following chunks until we
- * either reach the last chunk or accumulate a number of bytes which
- * is MAXALIGN'd.
+ * chunk of bytes we write into the queue has to be MAXALIGN'd, except
+ * the last. Thus, if a chunk other than the last one ends on a
+ * non-MAXALIGN'd boundary, we have to combine the tail end of its
+ * data with data from one or more following chunks until we either
+ * reach the last chunk or accumulate a number of bytes which is
+ * MAXALIGN'd.
*/
if (which_iov + 1 < iovcnt &&
offset + MAXIMUM_ALIGNOF > iov[which_iov].len)
{
- char tmpbuf[MAXIMUM_ALIGNOF];
- int j = 0;
+ char tmpbuf[MAXIMUM_ALIGNOF];
+ int j = 0;
for (;;)
{
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 7c95f4c6a9..dc9207164e 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -188,8 +188,8 @@ ProcessCatchupInterrupt(void)
*
* It is awfully tempting to just call AcceptInvalidationMessages()
* without the rest of the xact start/stop overhead, and I think that
- * would actually work in the normal case; but I am not sure that things
- * would clean up nicely if we got an error partway through.
+ * would actually work in the normal case; but I am not sure that
+ * things would clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 1acd2f090b..46cab4911e 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -62,14 +62,14 @@
* work. That's problematic because we're now stuck waiting inside the OS.
* To mitigate those races we use a two phased attempt at locking:
- * Phase 1: Try to do it atomically, if we succeed, nice
- * Phase 2: Add ourselves to the waitqueue of the lock
- * Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
- * the queue
- * Phase 4: Sleep till wake-up, goto Phase 1
+ * Phase 1: Try to do it atomically, if we succeed, nice
+ * Phase 2: Add ourselves to the waitqueue of the lock
+ * Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
+ * the queue
+ * Phase 4: Sleep till wake-up, goto Phase 1
*
* This protects us against the problem from above as nobody can release too
- * quick, before we're queued, since after Phase 2 we're already queued.
+ * quick, before we're queued, since after Phase 2 we're already queued.
* -------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -140,7 +140,7 @@ static LWLockTranche MainLWLockTranche;
/* struct representing the LWLocks we're holding */
typedef struct LWLockHandle
{
- LWLock *lock;
+ LWLock *lock;
LWLockMode mode;
} LWLockHandle;
@@ -183,7 +183,8 @@ PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
/* hide statement & context here, otherwise the log is just too verbose */
if (Trace_lwlocks)
{
- uint32 state = pg_atomic_read_u32(&lock->state);
+ uint32 state = pg_atomic_read_u32(&lock->state);
+
ereport(LOG,
(errhidestmt(true),
errhidecontext(true),
@@ -580,17 +581,17 @@ LWLockInitialize(LWLock *lock, int tranche_id)
* Returns true if the lock isn't free and we need to wait.
*/
static bool
-LWLockAttemptLock(LWLock* lock, LWLockMode mode)
+LWLockAttemptLock(LWLock *lock, LWLockMode mode)
{
AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED);
/* loop until we've determined whether we could acquire the lock or not */
while (true)
{
- uint32 old_state;
- uint32 expected_state;
- uint32 desired_state;
- bool lock_free;
+ uint32 old_state;
+ uint32 expected_state;
+ uint32 desired_state;
+ bool lock_free;
old_state = pg_atomic_read_u32(&lock->state);
expected_state = old_state;
@@ -632,7 +633,7 @@ LWLockAttemptLock(LWLock* lock, LWLockMode mode)
return false;
}
else
- return true; /* someobdy else has the lock */
+ return true; /* someobdy else has the lock */
}
}
pg_unreachable();
@@ -667,7 +668,7 @@ LWLockWakeup(LWLock *lock)
dlist_foreach_modify(iter, &lock->waiters)
{
- PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
continue;
@@ -683,6 +684,7 @@ LWLockWakeup(LWLock *lock)
* automatically.
*/
new_release_ok = false;
+
/*
* Don't wakeup (further) exclusive locks.
*/
@@ -693,7 +695,7 @@ LWLockWakeup(LWLock *lock)
* Once we've woken up an exclusive lock, there's no point in waking
* up anybody else.
*/
- if(waiter->lwWaitMode == LW_EXCLUSIVE)
+ if (waiter->lwWaitMode == LW_EXCLUSIVE)
break;
}
@@ -716,10 +718,11 @@ LWLockWakeup(LWLock *lock)
/* Awaken any waiters I removed from the queue. */
dlist_foreach_modify(iter, &wakeup)
{
- PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
dlist_delete(&waiter->lwWaitLink);
+
/*
* Guarantee that lwWaiting being unset only becomes visible once the
* unlink from the link has completed. Otherwise the target backend
@@ -799,7 +802,7 @@ LWLockQueueSelf(LWLock *lock, LWLockMode mode)
static void
LWLockDequeueSelf(LWLock *lock)
{
- bool found = false;
+ bool found = false;
dlist_mutable_iter iter;
#ifdef LWLOCK_STATS
@@ -822,7 +825,8 @@ LWLockDequeueSelf(LWLock *lock)
*/
dlist_foreach_modify(iter, &lock->waiters)
{
- PGPROC *proc = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, lwWaitLink, iter.cur);
+
if (proc == MyProc)
{
found = true;
@@ -844,7 +848,7 @@ LWLockDequeueSelf(LWLock *lock)
MyProc->lwWaiting = false;
else
{
- int extraWaits = 0;
+ int extraWaits = 0;
/*
* Somebody else dequeued us and has or will wake us up. Deal with the
@@ -881,6 +885,7 @@ LWLockDequeueSelf(LWLock *lock)
{
/* not waiting anymore */
uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
Assert(nwaiters < MAX_BACKENDS);
}
#endif
@@ -1047,6 +1052,7 @@ LWLockAcquireCommon(LWLock *lock, LWLockMode mode, uint64 *valptr, uint64 val)
{
/* not waiting anymore */
uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
Assert(nwaiters < MAX_BACKENDS);
}
#endif
@@ -1182,8 +1188,9 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
if (mustwait)
{
/*
- * Wait until awakened. Like in LWLockAcquire, be prepared for bogus
- * wakeups, because we share the semaphore with ProcWaitForSignal.
+ * Wait until awakened. Like in LWLockAcquire, be prepared for
+ * bogus wakeups, because we share the semaphore with
+ * ProcWaitForSignal.
*/
LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
@@ -1204,6 +1211,7 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
{
/* not waiting anymore */
uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
Assert(nwaiters < MAX_BACKENDS);
}
#endif
@@ -1216,11 +1224,11 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
/*
- * Got lock in the second attempt, undo queueing. We need to
- * treat this as having successfully acquired the lock, otherwise
- * we'd not necessarily wake up people we've prevented from
- * acquiring the lock.
- */
+ * Got lock in the second attempt, undo queueing. We need to treat
+ * this as having successfully acquired the lock, otherwise we'd
+ * not necessarily wake up people we've prevented from acquiring
+ * the lock.
+ */
LWLockDequeueSelf(lock);
}
}
@@ -1345,9 +1353,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
/*
* Add myself to wait queue. Note that this is racy, somebody else
- * could wakeup before we're finished queuing.
- * NB: We're using nearly the same twice-in-a-row lock acquisition
- * protocol as LWLockAcquire(). Check its comments for details.
+ * could wakeup before we're finished queuing. NB: We're using nearly
+ * the same twice-in-a-row lock acquisition protocol as
+ * LWLockAcquire(). Check its comments for details.
*/
LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
@@ -1405,6 +1413,7 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
{
/* not waiting anymore */
uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
Assert(nwaiters < MAX_BACKENDS);
}
#endif
@@ -1477,7 +1486,7 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
*/
dlist_foreach_modify(iter, &lock->waiters)
{
- PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
break;
@@ -1494,7 +1503,8 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
*/
dlist_foreach_modify(iter, &wakeup)
{
- PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+ PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+
dlist_delete(&waiter->lwWaitLink);
/* check comment in LWLockWakeup() about this barrier */
pg_write_barrier();
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 33b2f69bf8..455ad26634 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -1596,6 +1596,7 @@ CheckDeadLockAlert(void)
int save_errno = errno;
got_deadlock_timeout = true;
+
/*
* Have to set the latch again, even if handle_sig_alarm already did. Back
* then got_deadlock_timeout wasn't yet set... It's unlikely that this
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 41ecc999ae..df77bb2f5c 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -962,8 +962,8 @@ PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos, int nitems)
offset != MAXALIGN(offset))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item pointer: offset = %u, length = %u",
- offset, (unsigned int) itemlen)));
+ errmsg("corrupted item pointer: offset = %u, length = %u",
+ offset, (unsigned int) itemlen)));
if (nextitm < nitems && offnum == itemnos[nextitm])
{
@@ -1039,8 +1039,8 @@ PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos, int nitems)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/*
* Defragment the data areas of each tuple, being careful to preserve
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index b754d3bd19..42a43bb07b 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -213,8 +213,8 @@ mdinit(void)
/*
* XXX: The checkpointer needs to add entries to the pending ops table
* when absorbing fsync requests. That is done within a critical
- * section, which isn't usually allowed, but we make an exception.
- * It means that there's a theoretical possibility that you run out of
+ * section, which isn't usually allowed, but we make an exception. It
+ * means that there's a theoretical possibility that you run out of
* memory while absorbing fsync requests, which leads to a PANIC.
* Fortunately the hash table is small so that's unlikely to happen in
* practice.
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index ea2a43209d..ce4bdafad9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -574,10 +574,10 @@ ProcessClientWriteInterrupt(bool blocked)
/*
* We only want to process the interrupt here if socket writes are
- * blocking to increase the chance to get an error message to the
- * client. If we're not blocked there'll soon be a
- * CHECK_FOR_INTERRUPTS(). But if we're blocked we'll never get out of
- * that situation if the client has died.
+ * blocking to increase the chance to get an error message to the client.
+ * If we're not blocked there'll soon be a CHECK_FOR_INTERRUPTS(). But if
+ * we're blocked we'll never get out of that situation if the client has
+ * died.
*/
if (ProcDiePending && blocked)
{
@@ -2653,9 +2653,9 @@ die(SIGNAL_ARGS)
/*
* If we're in single user mode, we want to quit immediately - we can't
- * rely on latches as they wouldn't work when stdin/stdout is a
- * file. Rather ugly, but it's unlikely to be worthwhile to invest much
- * more effort just for the benefit of single user mode.
+ * rely on latches as they wouldn't work when stdin/stdout is a file.
+ * Rather ugly, but it's unlikely to be worthwhile to invest much more
+ * effort just for the benefit of single user mode.
*/
if (DoingCommandRead && whereToSendOutput != DestRemote)
ProcessInterrupts();
@@ -2906,13 +2906,13 @@ ProcessInterrupts(void)
*/
if (RecoveryConflictPending && DoingCommandRead)
{
- QueryCancelPending = false; /* this trumps QueryCancel */
+ QueryCancelPending = false; /* this trumps QueryCancel */
RecoveryConflictPending = false;
LockErrorCleanup();
pgstat_report_recovery_conflict(RecoveryConflictReason);
ereport(FATAL,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
- errmsg("terminating connection due to conflict with recovery"),
+ errmsg("terminating connection due to conflict with recovery"),
errdetail_recovery_conflict(),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
@@ -3894,7 +3894,7 @@ PostgresMain(int argc, char *argv[],
if (pq_is_reading_msg())
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("terminating connection because protocol sync was lost")));
+ errmsg("terminating connection because protocol sync was lost")));
/* Now we can allow interrupts again */
RESUME_INTERRUPTS();
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index a95eff16cc..7db9f96fdf 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -135,8 +135,8 @@ check_xact_readonly(Node *parsetree)
/*
* Note: Commands that need to do more complicated checking are handled
* elsewhere, in particular COPY and plannable statements do their own
- * checking. However they should all call PreventCommandIfReadOnly
- * or PreventCommandIfParallelMode to actually throw the error.
+ * checking. However they should all call PreventCommandIfReadOnly or
+ * PreventCommandIfParallelMode to actually throw the error.
*/
switch (nodeTag(parsetree))
@@ -933,6 +933,7 @@ ProcessUtilitySlow(Node *parsetree,
case T_CreateSchemaStmt:
CreateSchemaCommand((CreateSchemaStmt *) parsetree,
queryString);
+
/*
* EventTriggerCollectSimpleCommand called by
* CreateSchemaCommand
@@ -1072,12 +1073,12 @@ ProcessUtilitySlow(Node *parsetree,
else
{
/*
- * Recurse for anything else. If we need to do
- * so, "close" the current complex-command set,
- * and start a new one at the bottom; this is
- * needed to ensure the ordering of queued
- * commands is consistent with the way they are
- * executed here.
+ * Recurse for anything else. If we need to
+ * do so, "close" the current complex-command
+ * set, and start a new one at the bottom;
+ * this is needed to ensure the ordering of
+ * queued commands is consistent with the way
+ * they are executed here.
*/
EventTriggerAlterTableEnd();
ProcessUtility(stmt,
@@ -1177,43 +1178,43 @@ ProcessUtilitySlow(Node *parsetree,
address =
DefineAggregate(stmt->defnames, stmt->args,
stmt->oldstyle,
- stmt->definition, queryString);
+ stmt->definition, queryString);
break;
case OBJECT_OPERATOR:
Assert(stmt->args == NIL);
address = DefineOperator(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
case OBJECT_TYPE:
Assert(stmt->args == NIL);
address = DefineType(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
case OBJECT_TSPARSER:
Assert(stmt->args == NIL);
address = DefineTSParser(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
case OBJECT_TSDICTIONARY:
Assert(stmt->args == NIL);
address = DefineTSDictionary(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
case OBJECT_TSTEMPLATE:
Assert(stmt->args == NIL);
address = DefineTSTemplate(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
case OBJECT_TSCONFIGURATION:
Assert(stmt->args == NIL);
address = DefineTSConfiguration(stmt->defnames,
- stmt->definition,
- &secondaryObject);
+ stmt->definition,
+ &secondaryObject);
break;
case OBJECT_COLLATION:
Assert(stmt->args == NIL);
address = DefineCollation(stmt->defnames,
- stmt->definition);
+ stmt->definition);
break;
default:
elog(ERROR, "unrecognized define stmt type: %d",
@@ -1256,17 +1257,18 @@ ProcessUtilitySlow(Node *parsetree,
/* ... and do it */
EventTriggerAlterTableStart(parsetree);
address =
- DefineIndex(relid, /* OID of heap relation */
+ DefineIndex(relid, /* OID of heap relation */
stmt,
- InvalidOid, /* no predefined OID */
- false, /* is_alter_table */
- true, /* check_rights */
- false, /* skip_build */
- false); /* quiet */
+ InvalidOid, /* no predefined OID */
+ false, /* is_alter_table */
+ true, /* check_rights */
+ false, /* skip_build */
+ false); /* quiet */
+
/*
- * Add the CREATE INDEX node itself to stash right away; if
- * there were any commands stashed in the ALTER TABLE code,
- * we need them to appear after this one.
+ * Add the CREATE INDEX node itself to stash right away;
+ * if there were any commands stashed in the ALTER TABLE
+ * code, we need them to appear after this one.
*/
EventTriggerCollectSimpleCommand(address, secondaryObject,
parsetree);
@@ -1285,7 +1287,7 @@ ProcessUtilitySlow(Node *parsetree,
case T_AlterExtensionContentsStmt:
address = ExecAlterExtensionContentsStmt((AlterExtensionContentsStmt *) parsetree,
- &secondaryObject);
+ &secondaryObject);
break;
case T_CreateFdwStmt:
@@ -1377,10 +1379,11 @@ ProcessUtilitySlow(Node *parsetree,
case T_CreateTableAsStmt:
address = ExecCreateTableAs((CreateTableAsStmt *) parsetree,
- queryString, params, completionTag);
+ queryString, params, completionTag);
break;
case T_RefreshMatViewStmt:
+
/*
* REFRSH CONCURRENTLY executes some DDL commands internally.
* Inhibit DDL command collection here to avoid those commands
@@ -1391,7 +1394,7 @@ ProcessUtilitySlow(Node *parsetree,
PG_TRY();
{
address = ExecRefreshMatView((RefreshMatViewStmt *) parsetree,
- queryString, params, completionTag);
+ queryString, params, completionTag);
}
PG_CATCH();
{
@@ -1404,8 +1407,8 @@ ProcessUtilitySlow(Node *parsetree,
case T_CreateTrigStmt:
address = CreateTrigger((CreateTrigStmt *) parsetree,
- queryString, InvalidOid, InvalidOid,
- InvalidOid, InvalidOid, false);
+ queryString, InvalidOid, InvalidOid,
+ InvalidOid, InvalidOid, false);
break;
case T_CreatePLangStmt:
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index a2f0f5cebd..3af1904821 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -1335,8 +1335,8 @@ CheckAffix(const char *word, size_t len, AFFIX *Affix, int flagflags, char *neww
else
{
/*
- * if prefix is an all non-changed part's length then all word contains
- * only prefix and suffix, so out
+ * if prefix is an all non-changed part's length then all word
+ * contains only prefix and suffix, so out
*/
if (baselen && *baselen + strlen(Affix->find) <= Affix->replen)
return NULL;
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index e7aecc95c9..3ca168b473 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -5202,7 +5202,7 @@ get_rolespec_tuple(const Node *node)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("role \"%s\" does not exist", role->rolename)));
+ errmsg("role \"%s\" does not exist", role->rolename)));
break;
case ROLESPEC_CURRENT_USER:
@@ -5221,7 +5221,7 @@ get_rolespec_tuple(const Node *node)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("role \"%s\" does not exist", "public")));
- tuple = NULL; /* make compiler happy */
+ tuple = NULL; /* make compiler happy */
default:
elog(ERROR, "unexpected role type %d", role->roletype);
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index f7b57da48e..c14ea23dfb 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -687,7 +687,7 @@ array_position_start(PG_FUNCTION_ARGS)
/*
* array_position_common
- * Common code for array_position and array_position_start
+ * Common code for array_position and array_position_start
*
* These are separate wrappers for the sake of opr_sanity regression test.
* They are not strict so we have to test for null inputs explicitly.
@@ -755,7 +755,8 @@ array_position_common(FunctionCallInfo fcinfo)
/*
* We arrange to look up type info for array_create_iterator only once per
- * series of calls, assuming the element type doesn't change underneath us.
+ * series of calls, assuming the element type doesn't change underneath
+ * us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -778,8 +779,8 @@ array_position_common(FunctionCallInfo fcinfo)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
my_extra->element_type = element_type;
fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
@@ -892,7 +893,8 @@ array_positions(PG_FUNCTION_ARGS)
/*
* We arrange to look up type info for array_create_iterator only once per
- * series of calls, assuming the element type doesn't change underneath us.
+ * series of calls, assuming the element type doesn't change underneath
+ * us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -915,15 +917,16 @@ array_positions(PG_FUNCTION_ARGS)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
my_extra->element_type = element_type;
fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
}
/*
- * Accumulate each array position iff the element matches the given element.
+ * Accumulate each array position iff the element matches the given
+ * element.
*/
array_iterator = array_create_iterator(array, 0, my_extra);
while (array_iterate(array_iterator, &value, &isnull))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 84e4db8416..5391ea0bf0 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -920,7 +920,7 @@ typedef struct NUMProc
num_count, /* number of write digits */
num_in, /* is inside number */
num_curr, /* current position in number */
- out_pre_spaces, /* spaces before first digit */
+ out_pre_spaces, /* spaces before first digit */
read_dec, /* to_number - was read dec. point */
read_post, /* to_number - number of dec. digit */
@@ -981,7 +981,7 @@ static char *get_last_relevant_decnum(char *num);
static void NUM_numpart_from_char(NUMProc *Np, int id, int input_len);
static void NUM_numpart_to_char(NUMProc *Np, int id);
static char *NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
- char *number, int from_char_input_len, int to_char_out_pre_spaces,
+ char *number, int from_char_input_len, int to_char_out_pre_spaces,
int sign, bool is_to_char, Oid collid);
static DCHCacheEntry *DCH_cache_search(char *str);
static DCHCacheEntry *DCH_cache_getnew(char *str);
@@ -2541,14 +2541,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2561,14 +2561,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2581,14 +2581,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2601,14 +2601,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_toupper_z(months[tm->tm_mon - 1]));
@@ -2620,14 +2620,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, months[tm->tm_mon - 1]);
@@ -2639,14 +2639,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_tolower_z(months[tm->tm_mon - 1]));
@@ -2662,14 +2662,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2680,14 +2680,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2698,14 +2698,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2716,14 +2716,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_toupper_z(days_short[tm->tm_wday]));
@@ -2733,14 +2733,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, days_short[tm->tm_wday]);
@@ -2750,14 +2750,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_tolower_z(days_short[tm->tm_wday]));
@@ -4572,7 +4572,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
static char *
NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
- char *number, int from_char_input_len, int to_char_out_pre_spaces,
+ char *number, int from_char_input_len, int to_char_out_pre_spaces,
int sign, bool is_to_char, Oid collid)
{
FormatNode *n;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index f08e288c21..26d3843369 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -1442,7 +1442,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (DATE_NOT_FINITE(date))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else
{
@@ -1465,7 +1465,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (TIMESTAMP_NOT_FINITE(timestamp))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
{
@@ -1492,7 +1492,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (TIMESTAMP_NOT_FINITE(timestamp))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0)
{
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index bccc6696a4..c0959a0ee2 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -57,7 +57,7 @@ typedef enum /* type categories for datum_to_jsonb */
JSONBTYPE_COMPOSITE, /* composite */
JSONBTYPE_JSONCAST, /* something with an explicit cast to JSON */
JSONBTYPE_OTHER /* all else */
-} JsonbTypeCategory;
+} JsonbTypeCategory;
static inline Datum jsonb_from_cstring(char *json, int len);
static size_t checkStringLen(size_t len);
@@ -69,7 +69,7 @@ static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
static void jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid);
static void composite_to_jsonb(Datum composite, JsonbInState *result);
static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *dims,
@@ -77,14 +77,14 @@ static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *di
JsonbTypeCategory tcategory, Oid outfuncoid);
static void array_to_jsonb_internal(Datum array, JsonbInState *result);
static void jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid);
static void datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
JsonbTypeCategory tcategory, Oid outfuncoid,
bool key_scalar);
static void add_jsonb(Datum val, bool is_null, JsonbInState *result,
Oid val_type, bool key_scalar);
-static JsonbParseState * clone_parse_state(JsonbParseState * state);
+static JsonbParseState *clone_parse_state(JsonbParseState *state);
static char *JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool indent);
static void add_indent(StringInfo out, bool indent, int level);
@@ -365,10 +365,12 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
case JSON_TOKEN_TRUE:
v.type = jbvBool;
v.val.boolean = true;
+
break;
case JSON_TOKEN_FALSE:
v.type = jbvBool;
v.val.boolean = false;
+
break;
case JSON_TOKEN_NULL:
v.type = jbvNull;
@@ -448,15 +450,17 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
JsonbValue v;
int level = 0;
bool redo_switch = false;
+
/* If we are indenting, don't add a space after a comma */
int ispaces = indent ? 1 : 2;
+
/*
- * Don't indent the very first item. This gets set to the indent flag
- * at the bottom of the loop.
+ * Don't indent the very first item. This gets set to the indent flag at
+ * the bottom of the loop.
*/
- bool use_indent = false;
- bool raw_scalar = false;
- bool last_was_key = false;
+ bool use_indent = false;
+ bool raw_scalar = false;
+ bool last_was_key = false;
if (out == NULL)
out = makeStringInfo();
@@ -530,13 +534,13 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
appendBinaryStringInfo(out, ", ", ispaces);
first = false;
- if (! raw_scalar)
+ if (!raw_scalar)
add_indent(out, use_indent, level);
jsonb_put_escaped_value(out, &v);
break;
case WJB_END_ARRAY:
level--;
- if (! raw_scalar)
+ if (!raw_scalar)
{
add_indent(out, use_indent, level);
appendStringInfoCharMacro(out, ']');
@@ -580,11 +584,11 @@ add_indent(StringInfo out, bool indent, int level)
*
* Given the datatype OID, return its JsonbTypeCategory, as well as the type's
* output function OID. If the returned category is JSONBTYPE_JSONCAST,
- * we return the OID of the relevant cast function instead.
+ * we return the OID of the relevant cast function instead.
*/
static void
jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid)
{
bool typisvarlena;
@@ -649,16 +653,16 @@ jsonb_categorize_type(Oid typoid,
*tcategory = JSONBTYPE_OTHER;
/*
- * but first let's look for a cast to json (note: not to jsonb)
- * if it's not built-in.
+ * but first let's look for a cast to json (note: not to
+ * jsonb) if it's not built-in.
*/
if (typoid >= FirstNormalObjectId)
{
- Oid castfunc;
+ Oid castfunc;
CoercionPathType ctype;
ctype = find_coercion_pathway(JSONOID, typoid,
- COERCION_EXPLICIT, &castfunc);
+ COERCION_EXPLICIT, &castfunc);
if (ctype == COERCION_PATH_FUNC && OidIsValid(castfunc))
{
*tcategory = JSONBTYPE_JSONCAST;
@@ -774,30 +778,30 @@ datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
}
}
break;
- case JSONBTYPE_DATE:
- {
- DateADT date;
- struct pg_tm tm;
- char buf[MAXDATELEN + 1];
+ case JSONBTYPE_DATE:
+ {
+ DateADT date;
+ struct pg_tm tm;
+ char buf[MAXDATELEN + 1];
- date = DatumGetDateADT(val);
- jb.type = jbvString;
+ date = DatumGetDateADT(val);
+ jb.type = jbvString;
- if (DATE_NOT_FINITE(date))
- {
- jb.val.string.len = strlen(DT_INFINITY);
- jb.val.string.val = pstrdup(DT_INFINITY);
- }
- else
- {
- j2date(date + POSTGRES_EPOCH_JDATE,
- &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
- EncodeDateOnly(&tm, USE_XSD_DATES, buf);
- jb.val.string.len = strlen(buf);
- jb.val.string.val = pstrdup(buf);
+ if (DATE_NOT_FINITE(date))
+ {
+ jb.val.string.len = strlen(DT_INFINITY);
+ jb.val.string.val = pstrdup(DT_INFINITY);
+ }
+ else
+ {
+ j2date(date + POSTGRES_EPOCH_JDATE,
+ &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
+ EncodeDateOnly(&tm, USE_XSD_DATES, buf);
+ jb.val.string.len = strlen(buf);
+ jb.val.string.val = pstrdup(buf);
+ }
}
- }
- break;
+ break;
case JSONBTYPE_TIMESTAMP:
{
Timestamp timestamp;
@@ -1534,9 +1538,11 @@ jsonb_object_two_arg(PG_FUNCTION_ARGS)
* change them.
*/
static JsonbParseState *
-clone_parse_state(JsonbParseState * state)
+clone_parse_state(JsonbParseState *state)
{
- JsonbParseState *result, *icursor, *ocursor;
+ JsonbParseState *result,
+ *icursor,
+ *ocursor;
if (state == NULL)
return NULL;
@@ -1544,14 +1550,14 @@ clone_parse_state(JsonbParseState * state)
result = palloc(sizeof(JsonbParseState));
icursor = state;
ocursor = result;
- for(;;)
+ for (;;)
{
ocursor->contVal = icursor->contVal;
ocursor->size = icursor->size;
icursor = icursor->next;
if (icursor == NULL)
break;
- ocursor->next= palloc(sizeof(JsonbParseState));
+ ocursor->next = palloc(sizeof(JsonbParseState));
ocursor = ocursor->next;
}
ocursor->next = NULL;
@@ -1652,15 +1658,16 @@ jsonb_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
else if (v.type == jbvNumeric)
{
/* same for numeric */
- v.val.numeric =
+ v.val.numeric =
DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
- NumericGetDatum(v.val.numeric)));
+ NumericGetDatum(v.val.numeric)));
}
result->res = pushJsonbValue(&result->parseState,
@@ -1693,15 +1700,15 @@ jsonb_agg_finalfn(PG_FUNCTION_ARGS)
/*
* We need to do a shallow clone of the argument in case the final
- * function is called more than once, so we avoid changing the argument.
- * A shallow clone is sufficient as we aren't going to change any of the
+ * function is called more than once, so we avoid changing the argument. A
+ * shallow clone is sufficient as we aren't going to change any of the
* values, just add the final array end marker.
*/
result.parseState = clone_parse_state(arg->parseState);
result.res = pushJsonbValue(&result.parseState,
- WJB_END_ARRAY, NULL);
+ WJB_END_ARRAY, NULL);
out = JsonbValueToJsonb(result.res);
@@ -1813,6 +1820,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
@@ -1871,6 +1879,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
@@ -1878,8 +1887,8 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* same for numeric */
v.val.numeric =
- DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
- NumericGetDatum(v.val.numeric)));
+ DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
+ NumericGetDatum(v.val.numeric)));
}
result->res = pushJsonbValue(&result->parseState,
@@ -1900,7 +1909,7 @@ Datum
jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
{
JsonbInState *arg;
- JsonbInState result;
+ JsonbInState result;
Jsonb *out;
/* cannot be called directly because of internal-type argument */
@@ -1913,15 +1922,15 @@ jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
/*
* We need to do a shallow clone of the argument in case the final
- * function is called more than once, so we avoid changing the argument.
- * A shallow clone is sufficient as we aren't going to change any of the
+ * function is called more than once, so we avoid changing the argument. A
+ * shallow clone is sufficient as we aren't going to change any of the
* values, just add the final object end marker.
*/
result.parseState = clone_parse_state(arg->parseState);
result.res = pushJsonbValue(&result.parseState,
- WJB_END_OBJECT, NULL);
+ WJB_END_OBJECT, NULL);
out = JsonbValueToJsonb(result.res);
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 974e386524..4d733159d0 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -58,8 +58,8 @@ static int lengthCompareJsonbStringValue(const void *a, const void *b);
static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
static void uniqueifyJsonbObject(JsonbValue *object);
static JsonbValue *pushJsonbValueScalar(JsonbParseState **pstate,
- JsonbIteratorToken seq,
- JsonbValue *scalarVal);
+ JsonbIteratorToken seq,
+ JsonbValue *scalarVal);
/*
* Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -518,7 +518,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
{
JsonbIterator *it;
JsonbValue *res = NULL;
- JsonbValue v;
+ JsonbValue v;
JsonbIteratorToken tok;
if (!jbval || (seq != WJB_ELEM && seq != WJB_VALUE) ||
@@ -543,7 +543,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
*/
static JsonbValue *
pushJsonbValueScalar(JsonbParseState **pstate, JsonbIteratorToken seq,
- JsonbValue *scalarVal)
+ JsonbValue *scalarVal)
{
JsonbValue *result = NULL;
@@ -1231,6 +1231,7 @@ JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
break;
case jbvBool:
tmp = scalarVal->val.boolean ? 0x02 : 0x04;
+
break;
default:
elog(ERROR, "invalid jsonb scalar type");
@@ -1304,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
case jbvBool:
if (aScalar->val.boolean == bScalar->val.boolean)
return 0;
- else if (aScalar->val.boolean > bScalar->val.boolean)
+ else if (aScalar->val.boolean > bScalar->val.boolean)
return 1;
else
return -1;
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 9987c73784..2f755744c1 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -110,8 +110,8 @@ static void sn_object_start(void *state);
static void sn_object_end(void *state);
static void sn_array_start(void *state);
static void sn_array_end(void *state);
-static void sn_object_field_start (void *state, char *fname, bool isnull);
-static void sn_array_element_start (void *state, bool isnull);
+static void sn_object_field_start(void *state, char *fname, bool isnull);
+static void sn_array_element_start(void *state, bool isnull);
static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
/* worker function for populate_recordset and to_recordset */
@@ -126,18 +126,18 @@ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
/* functions supporting jsonb_delete, jsonb_replace and jsonb_concat */
static JsonbValue *IteratorConcat(JsonbIterator **it1, JsonbIterator **it2,
- JsonbParseState **state);
+ JsonbParseState **state);
static JsonbValue *walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero);
static JsonbValue *replacePath(JsonbIterator **it, Datum *path_elems,
- bool *path_nulls, int path_len,
- JsonbParseState **st, int level, Jsonb *newval);
+ bool *path_nulls, int path_len,
+ JsonbParseState **st, int level, Jsonb *newval);
static void replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
- int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 nelems);
+ int path_len, JsonbParseState **st, int level,
+ Jsonb *newval, uint32 nelems);
static void replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
- int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 npairs);
-static void addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb);
+ int path_len, JsonbParseState **st, int level,
+ Jsonb *newval, uint32 npairs);
+static void addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb);
/* state for json_object_keys */
typedef struct OkeysState
@@ -250,10 +250,11 @@ typedef struct PopulateRecordsetState
} PopulateRecordsetState;
/* state for json_strip_nulls */
-typedef struct StripnullState{
+typedef struct StripnullState
+{
JsonLexContext *lex;
- StringInfo strval;
- bool skip_next_null;
+ StringInfo strval;
+ bool skip_next_null;
} StripnullState;
/* Turn a jsonb object into a record */
@@ -3045,6 +3046,7 @@ static void
sn_object_start(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '{');
}
@@ -3052,6 +3054,7 @@ static void
sn_object_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '}');
}
@@ -3059,6 +3062,7 @@ static void
sn_array_start(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '[');
}
@@ -3066,21 +3070,21 @@ static void
sn_array_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, ']');
}
static void
-sn_object_field_start (void *state, char *fname, bool isnull)
+sn_object_field_start(void *state, char *fname, bool isnull)
{
StripnullState *_state = (StripnullState *) state;
if (isnull)
{
/*
- * The next thing must be a scalar or isnull couldn't be true,
- * so there is no danger of this state being carried down
- * into a nested object or array. The flag will be reset in the
- * scalar action.
+ * The next thing must be a scalar or isnull couldn't be true, so
+ * there is no danger of this state being carried down into a nested
+ * object or array. The flag will be reset in the scalar action.
*/
_state->skip_next_null = true;
return;
@@ -3090,16 +3094,16 @@ sn_object_field_start (void *state, char *fname, bool isnull)
appendStringInfoCharMacro(_state->strval, ',');
/*
- * Unfortunately we don't have the quoted and escaped string any more,
- * so we have to re-escape it.
+ * Unfortunately we don't have the quoted and escaped string any more, so
+ * we have to re-escape it.
*/
- escape_json(_state->strval,fname);
+ escape_json(_state->strval, fname);
appendStringInfoCharMacro(_state->strval, ':');
}
static void
-sn_array_element_start (void *state, bool isnull)
+sn_array_element_start(void *state, bool isnull)
{
StripnullState *_state = (StripnullState *) state;
@@ -3114,7 +3118,7 @@ sn_scalar(void *state, char *token, JsonTokenType tokentype)
if (_state->skip_next_null)
{
- Assert (tokentype == JSON_TOKEN_NULL);
+ Assert(tokentype == JSON_TOKEN_NULL);
_state->skip_next_null = false;
return;
}
@@ -3132,7 +3136,7 @@ Datum
json_strip_nulls(PG_FUNCTION_ARGS)
{
text *json = PG_GETARG_TEXT_P(0);
- StripnullState *state;
+ StripnullState *state;
JsonLexContext *lex;
JsonSemAction *sem;
@@ -3166,13 +3170,14 @@ json_strip_nulls(PG_FUNCTION_ARGS)
Datum
jsonb_strip_nulls(PG_FUNCTION_ARGS)
{
- Jsonb * jb = PG_GETARG_JSONB(0);
+ Jsonb *jb = PG_GETARG_JSONB(0);
JsonbIterator *it;
JsonbParseState *parseState = NULL;
JsonbValue *res = NULL;
- int type;
- JsonbValue v,k;
- bool last_was_key = false;
+ int type;
+ JsonbValue v,
+ k;
+ bool last_was_key = false;
if (JB_ROOT_IS_SCALAR(jb))
PG_RETURN_POINTER(jb);
@@ -3181,7 +3186,7 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
- Assert( ! (type == WJB_KEY && last_was_key));
+ Assert(!(type == WJB_KEY && last_was_key));
if (type == WJB_KEY)
{
@@ -3225,13 +3230,12 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
* like getting jbvBinary values, so we can't just push jb as a whole.
*/
static void
-addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
+addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb)
{
-
JsonbIterator *it;
- JsonbValue *o = &(*jbps)->contVal;
- int type;
- JsonbValue v;
+ JsonbValue *o = &(*jbps)->contVal;
+ int type;
+ JsonbValue v;
it = JsonbIteratorInit(&jb->root);
@@ -3239,8 +3243,8 @@ addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
if (JB_ROOT_IS_SCALAR(jb))
{
- (void) JsonbIteratorNext(&it, &v, false); /* skip array header */
- (void) JsonbIteratorNext(&it, &v, false); /* fetch scalar value */
+ (void) JsonbIteratorNext(&it, &v, false); /* skip array header */
+ (void) JsonbIteratorNext(&it, &v, false); /* fetch scalar value */
switch (o->type)
{
@@ -3297,8 +3301,8 @@ jsonb_concat(PG_FUNCTION_ARGS)
Jsonb *out = palloc(VARSIZE(jb1) + VARSIZE(jb2));
JsonbParseState *state = NULL;
JsonbValue *res;
- JsonbIterator *it1,
- *it2;
+ JsonbIterator *it1,
+ *it2;
/*
* If one of the jsonb is empty, just return other.
@@ -3453,7 +3457,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS)
res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
}
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3497,7 +3501,7 @@ jsonb_replace(PG_FUNCTION_ARGS)
res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, newval);
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3541,7 +3545,7 @@ jsonb_delete_path(PG_FUNCTION_ARGS)
res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, NULL);
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3687,7 +3691,7 @@ walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero)
{
uint32 r,
level = 1;
- JsonbValue v;
+ JsonbValue v;
JsonbValue *res = NULL;
while ((r = JsonbIteratorNext(it, &v, false)) != WJB_DONE)
@@ -3758,7 +3762,7 @@ replacePath(JsonbIterator **it, Datum *path_elems,
static void
replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 nelems)
+ Jsonb *newval, uint32 nelems)
{
JsonbValue v;
int i;
@@ -3770,7 +3774,8 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
for (i = 0; i < nelems; i++)
{
- int r = JsonbIteratorNext(it, &k, true);
+ int r = JsonbIteratorNext(it, &k, true);
+
Assert(r == WJB_KEY);
if (!done &&
@@ -3780,7 +3785,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
{
if (level == path_len - 1)
{
- r = JsonbIteratorNext(it, &v, true); /* skip */
+ r = JsonbIteratorNext(it, &v, true); /* skip */
if (newval != NULL)
{
(void) pushJsonbValue(st, WJB_KEY, &k);
@@ -3801,7 +3806,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
(void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
{
- int walking_level = 1;
+ int walking_level = 1;
while (walking_level != 0)
{
@@ -3859,13 +3864,13 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
/* iterate over the array elements */
for (i = 0; i < npairs; i++)
{
- int r;
+ int r;
if (i == idx && level < path_len)
{
if (level == path_len - 1)
{
- r = JsonbIteratorNext(it, &v, true); /* skip */
+ r = JsonbIteratorNext(it, &v, true); /* skip */
if (newval != NULL)
addJsonbToParseState(st, newval);
}
@@ -3881,7 +3886,7 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
{
- int walking_level = 1;
+ int walking_level = 1;
while (walking_level != 0)
{
diff --git a/src/backend/utils/adt/levenshtein.c b/src/backend/utils/adt/levenshtein.c
index f6e2ca6452..2c30b6c8e9 100644
--- a/src/backend/utils/adt/levenshtein.c
+++ b/src/backend/utils/adt/levenshtein.c
@@ -96,8 +96,8 @@ varstr_levenshtein(const char *source, int slen, const char *target, int tlen,
#endif
/*
- * A common use for Levenshtein distance is to match attributes when building
- * diagnostic, user-visible messages. Restrict the size of
+ * A common use for Levenshtein distance is to match attributes when
+ * building diagnostic, user-visible messages. Restrict the size of
* MAX_LEVENSHTEIN_STRLEN at compile time so that this is guaranteed to
* work.
*/
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 9d53a8b6a3..1705ff0d11 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -419,7 +419,7 @@ PreventAdvisoryLocksInParallelMode(void)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot use advisory locks during a parallel operation")));
+ errmsg("cannot use advisory locks during a parallel operation")));
}
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 61d609f918..de68cdddf1 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -187,7 +187,7 @@ pg_terminate_backend(PG_FUNCTION_ARGS)
if (r == SIGNAL_BACKEND_NOSUPERUSER)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be a superuser to terminate superuser process"))));
+ (errmsg("must be a superuser to terminate superuser process"))));
if (r == SIGNAL_BACKEND_NOPERMISSION)
ereport(ERROR,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0fdb17f947..756237e751 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -595,10 +595,10 @@ inet_gist_decompress(PG_FUNCTION_ARGS)
Datum
inet_gist_fetch(PG_FUNCTION_ARGS)
{
- GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
- GistInetKey *key = DatumGetInetKeyP(entry->key);
- GISTENTRY *retval;
- inet *dst;
+ GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+ GistInetKey *key = DatumGetInetKeyP(entry->key);
+ GISTENTRY *retval;
+ inet *dst;
dst = (inet *) palloc0(sizeof(inet));
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 3cef3048eb..7ce41b7888 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -1731,7 +1731,7 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
if (trace_sort)
elog(LOG,
"numeric_abbrev: aborting abbreviation at cardinality %f"
- " below threshold %f after " INT64_FORMAT " values (%d rows)",
+ " below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count / 10000.0 + 0.5,
nss->input_count, memtupcount);
#endif
@@ -3408,10 +3408,10 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
#ifdef HAVE_INT128
typedef struct Int128AggState
{
- bool calcSumX2; /* if true, calculate sumX2 */
- int64 N; /* count of processed numbers */
- int128 sumX; /* sum of processed numbers */
- int128 sumX2; /* sum of squares of processed numbers */
+ bool calcSumX2; /* if true, calculate sumX2 */
+ int64 N; /* count of processed numbers */
+ int128 sumX; /* sum of processed numbers */
+ int128 sumX2; /* sum of squares of processed numbers */
} Int128AggState;
/*
@@ -3703,9 +3703,9 @@ Datum
numeric_poly_sum(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- NumericVar result;
+ PolyNumAggState *state;
+ Numeric res;
+ NumericVar result;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -3731,9 +3731,10 @@ Datum
numeric_poly_avg(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- NumericVar result;
- Datum countd, sumd;
+ PolyNumAggState *state;
+ NumericVar result;
+ Datum countd,
+ sumd;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -3962,8 +3963,8 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
#ifdef HAVE_INT128
static Numeric
numeric_poly_stddev_internal(Int128AggState *state,
- bool variance, bool sample,
- bool *is_null)
+ bool variance, bool sample,
+ bool *is_null)
{
NumericAggState numstate;
Numeric res;
@@ -3997,9 +3998,9 @@ Datum
numeric_poly_var_samp(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4018,9 +4019,9 @@ Datum
numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4039,9 +4040,9 @@ Datum
numeric_poly_var_pop(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4060,9 +4061,9 @@ Datum
numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -5306,10 +5307,10 @@ int64_to_numericvar(int64 val, NumericVar *var)
static void
int128_to_numericvar(int128 val, NumericVar *var)
{
- uint128 uval,
- newuval;
- NumericDigit *ptr;
- int ndigits;
+ uint128 uval,
+ newuval;
+ NumericDigit *ptr;
+ int ndigits;
/* int128 can require at most 39 decimal digits; add one for safety */
alloc_var(var, 40 / DEC_DIGITS);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index d84969f770..4be735e918 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -635,7 +635,7 @@ cache_single_time(char **dst, const char *format, const struct tm * tm)
/*
* MAX_L10N_DATA is sufficient buffer space for every known locale, and
* POSIX defines no strftime() errors. (Buffer space exhaustion is not an
- * error.) An implementation might report errors (e.g. ENOMEM) by
+ * error.) An implementation might report errors (e.g. ENOMEM) by
* returning 0 (or, less plausibly, a negative value) and setting errno.
* Report errno just in case the implementation did that, but clear it in
* advance of the call so we don't emit a stale, unrelated errno.
diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c
index d69fa53567..883378e524 100644
--- a/src/backend/utils/adt/pg_upgrade_support.c
+++ b/src/backend/utils/adt/pg_upgrade_support.c
@@ -20,19 +20,19 @@
#include "utils/builtins.h"
-Datum binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
-
-
-#define CHECK_IS_BINARY_UPGRADE \
-do { \
+Datum binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
+
+
+#define CHECK_IS_BINARY_UPGRADE \
+do { \
if (!IsBinaryUpgrade) \
ereport(ERROR, \
(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM), \
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 2b3778b03a..f7c9bf6333 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -531,14 +531,14 @@ Datum
pg_stat_get_activity(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_ACTIVITY_COLS 22
- int num_backends = pgstat_fetch_stat_numbackends();
- int curr_backend;
- int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
+ int num_backends = pgstat_fetch_stat_numbackends();
+ int curr_backend;
+ int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -628,7 +628,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
if (beentry->st_ssl)
{
- values[16] = BoolGetDatum(true); /* ssl */
+ values[16] = BoolGetDatum(true); /* ssl */
values[17] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
values[18] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
values[19] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
@@ -637,7 +637,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
}
else
{
- values[16] = BoolGetDatum(false); /* ssl */
+ values[16] = BoolGetDatum(false); /* ssl */
nulls[17] = nulls[18] = nulls[19] = nulls[20] = nulls[21] = true;
}
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9281529d7a..3b5529eb30 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -583,7 +583,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
*/
cmp = adjacent_inner_consistent(typcache, &lower,
¢roidUpper,
- prevCentroid ? &prevUpper : NULL);
+ prevCentroid ? &prevUpper : NULL);
if (cmp > 0)
which1 = (1 << 1) | (1 << 4);
else if (cmp < 0)
@@ -594,12 +594,12 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* Also search for ranges's adjacent to argument's upper
* bound. They will be found along the line adjacent to
- * (and just right of) X=upper, which falls in quadrants
- * 3 and 4, or 1 and 2.
+ * (and just right of) X=upper, which falls in quadrants 3
+ * and 4, or 1 and 2.
*/
cmp = adjacent_inner_consistent(typcache, &upper,
¢roidLower,
- prevCentroid ? &prevLower : NULL);
+ prevCentroid ? &prevLower : NULL);
if (cmp > 0)
which2 = (1 << 1) | (1 << 2);
else if (cmp < 0)
@@ -782,7 +782,7 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
Assert(arg->lower != centroid->lower);
- cmp = range_cmp_bounds(typcache, arg, centroid);
+ cmp = range_cmp_bounds(typcache, arg, centroid);
if (centroid->lower)
{
@@ -799,11 +799,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
* With the argument range [..., 500), the adjacent range we're
* searching for is [500, ...):
*
- * ARGUMENT CENTROID CMP ADJ
- * [..., 500) [498, ...) > (N) [500, ...) is to the right
- * [..., 500) [499, ...) = (N) [500, ...) is to the right
- * [..., 500) [500, ...) < Y [500, ...) is to the right
- * [..., 500) [501, ...) < N [500, ...) is to the left
+ * ARGUMENT CENTROID CMP ADJ
+ * [..., 500) [498, ...) > (N) [500, ...) is to the right
+ * [..., 500) [499, ...) = (N) [500, ...) is to the right
+ * [..., 500) [500, ...) < Y [500, ...) is to the right
+ * [..., 500) [501, ...) < N [500, ...) is to the left
*
* So, we must search left when the argument is smaller than, and not
* adjacent, to the centroid. Otherwise search right.
@@ -821,11 +821,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
* bounds. A matching adjacent upper bound must be *smaller* than the
* argument, but only just.
*
- * ARGUMENT CENTROID CMP ADJ
- * [500, ...) [..., 499) > (N) [..., 500) is to the right
- * [500, ...) [..., 500) > (Y) [..., 500) is to the right
- * [500, ...) [..., 501) = (N) [..., 500) is to the left
- * [500, ...) [..., 502) < (N) [..., 500) is to the left
+ * ARGUMENT CENTROID CMP ADJ
+ * [500, ...) [..., 499) > (N) [..., 500) is to the right
+ * [500, ...) [..., 500) > (Y) [..., 500) is to the right
+ * [500, ...) [..., 501) = (N) [..., 500) is to the left
+ * [500, ...) [..., 502) < (N) [..., 500) is to the left
*
* We must search left when the argument is smaller than or equal to
* the centroid. Otherwise search right. We don't need to check
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 4f35992629..6a0fcc20da 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -696,7 +696,7 @@ similar_escape(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
}
@@ -742,7 +742,8 @@ similar_escape(PG_FUNCTION_ARGS)
if (elen > 1)
{
- int mblen = pg_mblen(p);
+ int mblen = pg_mblen(p);
+
if (mblen > 1)
{
/* slow, multi-byte path */
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index f27131edd1..0bfeb5e3fd 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -466,7 +466,7 @@ format_procedure_parts(Oid procedure_oid, List **objnames, List **objargs)
*objargs = NIL;
for (i = 0; i < nargs; i++)
{
- Oid thisargtype = procform->proargtypes.values[i];
+ Oid thisargtype = procform->proargtypes.values[i];
*objargs = lappend(*objargs, format_type_be_qualified(thisargtype));
}
@@ -1637,7 +1637,7 @@ regroleout(PG_FUNCTION_ARGS)
}
/*
- * regrolerecv - converts external binary format to regrole
+ * regrolerecv - converts external binary format to regrole
*/
Datum
regrolerecv(PG_FUNCTION_ARGS)
@@ -1647,7 +1647,7 @@ regrolerecv(PG_FUNCTION_ARGS)
}
/*
- * regrolesend - converts regrole to binary format
+ * regrolesend - converts regrole to binary format
*/
Datum
regrolesend(PG_FUNCTION_ARGS)
@@ -1680,7 +1680,7 @@ regnamespacein(PG_FUNCTION_ARGS)
strspn(nsp_name_or_oid, "0123456789") == strlen(nsp_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(nsp_name_or_oid)));
+ CStringGetDatum(nsp_name_or_oid)));
PG_RETURN_OID(result);
}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index f6bec8be9b..88dd3faf2d 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -3274,7 +3274,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
{
int fnum = attnums[idx];
char *name,
- *val;
+ *val;
name = SPI_fname(tupdesc, fnum);
val = SPI_getvalue(violator, tupdesc, fnum);
@@ -3298,11 +3298,11 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
RelationGetRelationName(fk_rel),
NameStr(riinfo->conname)),
has_perm ?
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names.data, key_values.data,
- RelationGetRelationName(pk_rel)) :
- errdetail("Key is not present in table \"%s\".",
- RelationGetRelationName(pk_rel)),
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names.data, key_values.data,
+ RelationGetRelationName(pk_rel)) :
+ errdetail("Key is not present in table \"%s\".",
+ RelationGetRelationName(pk_rel)),
errtableconstraint(fk_rel, NameStr(riinfo->conname))));
else
ereport(ERROR,
@@ -3315,8 +3315,8 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
key_names.data, key_values.data,
RelationGetRelationName(fk_rel)) :
- errdetail("Key is still referenced from table \"%s\".",
- RelationGetRelationName(fk_rel)),
+ errdetail("Key is still referenced from table \"%s\".",
+ RelationGetRelationName(fk_rel)),
errtableconstraint(fk_rel, NameStr(riinfo->conname))));
}
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0585251d8f..c404ae5e4c 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -106,8 +106,8 @@ typedef struct
int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for prettyprint */
bool varprefix; /* TRUE to print prefixes on Vars */
- ParseExprKind special_exprkind; /* set only for exprkinds needing */
- /* special handling */
+ ParseExprKind special_exprkind; /* set only for exprkinds needing */
+ /* special handling */
} deparse_context;
/*
@@ -350,7 +350,7 @@ static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags, int wrapColumn);
static void get_tablesample_def(TableSampleClause *tablesample,
- deparse_context *context);
+ deparse_context *context);
static void get_query_def(Query *query, StringInfo buf, List *parentnamespace,
TupleDesc resultDesc,
int prettyFlags, int wrapColumn, int startIndent);
@@ -361,8 +361,8 @@ static void get_select_query_def(Query *query, deparse_context *context,
static void get_insert_query_def(Query *query, deparse_context *context);
static void get_update_query_def(Query *query, deparse_context *context);
static void get_update_query_targetlist_def(Query *query, List *targetList,
- deparse_context *context,
- RangeTblEntry *rte);
+ deparse_context *context,
+ RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context,
@@ -376,7 +376,7 @@ static Node *get_rule_sortgroupclause(Index ref, List *tlist,
bool force_colno,
deparse_context *context);
static void get_rule_groupingset(GroupingSet *gset, List *targetlist,
- bool omit_parens, deparse_context *context);
+ bool omit_parens, deparse_context *context);
static void get_rule_orderby(List *orderList, List *targetList,
bool force_colno, deparse_context *context);
static void get_rule_windowclause(Query *query, deparse_context *context);
@@ -424,9 +424,9 @@ static void printSubscripts(ArrayRef *aref, deparse_context *context);
static char *get_relation_name(Oid relid);
static char *generate_relation_name(Oid relid, List *namespaces);
static char *generate_function_name(Oid funcid, int nargs,
- List *argnames, Oid *argtypes,
- bool has_variadic, bool *use_variadic_p,
- ParseExprKind special_exprkind);
+ List *argnames, Oid *argtypes,
+ bool has_variadic, bool *use_variadic_p,
+ ParseExprKind special_exprkind);
static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2);
static text *string_to_text(char *str);
static char *flatten_reloptions(Oid relid);
@@ -1963,7 +1963,7 @@ pg_get_functiondef(PG_FUNCTION_ARGS)
print_function_trftypes(&buf, proctup);
appendStringInfo(&buf, "\n LANGUAGE %s\n",
- quote_identifier(get_language_name(proc->prolang, false)));
+ quote_identifier(get_language_name(proc->prolang, false)));
/* Emit some miscellaneous options on one line */
oldlen = buf.len;
@@ -2364,13 +2364,13 @@ is_input_argument(int nth, const char *argmodes)
static void
print_function_trftypes(StringInfo buf, HeapTuple proctup)
{
- Oid *trftypes;
- int ntypes;
+ Oid *trftypes;
+ int ntypes;
ntypes = get_func_trftypes(proctup, &trftypes);
if (ntypes > 0)
{
- int i;
+ int i;
appendStringInfoString(buf, "\n TRANSFORM ");
for (i = 0; i < ntypes; i++)
@@ -4714,7 +4714,7 @@ get_basic_select_query(Query *query, deparse_context *context,
/* Add the GROUP BY clause if given */
if (query->groupClause != NULL || query->groupingSets != NULL)
{
- ParseExprKind save_exprkind;
+ ParseExprKind save_exprkind;
appendContextKeyword(context, " GROUP BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
@@ -5045,13 +5045,13 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
expr = (Node *) tle->expr;
/*
- * Use column-number form if requested by caller. Otherwise, if expression
- * is a constant, force it to be dumped with an explicit cast as decoration
- * --- this is because a simple integer constant is ambiguous (and will be
- * misinterpreted by findTargetlistEntry()) if we dump it without any
- * decoration. If it's anything more complex than a simple Var, then force
- * extra parens around it, to ensure it can't be misinterpreted as a cube()
- * or rollup() construct.
+ * Use column-number form if requested by caller. Otherwise, if
+ * expression is a constant, force it to be dumped with an explicit cast
+ * as decoration --- this is because a simple integer constant is
+ * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
+ * dump it without any decoration. If it's anything more complex than a
+ * simple Var, then force extra parens around it, to ensure it can't be
+ * misinterpreted as a cube() or rollup() construct.
*/
if (force_colno)
{
@@ -5067,14 +5067,15 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
/*
* We must force parens for function-like expressions even if
* PRETTY_PAREN is off, since those are the ones in danger of
- * misparsing. For other expressions we need to force them
- * only if PRETTY_PAREN is on, since otherwise the expression
- * will output them itself. (We can't skip the parens.)
+ * misparsing. For other expressions we need to force them only if
+ * PRETTY_PAREN is on, since otherwise the expression will output them
+ * itself. (We can't skip the parens.)
*/
- bool need_paren = (PRETTY_PAREN(context)
- || IsA(expr, FuncExpr)
- || IsA(expr, Aggref)
- || IsA(expr, WindowFunc));
+ bool need_paren = (PRETTY_PAREN(context)
+ || IsA(expr, FuncExpr)
+ ||IsA(expr, Aggref)
+ ||IsA(expr, WindowFunc));
+
if (need_paren)
appendStringInfoString(context->buf, "(");
get_rule_expr(expr, context, true);
@@ -5110,7 +5111,7 @@ get_rule_groupingset(GroupingSet *gset, List *targetlist,
foreach(l, gset->content)
{
- Index ref = lfirst_int(l);
+ Index ref = lfirst_int(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(ref, targetlist,
@@ -5502,7 +5503,7 @@ get_insert_query_def(Query *query, deparse_context *context)
}
else if (confl->constraint != InvalidOid)
{
- char *constraint = get_constraint_name(confl->constraint);
+ char *constraint = get_constraint_name(confl->constraint);
appendStringInfo(buf, " ON CONSTRAINT %s",
quote_qualified_identifier(NULL, constraint));
@@ -7917,9 +7918,9 @@ get_rule_expr(Node *node, deparse_context *context,
case T_InferenceElem:
{
- InferenceElem *iexpr = (InferenceElem *) node;
- bool varprefix = context->varprefix;
- bool need_parens;
+ InferenceElem *iexpr = (InferenceElem *) node;
+ bool varprefix = context->varprefix;
+ bool need_parens;
/*
* InferenceElem can only refer to target relation, so a
@@ -7948,13 +7949,13 @@ get_rule_expr(Node *node, deparse_context *context,
if (iexpr->infercollid)
appendStringInfo(buf, " COLLATE %s",
- generate_collation_name(iexpr->infercollid));
+ generate_collation_name(iexpr->infercollid));
/* Add the operator class name, if not default */
if (iexpr->inferopclass)
{
- Oid inferopclass = iexpr->inferopclass;
- Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
+ Oid inferopclass = iexpr->inferopclass;
+ Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
get_opclass_name(inferopclass, inferopcinputtype, buf);
}
diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c
index bd6fc25099..8afd558db3 100644
--- a/src/backend/utils/adt/tsquery_op.c
+++ b/src/backend/utils/adt/tsquery_op.c
@@ -249,6 +249,7 @@ cmp_string(const void *a, const void *b)
{
const char *sa = *((const char **) a);
const char *sb = *((const char **) b);
+
return strcmp(sa, sb);
}
@@ -300,8 +301,8 @@ tsq_mcontains(PG_FUNCTION_ARGS)
result = false;
else
{
- int i;
- int j = 0;
+ int i;
+ int j = 0;
for (i = 0; i < ex_nvalues; i++)
{
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 1d7bb02ca4..ce1d9abdde 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -142,8 +142,10 @@ cmp_txid(const void *aa, const void *bb)
static void
sort_snapshot(TxidSnapshot *snap)
{
- txid last = 0;
- int nxip, idx1, idx2;
+ txid last = 0;
+ int nxip,
+ idx1,
+ idx2;
if (snap->nxip > 1)
{
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 5fd2bef617..779729d724 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -56,14 +56,15 @@ typedef struct
typedef struct
{
- char *buf1; /* 1st string, or abbreviation original string buf */
- char *buf2; /* 2nd string, or abbreviation strxfrm() buf */
- int buflen1;
- int buflen2;
- bool collate_c;
- hyperLogLogState abbr_card; /* Abbreviated key cardinality state */
- hyperLogLogState full_card; /* Full key cardinality state */
- double prop_card; /* Required cardinality proportion */
+ char *buf1; /* 1st string, or abbreviation original string
+ * buf */
+ char *buf2; /* 2nd string, or abbreviation strxfrm() buf */
+ int buflen1;
+ int buflen2;
+ bool collate_c;
+ hyperLogLogState abbr_card; /* Abbreviated key cardinality state */
+ hyperLogLogState full_card; /* Full key cardinality state */
+ double prop_card; /* Required cardinality proportion */
#ifdef HAVE_LOCALE_T
pg_locale_t locale;
#endif
@@ -82,9 +83,9 @@ typedef struct
#define PG_RETURN_UNKNOWN_P(x) PG_RETURN_POINTER(x)
static void btsortsupport_worker(SortSupport ssup, Oid collid);
-static int bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
-static int bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
-static int bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
+static int bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
+static int bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
+static int bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
static Datum bttext_abbrev_convert(Datum original, SortSupport ssup);
static bool bttext_abbrev_abort(int memtupcount, SortSupport ssup);
static int32 text_length(Datum str);
@@ -1415,8 +1416,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
}
/*
- * memcmp() can't tell us which of two unequal strings sorts first, but
- * it's a cheap way to tell if they're equal. Testing shows that
+ * memcmp() can't tell us which of two unequal strings sorts first,
+ * but it's a cheap way to tell if they're equal. Testing shows that
* memcmp() followed by strcoll() is only trivially slower than
* strcoll() by itself, so we don't lose much if this doesn't work out
* very often, and if it does - for example, because there are many
@@ -1726,9 +1727,9 @@ bttextcmp(PG_FUNCTION_ARGS)
Datum
bttextsortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
- Oid collid = ssup->ssup_collation;
- MemoryContext oldcontext;
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ Oid collid = ssup->ssup_collation;
+ MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
@@ -1742,30 +1743,30 @@ bttextsortsupport(PG_FUNCTION_ARGS)
static void
btsortsupport_worker(SortSupport ssup, Oid collid)
{
- bool abbreviate = ssup->abbreviate;
- bool collate_c = false;
- TextSortSupport *tss;
+ bool abbreviate = ssup->abbreviate;
+ bool collate_c = false;
+ TextSortSupport *tss;
#ifdef HAVE_LOCALE_T
- pg_locale_t locale = 0;
+ pg_locale_t locale = 0;
#endif
/*
* If possible, set ssup->comparator to a function which can be used to
* directly compare two datums. If we can do this, we'll avoid the
- * overhead of a trip through the fmgr layer for every comparison,
- * which can be substantial.
+ * overhead of a trip through the fmgr layer for every comparison, which
+ * can be substantial.
*
- * Most typically, we'll set the comparator to bttextfastcmp_locale,
- * which uses strcoll() to perform comparisons. However, if LC_COLLATE
- * = C, we can make things quite a bit faster with bttextfastcmp_c,
- * which uses memcmp() rather than strcoll().
+ * Most typically, we'll set the comparator to bttextfastcmp_locale, which
+ * uses strcoll() to perform comparisons. However, if LC_COLLATE = C, we
+ * can make things quite a bit faster with bttextfastcmp_c, which uses
+ * memcmp() rather than strcoll().
*
- * There is a further exception on Windows. When the database encoding
- * is UTF-8 and we are not using the C collation, complex hacks are
- * required. We don't currently have a comparator that handles that case,
- * so we fall back on the slow method of having the sort code invoke
- * bttextcmp() via the fmgr trampoline.
+ * There is a further exception on Windows. When the database encoding is
+ * UTF-8 and we are not using the C collation, complex hacks are required.
+ * We don't currently have a comparator that handles that case, so we fall
+ * back on the slow method of having the sort code invoke bttextcmp() via
+ * the fmgr trampoline.
*/
if (lc_collate_is_c(collid))
{
@@ -1808,13 +1809,13 @@ btsortsupport_worker(SortSupport ssup, Oid collid)
* It's possible that there are platforms where the use of abbreviated
* keys should be disabled at compile time. Having only 4 byte datums
* could make worst-case performance drastically more likely, for example.
- * Moreover, Darwin's strxfrm() implementations is known to not effectively
- * concentrate a significant amount of entropy from the original string in
- * earlier transformed blobs. It's possible that other supported platforms
- * are similarly encumbered. However, even in those cases, the abbreviated
- * keys optimization may win, and if it doesn't, the "abort abbreviation"
- * code may rescue us. So, for now, we don't disable this anywhere on the
- * basis of performance.
+ * Moreover, Darwin's strxfrm() implementation is known to not
+ * effectively concentrate a significant amount of entropy from the
+ * original string in earlier transformed blobs. It's possible that other
+ * supported platforms are similarly encumbered. However, even in those
+ * cases, the abbreviated keys optimization may win, and if it doesn't,
+ * the "abort abbreviation" code may rescue us. So, for now, we don't
+ * disable this anywhere on the basis of performance.
*/
/*
@@ -1893,16 +1894,16 @@ bttextfastcmp_c(Datum x, Datum y, SortSupport ssup)
static int
bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
{
- text *arg1 = DatumGetTextPP(x);
- text *arg2 = DatumGetTextPP(y);
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ text *arg1 = DatumGetTextPP(x);
+ text *arg2 = DatumGetTextPP(y);
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
/* working state */
- char *a1p,
- *a2p;
- int len1,
- len2,
- result;
+ char *a1p,
+ *a2p;
+ int len1,
+ len2,
+ result;
a1p = VARDATA_ANY(arg1);
a2p = VARDATA_ANY(arg2);
@@ -1943,9 +1944,9 @@ bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
result = strcoll(tss->buf1, tss->buf2);
/*
- * In some locales strcoll() can claim that nonidentical strings are equal.
- * Believing that would be bad news for a number of reasons, so we follow
- * Perl's lead and sort "equal" strings according to strcmp().
+ * In some locales strcoll() can claim that nonidentical strings are
+ * equal. Believing that would be bad news for a number of reasons, so we
+ * follow Perl's lead and sort "equal" strings according to strcmp().
*/
if (result == 0)
result = strcmp(tss->buf1, tss->buf2);
@@ -1966,9 +1967,9 @@ done:
static int
bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
{
- char *a = (char *) &x;
- char *b = (char *) &y;
- int result;
+ char *a = (char *) &x;
+ char *b = (char *) &y;
+ int result;
result = memcmp(a, b, sizeof(Datum));
@@ -1989,15 +1990,15 @@ bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
static Datum
bttext_abbrev_convert(Datum original, SortSupport ssup)
{
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
- text *authoritative = DatumGetTextPP(original);
- char *authoritative_data = VARDATA_ANY(authoritative);
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ text *authoritative = DatumGetTextPP(original);
+ char *authoritative_data = VARDATA_ANY(authoritative);
/* working state */
- Datum res;
- char *pres;
- int len;
- uint32 hash;
+ Datum res;
+ char *pres;
+ int len;
+ uint32 hash;
/*
* Abbreviated key representation is a pass-by-value Datum that is treated
@@ -2009,8 +2010,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
len = VARSIZE_ANY_EXHDR(authoritative);
/*
- * If we're using the C collation, use memcmp(), rather than strxfrm(),
- * to abbreviate keys. The full comparator for the C locale is always
+ * If we're using the C collation, use memcmp(), rather than strxfrm(), to
+ * abbreviate keys. The full comparator for the C locale is always
* memcmp(), and we can't risk having this give a different answer.
* Besides, this should be faster, too.
*/
@@ -2018,7 +2019,7 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
memcpy(pres, authoritative_data, Min(len, sizeof(Datum)));
else
{
- Size bsize;
+ Size bsize;
/*
* We're not using the C collation, so fall back on strxfrm.
@@ -2075,8 +2076,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
/*
* Maintain approximate cardinality of both abbreviated keys and original,
* authoritative keys using HyperLogLog. Used as cheap insurance against
- * the worst case, where we do many string transformations for no saving in
- * full strcoll()-based comparisons. These statistics are used by
+ * the worst case, where we do many string transformations for no saving
+ * in full strcoll()-based comparisons. These statistics are used by
* bttext_abbrev_abort().
*
* First, Hash key proper, or a significant fraction of it. Mix in length
@@ -2094,8 +2095,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
/* Hash abbreviated key */
#if SIZEOF_DATUM == 8
{
- uint32 lohalf,
- hihalf;
+ uint32 lohalf,
+ hihalf;
lohalf = (uint32) res;
hihalf = (uint32) (res >> 32);
@@ -2118,8 +2119,9 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
static bool
bttext_abbrev_abort(int memtupcount, SortSupport ssup)
{
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
- double abbrev_distinct, key_distinct;
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ double abbrev_distinct,
+ key_distinct;
Assert(ssup->abbreviate);
@@ -2131,9 +2133,9 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
key_distinct = estimateHyperLogLog(&tss->full_card);
/*
- * Clamp cardinality estimates to at least one distinct value. While NULLs
- * are generally disregarded, if only NULL values were seen so far, that
- * might misrepresent costs if we failed to clamp.
+ * Clamp cardinality estimates to at least one distinct value. While
+ * NULLs are generally disregarded, if only NULL values were seen so far,
+ * that might misrepresent costs if we failed to clamp.
*/
if (abbrev_distinct <= 1.0)
abbrev_distinct = 1.0;
@@ -2149,7 +2151,7 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
#ifdef TRACE_SORT
if (trace_sort)
{
- double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
+ double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
elog(LOG, "bttext_abbrev: abbrev_distinct after %d: %f "
"(key_distinct: %f, norm_abbrev_card: %f, prop_card: %f)",
@@ -2180,26 +2182,26 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
* When we have exceeded 10,000 tuples, decay required cardinality
* aggressively for next call.
*
- * This is useful because the number of comparisons required on average
- * increases at a linearithmic rate, and at roughly 10,000 tuples that
- * factor will start to dominate over the linear costs of string
- * transformation (this is a conservative estimate). The decay rate is
- * chosen to be a little less aggressive than halving -- which (since
- * we're called at points at which memtupcount has doubled) would never
- * see the cost model actually abort past the first call following a
- * decay. This decay rate is mostly a precaution against a sudden,
- * violent swing in how well abbreviated cardinality tracks full key
- * cardinality. The decay also serves to prevent a marginal case from
- * being aborted too late, when too much has already been invested in
- * string transformation.
+ * This is useful because the number of comparisons required on
+ * average increases at a linearithmic rate, and at roughly 10,000
+ * tuples that factor will start to dominate over the linear costs of
+ * string transformation (this is a conservative estimate). The decay
+ * rate is chosen to be a little less aggressive than halving -- which
+ * (since we're called at points at which memtupcount has doubled)
+ * would never see the cost model actually abort past the first call
+ * following a decay. This decay rate is mostly a precaution against
+ * a sudden, violent swing in how well abbreviated cardinality tracks
+ * full key cardinality. The decay also serves to prevent a marginal
+ * case from being aborted too late, when too much has already been
+ * invested in string transformation.
*
- * It's possible for sets of several million distinct strings with mere
- * tens of thousands of distinct abbreviated keys to still benefit very
- * significantly. This will generally occur provided each abbreviated
- * key is a proxy for a roughly uniform number of the set's full keys.
- * If it isn't so, we hope to catch that early and abort. If it isn't
- * caught early, by the time the problem is apparent it's probably not
- * worth aborting.
+ * It's possible for sets of several million distinct strings with
+ * mere tens of thousands of distinct abbreviated keys to still
+ * benefit very significantly. This will generally occur provided
+ * each abbreviated key is a proxy for a roughly uniform number of the
+ * set's full keys. If it isn't so, we hope to catch that early and
+ * abort. If it isn't caught early, by the time the problem is
+ * apparent it's probably not worth aborting.
*/
if (memtupcount > 10000)
tss->prop_card *= 0.65;
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 8bb7144ecf..99bc832ab8 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1405,7 +1405,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
if (*(utf8string + count))
{
res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
- utf8string + count, NULL);
+ utf8string + count, NULL);
if (res_code != 0 || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
@@ -3697,7 +3697,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
for (i = 0; i < result; i++)
{
datum = PointerGetDatum(xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i],
- xmlerrcxt));
+ xmlerrcxt));
(void) accumArrayResult(astate, datum, false,
XMLOID, CurrentMemoryContext);
}
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 1907a87458..c5cc4011bf 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -226,7 +226,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
offsetof(InvalidationChunk, msgs) +
- FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
+ FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = FIRSTCHUNKSIZE;
chunk->next = *listHdr;
@@ -240,7 +240,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
offsetof(InvalidationChunk, msgs) +
- chunksize * sizeof(SharedInvalidationMessage));
+ chunksize * sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = chunksize;
chunk->next = *listHdr;
@@ -333,6 +333,7 @@ AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
msg.cc.id = (int8) id;
msg.cc.dbId = dbId;
msg.cc.hashValue = hashValue;
+
/*
* Define padding bytes in SharedInvalidationMessage structs to be
* defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
@@ -712,11 +713,11 @@ PrepareInvalidationState(void)
myInfo->my_level = GetCurrentTransactionNestLevel();
/*
- * If there's any previous entry, this one should be for a deeper
- * nesting level.
+ * If there's any previous entry, this one should be for a deeper nesting
+ * level.
*/
Assert(transInvalInfo == NULL ||
- myInfo->my_level > transInvalInfo->my_level);
+ myInfo->my_level > transInvalInfo->my_level);
transInvalInfo = myInfo;
}
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index f259751e15..7b32247d34 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3012,8 +3012,8 @@ get_tablesample_method_name(Oid tsmid)
tuple = SearchSysCache1(TABLESAMPLEMETHODOID, ObjectIdGetDatum(tsmid));
if (HeapTupleIsValid(tuple))
{
- Form_pg_tablesample_method tup =
- (Form_pg_tablesample_method) GETSTRUCT(tuple);
+ Form_pg_tablesample_method tup =
+ (Form_pg_tablesample_method) GETSTRUCT(tuple);
char *result;
result = pstrdup(NameStr(tup->tsmname));
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 9a26a4efc5..e6808e7576 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -153,8 +153,8 @@ CreateCachedPlan(Node *raw_parse_tree,
CachedPlanSource *plansource;
MemoryContext source_context;
MemoryContext oldcxt;
- Oid user_id;
- int security_context;
+ Oid user_id;
+ int security_context;
Assert(query_string != NULL); /* required as of 8.4 */
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index e745006b73..f60f3cb234 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -873,7 +873,7 @@ equalPolicy(RowSecurityPolicy *policy1, RowSecurityPolicy *policy2)
return false;
if (policy1->hassublinks != policy2->hassublinks)
return false;
- if (strcmp(policy1->policy_name,policy2->policy_name) != 0)
+ if (strcmp(policy1->policy_name, policy2->policy_name) != 0)
return false;
if (ARR_DIMS(policy1->roles)[0] != ARR_DIMS(policy2->roles)[0])
return false;
@@ -906,8 +906,8 @@ equalPolicy(RowSecurityPolicy *policy1, RowSecurityPolicy *policy2)
static bool
equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
{
- ListCell *lc,
- *rc;
+ ListCell *lc,
+ *rc;
if (rsdesc1 == NULL && rsdesc2 == NULL)
return true;
@@ -922,10 +922,10 @@ equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
/* RelationBuildRowSecurity should build policies in order */
forboth(lc, rsdesc1->policies, rc, rsdesc2->policies)
{
- RowSecurityPolicy *l = (RowSecurityPolicy *) lfirst(lc);
- RowSecurityPolicy *r = (RowSecurityPolicy *) lfirst(rc);
+ RowSecurityPolicy *l = (RowSecurityPolicy *) lfirst(lc);
+ RowSecurityPolicy *r = (RowSecurityPolicy *) lfirst(rc);
- if (!equalPolicy(l,r))
+ if (!equalPolicy(l, r))
return false;
}
@@ -3460,7 +3460,7 @@ RelationCacheInitializePhase3(void)
{
RelationBuildRowSecurity(relation);
- Assert (relation->rd_rsdesc != NULL);
+ Assert(relation->rd_rsdesc != NULL);
restart = true;
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 7def1be32a..58f90f672e 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -634,7 +634,7 @@ static const struct cachedesc cacheinfo[] = {
},
16
},
- {ReplicationOriginRelationId, /* REPLORIGNAME */
+ {ReplicationOriginRelationId, /* REPLORIGNAME */
ReplicationOriginNameIndex,
1,
{
@@ -701,26 +701,26 @@ static const struct cachedesc cacheinfo[] = {
4
},
{TransformRelationId, /* TRFOID */
- TransformOidIndexId,
- 1,
- {
- ObjectIdAttributeNumber,
- 0,
- 0,
- 0,
- },
- 16
+ TransformOidIndexId,
+ 1,
+ {
+ ObjectIdAttributeNumber,
+ 0,
+ 0,
+ 0,
+ },
+ 16
},
{TransformRelationId, /* TRFTYPELANG */
- TransformTypeLangIndexId,
- 2,
- {
- Anum_pg_transform_trftype,
- Anum_pg_transform_trflang,
- 0,
- 0,
- },
- 16
+ TransformTypeLangIndexId,
+ 2,
+ {
+ Anum_pg_transform_trftype,
+ Anum_pg_transform_trflang,
+ 0,
+ 0,
+ },
+ 16
},
{TSConfigMapRelationId, /* TSCONFIGMAP */
TSConfigMapIndexId,
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index dfd102a1fb..088c714821 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -1592,8 +1592,8 @@ FlushErrorState(void)
void
ThrowErrorData(ErrorData *edata)
{
- ErrorData *newedata;
- MemoryContext oldcontext;
+ ErrorData *newedata;
+ MemoryContext oldcontext;
if (!errstart(edata->elevel, edata->filename, edata->lineno,
edata->funcname, NULL))
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 46bc1f238f..cd3db871e0 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -702,7 +702,7 @@ Size
EstimateLibraryStateSpace(void)
{
DynamicFileList *file_scanner;
- Size size = 1;
+ Size size = 1;
for (file_scanner = file_list;
file_scanner != NULL;
@@ -724,7 +724,7 @@ SerializeLibraryState(Size maxsize, char *start_address)
file_scanner != NULL;
file_scanner = file_scanner->next)
{
- Size len;
+ Size len;
len = strlcpy(start_address, file_scanner->filename, maxsize) + 1;
Assert(len < maxsize);
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index fccef38249..2b09076b61 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -886,15 +886,14 @@ int
get_func_trftypes(HeapTuple procTup,
Oid **p_trftypes)
{
-
Datum protrftypes;
ArrayType *arr;
int nelems;
- bool isNull;
+ bool isNull;
protrftypes = SysCacheGetAttr(PROCOID, procTup,
- Anum_pg_proc_protrftypes,
- &isNull);
+ Anum_pg_proc_protrftypes,
+ &isNull);
if (!isNull)
{
/*
@@ -903,7 +902,7 @@ get_func_trftypes(HeapTuple procTup,
* deconstruct_array() since the array data is just going to look like
* a C array of values.
*/
- arr = DatumGetArrayTypeP(protrftypes); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(protrftypes); /* ensure not toasted */
nelems = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
nelems < 0 ||
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index b0d85af14d..2b53c19fb9 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -246,6 +246,7 @@ SwitchToSharedLatch(void)
Assert(MyProc != NULL);
MyLatch = &MyProc->procLatch;
+
/*
* Set the shared latch as the local one might have been set. This
* shouldn't normally be necessary as code is supposed to check the
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index debadf0f94..aa67f75c0c 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -1107,7 +1107,7 @@ ShutdownPostgres(int code, Datum arg)
static void
StatementTimeoutHandler(void)
{
- int sig = SIGINT;
+ int sig = SIGINT;
/*
* During authentication the timeout is used to deal with
diff --git a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
index fa60cdc55a..e73ed4d865 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
@@ -23,7 +23,7 @@ open(FILE, $in_file) || die("cannot open $in_file");
 while (<FILE>)
{
	next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
printf STDERR "Warning: duplicate GB18030: %08x\n", $code;
next;
}
- $arrayu{$utf} = $code;
+ $arrayu{$utf} = $code;
$arrayc{$code} = $utf;
$count++;
}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
index edfb61bcd9..33d108e025 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
@@ -99,7 +99,7 @@ print FILE "/*\n";
print FILE " * This file was generated by UCS_to_SHIFT_JIS_2004.pl\n";
print FILE " */\n";
print FILE
- "static const pg_utf_to_local_combined ULmapSHIFT_JIS_2004_combined[] = {\n";
+"static const pg_utf_to_local_combined ULmapSHIFT_JIS_2004_combined[] = {\n";
for $index (sort { $a cmp $b } keys(%array1))
{
@@ -212,7 +212,7 @@ print FILE "/*\n";
print FILE " * This file was generated by UCS_to_SHIFT_JIS_2004.pl\n";
print FILE " */\n";
print FILE
- "static const pg_local_to_utf_combined LUmapSHIFT_JIS_2004_combined[] = {\n";
+"static const pg_local_to_utf_combined LUmapSHIFT_JIS_2004_combined[] = {\n";
for $index (sort { $a <=> $b } keys(%array1))
{
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 1d9b10f8a7..09002a77d8 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -22,7 +22,7 @@ typedef struct
} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
-static const codes_t big5Level1ToCnsPlane1[25] = { /* range */
+static const codes_t big5Level1ToCnsPlane1[25] = { /* range */
{0xA140, 0x2121},
{0xA1F6, 0x2258},
{0xA1F7, 0x2257},
@@ -51,7 +51,7 @@ static const codes_t big5Level1ToCnsPlane1[25] = { /* range */
};
/* map CNS 11643-1992 Plane 1 to Big5 Level 1 */
-static const codes_t cnsPlane1ToBig5Level1[26] = { /* range */
+static const codes_t cnsPlane1ToBig5Level1[26] = { /* range */
{0x2121, 0xA140},
{0x2257, 0xA1F7},
{0x2258, 0xA1F6},
@@ -81,7 +81,7 @@ static const codes_t cnsPlane1ToBig5Level1[26] = { /* range */
};
/* map Big5 Level 2 to CNS 11643-1992 Plane 2 */
-static const codes_t big5Level2ToCnsPlane2[48] = { /* range */
+static const codes_t big5Level2ToCnsPlane2[48] = { /* range */
{0xC940, 0x2121},
{0xc94a, 0x0000},
{0xC94B, 0x212B},
@@ -133,7 +133,7 @@ static const codes_t big5Level2ToCnsPlane2[48] = { /* range */
};
/* map CNS 11643-1992 Plane 2 to Big5 Level 2 */
-static const codes_t cnsPlane2ToBig5Level2[49] = { /* range */
+static const codes_t cnsPlane2ToBig5Level2[49] = { /* range */
{0x2121, 0xC940},
{0x212B, 0xC94B},
{0x214C, 0xC9BE},
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 3038d7c9dd..be7ba4f29d 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -666,11 +666,12 @@ const char *const config_type_names[] =
typedef struct
{
- char unit[MAX_UNIT_LEN + 1]; /* unit, as a string, like "kB" or "min" */
- int base_unit; /* GUC_UNIT_XXX */
- int multiplier; /* If positive, multiply the value with this for
- * unit -> base_unit conversion. If negative,
- * divide (with the absolute value) */
+ char unit[MAX_UNIT_LEN + 1]; /* unit, as a string, like "kB" or
+ * "min" */
+ int base_unit; /* GUC_UNIT_XXX */
+ int multiplier; /* If positive, multiply the value with this
+ * for unit -> base_unit conversion. If
+ * negative, divide (with the absolute value) */
} unit_conversion;
/* Ensure that the constants in the tables don't overflow or underflow */
@@ -684,58 +685,56 @@ typedef struct
#error XLOG_SEG_SIZE must be between 1MB and 1GB
#endif
-static const char *memory_units_hint =
- gettext_noop("Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\".");
+static const char *memory_units_hint = gettext_noop("Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\".");
static const unit_conversion memory_unit_conversion_table[] =
{
- { "TB", GUC_UNIT_KB, 1024*1024*1024 },
- { "GB", GUC_UNIT_KB, 1024*1024 },
- { "MB", GUC_UNIT_KB, 1024 },
- { "kB", GUC_UNIT_KB, 1 },
+ {"TB", GUC_UNIT_KB, 1024 * 1024 * 1024},
+ {"GB", GUC_UNIT_KB, 1024 * 1024},
+ {"MB", GUC_UNIT_KB, 1024},
+ {"kB", GUC_UNIT_KB, 1},
- { "TB", GUC_UNIT_BLOCKS, (1024*1024*1024) / (BLCKSZ / 1024) },
- { "GB", GUC_UNIT_BLOCKS, (1024*1024) / (BLCKSZ / 1024) },
- { "MB", GUC_UNIT_BLOCKS, 1024 / (BLCKSZ / 1024) },
- { "kB", GUC_UNIT_BLOCKS, -(BLCKSZ / 1024) },
+ {"TB", GUC_UNIT_BLOCKS, (1024 * 1024 * 1024) / (BLCKSZ / 1024)},
+ {"GB", GUC_UNIT_BLOCKS, (1024 * 1024) / (BLCKSZ / 1024)},
+ {"MB", GUC_UNIT_BLOCKS, 1024 / (BLCKSZ / 1024)},
+ {"kB", GUC_UNIT_BLOCKS, -(BLCKSZ / 1024)},
- { "TB", GUC_UNIT_XBLOCKS, (1024*1024*1024) / (XLOG_BLCKSZ / 1024) },
- { "GB", GUC_UNIT_XBLOCKS, (1024*1024) / (XLOG_BLCKSZ / 1024) },
- { "MB", GUC_UNIT_XBLOCKS, 1024 / (XLOG_BLCKSZ / 1024) },
- { "kB", GUC_UNIT_XBLOCKS, -(XLOG_BLCKSZ / 1024) },
+ {"TB", GUC_UNIT_XBLOCKS, (1024 * 1024 * 1024) / (XLOG_BLCKSZ / 1024)},
+ {"GB", GUC_UNIT_XBLOCKS, (1024 * 1024) / (XLOG_BLCKSZ / 1024)},
+ {"MB", GUC_UNIT_XBLOCKS, 1024 / (XLOG_BLCKSZ / 1024)},
+ {"kB", GUC_UNIT_XBLOCKS, -(XLOG_BLCKSZ / 1024)},
- { "TB", GUC_UNIT_XSEGS, (1024*1024*1024) / (XLOG_SEG_SIZE / 1024) },
- { "GB", GUC_UNIT_XSEGS, (1024*1024) / (XLOG_SEG_SIZE / 1024) },
- { "MB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / (1024 * 1024)) },
- { "kB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / 1024) },
+ {"TB", GUC_UNIT_XSEGS, (1024 * 1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
+ {"GB", GUC_UNIT_XSEGS, (1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
+ {"MB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / (1024 * 1024))},
+ {"kB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / 1024)},
- { "" } /* end of table marker */
+ {""} /* end of table marker */
};
-static const char *time_units_hint =
- gettext_noop("Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\".");
+static const char *time_units_hint = gettext_noop("Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\".");
static const unit_conversion time_unit_conversion_table[] =
{
- { "d", GUC_UNIT_MS, 1000 * 60 * 60 * 24 },
- { "h", GUC_UNIT_MS, 1000 * 60 * 60 },
- { "min", GUC_UNIT_MS, 1000 * 60},
- { "s", GUC_UNIT_MS, 1000 },
- { "ms", GUC_UNIT_MS, 1 },
-
- { "d", GUC_UNIT_S, 60 * 60 * 24 },
- { "h", GUC_UNIT_S, 60 * 60 },
- { "min", GUC_UNIT_S, 60 },
- { "s", GUC_UNIT_S, 1 },
- { "ms", GUC_UNIT_S, -1000 },
-
- { "d", GUC_UNIT_MIN, 60 * 24 },
- { "h", GUC_UNIT_MIN, 60 },
- { "min", GUC_UNIT_MIN, 1 },
- { "s", GUC_UNIT_MIN, -60 },
- { "ms", GUC_UNIT_MIN, -1000 * 60 },
-
- { "" } /* end of table marker */
+ {"d", GUC_UNIT_MS, 1000 * 60 * 60 * 24},
+ {"h", GUC_UNIT_MS, 1000 * 60 * 60},
+ {"min", GUC_UNIT_MS, 1000 * 60},
+ {"s", GUC_UNIT_MS, 1000},
+ {"ms", GUC_UNIT_MS, 1},
+
+ {"d", GUC_UNIT_S, 60 * 60 * 24},
+ {"h", GUC_UNIT_S, 60 * 60},
+ {"min", GUC_UNIT_S, 60},
+ {"s", GUC_UNIT_S, 1},
+ {"ms", GUC_UNIT_S, -1000},
+
+ {"d", GUC_UNIT_MIN, 60 * 24},
+ {"h", GUC_UNIT_MIN, 60},
+ {"min", GUC_UNIT_MIN, 1},
+ {"s", GUC_UNIT_MIN, -60},
+ {"ms", GUC_UNIT_MIN, -1000 * 60},
+
+ {""} /* end of table marker */
};
/*
@@ -993,8 +992,8 @@ static struct config_bool ConfigureNamesBool[] =
{
{"wal_compression", PGC_USERSET, WAL_SETTINGS,
- gettext_noop("Compresses full-page writes written in WAL file."),
- NULL
+ gettext_noop("Compresses full-page writes written in WAL file."),
+ NULL
},
&wal_compression,
false,
@@ -3685,10 +3684,10 @@ static int num_guc_variables;
*/
typedef struct ConfigFileVariable
{
- char *name;
- char *value;
- char *filename;
- int sourceline;
+ char *name;
+ char *value;
+ char *filename;
+ int sourceline;
} ConfigFileVariable;
static struct ConfigFileVariable *guc_file_variables;
@@ -5160,7 +5159,7 @@ convert_to_base_unit(int64 value, const char *unit,
int base_unit, int64 *base_value)
{
const unit_conversion *table;
- int i;
+ int i;
if (base_unit & GUC_UNIT_MEMORY)
table = memory_unit_conversion_table;
@@ -5207,9 +5206,9 @@ convert_from_base_unit(int64 base_value, int base_unit,
if (base_unit == table[i].base_unit)
{
/*
- * Accept the first conversion that divides the value evenly.
- * We assume that the conversions for each base unit are ordered
- * from greatest unit to the smallest!
+ * Accept the first conversion that divides the value evenly. We
+ * assume that the conversions for each base unit are ordered from
+ * greatest unit to the smallest!
*/
if (table[i].multiplier < 0)
{
@@ -5278,7 +5277,7 @@ parse_int(const char *value, int *result, int flags, const char **hintmsg)
bool converted = false;
if ((flags & GUC_UNIT) == 0)
- return false; /* this setting does not accept a unit */
+ return false; /* this setting does not accept a unit */
unitlen = 0;
while (*endptr != '\0' && !isspace((unsigned char) *endptr) &&
@@ -5694,7 +5693,7 @@ set_config_option(const char *name, const char *value,
if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE)
ereport(elevel,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot set parameters during a parallel operation")));
+ errmsg("cannot set parameters during a parallel operation")));
record = find_option(name, true, elevel);
if (record == NULL)
@@ -7017,7 +7016,7 @@ ExecSetVariableStmt(VariableSetStmt *stmt, bool isTopLevel)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot set parameters during a parallel operation")));
+ errmsg("cannot set parameters during a parallel operation")));
switch (stmt->kind)
{
diff --git a/src/backend/utils/misc/rls.c b/src/backend/utils/misc/rls.c
index 066ac21a58..44cb374303 100644
--- a/src/backend/utils/misc/rls.c
+++ b/src/backend/utils/misc/rls.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* rls.c
- * RLS-related utility functions.
+ * RLS-related utility functions.
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/utils/misc/rls.c
+ * src/backend/utils/misc/rls.c
*
*-------------------------------------------------------------------------
*/
@@ -24,7 +24,7 @@
#include "utils/syscache.h"
-extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
+extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
/*
* check_enable_rls
@@ -48,10 +48,10 @@ extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
int
check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
{
- HeapTuple tuple;
- Form_pg_class classform;
- bool relrowsecurity;
- Oid user_id = checkAsUser ? checkAsUser : GetUserId();
+ HeapTuple tuple;
+ Form_pg_class classform;
+ bool relrowsecurity;
+ Oid user_id = checkAsUser ? checkAsUser : GetUserId();
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
@@ -88,25 +88,24 @@ check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
/*
* If the row_security GUC is 'off' then check if the user has permission
- * to bypass it. Note that we have already handled the case where the user
- * is the table owner above.
+ * to bypass it. Note that we have already handled the case where the
+ * user is the table owner above.
*
- * Note that row_security is always considered 'on' when querying
- * through a view or other cases where checkAsUser is true, so skip this
- * if checkAsUser is in use.
+ * Note that row_security is always considered 'on' when querying through
+ * a view or other cases where checkAsUser is true, so skip this if
+ * checkAsUser is in use.
*/
if (!checkAsUser && row_security == ROW_SECURITY_OFF)
{
if (has_bypassrls_privilege(user_id))
/* OK to bypass */
return RLS_NONE_ENV;
+ else if (noError)
+ return RLS_ENABLED;
else
- if (noError)
- return RLS_ENABLED;
- else
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("insufficient privilege to bypass row security.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("insufficient privilege to bypass row security.")));
}
/* RLS should be fully enabled for this relation. */
diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c
index 69479a5fc8..aaf1d6c410 100644
--- a/src/backend/utils/misc/sampling.c
+++ b/src/backend/utils/misc/sampling.c
@@ -150,7 +150,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n)
double V,
quot;
- V = sampler_random_fract(rs->randstate); /* Generate V */
+ V = sampler_random_fract(rs->randstate); /* Generate V */
S = 0;
t += 1;
/* Note: "num" in Vitter's code is always equal to t - n */
@@ -276,7 +276,7 @@ anl_init_selection_state(int n)
double
anl_get_next_S(double t, int n, double *stateptr)
{
- double result;
+ double result;
oldrs.W = *stateptr;
result = reservoir_get_next_S(&oldrs, t, n);
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index a70966ec99..ffef9658e4 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -102,8 +102,8 @@ FinishSortSupportFunction(Oid opfamily, Oid opcintype, SortSupport ssup)
if (OidIsValid(sortSupportFunction))
{
/*
- * The sort support function can provide a comparator, but it can
- * also choose not to so (e.g. based on the selected collation).
+ * The sort support function can provide a comparator, but it can also
+ * choose not to so (e.g. based on the selected collation).
*/
OidFunctionCall1(sortSupportFunction, PointerGetDatum(ssup));
}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 3d5da444a6..435041afa1 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -356,11 +356,12 @@ struct Tuplesortstate
/*
* Additional state for managing "abbreviated key" sortsupport routines
- * (which currently may be used by all cases except the Datum sort case and
- * hash index case). Tracks the intervals at which the optimization's
+ * (which currently may be used by all cases except the Datum sort case
+ * and hash index case). Tracks the intervals at which the optimization's
* effectiveness is tested.
*/
- int64 abbrevNext; /* Tuple # at which to next check applicability */
+ int64 abbrevNext; /* Tuple # at which to next check
+ * applicability */
/*
* These variables are specific to the CLUSTER case; they are set by
@@ -660,9 +661,9 @@ tuplesort_begin_heap(TupleDesc tupDesc,
/*
* The "onlyKey" optimization cannot be used with abbreviated keys, since
- * tie-breaker comparisons may be required. Typically, the optimization is
- * only of value to pass-by-value types anyway, whereas abbreviated keys
- * are typically only of value to pass-by-reference types.
+ * tie-breaker comparisons may be required. Typically, the optimization
+ * is only of value to pass-by-value types anyway, whereas abbreviated
+ * keys are typically only of value to pass-by-reference types.
*/
if (nkeys == 1 && !state->sortKeys->abbrev_converter)
state->onlyKey = state->sortKeys;
@@ -678,9 +679,9 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
int workMem, bool randomAccess)
{
Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
- ScanKey indexScanKey;
+ ScanKey indexScanKey;
MemoryContext oldcontext;
- int i;
+ int i;
Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
@@ -771,9 +772,9 @@ tuplesort_begin_index_btree(Relation heapRel,
int workMem, bool randomAccess)
{
Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
- ScanKey indexScanKey;
+ ScanKey indexScanKey;
MemoryContext oldcontext;
- int i;
+ int i;
oldcontext = MemoryContextSwitchTo(state->sortcontext);
@@ -929,9 +930,9 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
/*
* The "onlyKey" optimization cannot be used with abbreviated keys, since
- * tie-breaker comparisons may be required. Typically, the optimization is
- * only of value to pass-by-value types anyway, whereas abbreviated keys
- * are typically only of value to pass-by-reference types.
+ * tie-breaker comparisons may be required. Typically, the optimization
+ * is only of value to pass-by-value types anyway, whereas abbreviated
+ * keys are typically only of value to pass-by-reference types.
*/
if (!state->sortKeys->abbrev_converter)
state->onlyKey = state->sortKeys;
@@ -1277,7 +1278,7 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
else
{
/* Abort abbreviation */
- int i;
+ int i;
stup.datum1 = original;
@@ -1285,13 +1286,13 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
* Set state to be consistent with never trying abbreviation.
*
* Alter datum1 representation in already-copied tuples, so as to
- * ensure a consistent representation (current tuple was just handled).
- * Note that we rely on all tuples copied so far actually being
- * contained within memtuples array.
+ * ensure a consistent representation (current tuple was just
+ * handled). Note that we rely on all tuples copied so far actually
+ * being contained within memtuples array.
*/
for (i = 0; i < state->memtupcount; i++)
{
- SortTuple *mtup = &state->memtuples[i];
+ SortTuple *mtup = &state->memtuples[i];
tuple = mtup->tuple;
mtup->datum1 = index_getattr(tuple,
@@ -1325,8 +1326,8 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
* control, and possibly abbreviated. The copied value is pointed to by
* stup.tuple and is treated as the canonical copy (e.g. to return via
* tuplesort_getdatum or when writing to tape); stup.datum1 gets the
- * abbreviated value if abbreviation is happening, otherwise it's identical
- * to stup.tuple.
+ * abbreviated value if abbreviation is happening, otherwise it's
+ * identical to stup.tuple.
*/
if (isNull || state->datumTypeByVal)
@@ -1337,7 +1338,7 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
}
else
{
- Datum original = datumCopy(val, false, state->datumTypeLen);
+ Datum original = datumCopy(val, false, state->datumTypeLen);
stup.isnull1 = false;
stup.tuple = DatumGetPointer(original);
@@ -1356,7 +1357,7 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
else
{
/* Abort abbreviation */
- int i;
+ int i;
stup.datum1 = original;
@@ -1364,13 +1365,13 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
* Set state to be consistent with never trying abbreviation.
*
* Alter datum1 representation in already-copied tuples, so as to
- * ensure a consistent representation (current tuple was just handled).
- * Note that we rely on all tuples copied so far actually being
- * contained within memtuples array.
+ * ensure a consistent representation (current tuple was just
+ * handled). Note that we rely on all tuples copied so far
+ * actually being contained within memtuples array.
*/
for (i = 0; i < state->memtupcount; i++)
{
- SortTuple *mtup = &state->memtuples[i];
+ SortTuple *mtup = &state->memtuples[i];
mtup->datum1 = PointerGetDatum(mtup->tuple);
}
@@ -1524,8 +1525,8 @@ consider_abort_common(Tuplesortstate *state)
state->abbrevNext *= 2;
/*
- * Check opclass-supplied abbreviation abort routine. It may
- * indicate that abbreviation should not proceed.
+ * Check opclass-supplied abbreviation abort routine. It may indicate
+ * that abbreviation should not proceed.
*/
if (!state->sortKeys->abbrev_abort(state->memtupcount,
state->sortKeys))
@@ -2231,9 +2232,9 @@ mergeruns(Tuplesortstate *state)
{
/*
* If there are multiple runs to be merged, when we go to read back
- * tuples from disk, abbreviated keys will not have been stored, and we
- * don't care to regenerate them. Disable abbreviation from this point
- * on.
+ * tuples from disk, abbreviated keys will not have been stored, and
+ * we don't care to regenerate them. Disable abbreviation from this
+ * point on.
*/
state->sortKeys->abbrev_converter = NULL;
state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
@@ -3121,7 +3122,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
* MinimalTuple using the exported interface for that.
*/
TupleTableSlot *slot = (TupleTableSlot *) tup;
- Datum original;
+ Datum original;
MinimalTuple tuple;
HeapTupleData htup;
@@ -3157,7 +3158,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
else
{
/* Abort abbreviation */
- int i;
+ int i;
stup->datum1 = original;
@@ -3165,18 +3166,18 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
* Set state to be consistent with never trying abbreviation.
*
* Alter datum1 representation in already-copied tuples, so as to
- * ensure a consistent representation (current tuple was just handled).
- * Note that we rely on all tuples copied so far actually being
- * contained within memtuples array.
+ * ensure a consistent representation (current tuple was just
+ * handled). Note that we rely on all tuples copied so far actually
+ * being contained within memtuples array.
*/
for (i = 0; i < state->memtupcount; i++)
{
- SortTuple *mtup = &state->memtuples[i];
+ SortTuple *mtup = &state->memtuples[i];
htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
- MINIMAL_TUPLE_OFFSET;
+ MINIMAL_TUPLE_OFFSET;
htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
- MINIMAL_TUPLE_OFFSET);
+ MINIMAL_TUPLE_OFFSET);
mtup->datum1 = heap_getattr(&htup,
state->sortKeys[0].ssup_attno,
@@ -3247,7 +3248,7 @@ static int
comparetup_cluster(const SortTuple *a, const SortTuple *b,
Tuplesortstate *state)
{
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
HeapTuple ltup;
HeapTuple rtup;
TupleDesc tupDesc;
@@ -3364,6 +3365,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
tuple = heap_copytuple(tuple);
stup->tuple = (void *) tuple;
USEMEM(state, GetMemoryChunkSpace(tuple));
+
/*
* set up first-column key value, and potentially abbreviate, if it's a
* simple column
@@ -3396,7 +3398,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
else
{
/* Abort abbreviation */
- int i;
+ int i;
stup->datum1 = original;
@@ -3404,17 +3406,17 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
* Set state to be consistent with never trying abbreviation.
*
* Alter datum1 representation in already-copied tuples, so as to
- * ensure a consistent representation (current tuple was just handled).
- * Note that we rely on all tuples copied so far actually being
- * contained within memtuples array.
+ * ensure a consistent representation (current tuple was just
+ * handled). Note that we rely on all tuples copied so far actually
+ * being contained within memtuples array.
*/
for (i = 0; i < state->memtupcount; i++)
{
- SortTuple *mtup = &state->memtuples[i];
+ SortTuple *mtup = &state->memtuples[i];
tuple = (HeapTuple) mtup->tuple;
mtup->datum1 = heap_getattr(tuple,
- state->indexInfo->ii_KeyAttrNumbers[0],
+ state->indexInfo->ii_KeyAttrNumbers[0],
state->tupDesc,
&stup->isnull1);
}
@@ -3487,10 +3489,10 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
{
/*
* This is similar to comparetup_heap(), but expects index tuples. There
- * is also special handling for enforcing uniqueness, and special treatment
- * for equal keys at the end.
+ * is also special handling for enforcing uniqueness, and special
+ * treatment for equal keys at the end.
*/
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
IndexTuple tuple1;
IndexTuple tuple2;
int keysz;
@@ -3582,7 +3584,7 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
errmsg("could not create unique index \"%s\"",
RelationGetRelationName(state->indexRel)),
key_desc ? errdetail("Key %s is duplicated.", key_desc) :
- errdetail("Duplicate keys exist."),
+ errdetail("Duplicate keys exist."),
errtableconstraint(state->heapRel,
RelationGetRelationName(state->indexRel))));
}
@@ -3698,7 +3700,7 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
else
{
/* Abort abbreviation */
- int i;
+ int i;
stup->datum1 = original;
@@ -3706,13 +3708,13 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
* Set state to be consistent with never trying abbreviation.
*
* Alter datum1 representation in already-copied tuples, so as to
- * ensure a consistent representation (current tuple was just handled).
- * Note that we rely on all tuples copied so far actually being
- * contained within memtuples array.
+ * ensure a consistent representation (current tuple was just
+ * handled). Note that we rely on all tuples copied so far actually
+ * being contained within memtuples array.
*/
for (i = 0; i < state->memtupcount; i++)
{
- SortTuple *mtup = &state->memtuples[i];
+ SortTuple *mtup = &state->memtuples[i];
tuple = (IndexTuple) mtup->tuple;
mtup->datum1 = index_getattr(tuple,
@@ -3770,7 +3772,7 @@ readtup_index(Tuplesortstate *state, SortTuple *stup,
static int
comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
- int compare;
+ int compare;
compare = ApplySortComparator(a->datum1, a->isnull1,
b->datum1, b->isnull1,
@@ -3782,7 +3784,7 @@ comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
if (state->sortKeys->abbrev_converter)
compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
- PointerGetDatum(b->tuple), b->isnull1,
+ PointerGetDatum(b->tuple), b->isnull1,
state->sortKeys);
return compare;
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index cc5409b880..bb2f3295a4 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -121,6 +121,7 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup)
CommandId cid = HeapTupleHeaderGetRawCommandId(tup);
Assert(!(tup->t_infomask & HEAP_MOVED));
+
/*
* Because GetUpdateXid() performs memory allocations if xmax is a
* multixact we can't Assert() if we're inside a critical section. This
@@ -128,7 +129,7 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup)
* things too much.
*/
Assert(CritSectionCount > 0 ||
- TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup)));
+ TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup)));
if (tup->t_infomask & HEAP_COMBOCID)
return GetRealCmax(cid);
@@ -317,7 +318,7 @@ SerializeComboCIDState(Size maxsize, char *start_address)
char *endptr;
/* First, we store the number of currently-existing ComboCIDs. */
- * (int *) start_address = usedComboCids;
+ *(int *) start_address = usedComboCids;
/* If maxsize is too small, throw an error. */
endptr = start_address + sizeof(int) +
@@ -347,7 +348,7 @@ RestoreComboCIDState(char *comboCIDstate)
Assert(!comboCids && !comboHash);
/* First, we retrieve the number of ComboCIDs that were serialized. */
- num_elements = * (int *) comboCIDstate;
+ num_elements = *(int *) comboCIDstate;
keydata = (ComboCidKeyData *) (comboCIDstate + sizeof(int));
/* Use GetComboCommandId to restore each ComboCID. */
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index f4bdabfd79..2f0e9cda8c 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -131,7 +131,7 @@ static ActiveSnapshotElt *ActiveSnapshot = NULL;
static int xmin_cmp(const pairingheap_node *a, const pairingheap_node *b,
void *arg);
-static pairingheap RegisteredSnapshots = { &xmin_cmp, NULL, NULL };
+static pairingheap RegisteredSnapshots = {&xmin_cmp, NULL, NULL};
/* first GetTransactionSnapshot call in a transaction? */
bool FirstSnapshotSet = false;
@@ -313,10 +313,10 @@ GetNonHistoricCatalogSnapshot(Oid relid)
{
/*
* If the caller is trying to scan a relation that has no syscache, no
- * catcache invalidations will be sent when it is updated. For a few
- * key relations, snapshot invalidations are sent instead. If we're
- * trying to scan a relation for which neither catcache nor snapshot
- * invalidations are sent, we must refresh the snapshot every time.
+ * catcache invalidations will be sent when it is updated. For a few key
+ * relations, snapshot invalidations are sent instead. If we're trying to
+ * scan a relation for which neither catcache nor snapshot invalidations
+ * are sent, we must refresh the snapshot every time.
*/
if (!CatalogSnapshotStale && !RelationInvalidatesSnapshotsOnly(relid) &&
!RelationHasSysCache(relid))
@@ -587,7 +587,9 @@ PushCopiedSnapshot(Snapshot snapshot)
void
UpdateActiveSnapshotCommandId(void)
{
- CommandId save_curcid, curcid;
+ CommandId save_curcid,
+ curcid;
+
Assert(ActiveSnapshot != NULL);
Assert(ActiveSnapshot->as_snap->active_count == 1);
Assert(ActiveSnapshot->as_snap->regd_count == 0);
@@ -772,7 +774,7 @@ xmin_cmp(const pairingheap_node *a, const pairingheap_node *b, void *arg)
static void
SnapshotResetXmin(void)
{
- Snapshot minSnapshot;
+ Snapshot minSnapshot;
if (ActiveSnapshot != NULL)
return;
@@ -897,7 +899,8 @@ AtEOXact_Snapshot(bool isCommit)
*/
foreach(lc, exportedSnapshots)
{
- Snapshot snap = (Snapshot) lfirst(lc);
+ Snapshot snap = (Snapshot) lfirst(lc);
+
pairingheap_remove(&RegisteredSnapshots, &snap->ph_node);
}
@@ -1472,8 +1475,8 @@ EstimateSnapshotSpace(Snapshot snap)
/*
* SerializeSnapshot
- * Dumps the serialized snapshot (extracted from given snapshot) onto the
- * memory location at start_address.
+ * Dumps the serialized snapshot (extracted from given snapshot) onto the
+ * memory location at start_address.
*/
void
SerializeSnapshot(Snapshot snapshot, char *start_address)
@@ -1494,9 +1497,9 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
serialized_snapshot->curcid = snapshot->curcid;
/*
- * Ignore the SubXID array if it has overflowed, unless the snapshot
- * was taken during recovey - in that case, top-level XIDs are in subxip
- * as well, and we mustn't lose them.
+ * Ignore the SubXID array if it has overflowed, unless the snapshot was
+ * taken during recovey - in that case, top-level XIDs are in subxip as
+ * well, and we mustn't lose them.
*/
if (serialized_snapshot->suboverflowed && !snapshot->takenDuringRecovery)
serialized_snapshot->subxcnt = 0;
@@ -1514,8 +1517,8 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
*/
if (snapshot->subxcnt > 0)
{
- Size subxipoff = sizeof(SerializedSnapshotData) +
- snapshot->xcnt * sizeof(TransactionId);
+ Size subxipoff = sizeof(SerializedSnapshotData) +
+ snapshot->xcnt * sizeof(TransactionId);
memcpy((TransactionId *) ((char *) serialized_snapshot + subxipoff),
snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index b4284d6d94..de7b3fc80c 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -405,6 +405,7 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
}
}
}
+
/*
* An invalid Xmin can be left behind by a speculative insertion that
* is cancelled by super-deleting the tuple. We shouldn't see any of
@@ -550,7 +551,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
if (!TransactionIdIsCurrentTransactionId(xmax))
{
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple),
- false))
+ false))
return HeapTupleBeingUpdated;
return HeapTupleMayBeUpdated;
}
@@ -820,10 +821,10 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
{
/*
- * Return the speculative token to caller. Caller can worry
- * about xmax, since it requires a conclusively locked row
- * version, and a concurrent update to this tuple is a conflict
- * of its purposes.
+ * Return the speculative token to caller. Caller can worry about
+ * xmax, since it requires a conclusively locked row version, and
+ * a concurrent update to this tuple is a conflict of its
+ * purposes.
*/
if (HeapTupleHeaderIsSpeculative(tuple))
{
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 2d0ea7bad8..5dd2887d12 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -236,7 +236,7 @@ usage(void)
printf(_(" -D, --pgdata=DIRECTORY receive base backup into directory\n"));
printf(_(" -F, --format=p|t output format (plain (default), tar)\n"));
printf(_(" -r, --max-rate=RATE maximum transfer rate to transfer data directory\n"
- " (in kB/s, or use suffix \"k\" or \"M\")\n"));
+ " (in kB/s, or use suffix \"k\" or \"M\")\n"));
printf(_(" -R, --write-recovery-conf\n"
" write recovery.conf after backup\n"));
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
@@ -1255,7 +1255,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
* failures on related directories.
*/
if (!((pg_str_endswith(filename, "/pg_xlog") ||
- pg_str_endswith(filename, "/archive_status")) &&
+ pg_str_endswith(filename, "/archive_status")) &&
errno == EEXIST))
{
fprintf(stderr,
@@ -1278,12 +1278,12 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
*
* It's most likely a link in pg_tblspc directory, to the
* location of a tablespace. Apply any tablespace mapping
- * given on the command line (--tablespace-mapping).
- * (We blindly apply the mapping without checking that
- * the link really is inside pg_tblspc. We don't expect
- * there to be other symlinks in a data directory, but
- * if there are, you can call it an undocumented feature
- * that you can map them too.)
+ * given on the command line (--tablespace-mapping). (We
+ * blindly apply the mapping without checking that the
+ * link really is inside pg_tblspc. We don't expect there
+ * to be other symlinks in a data directory, but if there
+ * are, you can call it an undocumented feature that you
+ * can map them too.)
*/
filename[strlen(filename) - 1] = '\0'; /* Remove trailing slash */
@@ -1659,7 +1659,7 @@ BaseBackup(void)
fastcheckpoint ? "FAST" : "",
includewal ? "NOWAIT" : "",
maxrate_clause ? maxrate_clause : "",
- format == 't' ? "TABLESPACE_MAP": "");
+ format == 't' ? "TABLESPACE_MAP" : "");
if (PQsendQuery(conn, basebkp) == 0)
{
diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c
index 71fb94578e..5d964e4ee6 100644
--- a/src/bin/pg_basebackup/pg_receivexlog.c
+++ b/src/bin/pg_basebackup/pg_receivexlog.c
@@ -43,7 +43,7 @@ static bool synchronous = false;
static void usage(void);
-static DIR* get_destination_dir(char *dest_folder);
+static DIR *get_destination_dir(char *dest_folder);
static void close_destination_dir(DIR *dest_dir, char *dest_folder);
static XLogRecPtr FindStreamingStart(uint32 *tli);
static void StreamLog(void);
@@ -128,10 +128,10 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
/*
* Get destination directory.
*/
-static DIR*
+static DIR *
get_destination_dir(char *dest_folder)
{
- DIR *dir;
+ DIR *dir;
Assert(dest_folder != NULL);
dir = opendir(dest_folder);
@@ -274,8 +274,10 @@ FindStreamingStart(uint32 *tli)
static void
StreamLog(void)
{
- XLogRecPtr startpos, serverpos;
- TimeLineID starttli, servertli;
+ XLogRecPtr startpos,
+ serverpos;
+ TimeLineID starttli,
+ servertli;
/*
* Connect in replication mode to the server
@@ -513,7 +515,8 @@ main(int argc, char **argv)
*/
if (!do_drop_slot)
{
- DIR *dir = get_destination_dir(basedir);
+ DIR *dir = get_destination_dir(basedir);
+
close_destination_dir(dir, basedir);
}
@@ -538,8 +541,8 @@ main(int argc, char **argv)
disconnect_and_exit(1);
/*
- * Check that there is a database associated with connection, none
- * should be defined in this context.
+ * Check that there is a database associated with connection, none should
+ * be defined in this context.
*/
if (db_name)
{
@@ -577,8 +580,8 @@ main(int argc, char **argv)
}
/*
- * Don't close the connection here so that subsequent StreamLog()
- * can reuse it.
+ * Don't close the connection here so that subsequent StreamLog() can
+ * reuse it.
*/
while (true)
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 8caedff2b0..3c60626541 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -36,26 +36,26 @@ static bool still_sending = true; /* feedback still needs to be sent? */
static PGresult *HandleCopyStream(PGconn *conn, XLogRecPtr startpos,
uint32 timeline, char *basedir,
stream_stop_callback stream_stop, int standby_message_timeout,
- char *partial_suffix, XLogRecPtr *stoppos,
- bool synchronous, bool mark_done);
-static int CopyStreamPoll(PGconn *conn, long timeout_ms);
-static int CopyStreamReceive(PGconn *conn, long timeout, char **buffer);
+ char *partial_suffix, XLogRecPtr *stoppos,
+ bool synchronous, bool mark_done);
+static int CopyStreamPoll(PGconn *conn, long timeout_ms);
+static int CopyStreamReceive(PGconn *conn, long timeout, char **buffer);
static bool ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
- XLogRecPtr blockpos, int64 *last_status);
+ XLogRecPtr blockpos, int64 *last_status);
static bool ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
- XLogRecPtr *blockpos, uint32 timeline,
- char *basedir, stream_stop_callback stream_stop,
- char *partial_suffix, bool mark_done);
+ XLogRecPtr *blockpos, uint32 timeline,
+ char *basedir, stream_stop_callback stream_stop,
+ char *partial_suffix, bool mark_done);
static PGresult *HandleEndOfCopyStream(PGconn *conn, char *copybuf,
- XLogRecPtr blockpos, char *basedir, char *partial_suffix,
- XLogRecPtr *stoppos, bool mark_done);
+ XLogRecPtr blockpos, char *basedir, char *partial_suffix,
+ XLogRecPtr *stoppos, bool mark_done);
static bool CheckCopyStreamStop(PGconn *conn, XLogRecPtr blockpos,
- uint32 timeline, char *basedir,
- stream_stop_callback stream_stop,
- char *partial_suffix, XLogRecPtr *stoppos,
- bool mark_done);
+ uint32 timeline, char *basedir,
+ stream_stop_callback stream_stop,
+ char *partial_suffix, XLogRecPtr *stoppos,
+ bool mark_done);
static long CalculateCopyStreamSleeptime(int64 now, int standby_message_timeout,
- int64 last_status);
+ int64 last_status);
static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
uint32 *timeline);
@@ -63,7 +63,7 @@ static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
static bool
mark_file_as_archived(const char *basedir, const char *fname)
{
- int fd;
+ int fd;
static char tmppath[MAXPGPATH];
snprintf(tmppath, sizeof(tmppath), "%s/archive_status/%s.done",
@@ -831,15 +831,15 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
* Check if we should continue streaming, or abort at this point.
*/
if (!CheckCopyStreamStop(conn, blockpos, timeline, basedir,
- stream_stop, partial_suffix, stoppos,
- mark_done))
+ stream_stop, partial_suffix, stoppos,
+ mark_done))
goto error;
now = feGetCurrentTimestamp();
/*
- * If synchronous option is true, issue sync command as soon as
- * there are WAL data which has not been flushed yet.
+ * If synchronous option is true, issue sync command as soon as there
+ * are WAL data which has not been flushed yet.
*/
if (synchronous && lastFlushPosition < blockpos && walfile != -1)
{
@@ -886,9 +886,10 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
goto error;
if (r == -2)
{
- PGresult *res = HandleEndOfCopyStream(conn, copybuf, blockpos,
- basedir, partial_suffix,
- stoppos, mark_done);
+ PGresult *res = HandleEndOfCopyStream(conn, copybuf, blockpos,
+ basedir, partial_suffix,
+ stoppos, mark_done);
+
if (res == NULL)
goto error;
else
@@ -910,7 +911,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
goto error;
/*
- * Check if we should continue streaming, or abort at this point.
+ * Check if we should continue streaming, or abort at this
+ * point.
*/
if (!CheckCopyStreamStop(conn, blockpos, timeline, basedir,
stream_stop, partial_suffix, stoppos,
@@ -925,8 +927,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
}
/*
- * Process the received data, and any subsequent data we
- * can read without blocking.
+ * Process the received data, and any subsequent data we can read
+ * without blocking.
*/
r = CopyStreamReceive(conn, 0, ©buf);
}
@@ -972,7 +974,7 @@ CopyStreamPoll(PGconn *conn, long timeout_ms)
ret = select(PQsocket(conn) + 1, &input_mask, NULL, NULL, timeoutptr);
if (ret == 0 || (ret < 0 && errno == EINTR))
- return 0; /* Got a timeout or signal */
+ return 0; /* Got a timeout or signal */
else if (ret < 0)
{
fprintf(stderr, _("%s: select() failed: %s\n"),
@@ -1009,12 +1011,12 @@ CopyStreamReceive(PGconn *conn, long timeout, char **buffer)
if (rawlen == 0)
{
/*
- * No data available. Wait for some to appear, but not longer than
- * the specified timeout, so that we can ping the server.
+ * No data available. Wait for some to appear, but not longer than the
+ * specified timeout, so that we can ping the server.
*/
if (timeout != 0)
{
- int ret;
+ int ret;
ret = CopyStreamPoll(conn, timeout);
if (ret <= 0)
@@ -1061,13 +1063,12 @@ ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
int64 now;
/*
- * Parse the keepalive message, enclosed in the CopyData message.
- * We just check if the server requested a reply, and ignore the
- * rest.
+ * Parse the keepalive message, enclosed in the CopyData message. We just
+ * check if the server requested a reply, and ignore the rest.
*/
- pos = 1; /* skip msgtype 'k' */
- pos += 8; /* skip walEnd */
- pos += 8; /* skip sendTime */
+ pos = 1; /* skip msgtype 'k' */
+ pos += 8; /* skip walEnd */
+ pos += 8; /* skip sendTime */
if (len < pos + 1)
{
@@ -1084,11 +1085,11 @@ ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
walfile != -1)
{
/*
- * If a valid flush location needs to be reported,
- * flush the current WAL file so that the latest flush
- * location is sent back to the server. This is necessary to
- * see whether the last WAL data has been successfully
- * replicated or not, at the normal shutdown of the server.
+ * If a valid flush location needs to be reported, flush the
+ * current WAL file so that the latest flush location is sent back
+ * to the server. This is necessary to see whether the last WAL
+ * data has been successfully replicated or not, at the normal
+ * shutdown of the server.
*/
if (fsync(walfile) != 0)
{
@@ -1123,21 +1124,21 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
int hdr_len;
/*
- * Once we've decided we don't want to receive any more, just
- * ignore any subsequent XLogData messages.
+ * Once we've decided we don't want to receive any more, just ignore any
+ * subsequent XLogData messages.
*/
if (!(still_sending))
return true;
/*
- * Read the header of the XLogData message, enclosed in the
- * CopyData message. We only need the WAL location field
- * (dataStart), the rest of the header is ignored.
+ * Read the header of the XLogData message, enclosed in the CopyData
+ * message. We only need the WAL location field (dataStart), the rest of
+ * the header is ignored.
*/
- hdr_len = 1; /* msgtype 'w' */
- hdr_len += 8; /* dataStart */
- hdr_len += 8; /* walEnd */
- hdr_len += 8; /* sendTime */
+ hdr_len = 1; /* msgtype 'w' */
+ hdr_len += 8; /* dataStart */
+ hdr_len += 8; /* walEnd */
+ hdr_len += 8; /* sendTime */
if (len < hdr_len)
{
fprintf(stderr, _("%s: streaming header too small: %d\n"),
@@ -1150,8 +1151,8 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
xlogoff = *blockpos % XLOG_SEG_SIZE;
/*
- * Verify that the initial location in the stream matches where we
- * think we are.
+ * Verify that the initial location in the stream matches where we think
+ * we are.
*/
if (walfile == -1)
{
@@ -1208,7 +1209,7 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
bytes_to_write) != bytes_to_write)
{
fprintf(stderr,
- _("%s: could not write %u bytes to WAL file \"%s\": %s\n"),
+ _("%s: could not write %u bytes to WAL file \"%s\": %s\n"),
progname, bytes_to_write, current_walfile_name,
strerror(errno));
return false;
@@ -1252,15 +1253,15 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
*/
static PGresult *
HandleEndOfCopyStream(PGconn *conn, char *copybuf,
- XLogRecPtr blockpos, char *basedir, char *partial_suffix,
+ XLogRecPtr blockpos, char *basedir, char *partial_suffix,
XLogRecPtr *stoppos, bool mark_done)
{
PGresult *res = PQgetResult(conn);
/*
- * The server closed its end of the copy stream. If we haven't
- * closed ours already, we need to do so now, unless the server
- * threw an error, in which case we don't.
+ * The server closed its end of the copy stream. If we haven't closed
+ * ours already, we need to do so now, unless the server threw an error,
+ * in which case we don't.
*/
if (still_sending)
{
diff --git a/src/bin/pg_basebackup/receivelog.h b/src/bin/pg_basebackup/receivelog.h
index a957aea4d9..b38e993c1a 100644
--- a/src/bin/pg_basebackup/receivelog.h
+++ b/src/bin/pg_basebackup/receivelog.h
@@ -34,4 +34,4 @@ extern bool ReceiveXlogStream(PGconn *conn,
bool synchronous,
bool mark_done);
-#endif /* RECEIVELOG_H */
+#endif /* RECEIVELOG_H */
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index de37511ef1..ac84e6d360 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -241,7 +241,8 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
XLogRecPtr *startpos, char **db_name)
{
PGresult *res;
- uint32 hi, lo;
+ uint32 hi,
+ lo;
/* Check connection existence */
Assert(conn != NULL);
@@ -279,7 +280,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
if (sscanf(PQgetvalue(res, 0, 2), "%X/%X", &hi, &lo) != 2)
{
fprintf(stderr,
- _("%s: could not parse transaction log location \"%s\"\n"),
+ _("%s: could not parse transaction log location \"%s\"\n"),
progname, PQgetvalue(res, 0, 2));
PQclear(res);
@@ -289,7 +290,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
}
/* Get database name, only available in 9.4 and newer versions */
- if (db_name != NULL)
+ if (db_name != NULL)
{
if (PQnfields(res) < 4)
fprintf(stderr,
@@ -297,7 +298,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
progname, PQntuples(res), PQnfields(res), 1, 4);
if (PQgetisnull(res, 0, 3))
- *db_name = NULL;
+ *db_name = NULL;
else
*db_name = pg_strdup(PQgetvalue(res, 0, 3));
}
@@ -358,12 +359,13 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
/* Get LSN start position if necessary */
if (startpos != NULL)
{
- uint32 hi, lo;
+ uint32 hi,
+ lo;
if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &hi, &lo) != 2)
{
fprintf(stderr,
- _("%s: could not parse transaction log location \"%s\"\n"),
+ _("%s: could not parse transaction log location \"%s\"\n"),
progname, PQgetvalue(res, 0, 1));
destroyPQExpBuffer(query);
diff --git a/src/bin/pg_basebackup/streamutil.h b/src/bin/pg_basebackup/streamutil.h
index 6845662b42..01ab5660a1 100644
--- a/src/bin/pg_basebackup/streamutil.h
+++ b/src/bin/pg_basebackup/streamutil.h
@@ -32,13 +32,13 @@ extern PGconn *GetConnection(void);
/* Replication commands */
extern bool CreateReplicationSlot(PGconn *conn, const char *slot_name,
- const char *plugin, XLogRecPtr *startpos,
- bool is_physical);
+ const char *plugin, XLogRecPtr *startpos,
+ bool is_physical);
extern bool DropReplicationSlot(PGconn *conn, const char *slot_name);
extern bool RunIdentifySystem(PGconn *conn, char **sysid,
- TimeLineID *starttli,
- XLogRecPtr *startpos,
- char **db_name);
+ TimeLineID *starttli,
+ XLogRecPtr *startpos,
+ char **db_name);
extern int64 feGetCurrentTimestamp(void);
extern void feTimestampDifference(int64 start_time, int64 stop_time,
long *secs, int *microsecs);
@@ -48,4 +48,4 @@ extern bool feTimestampDifferenceExceeds(int64 start_time, int64 stop_time,
extern void fe_sendint64(int64 i, char *buf);
extern int64 fe_recvint64(char *buf);
-#endif /* STREAMUTIL_H */
+#endif /* STREAMUTIL_H */
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 0e4bd12aff..3476ea686a 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -49,11 +49,11 @@ command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup", '-Ft' ],
'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
-my $superlongname = "superlongname_" . ("x"x100);
+my $superlongname = "superlongname_" . ("x" x 100);
system_or_bail 'touch', "$tempdir/pgdata/$superlongname";
command_fails([ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
- 'pg_basebackup tar with long name fails');
+ 'pg_basebackup tar with long name fails');
unlink "$tempdir/pgdata/$superlongname";
# Create a temporary directory in the system location and symlink it
@@ -64,7 +64,8 @@ my $shorter_tempdir = tempdir_short . "/tempdir";
symlink "$tempdir", $shorter_tempdir;
mkdir "$tempdir/tblspc1";
-psql 'postgres', "CREATE TABLESPACE tblspc1 LOCATION '$shorter_tempdir/tblspc1';";
+psql 'postgres',
+ "CREATE TABLESPACE tblspc1 LOCATION '$shorter_tempdir/tblspc1';";
psql 'postgres', "CREATE TABLE test1 (a int) TABLESPACE tblspc1;";
command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup2", '-Ft' ],
'tar format with tablespaces');
@@ -77,14 +78,12 @@ command_fails(
'plain format with tablespaces fails without tablespace mapping');
command_ok(
- [ 'pg_basebackup', '-D',
- "$tempdir/backup1", '-Fp',
+ [ 'pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
"-T$shorter_tempdir/tblspc1=$tempdir/tbackup/tblspc1" ],
'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
opendir(my $dh, "$tempdir/pgdata/pg_tblspc") or die;
-ok( ( grep
- {
+ok( ( grep {
-l "$tempdir/backup1/pg_tblspc/$_"
and readlink "$tempdir/backup1/pg_tblspc/$_" eq
"$tempdir/tbackup/tblspc1"
@@ -95,10 +94,10 @@ closedir $dh;
mkdir "$tempdir/tbl=spc2";
psql 'postgres', "DROP TABLE test1;";
psql 'postgres', "DROP TABLESPACE tblspc1;";
-psql 'postgres', "CREATE TABLESPACE tblspc2 LOCATION '$shorter_tempdir/tbl=spc2';";
+psql 'postgres',
+ "CREATE TABLESPACE tblspc2 LOCATION '$shorter_tempdir/tbl=spc2';";
command_ok(
- [ 'pg_basebackup', '-D',
- "$tempdir/backup3", '-Fp',
+ [ 'pg_basebackup', '-D', "$tempdir/backup3", '-Fp',
"-T$shorter_tempdir/tbl\\=spc2=$tempdir/tbackup/tbl\\=spc2" ],
'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
@@ -126,7 +125,8 @@ command_fails(
'-T with invalid format fails');
mkdir "$tempdir/$superlongname";
-psql 'postgres', "CREATE TABLESPACE tblspc3 LOCATION '$tempdir/$superlongname';";
+psql 'postgres',
+ "CREATE TABLESPACE tblspc3 LOCATION '$tempdir/$superlongname';";
command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup_l3", '-Ft' ],
- 'pg_basebackup tar with long symlink target');
+ 'pg_basebackup tar with long symlink target');
psql 'postgres', "DROP TABLESPACE tblspc3;";
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 6a67cb7fca..74764fabda 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -176,7 +176,7 @@ write_eventlog(int level, const char *line)
if (evtHandle == INVALID_HANDLE_VALUE)
{
evtHandle = RegisterEventSource(NULL,
- event_source ? event_source : DEFAULT_EVENT_SOURCE);
+ event_source ? event_source : DEFAULT_EVENT_SOURCE);
if (evtHandle == NULL)
{
evtHandle = INVALID_HANDLE_VALUE;
@@ -263,7 +263,8 @@ get_pgpid(bool is_status_request)
/*
* The Linux Standard Base Core Specification 3.1 says this should
* return '4, program or service status is unknown'
- * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-g
+ * eneric/iniscrptact.html
*/
exit(is_status_request ? 4 : 1);
}
@@ -1600,10 +1601,10 @@ pgwin32_ServiceMain(DWORD argc, LPTSTR *argv)
{
/*
* status.dwCheckPoint can be incremented by
- * test_postmaster_connection(true), so it might not
- * start from 0.
+ * test_postmaster_connection(true), so it might not start
+ * from 0.
*/
- int maxShutdownCheckPoint = status.dwCheckPoint + 12;;
+ int maxShutdownCheckPoint = status.dwCheckPoint + 12;;
kill(postmasterPID, SIGINT);
@@ -2215,7 +2216,7 @@ main(int argc, char **argv)
post_opts = pg_strdup(optarg);
else
{
- char *old_post_opts = post_opts;
+ char *old_post_opts = post_opts;
post_opts = psprintf("%s %s", old_post_opts, optarg);
free(old_post_opts);
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 17309e8fcb..6c9ec5c717 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -3,7 +3,7 @@ use warnings;
use TestLib;
use Test::More tests => 17;
-my $tempdir = TestLib::tempdir;
+my $tempdir = TestLib::tempdir;
my $tempdir_short = TestLib::tempdir_short;
program_help_ok('pg_ctl');
@@ -11,7 +11,7 @@ program_version_ok('pg_ctl');
program_options_handling_ok('pg_ctl');
command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ],
- 1, 'pg_ctl start with nonexistent directory');
+ 1, 'pg_ctl start with nonexistent directory');
command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data" ], 'pg_ctl initdb');
command_ok(
diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl
index b8cbbdaed5..055885495a 100644
--- a/src/bin/pg_ctl/t/002_status.pl
+++ b/src/bin/pg_ctl/t/002_status.pl
@@ -3,7 +3,7 @@ use warnings;
use TestLib;
use Test::More tests => 3;
-my $tempdir = TestLib::tempdir;
+my $tempdir = TestLib::tempdir;
my $tempdir_short = TestLib::tempdir_short;
command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index d39abf9242..687cbaaf7c 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -126,8 +126,8 @@ static const CatalogId nilCatalogId = {0, 0};
static void help(const char *progname);
static void setup_connection(Archive *AH, DumpOptions *dopt,
- const char *dumpencoding, const char *dumpsnapshot,
- char *use_role);
+ const char *dumpencoding, const char *dumpsnapshot,
+ char *use_role);
static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
static void expand_schema_name_patterns(Archive *fout,
SimpleStringList *patterns,
@@ -671,7 +671,7 @@ main(int argc, char **argv)
/* check the version when a snapshot is explicitly specified by user */
if (dumpsnapshot && fout->remoteVersion < 90200)
exit_horribly(NULL,
- "Exported snapshots are not supported by this server version.\n");
+ "Exported snapshots are not supported by this server version.\n");
/* Find the last built-in OID, if needed */
if (fout->remoteVersion < 70300)
@@ -1052,8 +1052,8 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE");
/*
- * define an export snapshot, either chosen by user or needed for
- * parallel dump.
+ * define an export snapshot, either chosen by user or needed for parallel
+ * dump.
*/
if (dumpsnapshot)
AH->sync_snapshot_id = strdup(dumpsnapshot);
@@ -1061,6 +1061,7 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
if (AH->sync_snapshot_id)
{
PQExpBuffer query = createPQExpBuffer();
+
appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
ExecuteSqlStatement(AH, query->data);
@@ -2841,8 +2842,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
/*
* Get row security enabled information for the table. We represent
- * RLS enabled on a table by creating PolicyInfo object with an
- * empty policy.
+ * RLS enabled on a table by creating PolicyInfo object with an empty
+ * policy.
*/
if (tbinfo->rowsec)
{
@@ -2882,8 +2883,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
"SELECT oid, tableoid, pol.polname, pol.polcmd, "
"CASE WHEN pol.polroles = '{0}' THEN 'PUBLIC' ELSE "
" pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
- "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
- "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
+ "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
+ "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
"FROM pg_catalog.pg_policy pol "
"WHERE polrelid = '%u'",
tbinfo->dobj.catId.oid);
@@ -2895,8 +2896,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
{
/*
* No explicit policies to handle (only the default-deny policy,
- * which is handled as part of the table definition). Clean up and
- * return.
+ * which is handled as part of the table definition). Clean up
+ * and return.
*/
PQclear(res);
continue;
@@ -2959,9 +2960,9 @@ dumpPolicy(Archive *fout, DumpOptions *dopt, PolicyInfo *polinfo)
return;
/*
- * If polname is NULL, then this record is just indicating that ROW
- * LEVEL SECURITY is enabled for the table. Dump as ALTER TABLE
- * ENABLE ROW LEVEL SECURITY.
+ * If polname is NULL, then this record is just indicating that ROW LEVEL
+ * SECURITY is enabled for the table. Dump as ALTER TABLE ENABLE
+ * ROW LEVEL SECURITY.
*/
if (polinfo->polname == NULL)
{
@@ -3046,7 +3047,7 @@ binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
appendPQExpBuffer(upgrade_buffer,
- "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
+ "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
pg_type_oid);
/* we only support old >= 8.3 for binary upgrades */
@@ -6597,7 +6598,7 @@ getTransforms(Archive *fout, int *numTransforms)
int ntups;
int i;
PQExpBuffer query;
- TransformInfo *transforminfo;
+ TransformInfo *transforminfo;
int i_tableoid;
int i_oid;
int i_trftype;
@@ -8462,7 +8463,7 @@ dumpExtension(Archive *fout, DumpOptions *dopt, ExtensionInfo *extinfo)
appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
appendPQExpBufferStr(q,
- "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
+ "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
appendStringLiteralAH(q, extinfo->dobj.name, fout);
appendPQExpBufferStr(q, ", ");
appendStringLiteralAH(q, extinfo->namespace, fout);
@@ -9367,7 +9368,7 @@ dumpDomain(Archive *fout, DumpOptions *dopt, TypeInfo *tyinfo)
for (i = 0; i < tyinfo->nDomChecks; i++)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- PQExpBuffer labelq = createPQExpBuffer();
+ PQExpBuffer labelq = createPQExpBuffer();
appendPQExpBuffer(labelq, "CONSTRAINT %s ",
fmtId(domcheck->dobj.name));
@@ -10451,8 +10452,8 @@ dumpFunc(Archive *fout, DumpOptions *dopt, FuncInfo *finfo)
if (protrftypes != NULL && strcmp(protrftypes, "") != 0)
{
- Oid *typeids = palloc(FUNC_MAX_ARGS * sizeof(Oid));
- int i;
+ Oid *typeids = palloc(FUNC_MAX_ARGS * sizeof(Oid));
+ int i;
appendPQExpBufferStr(q, " TRANSFORM ");
parseOidArray(protrftypes, typeids, FUNC_MAX_ARGS);
@@ -10461,7 +10462,7 @@ dumpFunc(Archive *fout, DumpOptions *dopt, FuncInfo *finfo)
if (i != 0)
appendPQExpBufferStr(q, ", ");
appendPQExpBuffer(q, "FOR TYPE %s",
- getFormattedTypeName(fout, typeids[i], zeroAsNone));
+ getFormattedTypeName(fout, typeids[i], zeroAsNone));
}
}
@@ -10729,11 +10730,11 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
lanname = get_language_name(fout, transform->trflang);
appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
- getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+ getFormattedTypeName(fout, transform->trftype, zeroAsNone),
lanname);
appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
- getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+ getFormattedTypeName(fout, transform->trftype, zeroAsNone),
lanname);
if (!transform->trffromsql && !transform->trftosql)
@@ -10747,11 +10748,10 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
/*
* Always qualify the function name, in case it is not in
- * pg_catalog schema (format_function_signature won't qualify
- * it).
+ * pg_catalog schema (format_function_signature won't qualify it).
*/
appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
- fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
+ fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
free(fsig);
}
else
@@ -10769,11 +10769,10 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
/*
* Always qualify the function name, in case it is not in
- * pg_catalog schema (format_function_signature won't qualify
- * it).
+ * pg_catalog schema (format_function_signature won't qualify it).
*/
appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
- fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
+ fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
free(fsig);
}
else
@@ -10783,7 +10782,7 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
appendPQExpBuffer(defqry, ");\n");
appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
- getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+ getFormattedTypeName(fout, transform->trftype, zeroAsNone),
lanname);
if (dopt->binary_upgrade)
@@ -14012,9 +14011,9 @@ dumpTableSchema(Archive *fout, DumpOptions *dopt, TableInfo *tbinfo)
* here, also updating their attlen/attalign values so that the
* dropped column can be skipped properly. (We do not bother with
* restoring the original attbyval setting.) Also, inheritance
- * relationships are set up by doing ALTER TABLE INHERIT rather than using
- * an INHERITS clause --- the latter would possibly mess up the column
- * order. That also means we have to take care about setting
+ * relationships are set up by doing ALTER TABLE INHERIT rather than
+ * using an INHERITS clause --- the latter would possibly mess up the
+ * column order. That also means we have to take care about setting
* attislocal correctly, plus fix up any inherited CHECK constraints.
* Analogously, we set up typed tables using ALTER TABLE / OF here.
*/
@@ -15473,28 +15472,28 @@ dumpRule(Archive *fout, DumpOptions *dopt, RuleInfo *rinfo)
*
* 1. Identify objects which are members of extensions
*
- * Generally speaking, this is to mark them as *not* being dumped, as most
- * extension objects are created by the single CREATE EXTENSION command.
- * The one exception is binary upgrades with pg_upgrade will still dump the
- * non-table objects.
+ * Generally speaking, this is to mark them as *not* being dumped, as most
+ * extension objects are created by the single CREATE EXTENSION command.
+ * The one exception is binary upgrades with pg_upgrade will still dump the
+ * non-table objects.
*
* 2. Identify and create dump records for extension configuration tables.
*
- * Extensions can mark tables as "configuration", which means that the user
- * is able and expected to modify those tables after the extension has been
- * loaded. For these tables, we dump out only the data- the structure is
- * expected to be handled at CREATE EXTENSION time, including any indexes or
- * foreign keys, which brings us to-
+ * Extensions can mark tables as "configuration", which means that the user
+ * is able and expected to modify those tables after the extension has been
+ * loaded. For these tables, we dump out only the data- the structure is
+ * expected to be handled at CREATE EXTENSION time, including any indexes or
+ * foreign keys, which brings us to-
*
* 3. Record FK dependencies between configuration tables.
*
- * Due to the FKs being created at CREATE EXTENSION time and therefore before
- * the data is loaded, we have to work out what the best order for reloading
- * the data is, to avoid FK violations when the tables are restored. This is
- * not perfect- we can't handle circular dependencies and if any exist they
- * will cause an invalid dump to be produced (though at least all of the data
- * is included for a user to manually restore). This is currently documented
- * but perhaps we can provide a better solution in the future.
+ * Due to the FKs being created at CREATE EXTENSION time and therefore before
+ * the data is loaded, we have to work out what the best order for reloading
+ * the data is, to avoid FK violations when the tables are restored. This is
+ * not perfect- we can't handle circular dependencies and if any exist they
+ * will cause an invalid dump to be produced (though at least all of the data
+ * is included for a user to manually restore). This is currently documented
+ * but perhaps we can provide a better solution in the future.
*/
void
getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[],
@@ -15691,21 +15690,20 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
}
/*
- * Now that all the TableInfoData objects have been created for all
- * the extensions, check their FK dependencies and register them to
- * try and dump the data out in an order which they can be restored
- * in.
+ * Now that all the TableInfoData objects have been created for all the
+ * extensions, check their FK dependencies and register them to try and
+ * dump the data out in an order which they can be restored in.
*
* Note that this is not a problem for user tables as their FKs are
* recreated after the data has been loaded.
*/
printfPQExpBuffer(query,
- "SELECT conrelid, confrelid "
- "FROM pg_constraint "
- "JOIN pg_depend ON (objid = confrelid) "
- "WHERE contype = 'f' "
- "AND refclassid = 'pg_extension'::regclass "
- "AND classid = 'pg_class'::regclass;");
+ "SELECT conrelid, confrelid "
+ "FROM pg_constraint "
+ "JOIN pg_depend ON (objid = confrelid) "
+ "WHERE contype = 'f' "
+ "AND refclassid = 'pg_extension'::regclass "
+ "AND classid = 'pg_class'::regclass;");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
ntups = PQntuples(res);
@@ -15716,8 +15714,10 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
/* Now get the dependencies and register them */
for (i = 0; i < ntups; i++)
{
- Oid conrelid, confrelid;
- TableInfo *reftable, *contable;
+ Oid conrelid,
+ confrelid;
+ TableInfo *reftable,
+ *contable;
conrelid = atooid(PQgetvalue(res, i, i_conrelid));
confrelid = atooid(PQgetvalue(res, i, i_confrelid));
@@ -15731,8 +15731,8 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
continue;
/*
- * Make referencing TABLE_DATA object depend on the
- * referenced table's TABLE_DATA object.
+ * Make referencing TABLE_DATA object depend on the referenced table's
+ * TABLE_DATA object.
*/
addObjectDependency(&contable->dataObj->dobj,
reftable->dataObj->dobj.dumpId);
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 4c796ad6a7..009dba5c9d 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -471,7 +471,7 @@ typedef struct _policyInfo
{
DumpableObject dobj;
TableInfo *poltable;
- char *polname; /* null indicates RLS is enabled on rel */
+ char *polname; /* null indicates RLS is enabled on rel */
char *polcmd;
char *polroles;
char *polqual;
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 2c72e19f2d..c6b9326cb1 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1422,7 +1422,7 @@ dumpCreateDB(PGconn *conn)
{
appendPQExpBufferStr(buf, "-- For binary upgrade, set datfrozenxid and datminmxid.\n");
appendPQExpBuffer(buf, "UPDATE pg_catalog.pg_database "
- "SET datfrozenxid = '%u', datminmxid = '%u' "
+ "SET datfrozenxid = '%u', datminmxid = '%u' "
"WHERE datname = ",
dbfrozenxid, dbminmxid);
appendStringLiteralConn(buf, dbname, conn);
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 393d580154..6ffe795348 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -914,9 +914,9 @@ FindEndOfXLOG(void)
XLogSegNo segno;
/*
- * Note: We don't use XLogFromFileName here, because we want
- * to use the segment size from the control file, not the size
- * the pg_resetxlog binary was compiled with
+ * Note: We don't use XLogFromFileName here, because we want to
+ * use the segment size from the control file, not the size the
+ * pg_resetxlog binary was compiled with
*/
sscanf(xlde->d_name, "%08X%08X%08X", &tli, &log, &seg);
segno = ((uint64) log) * segs_per_xlogid + seg;
diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm
index 6ea2f871aa..5219ec967a 100644
--- a/src/bin/pg_rewind/RewindTest.pm
+++ b/src/bin/pg_rewind/RewindTest.pm
@@ -67,8 +67,8 @@ our @EXPORT = qw(
# for debugging purposes,
my $testroot = tempdir;
-our $test_master_datadir="$testroot/data_master";
-our $test_standby_datadir="$testroot/data_standby";
+our $test_master_datadir = "$testroot/data_master";
+our $test_standby_datadir = "$testroot/data_standby";
mkdir $testroot;
@@ -76,14 +76,14 @@ mkdir $testroot;
mkdir "regress_log";
# Define non-conflicting ports for both nodes.
-my $port_master=$ENV{PGPORT};
-my $port_standby=$port_master + 1;
+my $port_master = $ENV{PGPORT};
+my $port_standby = $port_master + 1;
my $log_path;
my $tempdir_short;
-my $connstr_master="port=$port_master";
-my $connstr_standby="port=$port_standby";
+my $connstr_master = "port=$port_master";
+my $connstr_standby = "port=$port_standby";
$ENV{PGDATABASE} = "postgres";
@@ -109,19 +109,25 @@ sub check_query
my ($stdout, $stderr);
# we want just the output, no formatting
- my $result = run ['psql', '-q', '-A', '-t', '--no-psqlrc',
- '-d', $connstr_master,
- '-c' , $query],
- '>', \$stdout, '2>', \$stderr;
+ my $result = run [
+ 'psql', '-q', '-A', '-t', '--no-psqlrc', '-d',
+ $connstr_master, '-c', $query ],
+ '>', \$stdout, '2>', \$stderr;
+
# We don't use ok() for the exit code and stderr, because we want this
# check to be just a single test.
- if (!$result) {
- fail ("$test_name: psql exit code");
- } elsif ($stderr ne '') {
+ if (!$result)
+ {
+ fail("$test_name: psql exit code");
+ }
+ elsif ($stderr ne '')
+ {
diag $stderr;
- fail ("$test_name: psql no stderr");
- } else {
- is ($stdout, $expected_stdout, "$test_name: query result matches");
+ fail("$test_name: psql no stderr");
+ }
+ else
+ {
+ is($stdout, $expected_stdout, "$test_name: query result matches");
}
}
@@ -131,12 +137,12 @@ sub poll_query_until
my ($query, $connstr) = @_;
my $max_attempts = 30;
- my $attempts = 0;
+ my $attempts = 0;
my ($stdout, $stderr);
while ($attempts < $max_attempts)
{
- my $cmd = ['psql', '-At', '-c', "$query", '-d', "$connstr" ];
+ my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ];
my $result = run $cmd, '>', \$stdout, '2>', \$stderr;
chomp($stdout);
@@ -158,7 +164,7 @@ sub poll_query_until
sub append_to_file
{
- my($filename, $str) = @_;
+ my ($filename, $str) = @_;
open my $fh, ">>", $filename or die "could not open file $filename";
print $fh $str;
@@ -167,10 +173,10 @@ sub append_to_file
sub init_rewind_test
{
- my $testname = shift;
+ my $testname = shift;
my $test_mode = shift;
- $log_path="regress_log/pg_rewind_log_${testname}_${test_mode}";
+ $log_path = "regress_log/pg_rewind_log_${testname}_${test_mode}";
remove_tree $log_path;
}
@@ -184,7 +190,8 @@ sub setup_cluster
standard_initdb($test_master_datadir);
# Custom parameters for master's postgresql.conf
- append_to_file("$test_master_datadir/postgresql.conf", qq(
+ append_to_file(
+ "$test_master_datadir/postgresql.conf", qq(
wal_level = hot_standby
max_wal_senders = 2
wal_keep_segments = 20
@@ -197,38 +204,47 @@ max_connections = 10
));
# Accept replication connections on master
- append_to_file("$test_master_datadir/pg_hba.conf", qq(
+ append_to_file(
+ "$test_master_datadir/pg_hba.conf", qq(
local replication all trust
));
- system_or_bail("pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1");
+ system_or_bail(
+"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1"
+ );
#### Now run the test-specific parts to initialize the master before setting
# up standby
- $ENV{PGHOST} = $tempdir_short;
+ $ENV{PGHOST} = $tempdir_short;
}
sub create_standby
{
+
# Set up standby with necessary parameter
remove_tree $test_standby_datadir;
# Base backup is taken with xlog files included
- system_or_bail("pg_basebackup -D $test_standby_datadir -p $port_master -x >>$log_path 2>&1");
- append_to_file("$test_standby_datadir/recovery.conf", qq(
+ system_or_bail(
+"pg_basebackup -D $test_standby_datadir -p $port_master -x >>$log_path 2>&1");
+ append_to_file(
+ "$test_standby_datadir/recovery.conf", qq(
primary_conninfo='$connstr_master application_name=rewind_standby'
standby_mode=on
recovery_target_timeline='latest'
));
# Start standby
- system_or_bail("pg_ctl -w -D $test_standby_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_standby\" start >>$log_path 2>&1");
+ system_or_bail(
+"pg_ctl -w -D $test_standby_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_standby\" start >>$log_path 2>&1"
+ );
# Wait until the standby has caught up with the primary, by polling
# pg_stat_replication.
- my $caughtup_query = "SELECT pg_current_xlog_location() = replay_location FROM pg_stat_replication WHERE application_name = 'rewind_standby';";
+ my $caughtup_query =
+"SELECT pg_current_xlog_location() = replay_location FROM pg_stat_replication WHERE application_name = 'rewind_standby';";
poll_query_until($caughtup_query, $connstr_master)
- or die "Timed out while waiting for standby to catch up";
+ or die "Timed out while waiting for standby to catch up";
}
sub promote_standby
@@ -239,9 +255,10 @@ sub promote_standby
# Now promote slave and insert some new data on master, this will put
# the master out-of-sync with the standby. Wait until the standby is
# out of recovery mode, and is ready to accept read-write connections.
- system_or_bail("pg_ctl -w -D $test_standby_datadir promote >>$log_path 2>&1");
+ system_or_bail(
+ "pg_ctl -w -D $test_standby_datadir promote >>$log_path 2>&1");
poll_query_until("SELECT NOT pg_is_in_recovery()", $connstr_standby)
- or die "Timed out while waiting for promotion of standby";
+ or die "Timed out while waiting for promotion of standby";
# Force a checkpoint after the promotion. pg_rewind looks at the control
# file todetermine what timeline the server is on, and that isn't updated
@@ -257,7 +274,8 @@ sub run_pg_rewind
my $test_mode = shift;
# Stop the master and be ready to perform the rewind
- system_or_bail("pg_ctl -w -D $test_master_datadir stop -m fast >>$log_path 2>&1");
+ system_or_bail(
+ "pg_ctl -w -D $test_master_datadir stop -m fast >>$log_path 2>&1");
# At this point, the rewind processing is ready to run.
# We now have a very simple scenario with a few diverged WAL record.
@@ -266,47 +284,67 @@ sub run_pg_rewind
# Keep a temporary postgresql.conf for master node or it would be
# overwritten during the rewind.
- copy("$test_master_datadir/postgresql.conf", "$testroot/master-postgresql.conf.tmp");
+ copy(
+ "$test_master_datadir/postgresql.conf",
+ "$testroot/master-postgresql.conf.tmp");
+
# Now run pg_rewind
if ($test_mode eq "local")
{
+
# Do rewind using a local pgdata as source
# Stop the master and be ready to perform the rewind
- system_or_bail("pg_ctl -w -D $test_standby_datadir stop -m fast >>$log_path 2>&1");
- my $result =
- run(['pg_rewind',
- "--debug",
- "--source-pgdata=$test_standby_datadir",
- "--target-pgdata=$test_master_datadir"],
- '>>', $log_path, '2>&1');
- ok ($result, 'pg_rewind local');
+ system_or_bail(
+ "pg_ctl -w -D $test_standby_datadir stop -m fast >>$log_path 2>&1"
+ );
+ my $result = run(
+ [ 'pg_rewind',
+ "--debug",
+ "--source-pgdata=$test_standby_datadir",
+ "--target-pgdata=$test_master_datadir" ],
+ '>>',
+ $log_path,
+ '2>&1');
+ ok($result, 'pg_rewind local');
}
elsif ($test_mode eq "remote")
{
+
# Do rewind using a remote connection as source
- my $result =
- run(['pg_rewind',
- "--source-server", "port=$port_standby dbname=postgres",
- "--target-pgdata=$test_master_datadir"],
- '>>', $log_path, '2>&1');
- ok ($result, 'pg_rewind remote');
- } else {
+ my $result = run(
+ [ 'pg_rewind',
+ "--source-server",
+ "port=$port_standby dbname=postgres",
+ "--target-pgdata=$test_master_datadir" ],
+ '>>',
+ $log_path,
+ '2>&1');
+ ok($result, 'pg_rewind remote');
+ }
+ else
+ {
+
# Cannot come here normally
die("Incorrect test mode specified");
}
# Now move back postgresql.conf with old settings
- move("$testroot/master-postgresql.conf.tmp", "$test_master_datadir/postgresql.conf");
+ move(
+ "$testroot/master-postgresql.conf.tmp",
+ "$test_master_datadir/postgresql.conf");
# Plug-in rewound node to the now-promoted standby node
- append_to_file("$test_master_datadir/recovery.conf", qq(
+ append_to_file(
+ "$test_master_datadir/recovery.conf", qq(
primary_conninfo='port=$port_standby'
standby_mode=on
recovery_target_timeline='latest'
));
# Restart the master to check that rewind went correctly
- system_or_bail("pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1");
+ system_or_bail(
+"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1"
+ );
#### Now run the test-specific parts to check the result
}
@@ -316,11 +354,13 @@ sub clean_rewind_test
{
if ($test_master_datadir)
{
- system "pg_ctl -D $test_master_datadir -s -m immediate stop 2> /dev/null";
+ system
+ "pg_ctl -D $test_master_datadir -s -m immediate stop 2> /dev/null";
}
if ($test_standby_datadir)
{
- system "pg_ctl -D $test_standby_datadir -s -m immediate stop 2> /dev/null";
+ system
+ "pg_ctl -D $test_standby_datadir -s -m immediate stop 2> /dev/null";
}
}
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index 1a56866fd4..cb2bf4d1a0 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -185,13 +185,13 @@ process_source_file(const char *path, file_type_t type, size_t newsize,
*
* If it's smaller in the target, it means that it has been
* truncated in the target, or enlarged in the source, or
- * both. If it was truncated in the target, we need to copy the
- * missing tail from the source system. If it was enlarged in
- * the source system, there will be WAL records in the source
- * system for the new blocks, so we wouldn't need to copy them
- * here. But we don't know which scenario we're dealing with,
- * and there's no harm in copying the missing blocks now, so
- * do it now.
+ * both. If it was truncated in the target, we need to copy
+ * the missing tail from the source system. If it was enlarged
+ * in the source system, there will be WAL records in the
+ * source system for the new blocks, so we wouldn't need to
+ * copy them here. But we don't know which scenario we're
+ * dealing with, and there's no harm in copying the missing
+ * blocks now, so do it now.
*
* If it's the same size, do nothing here. Any blocks modified
* in the target will be copied based on parsing the target
@@ -370,6 +370,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
break;
case FILE_ACTION_COPY_TAIL:
+
/*
* skip the modified block if it is part of the "tail" that
* we're copying anyway.
@@ -391,8 +392,8 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
/*
* If we don't have any record of this file in the file map, it means
* that it's a relation that doesn't exist in the source system, and
- * it was subsequently removed in the target system, too. We can safely
- * ignore it.
+ * it was subsequently removed in the target system, too. We can
+ * safely ignore it.
*/
}
}
diff --git a/src/bin/pg_rewind/filemap.h b/src/bin/pg_rewind/filemap.h
index 73113ecdeb..9943ec5f26 100644
--- a/src/bin/pg_rewind/filemap.h
+++ b/src/bin/pg_rewind/filemap.h
@@ -23,13 +23,13 @@
*/
typedef enum
{
- FILE_ACTION_CREATE, /* create local directory or symbolic link */
- FILE_ACTION_COPY, /* copy whole file, overwriting if exists */
- FILE_ACTION_COPY_TAIL, /* copy tail from 'oldsize' to 'newsize' */
- FILE_ACTION_NONE, /* no action (we might still copy modified blocks
- * based on the parsed WAL) */
- FILE_ACTION_TRUNCATE, /* truncate local file to 'newsize' bytes */
- FILE_ACTION_REMOVE /* remove local file / directory / symlink */
+ FILE_ACTION_CREATE, /* create local directory or symbolic link */
+ FILE_ACTION_COPY, /* copy whole file, overwriting if exists */
+ FILE_ACTION_COPY_TAIL, /* copy tail from 'oldsize' to 'newsize' */
+ FILE_ACTION_NONE, /* no action (we might still copy modified
+ * blocks based on the parsed WAL) */
+ FILE_ACTION_TRUNCATE, /* truncate local file to 'newsize' bytes */
+ FILE_ACTION_REMOVE /* remove local file / directory / symlink */
} file_action_t;
typedef enum
@@ -51,10 +51,10 @@ typedef struct file_entry_t
size_t newsize;
bool isrelfile; /* is it a relation data file? */
- datapagemap_t pagemap;
+ datapagemap_t pagemap;
/* for a symlink */
- char *link_target;
+ char *link_target;
struct file_entry_t *next;
} file_entry_t;
@@ -72,16 +72,16 @@ typedef struct filemap_t
/*
* After processing all the remote files, the entries in the linked list
* are moved to this array. After processing local files, too, all the
- * local entries are added to the array by filemap_finalize, and sorted
- * in the final order. After filemap_finalize, all the entries are in
- * the array, and the linked list is empty.
+ * local entries are added to the array by filemap_finalize, and sorted in
+ * the final order. After filemap_finalize, all the entries are in the
+ * array, and the linked list is empty.
*/
file_entry_t **array;
int narray; /* current length of array */
/*
- * Summary information. total_size is the total size of the source cluster,
- * and fetch_size is the number of bytes that needs to be copied.
+ * Summary information. total_size is the total size of the source
+ * cluster, and fetch_size is the number of bytes that needs to be copied.
*/
uint64 total_size;
uint64 fetch_size;
diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c
index 9c112701e5..fca771d8cb 100644
--- a/src/bin/pg_rewind/parsexlog.c
+++ b/src/bin/pg_rewind/parsexlog.c
@@ -236,7 +236,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
{
XLogPageReadPrivate *private = (XLogPageReadPrivate *) xlogreader->private_data;
uint32 targetPageOff;
- XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+ XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
XLByteToSeg(targetPagePtr, targetSegNo);
targetPageOff = targetPagePtr % XLogSegSize;
@@ -315,10 +315,10 @@ extractPageInfo(XLogReaderState *record)
/*
* New databases can be safely ignored. It won't be present in the
* source system, so it will be deleted. There's one corner-case,
- * though: if a new, different, database is also created in the
- * source system, we'll see that the files already exist and not copy
- * them. That's OK, though; WAL replay of creating the new database,
- * from the source systems's WAL, will re-copy the new database,
+ * though: if a new, different, database is also created in the source
+ * system, we'll see that the files already exist and not copy them.
+ * That's OK, though; WAL replay of creating the new database, from
+ * the source systems's WAL, will re-copy the new database,
* overwriting the database created in the target system.
*/
}
diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c
index d3ae7674d7..8088be4fab 100644
--- a/src/bin/pg_rewind/pg_rewind.c
+++ b/src/bin/pg_rewind/pg_rewind.c
@@ -490,15 +490,15 @@ createBackupLabel(XLogRecPtr startpoint, TimeLineID starttli, XLogRecPtr checkpo
"BACKUP METHOD: pg_rewind\n"
"BACKUP FROM: standby\n"
"START TIME: %s\n",
- /* omit LABEL: line */
- (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename,
+ /* omit LABEL: line */
+ (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename,
(uint32) (checkpointloc >> 32), (uint32) checkpointloc,
strfbuf);
if (len >= sizeof(buf))
- pg_fatal("backup label buffer too small\n"); /* shouldn't happen */
+ pg_fatal("backup label buffer too small\n"); /* shouldn't happen */
/* TODO: move old file out of the way, if any. */
- open_target_file("backup_label", true); /* BACKUP_LABEL_FILE */
+ open_target_file("backup_label", true); /* BACKUP_LABEL_FILE */
write_target_range(buf, 0, len);
}
diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl
index a1d679f6b8..f60368bd30 100644
--- a/src/bin/pg_rewind/t/001_basic.pl
+++ b/src/bin/pg_rewind/t/001_basic.pl
@@ -32,8 +32,11 @@ sub run_test
# Insert additional data on master that will be replicated to standby
master_psql("INSERT INTO tbl1 values ('in master, before promotion')");
- master_psql("INSERT INTO trunc_tbl values ('in master, before promotion')");
- master_psql("INSERT INTO tail_tbl SELECT g, 'in master, before promotion: ' || g FROM generate_series(1, 10000) g");
+ master_psql(
+ "INSERT INTO trunc_tbl values ('in master, before promotion')");
+ master_psql(
+"INSERT INTO tail_tbl SELECT g, 'in master, before promotion: ' || g FROM generate_series(1, 10000) g"
+ );
master_psql('CHECKPOINT');
@@ -50,7 +53,9 @@ sub run_test
# Insert enough rows to trunc_tbl to extend the file. pg_rewind should
# truncate it back to the old size.
- master_psql("INSERT INTO trunc_tbl SELECT 'in master, after promotion: ' || g FROM generate_series(1, 10000) g");
+ master_psql(
+"INSERT INTO trunc_tbl SELECT 'in master, after promotion: ' || g FROM generate_series(1, 10000) g"
+ );
# Truncate tail_tbl. pg_rewind should copy back the truncated part
# (We cannot use an actual TRUNCATE command here, as that creates a
@@ -60,20 +65,23 @@ sub run_test
RewindTest::run_pg_rewind($test_mode);
- check_query('SELECT * FROM tbl1',
+ check_query(
+ 'SELECT * FROM tbl1',
qq(in master
in master, before promotion
in standby, after promotion
),
'table content');
- check_query('SELECT * FROM trunc_tbl',
+ check_query(
+ 'SELECT * FROM trunc_tbl',
qq(in master
in master, before promotion
),
'truncation');
- check_query('SELECT count(*) FROM tail_tbl',
+ check_query(
+ 'SELECT count(*) FROM tail_tbl',
qq(10001
),
'tail-copy');
diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl
index be1e1948a7..7564fa98a5 100644
--- a/src/bin/pg_rewind/t/002_databases.pl
+++ b/src/bin/pg_rewind/t/002_databases.pl
@@ -25,20 +25,22 @@ sub run_test
# Create databases in the old master and the new promoted standby.
master_psql('CREATE DATABASE master_afterpromotion');
standby_psql('CREATE DATABASE standby_afterpromotion');
+
# The clusters are now diverged.
RewindTest::run_pg_rewind($test_mode);
# Check that the correct databases are present after pg_rewind.
- check_query('SELECT datname FROM pg_database',
- qq(template1
+ check_query(
+ 'SELECT datname FROM pg_database',
+ qq(template1
template0
postgres
inmaster
beforepromotion
standby_afterpromotion
),
- 'database names');
+ 'database names');
RewindTest::clean_rewind_test();
}
diff --git a/src/bin/pg_rewind/t/003_extrafiles.pl b/src/bin/pg_rewind/t/003_extrafiles.pl
index ed50659195..9a952685be 100644
--- a/src/bin/pg_rewind/t/003_extrafiles.pl
+++ b/src/bin/pg_rewind/t/003_extrafiles.pl
@@ -24,44 +24,58 @@ sub run_test
append_to_file "$test_master_datadir/tst_both_dir/both_file1", "in both1";
append_to_file "$test_master_datadir/tst_both_dir/both_file2", "in both2";
mkdir "$test_master_datadir/tst_both_dir/both_subdir/";
- append_to_file "$test_master_datadir/tst_both_dir/both_subdir/both_file3", "in both3";
+ append_to_file "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
+ "in both3";
RewindTest::create_standby();
# Create different subdirs and files in master and standby
mkdir "$test_standby_datadir/tst_standby_dir";
- append_to_file "$test_standby_datadir/tst_standby_dir/standby_file1", "in standby1";
- append_to_file "$test_standby_datadir/tst_standby_dir/standby_file2", "in standby2";
+ append_to_file "$test_standby_datadir/tst_standby_dir/standby_file1",
+ "in standby1";
+ append_to_file "$test_standby_datadir/tst_standby_dir/standby_file2",
+ "in standby2";
mkdir "$test_standby_datadir/tst_standby_dir/standby_subdir/";
- append_to_file "$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file3", "in standby3";
+ append_to_file
+ "$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file3",
+ "in standby3";
mkdir "$test_master_datadir/tst_master_dir";
- append_to_file "$test_master_datadir/tst_master_dir/master_file1", "in master1";
- append_to_file "$test_master_datadir/tst_master_dir/master_file2", "in master2";
+ append_to_file "$test_master_datadir/tst_master_dir/master_file1",
+ "in master1";
+ append_to_file "$test_master_datadir/tst_master_dir/master_file2",
+ "in master2";
mkdir "$test_master_datadir/tst_master_dir/master_subdir/";
- append_to_file "$test_master_datadir/tst_master_dir/master_subdir/master_file3", "in master3";
+ append_to_file
+ "$test_master_datadir/tst_master_dir/master_subdir/master_file3",
+ "in master3";
RewindTest::promote_standby();
RewindTest::run_pg_rewind($test_mode);
# List files in the data directory after rewind.
my @paths;
- find(sub {push @paths, $File::Find::name if $File::Find::name =~ m/.*tst_.*/},
- $test_master_datadir);
+ find(
+ sub {
+ push @paths, $File::Find::name
+ if $File::Find::name =~ m/.*tst_.*/;
+ },
+ $test_master_datadir);
@paths = sort @paths;
- is_deeply(\@paths,
- ["$test_master_datadir/tst_both_dir",
- "$test_master_datadir/tst_both_dir/both_file1",
- "$test_master_datadir/tst_both_dir/both_file2",
- "$test_master_datadir/tst_both_dir/both_subdir",
- "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
- "$test_master_datadir/tst_standby_dir",
- "$test_master_datadir/tst_standby_dir/standby_file1",
- "$test_master_datadir/tst_standby_dir/standby_file2",
- "$test_master_datadir/tst_standby_dir/standby_subdir",
- "$test_master_datadir/tst_standby_dir/standby_subdir/standby_file3"],
- "file lists match");
+ is_deeply(
+ \@paths,
+ [ "$test_master_datadir/tst_both_dir",
+ "$test_master_datadir/tst_both_dir/both_file1",
+ "$test_master_datadir/tst_both_dir/both_file2",
+ "$test_master_datadir/tst_both_dir/both_subdir",
+ "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
+ "$test_master_datadir/tst_standby_dir",
+ "$test_master_datadir/tst_standby_dir/standby_file1",
+ "$test_master_datadir/tst_standby_dir/standby_file2",
+ "$test_master_datadir/tst_standby_dir/standby_subdir",
+"$test_master_datadir/tst_standby_dir/standby_subdir/standby_file3" ],
+ "file lists match");
RewindTest::clean_rewind_test();
}
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 99c66be7fb..5a91871c35 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -317,16 +317,16 @@ equivalent_locale(int category, const char *loca, const char *locb)
int lenb;
/*
- * If the names are equal, the locales are equivalent. Checking this
- * first avoids calling setlocale() in the common case that the names
- * are equal. That's a good thing, if setlocale() is buggy, for example.
+ * If the names are equal, the locales are equivalent. Checking this first
+ * avoids calling setlocale() in the common case that the names are equal.
+ * That's a good thing, if setlocale() is buggy, for example.
*/
if (pg_strcasecmp(loca, locb) == 0)
return true;
/*
- * Not identical. Canonicalize both names, remove the encoding parts,
- * and try again.
+ * Not identical. Canonicalize both names, remove the encoding parts, and
+ * try again.
*/
canona = get_canonical_locale_name(category, loca);
chara = strrchr(canona, '.');
@@ -512,7 +512,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
{
/* reproduce warning from CREATE TABLESPACE that is in the log */
pg_log(PG_WARNING,
- "\nWARNING: user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
+ "\nWARNING: user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
/* Unlink file in case it is left over from a previous run. */
unlink(*deletion_script_file_name);
@@ -611,8 +611,8 @@ check_is_install_user(ClusterInfo *cluster)
/*
* We only allow the install user in the new cluster (see comment below)
- * and we preserve pg_authid.oid, so this must be the install user in
- * the old cluster too.
+ * and we preserve pg_authid.oid, so this must be the install user in the
+ * old cluster too.
*/
if (PQntuples(res) != 1 ||
atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
@@ -681,10 +681,13 @@ check_proper_datallowconn(ClusterInfo *cluster)
}
else
{
- /* avoid datallowconn == false databases from being skipped on restore */
+ /*
+ * avoid datallowconn == false databases from being skipped on
+ * restore
+ */
if (strcmp(datallowconn, "f") == 0)
pg_fatal("All non-template0 databases must allow connections, "
- "i.e. their pg_database.datallowconn must be true\n");
+ "i.e. their pg_database.datallowconn must be true\n");
}
}
@@ -873,7 +876,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
" 'pg_catalog.regconfig'::pg_catalog.regtype, "
" 'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
" c.relnamespace = n.oid AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -964,7 +967,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -999,7 +1002,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains one of the JSONB data types in user tables.\n"
- "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
+ "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
"be upgraded. You can remove the problem tables and restart the upgrade. A list\n"
"of the problem columns is in the file:\n"
" %s\n\n", output_path);
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 2c20e847ac..6d6f84d725 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -111,7 +111,7 @@ optionally_create_toast_tables(void)
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n "
"WHERE c.relnamespace = n.oid AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
"c.relkind IN ('r', 'm') AND "
"c.reltoastrelid = 0");
@@ -122,12 +122,12 @@ optionally_create_toast_tables(void)
{
/* enable auto-oid-numbered TOAST creation if needed */
PQclear(executeQueryOrDie(conn, "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%d'::pg_catalog.oid);",
- OPTIONALLY_CREATE_TOAST_OID));
+ OPTIONALLY_CREATE_TOAST_OID));
/* dummy command that also triggers check for required TOAST table */
PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);",
- quote_identifier(PQgetvalue(res, rowno, i_nspname)),
- quote_identifier(PQgetvalue(res, rowno, i_relname))));
+ quote_identifier(PQgetvalue(res, rowno, i_nspname)),
+ quote_identifier(PQgetvalue(res, rowno, i_relname))));
}
PQclear(res);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index c0a5601209..e158c9ff8b 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -38,16 +38,16 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
FileNameMap *maps;
- int old_relnum, new_relnum;
+ int old_relnum,
+ new_relnum;
int num_maps = 0;
maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
old_db->rel_arr.nrels);
/*
- * The old database shouldn't have more relations than the new one.
- * We force the new cluster to have a TOAST table if the old table
- * had one.
+ * The old database shouldn't have more relations than the new one. We
+ * force the new cluster to have a TOAST table if the old table had one.
*/
if (old_db->rel_arr.nrels > new_db->rel_arr.nrels)
pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n",
@@ -62,15 +62,15 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
/*
* It is possible that the new cluster has a TOAST table for a table
- * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the
- * NUMERIC length computation. Therefore, if we have a TOAST table
- * in the new cluster that doesn't match, skip over it and continue
- * processing. It is possible this TOAST table used an OID that was
- * reserved in the old cluster, but we have no way of testing that,
- * and we would have already gotten an error at the new cluster schema
- * creation stage. Fortunately, since we only restore the OID counter
- * after schema restore, and restore in OID order via pg_dump, a
- * conflict would only happen if the new TOAST table had a very low
+ * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed
+ * the NUMERIC length computation. Therefore, if we have a TOAST
+ * table in the new cluster that doesn't match, skip over it and
+ * continue processing. It is possible this TOAST table used an OID
+ * that was reserved in the old cluster, but we have no way of testing
+ * that, and we would have already gotten an error at the new cluster
+ * schema creation stage. Fortunately, since we only restore the OID
+ * counter after schema restore, and restore in OID order via pg_dump,
+ * a conflict would only happen if the new TOAST table had a very low
* OID. However, TOAST tables created long after initial table
* creation can have any OID, particularly after OID wraparound.
*/
@@ -330,75 +330,77 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
*/
snprintf(query, sizeof(query),
- /* get regular heap */
- "WITH regular_heap (reloid) AS ( "
- " SELECT c.oid "
- " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
- " ON c.relnamespace = n.oid "
- " LEFT OUTER JOIN pg_catalog.pg_index i "
- " ON c.oid = i.indexrelid "
- " WHERE relkind IN ('r', 'm', 'i', 'S') AND "
- /*
- * pg_dump only dumps valid indexes; testing indisready is necessary in
- * 9.2, and harmless in earlier/later versions.
- */
- " i.indisvalid IS DISTINCT FROM false AND "
- " i.indisready IS DISTINCT FROM false AND "
- /* exclude possible orphaned temp tables */
- " ((n.nspname !~ '^pg_temp_' AND "
- " n.nspname !~ '^pg_toast_temp_' AND "
- /* skip pg_toast because toast index have relkind == 'i', not 't' */
- " n.nspname NOT IN ('pg_catalog', 'information_schema', "
- " 'binary_upgrade', 'pg_toast') AND "
- " c.oid >= %u) OR "
- " (n.nspname = 'pg_catalog' AND "
- " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
- /*
- * We have to gather the TOAST tables in later steps because we
- * can't schema-qualify TOAST tables.
- */
- /* get TOAST heap */
- " toast_heap (reloid) AS ( "
- " SELECT reltoastrelid "
- " FROM regular_heap JOIN pg_catalog.pg_class c "
- " ON regular_heap.reloid = c.oid "
- " AND c.reltoastrelid != %u), "
- /* get indexes on regular and TOAST heap */
- " all_index (reloid) AS ( "
- " SELECT indexrelid "
- " FROM pg_index "
- " WHERE indisvalid "
- " AND indrelid IN (SELECT reltoastrelid "
- " FROM (SELECT reloid FROM regular_heap "
- " UNION ALL "
- " SELECT reloid FROM toast_heap) all_heap "
- " JOIN pg_catalog.pg_class c "
- " ON all_heap.reloid = c.oid "
- " AND c.reltoastrelid != %u)) "
- /* get all rels */
- "SELECT c.oid, n.nspname, c.relname, "
- " c.relfilenode, c.reltablespace, %s "
- "FROM (SELECT reloid FROM regular_heap "
- " UNION ALL "
- " SELECT reloid FROM toast_heap "
- " UNION ALL "
- " SELECT reloid FROM all_index) all_rels "
- " JOIN pg_catalog.pg_class c "
- " ON all_rels.reloid = c.oid "
- " JOIN pg_catalog.pg_namespace n "
- " ON c.relnamespace = n.oid "
- " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
- " ON c.reltablespace = t.oid "
+ /* get regular heap */
+ "WITH regular_heap (reloid) AS ( "
+ " SELECT c.oid "
+ " FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
+ " ON c.relnamespace = n.oid "
+ " LEFT OUTER JOIN pg_catalog.pg_index i "
+ " ON c.oid = i.indexrelid "
+ " WHERE relkind IN ('r', 'm', 'i', 'S') AND "
+
+ /*
+ * pg_dump only dumps valid indexes; testing indisready is necessary in
+ * 9.2, and harmless in earlier/later versions.
+ */
+ " i.indisvalid IS DISTINCT FROM false AND "
+ " i.indisready IS DISTINCT FROM false AND "
+ /* exclude possible orphaned temp tables */
+ " ((n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ /* skip pg_toast because toast index have relkind == 'i', not 't' */
+ " n.nspname NOT IN ('pg_catalog', 'information_schema', "
+ " 'binary_upgrade', 'pg_toast') AND "
+ " c.oid >= %u) OR "
+ " (n.nspname = 'pg_catalog' AND "
+ " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
+
+ /*
+ * We have to gather the TOAST tables in later steps because we can't
+ * schema-qualify TOAST tables.
+ */
+ /* get TOAST heap */
+ " toast_heap (reloid) AS ( "
+ " SELECT reltoastrelid "
+ " FROM regular_heap JOIN pg_catalog.pg_class c "
+ " ON regular_heap.reloid = c.oid "
+ " AND c.reltoastrelid != %u), "
+ /* get indexes on regular and TOAST heap */
+ " all_index (reloid) AS ( "
+ " SELECT indexrelid "
+ " FROM pg_index "
+ " WHERE indisvalid "
+ " AND indrelid IN (SELECT reltoastrelid "
+ " FROM (SELECT reloid FROM regular_heap "
+ " UNION ALL "
+ " SELECT reloid FROM toast_heap) all_heap "
+ " JOIN pg_catalog.pg_class c "
+ " ON all_heap.reloid = c.oid "
+ " AND c.reltoastrelid != %u)) "
+ /* get all rels */
+ "SELECT c.oid, n.nspname, c.relname, "
+ " c.relfilenode, c.reltablespace, %s "
+ "FROM (SELECT reloid FROM regular_heap "
+ " UNION ALL "
+ " SELECT reloid FROM toast_heap "
+ " UNION ALL "
+ " SELECT reloid FROM all_index) all_rels "
+ " JOIN pg_catalog.pg_class c "
+ " ON all_rels.reloid = c.oid "
+ " JOIN pg_catalog.pg_namespace n "
+ " ON c.relnamespace = n.oid "
+ " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+ " ON c.reltablespace = t.oid "
/* we preserve pg_class.oid so we sort by it to match old/new */
- "ORDER BY 1;",
- FirstNormalObjectId,
+ "ORDER BY 1;",
+ FirstNormalObjectId,
/* does pg_largeobject_metadata need to be migrated? */
- (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
- "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
- InvalidOid, InvalidOid,
+ (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
+ "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
+ InvalidOid, InvalidOid,
/* 9.2 removed the spclocation column */
- (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+ (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+ "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index b851056135..90f1401549 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -142,7 +142,7 @@ parseCommandLine(int argc, char *argv[])
old_cluster.pgopts = pg_strdup(optarg);
else
{
- char *old_pgopts = old_cluster.pgopts;
+ char *old_pgopts = old_cluster.pgopts;
old_cluster.pgopts = psprintf("%s %s", old_pgopts, optarg);
free(old_pgopts);
@@ -155,7 +155,7 @@ parseCommandLine(int argc, char *argv[])
new_cluster.pgopts = pg_strdup(optarg);
else
{
- char *new_pgopts = new_cluster.pgopts;
+ char *new_pgopts = new_cluster.pgopts;
new_cluster.pgopts = psprintf("%s %s", new_pgopts, optarg);
free(new_pgopts);
@@ -249,13 +249,15 @@ parseCommandLine(int argc, char *argv[])
"PGDATANEW", "-D", "new cluster data resides");
#ifdef WIN32
+
/*
* On Windows, initdb --sync-only will fail with a "Permission denied"
- * error on file pg_upgrade_utility.log if pg_upgrade is run inside
- * the new cluster directory, so we do a check here.
+ * error on file pg_upgrade_utility.log if pg_upgrade is run inside the
+ * new cluster directory, so we do a check here.
*/
{
- char cwd[MAXPGPATH], new_cluster_pgdata[MAXPGPATH];
+ char cwd[MAXPGPATH],
+ new_cluster_pgdata[MAXPGPATH];
strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
canonicalize_path(new_cluster_pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 4e6a9f91be..8cdfaf35ef 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -333,8 +333,8 @@ create_new_objects(void)
check_ok();
/*
- * We don't have minmxids for databases or relations in pre-9.3
- * clusters, so set those after we have restores the schemas.
+ * We don't have minmxids for databases or relations in pre-9.3 clusters,
+ * so set those after we have restores the schemas.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
set_frozenxids(true);
@@ -473,7 +473,7 @@ copy_clog_xlog_xid(void)
/* now reset the wal archives in the new cluster */
prep_status("Resetting WAL archives");
exec_prog(UTILITY_LOG_FILE, NULL, true,
- /* use timeline 1 to match controldata and no WAL history file */
+ /* use timeline 1 to match controldata and no WAL history file */
"\"%s/pg_resetxlog\" -l 00000001%s \"%s\"", new_cluster.bindir,
old_cluster.controldata.nextxlogfile + 8,
new_cluster.pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index aecf0df30c..13aa891d59 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -329,7 +329,7 @@ extern OSInfo os_info;
/* check.c */
void output_check_banner(bool live_check);
-void check_and_dump_old_cluster(bool live_check);
+void check_and_dump_old_cluster(bool live_check);
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(void);
@@ -358,7 +358,7 @@ void optionally_create_toast_tables(void);
#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1"
-bool exec_prog(const char *log_file, const char *opt_log_file,
+bool exec_prog(const char *log_file, const char *opt_log_file,
bool throw_error, const char *fmt,...) pg_attribute_printf(4, 5);
void verify_directories(void);
bool pid_lock_file_exists(const char *datadir);
@@ -471,7 +471,7 @@ void pg_putenv(const char *var, const char *val);
void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
bool check_mode);
-void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
+void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
/* parallel.c */
void parallel_exec_prog(const char *log_file, const char *opt_log_file,
diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c
index 7b3215af56..c22df42949 100644
--- a/src/bin/pg_upgrade/relfilenode.c
+++ b/src/bin/pg_upgrade/relfilenode.c
@@ -35,10 +35,10 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/*
- * Transferring files by tablespace is tricky because a single database can
- * use multiple tablespaces. For non-parallel mode, we just pass a NULL
- * tablespace path, which matches all tablespaces. In parallel mode, we
- * pass the default tablespace and all user-created tablespaces and let
+ * Transferring files by tablespace is tricky because a single database
+ * can use multiple tablespaces. For non-parallel mode, we just pass a
+ * NULL tablespace path, which matches all tablespaces. In parallel mode,
+ * we pass the default tablespace and all user-created tablespaces and let
* those operations happen in parallel.
*/
if (user_opts.jobs <= 1)
diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c
index 8d8e7d7073..8c6b6da515 100644
--- a/src/bin/pg_upgrade/server.c
+++ b/src/bin/pg_upgrade/server.c
@@ -204,11 +204,12 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
/*
* Since PG 9.1, we have used -b to disable autovacuum. For earlier
* releases, setting autovacuum=off disables cleanup vacuum and analyze,
- * but freeze vacuums can still happen, so we set autovacuum_freeze_max_age
- * to its maximum. (autovacuum_multixact_freeze_max_age was introduced
- * after 9.1, so there is no need to set that.) We assume all datfrozenxid
- * and relfrozenxid values are less than a gap of 2000000000 from the current
- * xid counter, so autovacuum will not touch them.
+ * but freeze vacuums can still happen, so we set
+ * autovacuum_freeze_max_age to its maximum.
+ * (autovacuum_multixact_freeze_max_age was introduced after 9.1, so there
+ * is no need to set that.) We assume all datfrozenxid and relfrozenxid
+ * values are less than a gap of 2000000000 from the current xid counter,
+ * so autovacuum will not touch them.
*
* Turn off durability requirements to improve object creation speed, and
* we only modify the new cluster, so only use it there. If there is a
diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c
index e3e7387c92..9954daea17 100644
--- a/src/bin/pg_upgrade/version.c
+++ b/src/bin/pg_upgrade/version.c
@@ -167,9 +167,9 @@ old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"line\" data type in user tables. This\n"
- "data type changed its internal and input/output format between your old\n"
+ "data type changed its internal and input/output format between your old\n"
"and new clusters so this cluster cannot currently be upgraded. You can\n"
- "remove the problem tables and restart the upgrade. A list of the problem\n"
+ "remove the problem tables and restart the upgrade. A list of the problem\n"
"columns is in the file:\n"
" %s\n\n", output_path);
}
diff --git a/src/bin/pg_xlogdump/pg_xlogdump.c b/src/bin/pg_xlogdump/pg_xlogdump.c
index e9cbbd264d..c0a6816784 100644
--- a/src/bin/pg_xlogdump/pg_xlogdump.c
+++ b/src/bin/pg_xlogdump/pg_xlogdump.c
@@ -494,7 +494,10 @@ XLogDumpStatsRow(const char *name,
uint64 fpi_len, uint64 total_fpi_len,
uint64 tot_len, uint64 total_len)
{
- double n_pct, rec_len_pct, fpi_len_pct, tot_len_pct;
+ double n_pct,
+ rec_len_pct,
+ fpi_len_pct,
+ tot_len_pct;
n_pct = 0;
if (total_count != 0)
@@ -528,12 +531,14 @@ XLogDumpStatsRow(const char *name,
static void
XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
{
- int ri, rj;
+ int ri,
+ rj;
uint64 total_count = 0;
uint64 total_rec_len = 0;
uint64 total_fpi_len = 0;
uint64 total_len = 0;
- double rec_len_pct, fpi_len_pct;
+ double rec_len_pct,
+ fpi_len_pct;
/* ---
* Make a first pass to calculate column totals:
@@ -551,11 +556,11 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
total_rec_len += stats->rmgr_stats[ri].rec_len;
total_fpi_len += stats->rmgr_stats[ri].fpi_len;
}
- total_len = total_rec_len+total_fpi_len;
+ total_len = total_rec_len + total_fpi_len;
/*
- * 27 is strlen("Transaction/COMMIT_PREPARED"),
- * 20 is strlen(2^64), 8 is strlen("(100.00%)")
+ * 27 is strlen("Transaction/COMMIT_PREPARED"), 20 is strlen(2^64), 8 is
+ * strlen("(100.00%)")
*/
printf("%-27s %20s %8s %20s %8s %20s %8s %20s %8s\n"
@@ -565,7 +570,10 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
for (ri = 0; ri < RM_NEXT_ID; ri++)
{
- uint64 count, rec_len, fpi_len, tot_len;
+ uint64 count,
+ rec_len,
+ fpi_len,
+ tot_len;
const RmgrDescData *desc = &RmgrDescTable[ri];
if (!config->stats_per_record)
@@ -610,10 +618,10 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
"", "--------", "", "--------", "", "--------", "", "--------");
/*
- * The percentages in earlier rows were calculated against the
- * column total, but the ones that follow are against the row total.
- * Note that these are displayed with a % symbol to differentiate
- * them from the earlier ones, and are thus up to 9 characters long.
+ * The percentages in earlier rows were calculated against the column
+ * total, but the ones that follow are against the row total. Note that
+ * these are displayed with a % symbol to differentiate them from the
+ * earlier ones, and are thus up to 9 characters long.
*/
rec_len_pct = 0;
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 8b8b5911d6..6f35db4763 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -100,7 +100,7 @@ static int pthread_join(pthread_t th, void **thread_return);
#define LOG_STEP_SECONDS 5 /* seconds between log messages */
#define DEFAULT_NXACTS 10 /* default nxacts */
-#define MIN_GAUSSIAN_THRESHOLD 2.0 /* minimum threshold for gauss */
+#define MIN_GAUSSIAN_THRESHOLD 2.0 /* minimum threshold for gauss */
int nxacts = 0; /* number of transactions per client */
int duration = 0; /* duration in seconds */
@@ -244,7 +244,8 @@ typedef struct
int64 throttle_trigger; /* previous/next throttling (us) */
int64 throttle_lag; /* total transaction lag behind throttling */
int64 throttle_lag_max; /* max transaction lag */
- int64 throttle_latency_skipped; /* lagging transactions skipped */
+ int64 throttle_latency_skipped; /* lagging transactions
+ * skipped */
int64 latency_late; /* late transactions */
} TState;
@@ -296,8 +297,8 @@ typedef struct
long start_time; /* when does the interval start */
int cnt; /* number of transactions */
- int skipped; /* number of transactions skipped under
- * --rate and --latency-limit */
+ int skipped; /* number of transactions skipped under --rate
+ * and --latency-limit */
double min_latency; /* min/max latencies */
double max_latency;
@@ -389,7 +390,7 @@ usage(void)
" -f, --file=FILENAME read transaction script from FILENAME\n"
" -j, --jobs=NUM number of threads (default: 1)\n"
" -l, --log write transaction times to log file\n"
- " -L, --latency-limit=NUM count transactions lasting more than NUM ms\n"
+ " -L, --latency-limit=NUM count transactions lasting more than NUM ms\n"
" as late.\n"
" -M, --protocol=simple|extended|prepared\n"
" protocol for submitting queries (default: simple)\n"
@@ -509,19 +510,22 @@ getrand(TState *thread, int64 min, int64 max)
static int64
getExponentialRand(TState *thread, int64 min, int64 max, double threshold)
{
- double cut, uniform, rand;
+ double cut,
+ uniform,
+ rand;
+
Assert(threshold > 0.0);
cut = exp(-threshold);
/* erand in [0, 1), uniform in (0, 1] */
uniform = 1.0 - pg_erand48(thread->random_state);
+
/*
- * inner expresion in (cut, 1] (if threshold > 0),
- * rand in [0, 1)
+ * inner expresion in (cut, 1] (if threshold > 0), rand in [0, 1)
*/
Assert((1.0 - cut) != 0.0);
- rand = - log(cut + (1.0 - cut) * uniform) / threshold;
+ rand = -log(cut + (1.0 - cut) * uniform) / threshold;
/* return int64 random number within between min and max */
- return min + (int64)((max - min + 1) * rand);
+ return min + (int64) ((max - min + 1) * rand);
}
/* random number generator: gaussian distribution from min to max inclusive */
@@ -532,34 +536,37 @@ getGaussianRand(TState *thread, int64 min, int64 max, double threshold)
double rand;
/*
- * Get user specified random number from this loop, with
- * -threshold < stdev <= threshold
+ * Get user specified random number from this loop, with -threshold <
+ * stdev <= threshold
*
* This loop is executed until the number is in the expected range.
*
* As the minimum threshold is 2.0, the probability of looping is low:
- * sqrt(-2 ln(r)) <= 2 => r >= e^{-2} ~ 0.135, then when taking the average
- * sinus multiplier as 2/pi, we have a 8.6% looping probability in the
- * worst case. For a 5.0 threshold value, the looping probability
- * is about e^{-5} * 2 / pi ~ 0.43%.
+ * sqrt(-2 ln(r)) <= 2 => r >= e^{-2} ~ 0.135, then when taking the
+ * average sinus multiplier as 2/pi, we have a 8.6% looping probability in
+ * the worst case. For a 5.0 threshold value, the looping probability is
+ * about e^{-5} * 2 / pi ~ 0.43%.
*/
do
{
/*
* pg_erand48 generates [0,1), but for the basic version of the
* Box-Muller transform the two uniformly distributed random numbers
- * are expected in (0, 1] (see http://en.wikipedia.org/wiki/Box_muller)
+ * are expected in (0, 1] (see
+ * http://en.wikipedia.org/wiki/Box_muller)
*/
- double rand1 = 1.0 - pg_erand48(thread->random_state);
- double rand2 = 1.0 - pg_erand48(thread->random_state);
+ double rand1 = 1.0 - pg_erand48(thread->random_state);
+ double rand2 = 1.0 - pg_erand48(thread->random_state);
/* Box-Muller basic form transform */
- double var_sqrt = sqrt(-2.0 * log(rand1));
+ double var_sqrt = sqrt(-2.0 * log(rand1));
+
stdev = var_sqrt * sin(2.0 * M_PI * rand2);
/*
- * we may try with cos, but there may be a bias induced if the previous
- * value fails the test. To be on the safe side, let us try over.
+ * we may try with cos, but there may be a bias induced if the
+ * previous value fails the test. To be on the safe side, let us try
+ * over.
*/
}
while (stdev < -threshold || stdev >= threshold);
@@ -568,7 +575,7 @@ getGaussianRand(TState *thread, int64 min, int64 max, double threshold)
rand = (stdev + threshold) / (threshold * 2.0);
/* return int64 random number within between min and max */
- return min + (int64)((max - min + 1) * rand);
+ return min + (int64) ((max - min + 1) * rand);
}
/*
@@ -582,7 +589,7 @@ getPoissonRand(TState *thread, int64 center)
* Use inverse transform sampling to generate a value > 0, such that the
* expected (i.e. average) value is the given argument.
*/
- double uniform;
+ double uniform;
/* erand in [0, 1), uniform in (0, 1] */
uniform = 1.0 - pg_erand48(thread->random_state);
@@ -918,7 +925,7 @@ evaluateExpr(CState *st, PgBenchExpr *expr, int64 *retval)
if ((var = getVariable(st, expr->u.variable.varname)) == NULL)
{
fprintf(stderr, "undefined variable %s\n",
- expr->u.variable.varname);
+ expr->u.variable.varname);
return false;
}
*retval = strtoint64(var);
@@ -927,8 +934,8 @@ evaluateExpr(CState *st, PgBenchExpr *expr, int64 *retval)
case ENODE_OPERATOR:
{
- int64 lval;
- int64 rval;
+ int64 lval;
+ int64 rval;
if (!evaluateExpr(st, expr->u.operator.lexpr, &lval))
return false;
@@ -1115,7 +1122,7 @@ agg_vals_init(AggVals *aggs, instr_time start)
aggs->skipped = 0; /* xacts skipped under --rate --latency-limit */
aggs->sum_latency = 0; /* SUM(latency) */
- aggs->sum2_latency = 0; /* SUM(latency*latency) */
+ aggs->sum2_latency = 0; /* SUM(latency*latency) */
/* min and max transaction duration */
aggs->min_latency = 0;
@@ -1535,9 +1542,10 @@ top:
/*
* Generate random number functions need to be able to subtract
* max from min and add one to the result without overflowing.
- * Since we know max > min, we can detect overflow just by checking
- * for a negative result. But we must check both that the subtraction
- * doesn't overflow, and that adding one to the result doesn't overflow either.
+ * Since we know max > min, we can detect overflow just by
+ * checking for a negative result. But we must check both that the
+ * subtraction doesn't overflow, and that adding one to the result
+ * doesn't overflow either.
*/
if (max - min < 0 || (max - min) + 1 < 0)
{
@@ -1546,7 +1554,7 @@ top:
return true;
}
- if (argc == 4 || /* uniform without or with "uniform" keyword */
+ if (argc == 4 || /* uniform without or with "uniform" keyword */
(argc == 5 && pg_strcasecmp(argv[4], "uniform") == 0))
{
#ifdef DEBUG
@@ -1598,7 +1606,7 @@ top:
snprintf(res, sizeof(res), INT64_FORMAT, getExponentialRand(thread, min, max, threshold));
}
}
- else /* this means an error somewhere in the parsing phase... */
+ else /* this means an error somewhere in the parsing phase... */
{
fprintf(stderr, "%s: unexpected arguments\n", argv[0]);
st->ecnt++;
@@ -1742,7 +1750,10 @@ doLog(TState *thread, CState *st, FILE *logfile, instr_time *now, AggVals *agg,
agg->cnt += 1;
if (skipped)
{
- /* there is no latency to record if the transaction was skipped */
+ /*
+ * there is no latency to record if the transaction was
+ * skipped
+ */
agg->skipped += 1;
}
else
@@ -1779,9 +1790,9 @@ doLog(TState *thread, CState *st, FILE *logfile, instr_time *now, AggVals *agg,
while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(*now))
{
/*
- * This is a non-Windows branch (thanks to the
- * ifdef in usage), so we don't need to handle
- * this in a special way (see below).
+ * This is a non-Windows branch (thanks to the ifdef in
+ * usage), so we don't need to handle this in a special way
+ * (see below).
*/
fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f",
agg->start_time,
@@ -2217,7 +2228,7 @@ syntax_error(const char *source, const int lineno,
fprintf(stderr, "%s\n", line);
if (column != -1)
{
- int i;
+ int i;
for (i = 0; i < column - 1; i++)
fprintf(stderr, " ");
@@ -2260,7 +2271,8 @@ process_commands(char *buf, const char *source, const int lineno)
if (*p == '\\')
{
- int max_args = -1;
+ int max_args = -1;
+
my_commands->type = META_COMMAND;
j = 0;
@@ -2282,9 +2294,9 @@ process_commands(char *buf, const char *source, const int lineno)
if (pg_strcasecmp(my_commands->argv[0], "setrandom") == 0)
{
- /* parsing:
- * \setrandom variable min max [uniform]
- * \setrandom variable min max (gaussian|exponential) threshold
+ /*
+ * parsing: \setrandom variable min max [uniform] \setrandom
+ * variable min max (gaussian|exponential) threshold
*/
if (my_commands->argc < 4)
@@ -2295,20 +2307,21 @@ process_commands(char *buf, const char *source, const int lineno)
/* argc >= 4 */
- if (my_commands->argc == 4 || /* uniform without/with "uniform" keyword */
+ if (my_commands->argc == 4 || /* uniform without/with
+ * "uniform" keyword */
(my_commands->argc == 5 &&
pg_strcasecmp(my_commands->argv[4], "uniform") == 0))
{
/* nothing to do */
}
- else if (/* argc >= 5 */
+ else if ( /* argc >= 5 */
(pg_strcasecmp(my_commands->argv[4], "gaussian") == 0) ||
- (pg_strcasecmp(my_commands->argv[4], "exponential") == 0))
+ (pg_strcasecmp(my_commands->argv[4], "exponential") == 0))
{
if (my_commands->argc < 6)
{
syntax_error(source, lineno, my_commands->line, my_commands->argv[0],
- "missing threshold argument", my_commands->argv[4], -1);
+ "missing threshold argument", my_commands->argv[4], -1);
}
else if (my_commands->argc > 6)
{
@@ -2317,7 +2330,7 @@ process_commands(char *buf, const char *source, const int lineno)
my_commands->cols[6]);
}
}
- else /* cannot parse, unexpected arguments */
+ else /* cannot parse, unexpected arguments */
{
syntax_error(source, lineno, my_commands->line, my_commands->argv[0],
"unexpected argument", my_commands->argv[4],
@@ -2486,7 +2499,8 @@ process_file(char *filename)
Command **my_commands;
FILE *fd;
- int lineno, index;
+ int lineno,
+ index;
char *buf;
int alloc_num;
@@ -2514,6 +2528,7 @@ process_file(char *filename)
while ((buf = read_line_from_file(fd)) != NULL)
{
Command *command;
+
lineno += 1;
command = process_commands(buf, filename, lineno);
@@ -2547,7 +2562,8 @@ process_builtin(char *tb, const char *source)
#define COMMANDS_ALLOC_NUM 128
Command **my_commands;
- int lineno, index;
+ int lineno,
+ index;
char buf[BUFSIZ];
int alloc_num;
@@ -2653,7 +2669,7 @@ printResults(int ttype, int64 normal_xacts, int nclients,
if (latency_limit)
printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT " (%.3f %%)\n",
latency_limit / 1000.0, latency_late,
- 100.0 * latency_late / (throttle_latency_skipped + normal_xacts));
+ 100.0 * latency_late / (throttle_latency_skipped + normal_xacts));
if (throttle_delay || progress || latency_limit)
{
@@ -3045,7 +3061,8 @@ main(int argc, char **argv)
break;
case 'L':
{
- double limit_ms = atof(optarg);
+ double limit_ms = atof(optarg);
+
if (limit_ms <= 0.0)
{
fprintf(stderr, "invalid latency limit: %s\n", optarg);
diff --git a/src/bin/pgbench/pgbench.h b/src/bin/pgbench/pgbench.h
index a3db6b97cc..42e2aae294 100644
--- a/src/bin/pgbench/pgbench.h
+++ b/src/bin/pgbench/pgbench.h
@@ -22,39 +22,39 @@ typedef struct PgBenchExpr PgBenchExpr;
struct PgBenchExpr
{
- PgBenchExprType etype;
+ PgBenchExprType etype;
union
{
struct
{
- int64 ival;
- } integer_constant;
+ int64 ival;
+ } integer_constant;
struct
{
- char *varname;
- } variable;
+ char *varname;
+ } variable;
struct
{
- char operator;
- PgBenchExpr *lexpr;
+ char operator;
+ PgBenchExpr *lexpr;
PgBenchExpr *rexpr;
- } operator;
- } u;
+ } operator;
+ } u;
};
extern PgBenchExpr *expr_parse_result;
-extern int expr_yyparse(void);
-extern int expr_yylex(void);
+extern int expr_yyparse(void);
+extern int expr_yylex(void);
extern void expr_yyerror(const char *str);
extern void expr_scanner_init(const char *str, const char *source,
- const int lineno, const char *line,
- const char *cmd, const int ecol);
-extern void syntax_error(const char* source, const int lineno, const char* line,
- const char* cmd, const char* msg, const char* more,
- const int col);
+ const int lineno, const char *line,
+ const char *cmd, const int ecol);
+extern void syntax_error(const char *source, const int lineno, const char *line,
+ const char *cmd, const char *msg, const char *more,
+ const int col);
extern void expr_scanner_finish(void);
extern int64 strtoint64(const char *str);
-#endif /* PGBENCH_H */
+#endif /* PGBENCH_H */
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 70b7d3be15..38253fa098 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -1082,7 +1082,8 @@ exec_command(const char *cmd,
for (i = 0; my_list[i] != NULL; i++)
{
- char *val = pset_value_string(my_list[i], &pset.popt);
+ char *val = pset_value_string(my_list[i], &pset.popt);
+
printf("%-24s %s\n", my_list[i], val);
free(val);
}
@@ -1515,7 +1516,7 @@ exec_command(const char *cmd,
else if (strcmp(cmd, "?") == 0)
{
char *opt0 = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
if (!opt0 || strcmp(opt0, "commands") == 0)
slashUsage(pset.popt.topt.pager);
@@ -1636,8 +1637,8 @@ do_connect(char *dbname, char *user, char *host, char *port)
/*
* Any change in the parameters read above makes us discard the password.
- * We also discard it if we're to use a conninfo rather than the positional
- * syntax.
+ * We also discard it if we're to use a conninfo rather than the
+ * positional syntax.
*/
keep_password =
((strcmp(user, PQuser(o_conn)) == 0) &&
@@ -1863,7 +1864,7 @@ printSSLInfo(void)
protocol ? protocol : _("unknown"),
cipher ? cipher : _("unknown"),
bits ? bits : _("unknown"),
- (compression && strcmp(compression, "off") != 0) ? _("on") : _("off"));
+ (compression && strcmp(compression, "off") != 0) ? _("on") : _("off"));
}
@@ -2402,7 +2403,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
if (!value)
;
else if (!set_unicode_line_style(popt, value, vallen,
- &popt->topt.unicode_border_linestyle))
+ &popt->topt.unicode_border_linestyle))
{
psql_error("\\pset: allowed unicode border linestyle are single, double\n");
return false;
@@ -2415,7 +2416,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
if (!value)
;
else if (!set_unicode_line_style(popt, value, vallen,
- &popt->topt.unicode_column_linestyle))
+ &popt->topt.unicode_column_linestyle))
{
psql_error("\\pset: allowed unicode column linestyle are single, double\n");
return false;
@@ -2428,7 +2429,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
if (!value)
;
else if (!set_unicode_line_style(popt, value, vallen,
- &popt->topt.unicode_header_linestyle))
+ &popt->topt.unicode_header_linestyle))
{
psql_error("\\pset: allowed unicode header linestyle are single, double\n");
return false;
@@ -2742,19 +2743,19 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
else if (strcmp(param, "unicode_border_linestyle") == 0)
{
printf(_("Unicode border linestyle is \"%s\".\n"),
- _unicode_linestyle2string(popt->topt.unicode_border_linestyle));
+ _unicode_linestyle2string(popt->topt.unicode_border_linestyle));
}
else if (strcmp(param, "unicode_column_linestyle") == 0)
{
printf(_("Unicode column linestyle is \"%s\".\n"),
- _unicode_linestyle2string(popt->topt.unicode_column_linestyle));
+ _unicode_linestyle2string(popt->topt.unicode_column_linestyle));
}
else if (strcmp(param, "unicode_header_linestyle") == 0)
{
printf(_("Unicode border linestyle is \"%s\".\n"),
- _unicode_linestyle2string(popt->topt.unicode_header_linestyle));
+ _unicode_linestyle2string(popt->topt.unicode_header_linestyle));
}
else
@@ -2945,7 +2946,7 @@ do_watch(PQExpBuffer query_buf, long sleep)
for (;;)
{
- int res;
+ int res;
time_t timer;
long i;
@@ -2962,8 +2963,8 @@ do_watch(PQExpBuffer query_buf, long sleep)
res = PSQLexecWatch(query_buf->data, &myopt);
/*
- * PSQLexecWatch handles the case where we can no longer
- * repeat the query, and returns 0 or -1.
+ * PSQLexecWatch handles the case where we can no longer repeat the
+ * query, and returns 0 or -1.
*/
if (res == 0)
break;
@@ -3001,7 +3002,7 @@ do_watch(PQExpBuffer query_buf, long sleep)
* returns true unless we have ECHO_HIDDEN_NOEXEC.
*/
static bool
-lookup_function_echo_hidden(char * query)
+lookup_function_echo_hidden(char *query)
{
if (pset.echo_hidden != PSQL_ECHO_HIDDEN_OFF)
{
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index ff01368531..0e266a3e18 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -491,7 +491,7 @@ int
PSQLexecWatch(const char *query, const printQueryOpt *opt)
{
PGresult *res;
- double elapsed_msec = 0;
+ double elapsed_msec = 0;
instr_time before;
instr_time after;
@@ -524,10 +524,9 @@ PSQLexecWatch(const char *query, const printQueryOpt *opt)
}
/*
- * If SIGINT is sent while the query is processing, the interrupt
- * will be consumed. The user's intention, though, is to cancel
- * the entire watch process, so detect a sent cancellation request and
- * exit in this case.
+ * If SIGINT is sent while the query is processing, the interrupt will be
+ * consumed. The user's intention, though, is to cancel the entire watch
+ * process, so detect a sent cancellation request and exit in this case.
*/
if (cancel_pressed)
{
diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h
index 3c3ffa3f14..caf31d19b8 100644
--- a/src/bin/psql/common.h
+++ b/src/bin/psql/common.h
@@ -36,7 +36,7 @@ extern void SetCancelConn(void);
extern void ResetCancelConn(void);
extern PGresult *PSQLexec(const char *query);
-extern int PSQLexecWatch(const char *query, const printQueryOpt *opt);
+extern int PSQLexecWatch(const char *query, const printQueryOpt *opt);
extern bool SendQuery(const char *query);
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index 965a1dcb26..f1eb518de7 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -556,6 +556,7 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
if (showprompt)
{
const char *prompt = get_prompt(PROMPT_COPY);
+
fputs(prompt, stdout);
fflush(stdout);
}
@@ -593,6 +594,7 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
if (showprompt)
{
const char *prompt = get_prompt(PROMPT_COPY);
+
fputs(prompt, stdout);
fflush(stdout);
}
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 04d769e3d6..db568096dc 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -531,7 +531,7 @@ describeTypes(const char *pattern, bool verbose, bool showSystem)
if (verbose)
{
appendPQExpBuffer(&buf,
- " pg_catalog.pg_get_userbyid(t.typowner) AS \"%s\",\n",
+ " pg_catalog.pg_get_userbyid(t.typowner) AS \"%s\",\n",
gettext_noop("Owner"));
}
if (verbose && pset.sversion >= 90200)
@@ -803,7 +803,7 @@ permissionsList(const char *pattern)
" ELSE E''\n"
" END"
" || CASE WHEN polroles <> '{0}' THEN\n"
- " E'\\n to: ' || pg_catalog.array_to_string(\n"
+ " E'\\n to: ' || pg_catalog.array_to_string(\n"
" ARRAY(\n"
" SELECT rolname\n"
" FROM pg_catalog.pg_roles\n"
@@ -2031,19 +2031,19 @@ describeOneTableDetails(const char *schemaname,
if (pset.sversion >= 90500)
{
printfPQExpBuffer(&buf,
- "SELECT pol.polname,\n"
- "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
- "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
- "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
- "CASE pol.polcmd \n"
- "WHEN 'r' THEN 'SELECT'\n"
- "WHEN 'a' THEN 'INSERT'\n"
- "WHEN 'w' THEN 'UPDATE'\n"
- "WHEN 'd' THEN 'DELETE'\n"
- "WHEN '*' THEN 'ALL'\n"
- "END AS cmd\n"
+ "SELECT pol.polname,\n"
+ "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
+ "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
+ "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
+ "CASE pol.polcmd \n"
+ "WHEN 'r' THEN 'SELECT'\n"
+ "WHEN 'a' THEN 'INSERT'\n"
+ "WHEN 'w' THEN 'UPDATE'\n"
+ "WHEN 'd' THEN 'DELETE'\n"
+ "WHEN '*' THEN 'ALL'\n"
+ "END AS cmd\n"
"FROM pg_catalog.pg_policy pol\n"
- "WHERE pol.polrelid = '%s' ORDER BY 1;",
+ "WHERE pol.polrelid = '%s' ORDER BY 1;",
oid);
result = PSQLexec(buf.data);
@@ -2053,9 +2053,9 @@ describeOneTableDetails(const char *schemaname,
tuples = PQntuples(result);
/*
- * Handle cases where RLS is enabled and there are policies,
- * or there aren't policies, or RLS isn't enabled but there
- * are policies
+ * Handle cases where RLS is enabled and there are policies, or
+ * there aren't policies, or RLS isn't enabled but there are
+ * policies
*/
if (tableinfo.rowsecurity && tuples > 0)
printTableAddFooter(&cont, _("Policies:"));
@@ -2070,7 +2070,7 @@ describeOneTableDetails(const char *schemaname,
for (i = 0; i < tuples; i++)
{
printfPQExpBuffer(&buf, " POLICY \"%s\"",
- PQgetvalue(result, i, 0));
+ PQgetvalue(result, i, 0));
if (!PQgetisnull(result, i, 4))
appendPQExpBuffer(&buf, " FOR %s",
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index ea05c3e37b..b523054825 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -81,11 +81,11 @@ usage(unsigned short int pager)
fprintf(output, _(" -f, --file=FILENAME execute commands from file, then exit\n"));
fprintf(output, _(" -l, --list list available databases, then exit\n"));
fprintf(output, _(" -v, --set=, --variable=NAME=VALUE\n"
- " set psql variable NAME to VALUE e.g.: -v ON_ERROR_STOP=1\n"));
+ " set psql variable NAME to VALUE e.g.: -v ON_ERROR_STOP=1\n"));
fprintf(output, _(" -V, --version output version information, then exit\n"));
fprintf(output, _(" -X, --no-psqlrc do not read startup file (~/.psqlrc)\n"));
fprintf(output, _(" -1 (\"one\"), --single-transaction\n"
- " execute as a single transaction (if non-interactive)\n"));
+ " execute as a single transaction (if non-interactive)\n"));
fprintf(output, _(" -?, --help[=options] show this help, then exit\n"));
fprintf(output, _(" --help=variables show a list of all specially treated variables, then exit\n"));
fprintf(output, _(" --help=commands show a list of backslash commands, then exit\n"));
@@ -105,29 +105,29 @@ usage(unsigned short int pager)
fprintf(output, _("\nOutput format options:\n"));
fprintf(output, _(" -A, --no-align unaligned table output mode\n"));
fprintf(output, _(" -F, --field-separator=STRING\n"
- " field separator for unaligned output (default: \"%s\")\n"),
- DEFAULT_FIELD_SEP);
+ " field separator for unaligned output (default: \"%s\")\n"),
+ DEFAULT_FIELD_SEP);
fprintf(output, _(" -H, --html HTML table output mode\n"));
fprintf(output, _(" -P, --pset=VAR[=ARG] set printing option VAR to ARG (see \\pset command)\n"));
fprintf(output, _(" -R, --record-separator=STRING\n"
- " record separator for unaligned output (default: newline)\n"));
+ " record separator for unaligned output (default: newline)\n"));
fprintf(output, _(" -t, --tuples-only print rows only\n"));
fprintf(output, _(" -T, --table-attr=TEXT set HTML table tag attributes (e.g., width, border)\n"));
fprintf(output, _(" -x, --expanded turn on expanded table output\n"));
fprintf(output, _(" -z, --field-separator-zero\n"
- " set field separator for unaligned output to zero byte\n"));
+ " set field separator for unaligned output to zero byte\n"));
fprintf(output, _(" -0, --record-separator-zero\n"
- " set record separator for unaligned output to zero byte\n"));
+ " set record separator for unaligned output to zero byte\n"));
fprintf(output, _("\nConnection options:\n"));
/* Display default host */
env = getenv("PGHOST");
fprintf(output, _(" -h, --host=HOSTNAME database server host or socket directory (default: \"%s\")\n"),
- env ? env : _("local socket"));
+ env ? env : _("local socket"));
/* Display default port */
env = getenv("PGPORT");
fprintf(output, _(" -p, --port=PORT database server port (default: \"%s\")\n"),
- env ? env : DEF_PGPORT_STR);
+ env ? env : DEF_PGPORT_STR);
/* Display default user */
env = getenv("PGUSER");
if (!env)
@@ -137,8 +137,8 @@ usage(unsigned short int pager)
fprintf(output, _(" -W, --password force password prompt (should happen automatically)\n"));
fprintf(output, _("\nFor more information, type \"\\?\" (for internal commands) or \"\\help\" (for SQL\n"
- "commands) from within psql, or consult the psql section in the PostgreSQL\n"
- "documentation.\n\n"));
+ "commands) from within psql, or consult the psql section in the PostgreSQL\n"
+ "documentation.\n\n"));
fprintf(output, _("Report bugs to .\n"));
ClosePager(output);
@@ -315,15 +315,15 @@ helpVariables(unsigned short int pager)
fprintf(output, _(" AUTOCOMMIT if set, successful SQL commands are automatically committed\n"));
fprintf(output, _(" COMP_KEYWORD_CASE determine the case used to complete SQL keywords\n"
- " [lower, upper, preserve-lower, preserve-upper]\n"));
+ " [lower, upper, preserve-lower, preserve-upper]\n"));
fprintf(output, _(" DBNAME the currently connected database name\n"));
fprintf(output, _(" ECHO control what input is written to standard output\n"
- " [all, errors, none, queries]\n"));
+ " [all, errors, none, queries]\n"));
fprintf(output, _(" ECHO_HIDDEN display internal queries executed by backslash commands when it is set\n"
- " or with [noexec] just show without execution\n"));
+ " or with [noexec] just show without execution\n"));
fprintf(output, _(" ENCODING current client character set encoding\n"));
fprintf(output, _(" FETCH_COUNT the number of result rows to fetch and display at a time\n"
- " (default: 0=unlimited)\n"));
+ " (default: 0=unlimited)\n"));
fprintf(output, _(" HISTCONTROL control history list [ignorespace, ignoredups, ignoreboth]\n"));
fprintf(output, _(" HISTFILE file name used to store the history list\n"));
fprintf(output, _(" HISTSIZE the number of commands to store in the command history\n"));
@@ -356,18 +356,18 @@ helpVariables(unsigned short int pager)
fprintf(output, _(" linestyle set the border line drawing style [ascii, old-ascii, unicode]\n"));
fprintf(output, _(" null set the string to be printed in place of a null value\n"));
fprintf(output, _(" numericlocale enable or disable display of a locale-specific character to separate\n"
- " groups of digits [on, off]\n"));
+ " groups of digits [on, off]\n"));
fprintf(output, _(" pager control when an external pager is used [yes, no, always]\n"));
fprintf(output, _(" recordsep specify the record (line) separator to use in unaligned output format\n"));
fprintf(output, _(" recordsep_zero set the record separator to use in unaligned output format to a zero byte.\n"));
fprintf(output, _(" tableattr (or T) specify attributes for table tag in html format or proportional\n"
- " column width of left aligned data type in latex format\n"));
+ " column width of left aligned data type in latex format\n"));
fprintf(output, _(" title set the table title for any subsequently printed tables\n"));
fprintf(output, _(" tuples_only if set, only actual table data is shown\n"));
fprintf(output, _(" unicode_border_linestyle\n"));
fprintf(output, _(" unicode_column_linestyle\n"));
fprintf(output, _(" unicode_header_linestyle\n"
- " set the style of unicode line drawing [single, double]\n"));
+ " set the style of unicode line drawing [single, double]\n"));
fprintf(output, _("\nEnvironment variables:\n"));
fprintf(output, _("Usage:\n"));
@@ -388,9 +388,9 @@ helpVariables(unsigned short int pager)
fprintf(output, _(" PGPASSWORD connection password (not recommended)\n"));
fprintf(output, _(" PGPASSFILE password file name\n"));
fprintf(output, _(" PSQL_EDITOR, EDITOR, VISUAL\n"
- " editor used by the \\e and \\ef commands\n"));
+ " editor used by the \\e and \\ef commands\n"));
fprintf(output, _(" PSQL_EDITOR_LINENUMBER_ARG\n"
- " how to specify a line number when invoking the editor\n"));
+ " how to specify a line number when invoking the editor\n"));
fprintf(output, _(" PSQL_HISTORY alternative location for the command history file\n"));
fprintf(output, _(" PSQLRC alternative location for the user's .psqlrc file\n"));
fprintf(output, _(" SHELL shell used by the \\! command\n"));
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index 94c69845c7..cab9e6eb44 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -92,20 +92,23 @@ const printTextFormat pg_asciiformat_old =
/* Default unicode linestyle format */
const printTextFormat pg_utf8format;
-typedef struct unicodeStyleRowFormat {
+typedef struct unicodeStyleRowFormat
+{
const char *horizontal;
const char *vertical_and_right[2];
const char *vertical_and_left[2];
} unicodeStyleRowFormat;
-typedef struct unicodeStyleColumnFormat {
+typedef struct unicodeStyleColumnFormat
+{
const char *vertical;
const char *vertical_and_horizontal[2];
const char *up_and_horizontal[2];
const char *down_and_horizontal[2];
} unicodeStyleColumnFormat;
-typedef struct unicodeStyleBorderFormat {
+typedef struct unicodeStyleBorderFormat
+{
const char *up_and_right;
const char *vertical;
const char *down_and_right;
@@ -114,7 +117,8 @@ typedef struct unicodeStyleBorderFormat {
const char *left_and_right;
} unicodeStyleBorderFormat;
-typedef struct unicodeStyleFormat {
+typedef struct unicodeStyleFormat
+{
unicodeStyleRowFormat row_style[2];
unicodeStyleColumnFormat column_style[2];
unicodeStyleBorderFormat border_style[2];
@@ -124,7 +128,7 @@ typedef struct unicodeStyleFormat {
const char *nl_right;
const char *wrap_left;
const char *wrap_right;
- bool wrap_right_border;
+ bool wrap_right_border;
} unicodeStyleFormat;
const unicodeStyleFormat unicode_style = {
@@ -175,11 +179,11 @@ const unicodeStyleFormat unicode_style = {
{"\342\225\232", "\342\225\221", "\342\225\224", "\342\225\220", "\342\225\227", "\342\225\235"},
},
" ",
- "\342\206\265", /* ↵ */
+ "\342\206\265", /* ↵ */
" ",
- "\342\206\265", /* ↵ */
- "\342\200\246", /* … */
- "\342\200\246", /* … */
+ "\342\206\265", /* ↵ */
+ "\342\200\246", /* … */
+ "\342\200\246", /* … */
true
};
@@ -984,7 +988,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
int bytes_to_output;
int chars_to_output = width_wrap[j];
bool finalspaces = (opt_border == 2 ||
- (col_count > 0 && j < col_count - 1));
+ (col_count > 0 && j < col_count - 1));
/* Print left-hand wrap or newline mark */
if (opt_border != 0)
@@ -1356,12 +1360,13 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
else if (opt_border == 1)
{
/*
- * For border = 1, one for the pipe (|) in the middle
- * between the two spaces.
+ * For border = 1, one for the pipe (|) in the middle between the
+ * two spaces.
*/
swidth = 3;
}
else
+
/*
* For border = 2, two more for the pipes (|) at the beginning and
* at the end of the lines.
@@ -1370,10 +1375,10 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
if ((opt_border < 2) &&
((hmultiline &&
- (format == &pg_asciiformat_old)) ||
- (dmultiline &&
- (format != &pg_asciiformat_old))))
- iwidth++; /* for newline indicators */
+ (format == &pg_asciiformat_old)) ||
+ (dmultiline &&
+ (format != &pg_asciiformat_old))))
+ iwidth++; /* for newline indicators */
min_width = hwidth + iwidth + swidth + 3;
@@ -1386,6 +1391,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
* Record number
*/
unsigned int rwidth = 1 + log10(cont->nrows);
+
if (opt_border == 0)
rwidth += 9; /* "* RECORD " */
else if (opt_border == 1)
@@ -1402,6 +1408,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
if ((width < min_width) || (output_columns < min_width))
width = min_width - hwidth - iwidth - swidth;
else if (output_columns > 0)
+
/*
* Wrap to maximum width
*/
@@ -1412,7 +1419,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
dmultiline = true;
if ((opt_border == 0) &&
(format != &pg_asciiformat_old))
- width--; /* for wrap indicators */
+ width--; /* for wrap indicators */
}
dwidth = width;
}
@@ -1440,10 +1447,11 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
if (i % cont->ncolumns == 0)
{
unsigned int lhwidth = hwidth;
+
if ((opt_border < 2) &&
(hmultiline) &&
(format == &pg_asciiformat_old))
- lhwidth++; /* for newline indicators */
+ lhwidth++; /* for newline indicators */
if (!opt_tuples_only)
print_aligned_vertical_line(format, opt_border, record++,
@@ -1480,12 +1488,14 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
{
int swidth = hwidth,
target_width = hwidth;
+
/*
* Left spacer or new line indicator
*/
if ((opt_border == 2) ||
(hmultiline && (format == &pg_asciiformat_old)))
fputs(hline ? format->header_nl_left : " ", fout);
+
/*
* Header text
*/
@@ -1523,6 +1533,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
else
{
unsigned int swidth = hwidth + opt_border;
+
if ((opt_border < 2) &&
(hmultiline) &&
(format == &pg_asciiformat_old))
@@ -1886,9 +1897,10 @@ static void
asciidoc_escaped_print(const char *in, FILE *fout)
{
const char *p;
+
for (p = in; *p; p++)
{
- switch(*p)
+ switch (*p)
{
case '|':
fputs("\\|", fout);
@@ -1925,7 +1937,7 @@ print_asciidoc_text(const printTableContent *cont, FILE *fout)
/* print table [] header definition */
fprintf(fout, "[%scols=\"", !opt_tuples_only ? "options=\"header\"," : "");
- for(i = 0; i < cont->ncolumns; i++)
+ for (i = 0; i < cont->ncolumns; i++)
{
if (i != 0)
fputs(",", fout);
@@ -2046,7 +2058,7 @@ print_asciidoc_vertical(const printTableContent *cont, FILE *fout)
break;
case 2:
fputs(",frame=\"all\",grid=\"all\"", fout);
- break;
+ break;
}
fputs("]\n", fout);
fputs("|====\n", fout);
@@ -2729,8 +2741,8 @@ PageOutput(int lines, const printTableOpt *topt)
{
const char *pagerprog;
FILE *pagerpipe;
- unsigned short int pager = topt->pager;
- int min_lines = topt->pager_min_lines;
+ unsigned short int pager = topt->pager;
+ int min_lines = topt->pager_min_lines;
#ifdef TIOCGWINSZ
int result;
@@ -3262,7 +3274,7 @@ get_line_style(const printTableOpt *opt)
void
refresh_utf8format(const printTableOpt *opt)
{
- printTextFormat *popt = (printTextFormat *) &pg_utf8format;
+ printTextFormat *popt = (printTextFormat *) &pg_utf8format;
const unicodeStyleBorderFormat *border;
const unicodeStyleRowFormat *header;
diff --git a/src/bin/psql/print.h b/src/bin/psql/print.h
index 322db4d6ef..b0b6bf5251 100644
--- a/src/bin/psql/print.h
+++ b/src/bin/psql/print.h
@@ -90,7 +90,7 @@ typedef struct printTableOpt
* 1=dividing lines, 2=full */
unsigned short int pager; /* use pager for output (if to stdout and
* stdout is a tty) 0=off 1=on 2=always */
- int pager_min_lines;/* don't use pager unless there are at least
+ int pager_min_lines;/* don't use pager unless there are at least
* this many lines */
bool tuples_only; /* don't output headers, row counts, etc. */
bool start_table; /* print start decoration, eg */
@@ -106,9 +106,9 @@ typedef struct printTableOpt
int encoding; /* character encoding */
int env_columns; /* $COLUMNS on psql start, 0 is unset */
int columns; /* target width for wrapped format */
- unicode_linestyle unicode_border_linestyle;
- unicode_linestyle unicode_column_linestyle;
- unicode_linestyle unicode_header_linestyle;
+ unicode_linestyle unicode_border_linestyle;
+ unicode_linestyle unicode_column_linestyle;
+ unicode_linestyle unicode_header_linestyle;
} printTableOpt;
/*
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index d57901f778..28ba75a72e 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -592,7 +592,7 @@ parse_psql_options(int argc, char *argv[], struct adhoc_opts * options)
}
break;
default:
- unknown_option:
+ unknown_option:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
pset.progname);
exit(EXIT_FAILURE);
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 750e29ddf3..b9f5acc65e 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -816,10 +816,10 @@ static char *_complete_from_query(int is_schema_query,
static char *complete_from_list(const char *text, int state);
static char *complete_from_const(const char *text, int state);
static void append_variable_names(char ***varnames, int *nvars,
- int *maxvars, const char *varname,
- const char *prefix, const char *suffix);
+ int *maxvars, const char *varname,
+ const char *prefix, const char *suffix);
static char **complete_from_variables(const char *text,
- const char *prefix, const char *suffix, bool need_value);
+ const char *prefix, const char *suffix, bool need_value);
static char *complete_from_files(const char *text, int state);
static char *pg_strdup_keyword_case(const char *s, const char *ref);
@@ -961,6 +961,7 @@ psql_completion(const char *text, int start, int end)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables,
"UNION SELECT 'ALL IN TABLESPACE'");
}
+
/*
* complete with what you can alter (TABLE, GROUP, USER, ...) unless we're
* in ALTER TABLE sth ALTER
@@ -984,7 +985,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev2_wd, "TABLESPACE") == 0)
{
static const char *const list_ALTERALLINTSPC[] =
- {"SET TABLESPACE", "OWNED BY", NULL};
+ {"SET TABLESPACE", "OWNED BY", NULL};
COMPLETE_WITH_LIST(list_ALTERALLINTSPC);
}
@@ -1129,7 +1130,7 @@ psql_completion(const char *text, int start, int end)
{
static const char *const list_ALTER_FOREIGN_TABLE[] =
{"ADD", "ALTER", "DISABLE TRIGGER", "DROP", "ENABLE", "INHERIT",
- "NO INHERIT", "OPTIONS", "OWNER TO", "RENAME", "SET",
+ "NO INHERIT", "OPTIONS", "OWNER TO", "RENAME", "SET",
"VALIDATE CONSTRAINT", NULL};
COMPLETE_WITH_LIST(list_ALTER_FOREIGN_TABLE);
@@ -1381,7 +1382,7 @@ psql_completion(const char *text, int start, int end)
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
pg_strcasecmp(prev2_wd, "SYSTEM") == 0 &&
(pg_strcasecmp(prev_wd, "SET") == 0 ||
- pg_strcasecmp(prev_wd, "RESET") == 0))
+ pg_strcasecmp(prev_wd, "RESET") == 0))
COMPLETE_WITH_QUERY(Query_for_list_of_alter_system_set_vars);
/* ALTER VIEW */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
@@ -1572,7 +1573,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev_wd, "DISABLE") == 0)
{
static const char *const list_ALTERDISABLE[] =
- { "ROW LEVEL SECURITY", "RULE", "TRIGGER", NULL};
+ {"ROW LEVEL SECURITY", "RULE", "TRIGGER", NULL};
COMPLETE_WITH_LIST(list_ALTERDISABLE);
}
@@ -1598,7 +1599,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev_wd, "SECURITY") == 0)
{
static const char *const list_DISABLERLS[] =
- { "CASCADE", NULL};
+ {"CASCADE", NULL};
COMPLETE_WITH_LIST(list_DISABLERLS);
}
@@ -2140,7 +2141,7 @@ psql_completion(const char *text, int start, int end)
pg_strcasecmp(prev4_wd, "ON") == 0) ||
(pg_strcasecmp(prev6_wd, "COMMENT") == 0 &&
pg_strcasecmp(prev5_wd, "ON") == 0)) &&
- pg_strcasecmp(prev_wd, "IS") != 0)
+ pg_strcasecmp(prev_wd, "IS") != 0)
COMPLETE_WITH_CONST("IS");
/* COPY */
@@ -2205,7 +2206,7 @@ psql_completion(const char *text, int start, int end)
{
static const char *const list_DATABASE[] =
{"OWNER", "TEMPLATE", "ENCODING", "TABLESPACE", "IS_TEMPLATE",
- "ALLOW_CONNECTIONS", "CONNECTION LIMIT", "LC_COLLATE", "LC_CTYPE",
+ "ALLOW_CONNECTIONS", "CONNECTION LIMIT", "LC_COLLATE", "LC_CTYPE",
NULL};
COMPLETE_WITH_LIST(list_DATABASE);
@@ -2309,8 +2310,8 @@ psql_completion(const char *text, int start, int end)
COMPLETE_WITH_ATTR(prev4_wd, "");
/* Complete USING with an index method */
else if ((pg_strcasecmp(prev6_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
- pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
+ pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
+ pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
pg_strcasecmp(prev3_wd, "ON") == 0 &&
pg_strcasecmp(prev_wd, "USING") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_access_methods);
@@ -2340,7 +2341,11 @@ psql_completion(const char *text, int start, int end)
COMPLETE_WITH_LIST(list_POLICYOPTIONS);
}
- /* Complete "CREATE POLICY ON FOR ALL|SELECT|INSERT|UPDATE|DELETE" */
+
+ /*
+ * Complete "CREATE POLICY ON FOR
+ * ALL|SELECT|INSERT|UPDATE|DELETE"
+ */
else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
pg_strcasecmp(prev3_wd, "ON") == 0 &&
@@ -2362,30 +2367,33 @@ psql_completion(const char *text, int start, int end)
COMPLETE_WITH_LIST(list_POLICYOPTIONS);
}
+
/*
- * Complete "CREATE POLICY ON FOR SELECT TO|USING"
- * Complete "CREATE POLICY ON FOR DELETE TO|USING"
+ * Complete "CREATE POLICY ON FOR SELECT TO|USING" Complete
+ * "CREATE POLICY ON FOR DELETE TO|USING"
*/
else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
pg_strcasecmp(prev4_wd, "ON") == 0 &&
pg_strcasecmp(prev2_wd, "FOR") == 0 &&
(pg_strcasecmp(prev_wd, "SELECT") == 0 ||
- pg_strcasecmp(prev_wd, "DELETE") == 0))
+ pg_strcasecmp(prev_wd, "DELETE") == 0))
{
static const char *const list_POLICYOPTIONS[] =
{"TO", "USING", NULL};
COMPLETE_WITH_LIST(list_POLICYOPTIONS);
}
+
/*
* Complete "CREATE POLICY ON FOR ALL TO|USING|WITH CHECK"
- * Complete "CREATE POLICY ON FOR UPDATE TO|USING|WITH CHECK"
+ * Complete "CREATE POLICY ON FOR UPDATE TO|USING|WITH
+ * CHECK"
*/
else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
pg_strcasecmp(prev4_wd, "ON") == 0 &&
pg_strcasecmp(prev2_wd, "FOR") == 0 &&
(pg_strcasecmp(prev_wd, "ALL") == 0 ||
- pg_strcasecmp(prev_wd, "UPDATE") == 0))
+ pg_strcasecmp(prev_wd, "UPDATE") == 0))
{
static const char *const list_POLICYOPTIONS[] =
{"TO", "USING", "WITH CHECK", NULL};
@@ -3336,7 +3344,7 @@ psql_completion(const char *text, int start, int end)
else if (pg_strcasecmp(prev_wd, "REINDEX") == 0)
{
static const char *const list_REINDEX[] =
- {"TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE", NULL};
+ {"TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE", NULL};
COMPLETE_WITH_LIST(list_REINDEX);
}
@@ -3346,7 +3354,7 @@ psql_completion(const char *text, int start, int end)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
else if (pg_strcasecmp(prev_wd, "INDEX") == 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
- else if (pg_strcasecmp(prev_wd, "SCHEMA") == 0 )
+ else if (pg_strcasecmp(prev_wd, "SCHEMA") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_schemas);
else if (pg_strcasecmp(prev_wd, "SYSTEM") == 0 ||
pg_strcasecmp(prev_wd, "DATABASE") == 0)
@@ -4374,7 +4382,7 @@ complete_from_variables(const char *text, const char *prefix, const char *suffix
"ENCODING", "FETCH_COUNT", "HISTCONTROL", "HISTFILE", "HISTSIZE",
"HOST", "IGNOREEOF", "LASTOID", "ON_ERROR_ROLLBACK", "ON_ERROR_STOP",
"PORT", "PROMPT1", "PROMPT2", "PROMPT3", "QUIET", "SINGLELINE",
- "SINGLESTEP", "USER", "VERBOSITY", NULL
+ "SINGLESTEP", "USER", "VERBOSITY", NULL
};
varnames = (char **) pg_malloc((maxvars + 1) * sizeof(char *));
@@ -4390,7 +4398,7 @@ complete_from_variables(const char *text, const char *prefix, const char *suffix
{
if (need_value && !(ptr->value))
continue;
- for (i = 0; known_varnames[i]; i++) /* remove duplicate entry */
+ for (i = 0; known_varnames[i]; i++) /* remove duplicate entry */
{
if (strcmp(ptr->name, known_varnames[i]) == 0)
continue;
@@ -4475,7 +4483,7 @@ pg_strdup_keyword_case(const char *s, const char *ref)
if (pset.comp_case == PSQL_COMP_CASE_LOWER ||
((pset.comp_case == PSQL_COMP_CASE_PRESERVE_LOWER ||
- pset.comp_case == PSQL_COMP_CASE_PRESERVE_UPPER) && islower(first)) ||
+ pset.comp_case == PSQL_COMP_CASE_PRESERVE_UPPER) && islower(first)) ||
(pset.comp_case == PSQL_COMP_CASE_PRESERVE_LOWER && !isalpha(first)))
{
for (p = ret; *p; p++)
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index da142aaa64..0deadec097 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -21,7 +21,7 @@
static PGcancel *volatile cancelConn = NULL;
-bool CancelRequested = false;
+bool CancelRequested = false;
#ifdef WIN32
static CRITICAL_SECTION cancelConnLock;
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 32d3409e05..941729da2e 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -181,7 +181,7 @@ main(int argc, char *argv[])
}
reindex_all_databases(maintenance_db, host, port, username,
- prompt_password, progname, echo, quiet, verbose);
+ prompt_password, progname, echo, quiet, verbose);
}
else if (syscatalog)
{
@@ -233,7 +233,7 @@ main(int argc, char *argv[])
for (cell = schemas.head; cell; cell = cell->next)
{
reindex_one_database(cell->val, dbname, "SCHEMA", host, port,
- username, prompt_password, progname, echo, verbose);
+ username, prompt_password, progname, echo, verbose);
}
}
@@ -244,7 +244,7 @@ main(int argc, char *argv[])
for (cell = indexes.head; cell; cell = cell->next)
{
reindex_one_database(cell->val, dbname, "INDEX", host, port,
- username, prompt_password, progname, echo, verbose);
+ username, prompt_password, progname, echo, verbose);
}
}
if (tables.head != NULL)
@@ -254,13 +254,17 @@ main(int argc, char *argv[])
for (cell = tables.head; cell; cell = cell->next)
{
reindex_one_database(cell->val, dbname, "TABLE", host, port,
- username, prompt_password, progname, echo, verbose);
+ username, prompt_password, progname, echo, verbose);
}
}
- /* reindex database only if neither index nor table nor schema is specified */
+
+ /*
+ * reindex database only if neither index nor table nor schema is
+ * specified
+ */
if (indexes.head == NULL && tables.head == NULL && schemas.head == NULL)
reindex_one_database(dbname, dbname, "DATABASE", host, port,
- username, prompt_password, progname, echo, verbose);
+ username, prompt_password, progname, echo, verbose);
}
exit(0);
@@ -269,7 +273,7 @@ main(int argc, char *argv[])
static void
reindex_one_database(const char *name, const char *dbname, const char *type,
const char *host, const char *port, const char *username,
- enum trivalue prompt_password, const char *progname, bool echo,
+ enum trivalue prompt_password, const char *progname, bool echo,
bool verbose)
{
PQExpBufferData sql;
@@ -322,7 +326,7 @@ static void
reindex_all_databases(const char *maintenance_db,
const char *host, const char *port,
const char *username, enum trivalue prompt_password,
- const char *progname, bool echo, bool quiet, bool verbose)
+ const char *progname, bool echo, bool quiet, bool verbose)
{
PGconn *conn;
PGresult *result;
diff --git a/src/bin/scripts/t/102_vacuumdb_stages.pl b/src/bin/scripts/t/102_vacuumdb_stages.pl
index 1ff05e3c27..57b980ec6a 100644
--- a/src/bin/scripts/t/102_vacuumdb_stages.pl
+++ b/src/bin/scripts/t/102_vacuumdb_stages.pl
@@ -19,7 +19,7 @@ qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
issues_sql_like(
[ 'vacuumdb', '--analyze-in-stages', '--all' ],
- qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
+qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
.*statement:\ ANALYZE.*
.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
.*statement:\ ANALYZE.*
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index 2cd4aa6544..f600b0514a 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -674,7 +674,7 @@ run_vacuum_command(PGconn *conn, const char *sql, bool echo,
const char *dbname, const char *table,
const char *progname, bool async)
{
- bool status;
+ bool status;
if (async)
{
@@ -943,7 +943,7 @@ help(const char *progname)
printf(_(" -Z, --analyze-only only update optimizer statistics; no vacuum\n"));
printf(_(" -j, --jobs=NUM use this many concurrent connections to vacuum\n"));
printf(_(" --analyze-in-stages only update optimizer statistics, in multiple\n"
- " stages for faster results; no vacuum\n"));
+ " stages for faster results; no vacuum\n"));
printf(_(" -?, --help show this help, then exit\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
diff --git a/src/common/restricted_token.c b/src/common/restricted_token.c
index a8213c0baf..93da03570d 100644
--- a/src/common/restricted_token.c
+++ b/src/common/restricted_token.c
@@ -25,7 +25,7 @@
#ifdef WIN32
/* internal vars */
-char *restrict_env;
+char *restrict_env;
typedef BOOL (WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
diff --git a/src/include/access/brin_page.h b/src/include/access/brin_page.h
index 6c645b34b2..ecbd13a9a3 100644
--- a/src/include/access/brin_page.h
+++ b/src/include/access/brin_page.h
@@ -40,11 +40,11 @@ typedef struct BrinSpecialSpace
* See comments above GinPageOpaqueData.
*/
#define BrinPageType(page) \
- (((BrinSpecialSpace *) \
+ (((BrinSpecialSpace *) \
PageGetSpecialPointer(page))->vector[MAXALIGN(1) / sizeof(uint16) - 1])
#define BrinPageFlags(page) \
- (((BrinSpecialSpace *) \
+ (((BrinSpecialSpace *) \
PageGetSpecialPointer(page))->vector[MAXALIGN(1) / sizeof(uint16) - 2])
/* special space on all BRIN pages stores a "type" identifier */
diff --git a/src/include/access/commit_ts.h b/src/include/access/commit_ts.h
index ad44db357a..bd05ab4d5c 100644
--- a/src/include/access/commit_ts.h
+++ b/src/include/access/commit_ts.h
@@ -17,7 +17,7 @@
#include "utils/guc.h"
-extern PGDLLIMPORT bool track_commit_timestamp;
+extern PGDLLIMPORT bool track_commit_timestamp;
extern bool check_track_commit_timestamp(bool *newval, void **extra,
GucSource source);
@@ -53,9 +53,9 @@ extern void AdvanceOldestCommitTs(TransactionId oldestXact);
typedef struct xl_commit_ts_set
{
- TimestampTz timestamp;
- RepOriginId nodeid;
- TransactionId mainxid;
+ TimestampTz timestamp;
+ RepOriginId nodeid;
+ TransactionId mainxid;
/* subxact Xids follow */
} xl_commit_ts_set;
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index 27b497133e..8f1abaa2f7 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -67,7 +67,7 @@ typedef char GinTernaryValue;
/* GUC parameters */
extern PGDLLIMPORT int GinFuzzySearchLimit;
-extern int gin_pending_list_limit;
+extern int gin_pending_list_limit;
/* ginutil.c */
extern void ginGetStats(Relation index, GinStatsData *stats);
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h
index 0e819d7b51..4f1a5c33ea 100644
--- a/src/include/access/gist_private.h
+++ b/src/include/access/gist_private.h
@@ -121,7 +121,7 @@ typedef struct GISTSearchHeapItem
{
ItemPointerData heapPtr;
bool recheck; /* T if quals must be rechecked */
- bool recheckDistances; /* T if distances must be rechecked */
+ bool recheckDistances; /* T if distances must be rechecked */
IndexTuple ftup; /* data fetched back from the index, used in
* index-only scans */
} GISTSearchHeapItem;
@@ -166,7 +166,7 @@ typedef struct GISTScanOpaqueData
OffsetNumber nPageData; /* number of valid items in array */
OffsetNumber curPageData; /* next item to return */
MemoryContext pageDataCxt; /* context holding the fetched tuples, for
- index-only scans */
+ * index-only scans */
} GISTScanOpaqueData;
typedef GISTScanOpaqueData *GISTScanOpaque;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index fc3c7f4097..93cc8afceb 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -338,7 +338,7 @@ typedef struct HSpool HSpool; /* opaque struct in hashsort.c */
extern HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets);
extern void _h_spooldestroy(HSpool *hspool);
extern void _h_spool(HSpool *hspool, ItemPointer self,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern void _h_indexbuild(HSpool *hspool);
/* hashutil.c */
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index eec7c95b21..31139cbd0c 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -118,7 +118,7 @@ extern HeapScanDesc heap_beginscan_sampling(Relation relation,
Snapshot snapshot, int nkeys, ScanKey key,
bool allow_strat, bool allow_pagemode);
extern void heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk,
- BlockNumber endBlk);
+ BlockNumber endBlk);
extern void heapgetpage(HeapScanDesc scan, BlockNumber page);
extern void heap_rescan(HeapScanDesc scan, ScanKey key);
extern void heap_endscan(HeapScanDesc scan);
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 80285acc3b..55d483dfaf 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -402,7 +402,7 @@ do { \
#define HeapTupleHeaderGetSpeculativeToken(tup) \
( \
- AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
+ AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
ItemPointerGetBlockNumber(&(tup)->t_ctid) \
)
diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h
index 935328983e..f1448fe063 100644
--- a/src/include/access/multixact.h
+++ b/src/include/access/multixact.h
@@ -126,7 +126,7 @@ extern void MultiXactAdvanceNextMXact(MultiXactId minMulti,
MultiXactOffset minMultiOffset);
extern void MultiXactAdvanceOldest(MultiXactId oldestMulti, Oid oldestMultiDB);
extern void MultiXactSetSafeTruncate(MultiXactId safeTruncateMulti);
-extern int MultiXactMemberFreezeThreshold(void);
+extern int MultiXactMemberFreezeThreshold(void);
extern void multixact_twophase_recover(TransactionId xid, uint16 info,
void *recdata, uint32 len);
diff --git a/src/include/access/parallel.h b/src/include/access/parallel.h
index 5f23f18f43..b029c1e883 100644
--- a/src/include/access/parallel.h
+++ b/src/include/access/parallel.h
@@ -21,33 +21,33 @@
#include "storage/shm_toc.h"
#include "utils/elog.h"
-typedef void (*parallel_worker_main_type)(dsm_segment *seg, shm_toc *toc);
+typedef void (*parallel_worker_main_type) (dsm_segment *seg, shm_toc *toc);
typedef struct ParallelWorkerInfo
{
BackgroundWorkerHandle *bgwhandle;
shm_mq_handle *error_mqh;
- int32 pid;
+ int32 pid;
} ParallelWorkerInfo;
typedef struct ParallelContext
{
- dlist_node node;
+ dlist_node node;
SubTransactionId subid;
- int nworkers;
+ int nworkers;
parallel_worker_main_type entrypoint;
- char *library_name;
- char *function_name;
+ char *library_name;
+ char *function_name;
ErrorContextCallback *error_context_stack;
shm_toc_estimator estimator;
dsm_segment *seg;
- void *private_memory;
- shm_toc *toc;
+ void *private_memory;
+ shm_toc *toc;
ParallelWorkerInfo *worker;
} ParallelContext;
extern bool ParallelMessagePending;
-extern int ParallelWorkerNumber;
+extern int ParallelWorkerNumber;
#define IsParallelWorker() (ParallelWorkerNumber >= 0)
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 29f5b35b32..f2482e99d6 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -38,8 +38,8 @@ typedef struct HeapScanDescData
/* state set up at initscan time */
BlockNumber rs_nblocks; /* total number of blocks in rel */
BlockNumber rs_startblock; /* block # to start at */
- BlockNumber rs_initblock; /* block # to consider initial of rel */
- BlockNumber rs_numblocks; /* number of blocks to scan */
+ BlockNumber rs_initblock; /* block # to consider initial of rel */
+ BlockNumber rs_numblocks; /* number of blocks to scan */
BufferAccessStrategy rs_strategy; /* access strategy for reads */
bool rs_syncscan; /* report location to syncscan logic? */
diff --git a/src/include/access/stratnum.h b/src/include/access/stratnum.h
index a372be81e2..102f1fb94f 100644
--- a/src/include/access/stratnum.h
+++ b/src/include/access/stratnum.h
@@ -72,4 +72,4 @@ typedef uint16 StrategyNumber;
#define RTMaxStrategyNumber 27
-#endif /* STRATNUM_H */
+#endif /* STRATNUM_H */
diff --git a/src/include/access/tablesample.h b/src/include/access/tablesample.h
index 222fa8d556..a02e93d322 100644
--- a/src/include/access/tablesample.h
+++ b/src/include/access/tablesample.h
@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* tablesample.h
- * Public header file for TABLESAMPLE clause interface
+ * Public header file for TABLESAMPLE clause interface
*
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
@@ -17,30 +17,31 @@
#include "access/relscan.h"
#include "executor/executor.h"
-typedef struct TableSampleDesc {
- HeapScanDesc heapScan;
- TupleDesc tupDesc; /* Mostly useful for tsmexaminetuple */
+typedef struct TableSampleDesc
+{
+ HeapScanDesc heapScan;
+ TupleDesc tupDesc; /* Mostly useful for tsmexaminetuple */
- void *tsmdata; /* private method data */
+ void *tsmdata; /* private method data */
/* These point to he function of the TABLESAMPLE Method. */
- FmgrInfo tsminit;
- FmgrInfo tsmnextblock;
- FmgrInfo tsmnexttuple;
- FmgrInfo tsmexaminetuple;
- FmgrInfo tsmreset;
- FmgrInfo tsmend;
+ FmgrInfo tsminit;
+ FmgrInfo tsmnextblock;
+ FmgrInfo tsmnexttuple;
+ FmgrInfo tsmexaminetuple;
+ FmgrInfo tsmreset;
+ FmgrInfo tsmend;
} TableSampleDesc;
extern TableSampleDesc *tablesample_init(SampleScanState *scanstate,
- TableSampleClause *tablesample);
+ TableSampleClause *tablesample);
extern HeapTuple tablesample_getnext(TableSampleDesc *desc);
extern void tablesample_reset(TableSampleDesc *desc);
extern void tablesample_end(TableSampleDesc *desc);
extern HeapTuple tablesample_source_getnext(TableSampleDesc *desc);
extern HeapTuple tablesample_source_gettup(TableSampleDesc *desc, ItemPointer tid,
- bool *visible);
+ bool *visible);
extern Datum tsm_system_init(PG_FUNCTION_ARGS);
extern Datum tsm_system_nextblock(PG_FUNCTION_ARGS);
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index a518a8613b..cb1c2db4cf 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -204,7 +204,7 @@ typedef struct xl_xact_subxacts
typedef struct xl_xact_relfilenodes
{
- int nrels; /* number of subtransaction XIDs */
+ int nrels; /* number of subtransaction XIDs */
RelFileNode xnodes[FLEXIBLE_ARRAY_MEMBER];
} xl_xact_relfilenodes;
#define MinSizeOfXactRelfilenodes offsetof(xl_xact_relfilenodes, xnodes)
@@ -262,23 +262,23 @@ typedef struct xl_xact_abort
*/
typedef struct xl_xact_parsed_commit
{
- TimestampTz xact_time;
+ TimestampTz xact_time;
- uint32 xinfo;
+ uint32 xinfo;
- Oid dbId; /* MyDatabaseId */
- Oid tsId; /* MyDatabaseTableSpace */
+ Oid dbId; /* MyDatabaseId */
+ Oid tsId; /* MyDatabaseTableSpace */
- int nsubxacts;
- TransactionId *subxacts;
+ int nsubxacts;
+ TransactionId *subxacts;
- int nrels;
- RelFileNode *xnodes;
+ int nrels;
+ RelFileNode *xnodes;
- int nmsgs;
+ int nmsgs;
SharedInvalidationMessage *msgs;
- TransactionId twophase_xid; /* only for 2PC */
+ TransactionId twophase_xid; /* only for 2PC */
XLogRecPtr origin_lsn;
TimestampTz origin_timestamp;
@@ -286,16 +286,16 @@ typedef struct xl_xact_parsed_commit
typedef struct xl_xact_parsed_abort
{
- TimestampTz xact_time;
- uint32 xinfo;
+ TimestampTz xact_time;
+ uint32 xinfo;
- int nsubxacts;
- TransactionId *subxacts;
+ int nsubxacts;
+ TransactionId *subxacts;
- int nrels;
- RelFileNode *xnodes;
+ int nrels;
+ RelFileNode *xnodes;
- TransactionId twophase_xid; /* only for 2PC */
+ TransactionId twophase_xid; /* only for 2PC */
} xl_xact_parsed_abort;
@@ -356,16 +356,16 @@ extern void UnregisterSubXactCallback(SubXactCallback callback, void *arg);
extern int xactGetCommittedChildren(TransactionId **ptr);
extern XLogRecPtr XactLogCommitRecord(TimestampTz commit_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- int nmsgs, SharedInvalidationMessage *msgs,
- bool relcacheInval, bool forceSync,
- TransactionId twophase_xid);
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ int nmsgs, SharedInvalidationMessage *msgs,
+ bool relcacheInval, bool forceSync,
+ TransactionId twophase_xid);
extern XLogRecPtr XactLogAbortRecord(TimestampTz abort_time,
- int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
- TransactionId twophase_xid);
+ int nsubxacts, TransactionId *subxacts,
+ int nrels, RelFileNode *rels,
+ TransactionId twophase_xid);
extern void xact_redo(XLogReaderState *record);
/* xactdesc.c */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 9567379f49..33348083eb 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -110,9 +110,9 @@ extern int CheckPointSegments;
/* Archive modes */
typedef enum ArchiveMode
{
- ARCHIVE_MODE_OFF = 0, /* disabled */
- ARCHIVE_MODE_ON, /* enabled while server is running normally */
- ARCHIVE_MODE_ALWAYS /* enabled always (even during recovery) */
+ ARCHIVE_MODE_OFF = 0, /* disabled */
+ ARCHIVE_MODE_ON, /* enabled while server is running normally */
+ ARCHIVE_MODE_ALWAYS /* enabled always (even during recovery) */
} ArchiveMode;
extern int XLogArchiveMode;
diff --git a/src/include/access/xloginsert.h b/src/include/access/xloginsert.h
index ac609298cc..31b45ba139 100644
--- a/src/include/access/xloginsert.h
+++ b/src/include/access/xloginsert.h
@@ -29,13 +29,14 @@
/* flags for XLogRegisterBuffer */
#define REGBUF_FORCE_IMAGE 0x01 /* force a full-page image */
#define REGBUF_NO_IMAGE 0x02 /* don't take a full-page image */
-#define REGBUF_WILL_INIT (0x04 | 0x02) /* page will be re-initialized at
- * replay (implies NO_IMAGE) */
-#define REGBUF_STANDARD 0x08 /* page follows "standard" page layout,
- * (data between pd_lower and pd_upper
- * will be skipped) */
-#define REGBUF_KEEP_DATA 0x10 /* include data even if a full-page image
- * is taken */
+#define REGBUF_WILL_INIT (0x04 | 0x02) /* page will be re-initialized
+ * at replay (implies
+ * NO_IMAGE) */
+#define REGBUF_STANDARD 0x08/* page follows "standard" page layout, (data
+ * between pd_lower and pd_upper will be
+ * skipped) */
+#define REGBUF_KEEP_DATA 0x10/* include data even if a full-page image is
+ * taken */
/* prototypes for public functions in xloginsert.c: */
extern void XLogBeginInsert(void);
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index 5164abec75..640f7e14b1 100644
--- a/src/include/access/xlogreader.h
+++ b/src/include/access/xlogreader.h
@@ -127,7 +127,7 @@ struct XLogReaderState
uint32 main_data_len; /* main data portion's length */
uint32 main_data_bufsz; /* allocated size of the buffer */
- RepOriginId record_origin;
+ RepOriginId record_origin;
/* information about blocks referenced by the record. */
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID + 1];
diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h
index 7a049f0e97..4ef6c206d2 100644
--- a/src/include/access/xlogrecord.h
+++ b/src/include/access/xlogrecord.h
@@ -121,13 +121,13 @@ typedef struct XLogRecordBlockHeader
*/
typedef struct XLogRecordBlockImageHeader
{
- uint16 length; /* number of page image bytes */
- uint16 hole_offset; /* number of bytes before "hole" */
- uint8 bimg_info; /* flag bits, see below */
+ uint16 length; /* number of page image bytes */
+ uint16 hole_offset; /* number of bytes before "hole" */
+ uint8 bimg_info; /* flag bits, see below */
/*
- * If BKPIMAGE_HAS_HOLE and BKPIMAGE_IS_COMPRESSED,
- * an XLogRecordBlockCompressHeader struct follows.
+ * If BKPIMAGE_HAS_HOLE and BKPIMAGE_IS_COMPRESSED, an
+ * XLogRecordBlockCompressHeader struct follows.
*/
} XLogRecordBlockImageHeader;
@@ -136,7 +136,7 @@ typedef struct XLogRecordBlockImageHeader
/* Information stored in bimg_info */
#define BKPIMAGE_HAS_HOLE 0x01 /* page image has "hole" */
-#define BKPIMAGE_IS_COMPRESSED 0x02 /* page image is compressed */
+#define BKPIMAGE_IS_COMPRESSED 0x02 /* page image is compressed */
/*
* Extra header information used when page image has "hole" and
@@ -144,7 +144,7 @@ typedef struct XLogRecordBlockImageHeader
*/
typedef struct XLogRecordBlockCompressHeader
{
- uint16 hole_length; /* number of bytes in "hole" */
+ uint16 hole_length; /* number of bytes in "hole" */
} XLogRecordBlockCompressHeader;
#define SizeOfXLogRecordBlockCompressHeader \
@@ -185,7 +185,7 @@ typedef struct XLogRecordDataHeaderShort
{
uint8 id; /* XLR_BLOCK_ID_DATA_SHORT */
uint8 data_length; /* number of payload bytes */
-} XLogRecordDataHeaderShort;
+} XLogRecordDataHeaderShort;
#define SizeOfXLogRecordDataHeaderShort (sizeof(uint8) * 2)
@@ -193,7 +193,7 @@ typedef struct XLogRecordDataHeaderLong
{
uint8 id; /* XLR_BLOCK_ID_DATA_LONG */
/* followed by uint32 data_length, unaligned */
-} XLogRecordDataHeaderLong;
+} XLogRecordDataHeaderLong;
#define SizeOfXLogRecordDataHeaderLong (sizeof(uint8) + sizeof(uint32))
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index 3015ff9bcc..8cf51c7fd6 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -26,11 +26,11 @@ extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
/* Result codes for XLogReadBufferForRedo[Extended] */
typedef enum
{
- BLK_NEEDS_REDO, /* changes from WAL record need to be applied */
- BLK_DONE, /* block is already up-to-date */
- BLK_RESTORED, /* block was restored from a full-page image */
- BLK_NOTFOUND /* block was not found (and hence does not need to be
- * replayed) */
+ BLK_NEEDS_REDO, /* changes from WAL record need to be applied */
+ BLK_DONE, /* block is already up-to-date */
+ BLK_RESTORED, /* block was restored from a full-page image */
+ BLK_NOTFOUND /* block was not found (and hence does not
+ * need to be replayed) */
} XLogRedoAction;
extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record,
diff --git a/src/include/bootstrap/bootstrap.h b/src/include/bootstrap/bootstrap.h
index af9fc75a74..b88bb3e4d6 100644
--- a/src/include/bootstrap/bootstrap.h
+++ b/src/include/bootstrap/bootstrap.h
@@ -25,7 +25,7 @@
#define BOOTCOL_NULL_AUTO 1
#define BOOTCOL_NULL_FORCE_NULL 2
-#define BOOTCOL_NULL_FORCE_NOT_NULL 3
+#define BOOTCOL_NULL_FORCE_NOT_NULL 3
extern Relation boot_reldesc;
extern Form_pg_attribute attrtypes[MAXATTR];
diff --git a/src/include/catalog/binary_upgrade.h b/src/include/catalog/binary_upgrade.h
index 22388c3b70..efca09fa2d 100644
--- a/src/include/catalog/binary_upgrade.h
+++ b/src/include/catalog/binary_upgrade.h
@@ -17,7 +17,7 @@
#include "catalog/pg_authid.h"
/* pick a OID that will never be used for TOAST tables */
-#define OPTIONALLY_CREATE_TOAST_OID BOOTSTRAP_SUPERUSERID
+#define OPTIONALLY_CREATE_TOAST_OID BOOTSTRAP_SUPERUSERID
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_array_pg_type_oid;
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index e961d37172..8b3b28d954 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -115,14 +115,14 @@ extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot);
extern void index_set_state_flags(Oid indexId, IndexStateFlagsAction action);
extern void reindex_index(Oid indexId, bool skip_constraint_checks,
- char relpersistence, int options);
+ char relpersistence, int options);
/* Flag bits for reindex_relation(): */
#define REINDEX_REL_PROCESS_TOAST 0x01
#define REINDEX_REL_SUPPRESS_INDEX_USE 0x02
#define REINDEX_REL_CHECK_CONSTRAINTS 0x04
#define REINDEX_REL_FORCE_INDEXES_UNLOGGED 0x08
-#define REINDEX_REL_FORCE_INDEXES_PERMANENT 0x10
+#define REINDEX_REL_FORCE_INDEXES_PERMANENT 0x10
extern bool reindex_relation(Oid relid, int flags, int options);
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 1c486c4b9c..748aadde94 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -220,7 +220,7 @@ DECLARE_UNIQUE_INDEX(pg_tablespace_spcname_index, 2698, on pg_tablespace using b
#define TablespaceNameIndexId 2698
DECLARE_UNIQUE_INDEX(pg_transform_oid_index, 3574, on pg_transform using btree(oid oid_ops));
-#define TransformOidIndexId 3574
+#define TransformOidIndexId 3574
DECLARE_UNIQUE_INDEX(pg_transform_type_lang_index, 3575, on pg_transform using btree(trftype oid_ops, trflang oid_ops));
#define TransformTypeLangIndexId 3575
diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h
index 619b2f58bc..37808c03c6 100644
--- a/src/include/catalog/objectaddress.h
+++ b/src/include/catalog/objectaddress.h
@@ -67,7 +67,7 @@ extern HeapTuple get_catalog_object_by_oid(Relation catalog,
extern char *getObjectDescription(const ObjectAddress *object);
extern char *getObjectDescriptionOids(Oid classid, Oid objid);
-extern int read_objtype_from_string(const char *objtype);
+extern int read_objtype_from_string(const char *objtype);
extern char *getObjectTypeDescription(const ObjectAddress *object);
extern char *getObjectIdentity(const ObjectAddress *address);
extern char *getObjectIdentityParts(const ObjectAddress *address,
diff --git a/src/include/catalog/opfam_internal.h b/src/include/catalog/opfam_internal.h
index f01dcbe3e3..32195a7131 100644
--- a/src/include/catalog/opfam_internal.h
+++ b/src/include/catalog/opfam_internal.h
@@ -25,4 +25,4 @@ typedef struct
Oid sortfamily; /* ordering operator's sort opfamily, or 0 */
} OpFamilyMember;
-#endif /* OPFAM_INTERNAL_H */
+#endif /* OPFAM_INTERNAL_H */
diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h
index b6b698841c..dd6079fbe3 100644
--- a/src/include/catalog/pg_aggregate.h
+++ b/src/include/catalog/pg_aggregate.h
@@ -129,15 +129,15 @@ typedef FormData_pg_aggregate *Form_pg_aggregate;
DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_avg int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2101 n 0 int4_avg_accum int8_avg int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" ));
DATA(insert ( 2102 n 0 int2_avg_accum int8_avg int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" ));
-DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 128 2281 128 _null_ _null_ ));
DATA(insert ( 2104 n 0 float4_accum float8_avg - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2105 n 0 float8_accum float8_avg - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2106 n 0 interval_accum interval_avg interval_accum interval_accum_inv interval_avg f f 0 1187 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" ));
/* sum */
-DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_sum int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2108 n 0 int4_sum - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
-DATA(insert ( 2109 n 0 int2_sum - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
+DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_sum int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2108 n 0 int4_sum - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
+DATA(insert ( 2109 n 0 int2_sum - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" ));
DATA(insert ( 2110 n 0 float4pl - - - - f f 0 700 0 0 0 _null_ _null_ ));
DATA(insert ( 2111 n 0 float8pl - - - - f f 0 701 0 0 0 _null_ _null_ ));
DATA(insert ( 2112 n 0 cash_pl - cash_pl cash_mi - f f 0 790 0 790 0 _null_ _null_ ));
@@ -195,7 +195,7 @@ DATA(insert ( 2147 n 0 int8inc_any - int8inc_any int8dec_any - f f 0 2
DATA(insert ( 2803 n 0 int8inc - int8inc int8dec - f f 0 20 0 20 0 "0" "0" ));
/* var_pop */
-DATA(insert ( 2718 n 0 int8_accum numeric_var_pop int8_accum int8_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2718 n 0 int8_accum numeric_var_pop int8_accum int8_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ ));
DATA(insert ( 2719 n 0 int4_accum numeric_poly_var_pop int4_accum int4_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2720 n 0 int2_accum numeric_poly_var_pop int2_accum int2_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2721 n 0 float4_accum float8_var_pop - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
@@ -204,38 +204,38 @@ DATA(insert ( 2723 n 0 numeric_accum numeric_var_pop numeric_accum numeric_accum
/* var_samp */
DATA(insert ( 2641 n 0 int8_accum numeric_var_samp int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2642 n 0 int4_accum numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2643 n 0 int2_accum numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2642 n 0 int4_accum numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2643 n 0 int2_accum numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2644 n 0 float4_accum float8_var_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2645 n 0 float8_accum float8_var_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2646 n 0 numeric_accum numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2646 n 0 numeric_accum numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
/* variance: historical Postgres syntax for var_samp */
DATA(insert ( 2148 n 0 int8_accum numeric_var_samp int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2149 n 0 int4_accum numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2150 n 0 int2_accum numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2149 n 0 int4_accum numeric_poly_var_samp int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2150 n 0 int2_accum numeric_poly_var_samp int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2151 n 0 float4_accum float8_var_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2152 n 0 float8_accum float8_var_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2153 n 0 numeric_accum numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ ));
/* stddev_pop */
DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2727 n 0 float4_accum float8_stddev_pop - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2728 n 0 float8_accum float8_stddev_pop - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2729 n 0 numeric_accum numeric_stddev_pop numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ ));
/* stddev_samp */
DATA(insert ( 2712 n 0 int8_accum numeric_stddev_samp int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2713 n 0 int4_accum numeric_poly_stddev_samp int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2714 n 0 int2_accum numeric_poly_stddev_samp int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2713 n 0 int4_accum numeric_poly_stddev_samp int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2714 n 0 int2_accum numeric_poly_stddev_samp int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2715 n 0 float4_accum float8_stddev_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2716 n 0 float8_accum float8_stddev_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2717 n 0 numeric_accum numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
/* stddev: historical Postgres syntax for stddev_samp */
-DATA(insert ( 2154 n 0 int8_accum numeric_stddev_samp int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2154 n 0 int8_accum numeric_stddev_samp int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
DATA(insert ( 2155 n 0 int4_accum numeric_poly_stddev_samp int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2156 n 0 int2_accum numeric_poly_stddev_samp int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
DATA(insert ( 2157 n 0 float4_accum float8_stddev_samp - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h
index 657ec07059..da5fe9d947 100644
--- a/src/include/catalog/pg_amop.h
+++ b/src/include/catalog/pg_amop.h
@@ -849,271 +849,271 @@ DATA(insert ( 3550 869 869 27 s 934 783 0 ));
/* BRIN opclasses */
/* minmax bytea */
-DATA(insert ( 4064 17 17 1 s 1957 3580 0 ));
-DATA(insert ( 4064 17 17 2 s 1958 3580 0 ));
-DATA(insert ( 4064 17 17 3 s 1955 3580 0 ));
-DATA(insert ( 4064 17 17 4 s 1960 3580 0 ));
-DATA(insert ( 4064 17 17 5 s 1959 3580 0 ));
+DATA(insert ( 4064 17 17 1 s 1957 3580 0 ));
+DATA(insert ( 4064 17 17 2 s 1958 3580 0 ));
+DATA(insert ( 4064 17 17 3 s 1955 3580 0 ));
+DATA(insert ( 4064 17 17 4 s 1960 3580 0 ));
+DATA(insert ( 4064 17 17 5 s 1959 3580 0 ));
/* minmax "char" */
-DATA(insert ( 4062 18 18 1 s 631 3580 0 ));
-DATA(insert ( 4062 18 18 2 s 632 3580 0 ));
-DATA(insert ( 4062 18 18 3 s 92 3580 0 ));
-DATA(insert ( 4062 18 18 4 s 634 3580 0 ));
-DATA(insert ( 4062 18 18 5 s 633 3580 0 ));
+DATA(insert ( 4062 18 18 1 s 631 3580 0 ));
+DATA(insert ( 4062 18 18 2 s 632 3580 0 ));
+DATA(insert ( 4062 18 18 3 s 92 3580 0 ));
+DATA(insert ( 4062 18 18 4 s 634 3580 0 ));
+DATA(insert ( 4062 18 18 5 s 633 3580 0 ));
/* minmax name */
-DATA(insert ( 4065 19 19 1 s 660 3580 0 ));
-DATA(insert ( 4065 19 19 2 s 661 3580 0 ));
-DATA(insert ( 4065 19 19 3 s 93 3580 0 ));
-DATA(insert ( 4065 19 19 4 s 663 3580 0 ));
-DATA(insert ( 4065 19 19 5 s 662 3580 0 ));
+DATA(insert ( 4065 19 19 1 s 660 3580 0 ));
+DATA(insert ( 4065 19 19 2 s 661 3580 0 ));
+DATA(insert ( 4065 19 19 3 s 93 3580 0 ));
+DATA(insert ( 4065 19 19 4 s 663 3580 0 ));
+DATA(insert ( 4065 19 19 5 s 662 3580 0 ));
/* minmax integer */
-DATA(insert ( 4054 20 20 1 s 412 3580 0 ));
-DATA(insert ( 4054 20 20 2 s 414 3580 0 ));
-DATA(insert ( 4054 20 20 3 s 410 3580 0 ));
-DATA(insert ( 4054 20 20 4 s 415 3580 0 ));
-DATA(insert ( 4054 20 20 5 s 413 3580 0 ));
-DATA(insert ( 4054 20 21 1 s 1870 3580 0 ));
-DATA(insert ( 4054 20 21 2 s 1872 3580 0 ));
-DATA(insert ( 4054 20 21 3 s 1868 3580 0 ));
-DATA(insert ( 4054 20 21 4 s 1873 3580 0 ));
-DATA(insert ( 4054 20 21 5 s 1871 3580 0 ));
-DATA(insert ( 4054 20 23 1 s 418 3580 0 ));
-DATA(insert ( 4054 20 23 2 s 420 3580 0 ));
-DATA(insert ( 4054 20 23 3 s 416 3580 0 ));
-DATA(insert ( 4054 20 23 4 s 430 3580 0 ));
-DATA(insert ( 4054 20 23 5 s 419 3580 0 ));
-DATA(insert ( 4054 21 21 1 s 95 3580 0 ));
-DATA(insert ( 4054 21 21 2 s 522 3580 0 ));
-DATA(insert ( 4054 21 21 3 s 94 3580 0 ));
-DATA(insert ( 4054 21 21 4 s 524 3580 0 ));
-DATA(insert ( 4054 21 21 5 s 520 3580 0 ));
-DATA(insert ( 4054 21 20 1 s 1864 3580 0 ));
-DATA(insert ( 4054 21 20 2 s 1866 3580 0 ));
-DATA(insert ( 4054 21 20 3 s 1862 3580 0 ));
-DATA(insert ( 4054 21 20 4 s 1867 3580 0 ));
-DATA(insert ( 4054 21 20 5 s 1865 3580 0 ));
-DATA(insert ( 4054 21 23 1 s 534 3580 0 ));
-DATA(insert ( 4054 21 23 2 s 540 3580 0 ));
-DATA(insert ( 4054 21 23 3 s 532 3580 0 ));
-DATA(insert ( 4054 21 23 4 s 542 3580 0 ));
-DATA(insert ( 4054 21 23 5 s 536 3580 0 ));
-DATA(insert ( 4054 23 23 1 s 97 3580 0 ));
-DATA(insert ( 4054 23 23 2 s 523 3580 0 ));
-DATA(insert ( 4054 23 23 3 s 96 3580 0 ));
-DATA(insert ( 4054 23 23 4 s 525 3580 0 ));
-DATA(insert ( 4054 23 23 5 s 521 3580 0 ));
-DATA(insert ( 4054 23 21 1 s 535 3580 0 ));
-DATA(insert ( 4054 23 21 2 s 541 3580 0 ));
-DATA(insert ( 4054 23 21 3 s 533 3580 0 ));
-DATA(insert ( 4054 23 21 4 s 543 3580 0 ));
-DATA(insert ( 4054 23 21 5 s 537 3580 0 ));
-DATA(insert ( 4054 23 20 1 s 37 3580 0 ));
-DATA(insert ( 4054 23 20 2 s 80 3580 0 ));
-DATA(insert ( 4054 23 20 3 s 15 3580 0 ));
-DATA(insert ( 4054 23 20 4 s 82 3580 0 ));
-DATA(insert ( 4054 23 20 5 s 76 3580 0 ));
+DATA(insert ( 4054 20 20 1 s 412 3580 0 ));
+DATA(insert ( 4054 20 20 2 s 414 3580 0 ));
+DATA(insert ( 4054 20 20 3 s 410 3580 0 ));
+DATA(insert ( 4054 20 20 4 s 415 3580 0 ));
+DATA(insert ( 4054 20 20 5 s 413 3580 0 ));
+DATA(insert ( 4054 20 21 1 s 1870 3580 0 ));
+DATA(insert ( 4054 20 21 2 s 1872 3580 0 ));
+DATA(insert ( 4054 20 21 3 s 1868 3580 0 ));
+DATA(insert ( 4054 20 21 4 s 1873 3580 0 ));
+DATA(insert ( 4054 20 21 5 s 1871 3580 0 ));
+DATA(insert ( 4054 20 23 1 s 418 3580 0 ));
+DATA(insert ( 4054 20 23 2 s 420 3580 0 ));
+DATA(insert ( 4054 20 23 3 s 416 3580 0 ));
+DATA(insert ( 4054 20 23 4 s 430 3580 0 ));
+DATA(insert ( 4054 20 23 5 s 419 3580 0 ));
+DATA(insert ( 4054 21 21 1 s 95 3580 0 ));
+DATA(insert ( 4054 21 21 2 s 522 3580 0 ));
+DATA(insert ( 4054 21 21 3 s 94 3580 0 ));
+DATA(insert ( 4054 21 21 4 s 524 3580 0 ));
+DATA(insert ( 4054 21 21 5 s 520 3580 0 ));
+DATA(insert ( 4054 21 20 1 s 1864 3580 0 ));
+DATA(insert ( 4054 21 20 2 s 1866 3580 0 ));
+DATA(insert ( 4054 21 20 3 s 1862 3580 0 ));
+DATA(insert ( 4054 21 20 4 s 1867 3580 0 ));
+DATA(insert ( 4054 21 20 5 s 1865 3580 0 ));
+DATA(insert ( 4054 21 23 1 s 534 3580 0 ));
+DATA(insert ( 4054 21 23 2 s 540 3580 0 ));
+DATA(insert ( 4054 21 23 3 s 532 3580 0 ));
+DATA(insert ( 4054 21 23 4 s 542 3580 0 ));
+DATA(insert ( 4054 21 23 5 s 536 3580 0 ));
+DATA(insert ( 4054 23 23 1 s 97 3580 0 ));
+DATA(insert ( 4054 23 23 2 s 523 3580 0 ));
+DATA(insert ( 4054 23 23 3 s 96 3580 0 ));
+DATA(insert ( 4054 23 23 4 s 525 3580 0 ));
+DATA(insert ( 4054 23 23 5 s 521 3580 0 ));
+DATA(insert ( 4054 23 21 1 s 535 3580 0 ));
+DATA(insert ( 4054 23 21 2 s 541 3580 0 ));
+DATA(insert ( 4054 23 21 3 s 533 3580 0 ));
+DATA(insert ( 4054 23 21 4 s 543 3580 0 ));
+DATA(insert ( 4054 23 21 5 s 537 3580 0 ));
+DATA(insert ( 4054 23 20 1 s 37 3580 0 ));
+DATA(insert ( 4054 23 20 2 s 80 3580 0 ));
+DATA(insert ( 4054 23 20 3 s 15 3580 0 ));
+DATA(insert ( 4054 23 20 4 s 82 3580 0 ));
+DATA(insert ( 4054 23 20 5 s 76 3580 0 ));
/* minmax text */
-DATA(insert ( 4056 25 25 1 s 664 3580 0 ));
-DATA(insert ( 4056 25 25 2 s 665 3580 0 ));
-DATA(insert ( 4056 25 25 3 s 98 3580 0 ));
-DATA(insert ( 4056 25 25 4 s 667 3580 0 ));
-DATA(insert ( 4056 25 25 5 s 666 3580 0 ));
+DATA(insert ( 4056 25 25 1 s 664 3580 0 ));
+DATA(insert ( 4056 25 25 2 s 665 3580 0 ));
+DATA(insert ( 4056 25 25 3 s 98 3580 0 ));
+DATA(insert ( 4056 25 25 4 s 667 3580 0 ));
+DATA(insert ( 4056 25 25 5 s 666 3580 0 ));
/* minmax oid */
-DATA(insert ( 4068 26 26 1 s 609 3580 0 ));
-DATA(insert ( 4068 26 26 2 s 611 3580 0 ));
-DATA(insert ( 4068 26 26 3 s 607 3580 0 ));
-DATA(insert ( 4068 26 26 4 s 612 3580 0 ));
-DATA(insert ( 4068 26 26 5 s 610 3580 0 ));
+DATA(insert ( 4068 26 26 1 s 609 3580 0 ));
+DATA(insert ( 4068 26 26 2 s 611 3580 0 ));
+DATA(insert ( 4068 26 26 3 s 607 3580 0 ));
+DATA(insert ( 4068 26 26 4 s 612 3580 0 ));
+DATA(insert ( 4068 26 26 5 s 610 3580 0 ));
/* minmax tid */
-DATA(insert ( 4069 27 27 1 s 2799 3580 0 ));
-DATA(insert ( 4069 27 27 2 s 2801 3580 0 ));
-DATA(insert ( 4069 27 27 3 s 387 3580 0 ));
-DATA(insert ( 4069 27 27 4 s 2802 3580 0 ));
-DATA(insert ( 4069 27 27 5 s 2800 3580 0 ));
+DATA(insert ( 4069 27 27 1 s 2799 3580 0 ));
+DATA(insert ( 4069 27 27 2 s 2801 3580 0 ));
+DATA(insert ( 4069 27 27 3 s 387 3580 0 ));
+DATA(insert ( 4069 27 27 4 s 2802 3580 0 ));
+DATA(insert ( 4069 27 27 5 s 2800 3580 0 ));
/* minmax float (float4, float8) */
-DATA(insert ( 4070 700 700 1 s 622 3580 0 ));
-DATA(insert ( 4070 700 700 2 s 624 3580 0 ));
-DATA(insert ( 4070 700 700 3 s 620 3580 0 ));
-DATA(insert ( 4070 700 700 4 s 625 3580 0 ));
-DATA(insert ( 4070 700 700 5 s 623 3580 0 ));
-DATA(insert ( 4070 700 701 1 s 1122 3580 0 ));
-DATA(insert ( 4070 700 701 2 s 1124 3580 0 ));
-DATA(insert ( 4070 700 701 3 s 1120 3580 0 ));
-DATA(insert ( 4070 700 701 4 s 1125 3580 0 ));
-DATA(insert ( 4070 700 701 5 s 1123 3580 0 ));
-DATA(insert ( 4070 701 700 1 s 1132 3580 0 ));
-DATA(insert ( 4070 701 700 2 s 1134 3580 0 ));
-DATA(insert ( 4070 701 700 3 s 1130 3580 0 ));
-DATA(insert ( 4070 701 700 4 s 1135 3580 0 ));
-DATA(insert ( 4070 701 700 5 s 1133 3580 0 ));
-DATA(insert ( 4070 701 701 1 s 672 3580 0 ));
-DATA(insert ( 4070 701 701 2 s 673 3580 0 ));
-DATA(insert ( 4070 701 701 3 s 670 3580 0 ));
-DATA(insert ( 4070 701 701 4 s 675 3580 0 ));
-DATA(insert ( 4070 701 701 5 s 674 3580 0 ));
+DATA(insert ( 4070 700 700 1 s 622 3580 0 ));
+DATA(insert ( 4070 700 700 2 s 624 3580 0 ));
+DATA(insert ( 4070 700 700 3 s 620 3580 0 ));
+DATA(insert ( 4070 700 700 4 s 625 3580 0 ));
+DATA(insert ( 4070 700 700 5 s 623 3580 0 ));
+DATA(insert ( 4070 700 701 1 s 1122 3580 0 ));
+DATA(insert ( 4070 700 701 2 s 1124 3580 0 ));
+DATA(insert ( 4070 700 701 3 s 1120 3580 0 ));
+DATA(insert ( 4070 700 701 4 s 1125 3580 0 ));
+DATA(insert ( 4070 700 701 5 s 1123 3580 0 ));
+DATA(insert ( 4070 701 700 1 s 1132 3580 0 ));
+DATA(insert ( 4070 701 700 2 s 1134 3580 0 ));
+DATA(insert ( 4070 701 700 3 s 1130 3580 0 ));
+DATA(insert ( 4070 701 700 4 s 1135 3580 0 ));
+DATA(insert ( 4070 701 700 5 s 1133 3580 0 ));
+DATA(insert ( 4070 701 701 1 s 672 3580 0 ));
+DATA(insert ( 4070 701 701 2 s 673 3580 0 ));
+DATA(insert ( 4070 701 701 3 s 670 3580 0 ));
+DATA(insert ( 4070 701 701 4 s 675 3580 0 ));
+DATA(insert ( 4070 701 701 5 s 674 3580 0 ));
/* minmax abstime */
-DATA(insert ( 4072 702 702 1 s 562 3580 0 ));
-DATA(insert ( 4072 702 702 2 s 564 3580 0 ));
-DATA(insert ( 4072 702 702 3 s 560 3580 0 ));
-DATA(insert ( 4072 702 702 4 s 565 3580 0 ));
-DATA(insert ( 4072 702 702 5 s 563 3580 0 ));
+DATA(insert ( 4072 702 702 1 s 562 3580 0 ));
+DATA(insert ( 4072 702 702 2 s 564 3580 0 ));
+DATA(insert ( 4072 702 702 3 s 560 3580 0 ));
+DATA(insert ( 4072 702 702 4 s 565 3580 0 ));
+DATA(insert ( 4072 702 702 5 s 563 3580 0 ));
/* minmax reltime */
-DATA(insert ( 4073 703 703 1 s 568 3580 0 ));
-DATA(insert ( 4073 703 703 2 s 570 3580 0 ));
-DATA(insert ( 4073 703 703 3 s 566 3580 0 ));
-DATA(insert ( 4073 703 703 4 s 571 3580 0 ));
-DATA(insert ( 4073 703 703 5 s 569 3580 0 ));
+DATA(insert ( 4073 703 703 1 s 568 3580 0 ));
+DATA(insert ( 4073 703 703 2 s 570 3580 0 ));
+DATA(insert ( 4073 703 703 3 s 566 3580 0 ));
+DATA(insert ( 4073 703 703 4 s 571 3580 0 ));
+DATA(insert ( 4073 703 703 5 s 569 3580 0 ));
/* minmax macaddr */
-DATA(insert ( 4074 829 829 1 s 1222 3580 0 ));
-DATA(insert ( 4074 829 829 2 s 1223 3580 0 ));
-DATA(insert ( 4074 829 829 3 s 1220 3580 0 ));
-DATA(insert ( 4074 829 829 4 s 1225 3580 0 ));
-DATA(insert ( 4074 829 829 5 s 1224 3580 0 ));
+DATA(insert ( 4074 829 829 1 s 1222 3580 0 ));
+DATA(insert ( 4074 829 829 2 s 1223 3580 0 ));
+DATA(insert ( 4074 829 829 3 s 1220 3580 0 ));
+DATA(insert ( 4074 829 829 4 s 1225 3580 0 ));
+DATA(insert ( 4074 829 829 5 s 1224 3580 0 ));
/* minmax inet */
-DATA(insert ( 4075 869 869 1 s 1203 3580 0 ));
-DATA(insert ( 4075 869 869 2 s 1204 3580 0 ));
-DATA(insert ( 4075 869 869 3 s 1201 3580 0 ));
-DATA(insert ( 4075 869 869 4 s 1206 3580 0 ));
-DATA(insert ( 4075 869 869 5 s 1205 3580 0 ));
+DATA(insert ( 4075 869 869 1 s 1203 3580 0 ));
+DATA(insert ( 4075 869 869 2 s 1204 3580 0 ));
+DATA(insert ( 4075 869 869 3 s 1201 3580 0 ));
+DATA(insert ( 4075 869 869 4 s 1206 3580 0 ));
+DATA(insert ( 4075 869 869 5 s 1205 3580 0 ));
/* inclusion inet */
-DATA(insert ( 4102 869 869 3 s 3552 3580 0 ));
-DATA(insert ( 4102 869 869 7 s 934 3580 0 ));
-DATA(insert ( 4102 869 869 8 s 932 3580 0 ));
-DATA(insert ( 4102 869 869 18 s 1201 3580 0 ));
-DATA(insert ( 4102 869 869 24 s 933 3580 0 ));
-DATA(insert ( 4102 869 869 26 s 931 3580 0 ));
+DATA(insert ( 4102 869 869 3 s 3552 3580 0 ));
+DATA(insert ( 4102 869 869 7 s 934 3580 0 ));
+DATA(insert ( 4102 869 869 8 s 932 3580 0 ));
+DATA(insert ( 4102 869 869 18 s 1201 3580 0 ));
+DATA(insert ( 4102 869 869 24 s 933 3580 0 ));
+DATA(insert ( 4102 869 869 26 s 931 3580 0 ));
/* minmax character */
-DATA(insert ( 4076 1042 1042 1 s 1058 3580 0 ));
-DATA(insert ( 4076 1042 1042 2 s 1059 3580 0 ));
-DATA(insert ( 4076 1042 1042 3 s 1054 3580 0 ));
-DATA(insert ( 4076 1042 1042 4 s 1061 3580 0 ));
-DATA(insert ( 4076 1042 1042 5 s 1060 3580 0 ));
+DATA(insert ( 4076 1042 1042 1 s 1058 3580 0 ));
+DATA(insert ( 4076 1042 1042 2 s 1059 3580 0 ));
+DATA(insert ( 4076 1042 1042 3 s 1054 3580 0 ));
+DATA(insert ( 4076 1042 1042 4 s 1061 3580 0 ));
+DATA(insert ( 4076 1042 1042 5 s 1060 3580 0 ));
/* minmax time without time zone */
-DATA(insert ( 4077 1083 1083 1 s 1110 3580 0 ));
-DATA(insert ( 4077 1083 1083 2 s 1111 3580 0 ));
-DATA(insert ( 4077 1083 1083 3 s 1108 3580 0 ));
-DATA(insert ( 4077 1083 1083 4 s 1113 3580 0 ));
-DATA(insert ( 4077 1083 1083 5 s 1112 3580 0 ));
+DATA(insert ( 4077 1083 1083 1 s 1110 3580 0 ));
+DATA(insert ( 4077 1083 1083 2 s 1111 3580 0 ));
+DATA(insert ( 4077 1083 1083 3 s 1108 3580 0 ));
+DATA(insert ( 4077 1083 1083 4 s 1113 3580 0 ));
+DATA(insert ( 4077 1083 1083 5 s 1112 3580 0 ));
/* minmax datetime (date, timestamp, timestamptz) */
-DATA(insert ( 4059 1114 1114 1 s 2062 3580 0 ));
-DATA(insert ( 4059 1114 1114 2 s 2063 3580 0 ));
-DATA(insert ( 4059 1114 1114 3 s 2060 3580 0 ));
-DATA(insert ( 4059 1114 1114 4 s 2065 3580 0 ));
-DATA(insert ( 4059 1114 1114 5 s 2064 3580 0 ));
-DATA(insert ( 4059 1114 1082 1 s 2371 3580 0 ));
-DATA(insert ( 4059 1114 1082 2 s 2372 3580 0 ));
-DATA(insert ( 4059 1114 1082 3 s 2373 3580 0 ));
-DATA(insert ( 4059 1114 1082 4 s 2374 3580 0 ));
-DATA(insert ( 4059 1114 1082 5 s 2375 3580 0 ));
-DATA(insert ( 4059 1114 1184 1 s 2534 3580 0 ));
-DATA(insert ( 4059 1114 1184 2 s 2535 3580 0 ));
-DATA(insert ( 4059 1114 1184 3 s 2536 3580 0 ));
-DATA(insert ( 4059 1114 1184 4 s 2537 3580 0 ));
-DATA(insert ( 4059 1114 1184 5 s 2538 3580 0 ));
-DATA(insert ( 4059 1082 1082 1 s 1095 3580 0 ));
-DATA(insert ( 4059 1082 1082 2 s 1096 3580 0 ));
-DATA(insert ( 4059 1082 1082 3 s 1093 3580 0 ));
-DATA(insert ( 4059 1082 1082 4 s 1098 3580 0 ));
-DATA(insert ( 4059 1082 1082 5 s 1097 3580 0 ));
-DATA(insert ( 4059 1082 1114 1 s 2345 3580 0 ));
-DATA(insert ( 4059 1082 1114 2 s 2346 3580 0 ));
-DATA(insert ( 4059 1082 1114 3 s 2347 3580 0 ));
-DATA(insert ( 4059 1082 1114 4 s 2348 3580 0 ));
-DATA(insert ( 4059 1082 1114 5 s 2349 3580 0 ));
-DATA(insert ( 4059 1082 1184 1 s 2358 3580 0 ));
-DATA(insert ( 4059 1082 1184 2 s 2359 3580 0 ));
-DATA(insert ( 4059 1082 1184 3 s 2360 3580 0 ));
-DATA(insert ( 4059 1082 1184 4 s 2361 3580 0 ));
-DATA(insert ( 4059 1082 1184 5 s 2362 3580 0 ));
-DATA(insert ( 4059 1184 1082 1 s 2384 3580 0 ));
-DATA(insert ( 4059 1184 1082 2 s 2385 3580 0 ));
-DATA(insert ( 4059 1184 1082 3 s 2386 3580 0 ));
-DATA(insert ( 4059 1184 1082 4 s 2387 3580 0 ));
-DATA(insert ( 4059 1184 1082 5 s 2388 3580 0 ));
-DATA(insert ( 4059 1184 1114 1 s 2540 3580 0 ));
-DATA(insert ( 4059 1184 1114 2 s 2541 3580 0 ));
-DATA(insert ( 4059 1184 1114 3 s 2542 3580 0 ));
-DATA(insert ( 4059 1184 1114 4 s 2543 3580 0 ));
-DATA(insert ( 4059 1184 1114 5 s 2544 3580 0 ));
-DATA(insert ( 4059 1184 1184 1 s 1322 3580 0 ));
-DATA(insert ( 4059 1184 1184 2 s 1323 3580 0 ));
-DATA(insert ( 4059 1184 1184 3 s 1320 3580 0 ));
-DATA(insert ( 4059 1184 1184 4 s 1325 3580 0 ));
-DATA(insert ( 4059 1184 1184 5 s 1324 3580 0 ));
+DATA(insert ( 4059 1114 1114 1 s 2062 3580 0 ));
+DATA(insert ( 4059 1114 1114 2 s 2063 3580 0 ));
+DATA(insert ( 4059 1114 1114 3 s 2060 3580 0 ));
+DATA(insert ( 4059 1114 1114 4 s 2065 3580 0 ));
+DATA(insert ( 4059 1114 1114 5 s 2064 3580 0 ));
+DATA(insert ( 4059 1114 1082 1 s 2371 3580 0 ));
+DATA(insert ( 4059 1114 1082 2 s 2372 3580 0 ));
+DATA(insert ( 4059 1114 1082 3 s 2373 3580 0 ));
+DATA(insert ( 4059 1114 1082 4 s 2374 3580 0 ));
+DATA(insert ( 4059 1114 1082 5 s 2375 3580 0 ));
+DATA(insert ( 4059 1114 1184 1 s 2534 3580 0 ));
+DATA(insert ( 4059 1114 1184 2 s 2535 3580 0 ));
+DATA(insert ( 4059 1114 1184 3 s 2536 3580 0 ));
+DATA(insert ( 4059 1114 1184 4 s 2537 3580 0 ));
+DATA(insert ( 4059 1114 1184 5 s 2538 3580 0 ));
+DATA(insert ( 4059 1082 1082 1 s 1095 3580 0 ));
+DATA(insert ( 4059 1082 1082 2 s 1096 3580 0 ));
+DATA(insert ( 4059 1082 1082 3 s 1093 3580 0 ));
+DATA(insert ( 4059 1082 1082 4 s 1098 3580 0 ));
+DATA(insert ( 4059 1082 1082 5 s 1097 3580 0 ));
+DATA(insert ( 4059 1082 1114 1 s 2345 3580 0 ));
+DATA(insert ( 4059 1082 1114 2 s 2346 3580 0 ));
+DATA(insert ( 4059 1082 1114 3 s 2347 3580 0 ));
+DATA(insert ( 4059 1082 1114 4 s 2348 3580 0 ));
+DATA(insert ( 4059 1082 1114 5 s 2349 3580 0 ));
+DATA(insert ( 4059 1082 1184 1 s 2358 3580 0 ));
+DATA(insert ( 4059 1082 1184 2 s 2359 3580 0 ));
+DATA(insert ( 4059 1082 1184 3 s 2360 3580 0 ));
+DATA(insert ( 4059 1082 1184 4 s 2361 3580 0 ));
+DATA(insert ( 4059 1082 1184 5 s 2362 3580 0 ));
+DATA(insert ( 4059 1184 1082 1 s 2384 3580 0 ));
+DATA(insert ( 4059 1184 1082 2 s 2385 3580 0 ));
+DATA(insert ( 4059 1184 1082 3 s 2386 3580 0 ));
+DATA(insert ( 4059 1184 1082 4 s 2387 3580 0 ));
+DATA(insert ( 4059 1184 1082 5 s 2388 3580 0 ));
+DATA(insert ( 4059 1184 1114 1 s 2540 3580 0 ));
+DATA(insert ( 4059 1184 1114 2 s 2541 3580 0 ));
+DATA(insert ( 4059 1184 1114 3 s 2542 3580 0 ));
+DATA(insert ( 4059 1184 1114 4 s 2543 3580 0 ));
+DATA(insert ( 4059 1184 1114 5 s 2544 3580 0 ));
+DATA(insert ( 4059 1184 1184 1 s 1322 3580 0 ));
+DATA(insert ( 4059 1184 1184 2 s 1323 3580 0 ));
+DATA(insert ( 4059 1184 1184 3 s 1320 3580 0 ));
+DATA(insert ( 4059 1184 1184 4 s 1325 3580 0 ));
+DATA(insert ( 4059 1184 1184 5 s 1324 3580 0 ));
/* minmax interval */
-DATA(insert ( 4078 1186 1186 1 s 1332 3580 0 ));
-DATA(insert ( 4078 1186 1186 2 s 1333 3580 0 ));
-DATA(insert ( 4078 1186 1186 3 s 1330 3580 0 ));
-DATA(insert ( 4078 1186 1186 4 s 1335 3580 0 ));
-DATA(insert ( 4078 1186 1186 5 s 1334 3580 0 ));
+DATA(insert ( 4078 1186 1186 1 s 1332 3580 0 ));
+DATA(insert ( 4078 1186 1186 2 s 1333 3580 0 ));
+DATA(insert ( 4078 1186 1186 3 s 1330 3580 0 ));
+DATA(insert ( 4078 1186 1186 4 s 1335 3580 0 ));
+DATA(insert ( 4078 1186 1186 5 s 1334 3580 0 ));
/* minmax time with time zone */
-DATA(insert ( 4058 1266 1266 1 s 1552 3580 0 ));
-DATA(insert ( 4058 1266 1266 2 s 1553 3580 0 ));
-DATA(insert ( 4058 1266 1266 3 s 1550 3580 0 ));
-DATA(insert ( 4058 1266 1266 4 s 1555 3580 0 ));
-DATA(insert ( 4058 1266 1266 5 s 1554 3580 0 ));
+DATA(insert ( 4058 1266 1266 1 s 1552 3580 0 ));
+DATA(insert ( 4058 1266 1266 2 s 1553 3580 0 ));
+DATA(insert ( 4058 1266 1266 3 s 1550 3580 0 ));
+DATA(insert ( 4058 1266 1266 4 s 1555 3580 0 ));
+DATA(insert ( 4058 1266 1266 5 s 1554 3580 0 ));
/* minmax bit */
-DATA(insert ( 4079 1560 1560 1 s 1786 3580 0 ));
-DATA(insert ( 4079 1560 1560 2 s 1788 3580 0 ));
-DATA(insert ( 4079 1560 1560 3 s 1784 3580 0 ));
-DATA(insert ( 4079 1560 1560 4 s 1789 3580 0 ));
-DATA(insert ( 4079 1560 1560 5 s 1787 3580 0 ));
+DATA(insert ( 4079 1560 1560 1 s 1786 3580 0 ));
+DATA(insert ( 4079 1560 1560 2 s 1788 3580 0 ));
+DATA(insert ( 4079 1560 1560 3 s 1784 3580 0 ));
+DATA(insert ( 4079 1560 1560 4 s 1789 3580 0 ));
+DATA(insert ( 4079 1560 1560 5 s 1787 3580 0 ));
/* minmax bit varying */
-DATA(insert ( 4080 1562 1562 1 s 1806 3580 0 ));
-DATA(insert ( 4080 1562 1562 2 s 1808 3580 0 ));
-DATA(insert ( 4080 1562 1562 3 s 1804 3580 0 ));
-DATA(insert ( 4080 1562 1562 4 s 1809 3580 0 ));
-DATA(insert ( 4080 1562 1562 5 s 1807 3580 0 ));
+DATA(insert ( 4080 1562 1562 1 s 1806 3580 0 ));
+DATA(insert ( 4080 1562 1562 2 s 1808 3580 0 ));
+DATA(insert ( 4080 1562 1562 3 s 1804 3580 0 ));
+DATA(insert ( 4080 1562 1562 4 s 1809 3580 0 ));
+DATA(insert ( 4080 1562 1562 5 s 1807 3580 0 ));
/* minmax numeric */
-DATA(insert ( 4055 1700 1700 1 s 1754 3580 0 ));
-DATA(insert ( 4055 1700 1700 2 s 1755 3580 0 ));
-DATA(insert ( 4055 1700 1700 3 s 1752 3580 0 ));
-DATA(insert ( 4055 1700 1700 4 s 1757 3580 0 ));
-DATA(insert ( 4055 1700 1700 5 s 1756 3580 0 ));
+DATA(insert ( 4055 1700 1700 1 s 1754 3580 0 ));
+DATA(insert ( 4055 1700 1700 2 s 1755 3580 0 ));
+DATA(insert ( 4055 1700 1700 3 s 1752 3580 0 ));
+DATA(insert ( 4055 1700 1700 4 s 1757 3580 0 ));
+DATA(insert ( 4055 1700 1700 5 s 1756 3580 0 ));
/* minmax uuid */
-DATA(insert ( 4081 2950 2950 1 s 2974 3580 0 ));
-DATA(insert ( 4081 2950 2950 2 s 2976 3580 0 ));
-DATA(insert ( 4081 2950 2950 3 s 2972 3580 0 ));
-DATA(insert ( 4081 2950 2950 4 s 2977 3580 0 ));
-DATA(insert ( 4081 2950 2950 5 s 2975 3580 0 ));
+DATA(insert ( 4081 2950 2950 1 s 2974 3580 0 ));
+DATA(insert ( 4081 2950 2950 2 s 2976 3580 0 ));
+DATA(insert ( 4081 2950 2950 3 s 2972 3580 0 ));
+DATA(insert ( 4081 2950 2950 4 s 2977 3580 0 ));
+DATA(insert ( 4081 2950 2950 5 s 2975 3580 0 ));
/* inclusion range types */
-DATA(insert ( 4103 3831 3831 1 s 3893 3580 0 ));
-DATA(insert ( 4103 3831 3831 2 s 3895 3580 0 ));
-DATA(insert ( 4103 3831 3831 3 s 3888 3580 0 ));
-DATA(insert ( 4103 3831 3831 4 s 3896 3580 0 ));
-DATA(insert ( 4103 3831 3831 5 s 3894 3580 0 ));
-DATA(insert ( 4103 3831 3831 7 s 3890 3580 0 ));
-DATA(insert ( 4103 3831 3831 8 s 3892 3580 0 ));
-DATA(insert ( 4103 3831 2283 16 s 3889 3580 0 ));
-DATA(insert ( 4103 3831 3831 17 s 3897 3580 0 ));
-DATA(insert ( 4103 3831 3831 18 s 3882 3580 0 ));
-DATA(insert ( 4103 3831 3831 20 s 3884 3580 0 ));
-DATA(insert ( 4103 3831 3831 21 s 3885 3580 0 ));
-DATA(insert ( 4103 3831 3831 22 s 3887 3580 0 ));
-DATA(insert ( 4103 3831 3831 23 s 3886 3580 0 ));
+DATA(insert ( 4103 3831 3831 1 s 3893 3580 0 ));
+DATA(insert ( 4103 3831 3831 2 s 3895 3580 0 ));
+DATA(insert ( 4103 3831 3831 3 s 3888 3580 0 ));
+DATA(insert ( 4103 3831 3831 4 s 3896 3580 0 ));
+DATA(insert ( 4103 3831 3831 5 s 3894 3580 0 ));
+DATA(insert ( 4103 3831 3831 7 s 3890 3580 0 ));
+DATA(insert ( 4103 3831 3831 8 s 3892 3580 0 ));
+DATA(insert ( 4103 3831 2283 16 s 3889 3580 0 ));
+DATA(insert ( 4103 3831 3831 17 s 3897 3580 0 ));
+DATA(insert ( 4103 3831 3831 18 s 3882 3580 0 ));
+DATA(insert ( 4103 3831 3831 20 s 3884 3580 0 ));
+DATA(insert ( 4103 3831 3831 21 s 3885 3580 0 ));
+DATA(insert ( 4103 3831 3831 22 s 3887 3580 0 ));
+DATA(insert ( 4103 3831 3831 23 s 3886 3580 0 ));
/* minmax pg_lsn */
-DATA(insert ( 4082 3220 3220 1 s 3224 3580 0 ));
-DATA(insert ( 4082 3220 3220 2 s 3226 3580 0 ));
-DATA(insert ( 4082 3220 3220 3 s 3222 3580 0 ));
-DATA(insert ( 4082 3220 3220 4 s 3227 3580 0 ));
-DATA(insert ( 4082 3220 3220 5 s 3225 3580 0 ));
+DATA(insert ( 4082 3220 3220 1 s 3224 3580 0 ));
+DATA(insert ( 4082 3220 3220 2 s 3226 3580 0 ));
+DATA(insert ( 4082 3220 3220 3 s 3222 3580 0 ));
+DATA(insert ( 4082 3220 3220 4 s 3227 3580 0 ));
+DATA(insert ( 4082 3220 3220 5 s 3225 3580 0 ));
/* inclusion box */
-DATA(insert ( 4104 603 603 1 s 493 3580 0 ));
-DATA(insert ( 4104 603 603 2 s 494 3580 0 ));
-DATA(insert ( 4104 603 603 3 s 500 3580 0 ));
-DATA(insert ( 4104 603 603 4 s 495 3580 0 ));
-DATA(insert ( 4104 603 603 5 s 496 3580 0 ));
-DATA(insert ( 4104 603 603 6 s 499 3580 0 ));
-DATA(insert ( 4104 603 603 7 s 498 3580 0 ));
-DATA(insert ( 4104 603 603 8 s 497 3580 0 ));
-DATA(insert ( 4104 603 603 9 s 2571 3580 0 ));
-DATA(insert ( 4104 603 603 10 s 2570 3580 0 ));
-DATA(insert ( 4104 603 603 11 s 2573 3580 0 ));
-DATA(insert ( 4104 603 603 12 s 2572 3580 0 ));
+DATA(insert ( 4104 603 603 1 s 493 3580 0 ));
+DATA(insert ( 4104 603 603 2 s 494 3580 0 ));
+DATA(insert ( 4104 603 603 3 s 500 3580 0 ));
+DATA(insert ( 4104 603 603 4 s 495 3580 0 ));
+DATA(insert ( 4104 603 603 5 s 496 3580 0 ));
+DATA(insert ( 4104 603 603 6 s 499 3580 0 ));
+DATA(insert ( 4104 603 603 7 s 498 3580 0 ));
+DATA(insert ( 4104 603 603 8 s 497 3580 0 ));
+DATA(insert ( 4104 603 603 9 s 2571 3580 0 ));
+DATA(insert ( 4104 603 603 10 s 2570 3580 0 ));
+DATA(insert ( 4104 603 603 11 s 2573 3580 0 ));
+DATA(insert ( 4104 603 603 12 s 2572 3580 0 ));
/* we could, but choose not to, supply entries for strategies 13 and 14 */
-DATA(insert ( 4104 603 600 7 s 433 3580 0 ));
+DATA(insert ( 4104 603 600 7 s 433 3580 0 ));
#endif /* PG_AMOP_H */
diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h
index f22e9a61ef..b57d6e65ca 100644
--- a/src/include/catalog/pg_amproc.h
+++ b/src/include/catalog/pg_amproc.h
@@ -441,223 +441,223 @@ DATA(insert ( 4017 25 25 5 4031 ));
/* BRIN opclasses */
/* minmax bytea */
-DATA(insert ( 4064 17 17 1 3383 ));
-DATA(insert ( 4064 17 17 2 3384 ));
-DATA(insert ( 4064 17 17 3 3385 ));
-DATA(insert ( 4064 17 17 4 3386 ));
+DATA(insert ( 4064 17 17 1 3383 ));
+DATA(insert ( 4064 17 17 2 3384 ));
+DATA(insert ( 4064 17 17 3 3385 ));
+DATA(insert ( 4064 17 17 4 3386 ));
/* minmax "char" */
-DATA(insert ( 4062 18 18 1 3383 ));
-DATA(insert ( 4062 18 18 2 3384 ));
-DATA(insert ( 4062 18 18 3 3385 ));
-DATA(insert ( 4062 18 18 4 3386 ));
+DATA(insert ( 4062 18 18 1 3383 ));
+DATA(insert ( 4062 18 18 2 3384 ));
+DATA(insert ( 4062 18 18 3 3385 ));
+DATA(insert ( 4062 18 18 4 3386 ));
/* minmax name */
-DATA(insert ( 4065 19 19 1 3383 ));
-DATA(insert ( 4065 19 19 2 3384 ));
-DATA(insert ( 4065 19 19 3 3385 ));
-DATA(insert ( 4065 19 19 4 3386 ));
+DATA(insert ( 4065 19 19 1 3383 ));
+DATA(insert ( 4065 19 19 2 3384 ));
+DATA(insert ( 4065 19 19 3 3385 ));
+DATA(insert ( 4065 19 19 4 3386 ));
/* minmax integer: int2, int4, int8 */
-DATA(insert ( 4054 20 20 1 3383 ));
-DATA(insert ( 4054 20 20 2 3384 ));
-DATA(insert ( 4054 20 20 3 3385 ));
-DATA(insert ( 4054 20 20 4 3386 ));
-DATA(insert ( 4054 20 21 1 3383 ));
-DATA(insert ( 4054 20 21 2 3384 ));
-DATA(insert ( 4054 20 21 3 3385 ));
-DATA(insert ( 4054 20 21 4 3386 ));
-DATA(insert ( 4054 20 23 1 3383 ));
-DATA(insert ( 4054 20 23 2 3384 ));
-DATA(insert ( 4054 20 23 3 3385 ));
-DATA(insert ( 4054 20 23 4 3386 ));
+DATA(insert ( 4054 20 20 1 3383 ));
+DATA(insert ( 4054 20 20 2 3384 ));
+DATA(insert ( 4054 20 20 3 3385 ));
+DATA(insert ( 4054 20 20 4 3386 ));
+DATA(insert ( 4054 20 21 1 3383 ));
+DATA(insert ( 4054 20 21 2 3384 ));
+DATA(insert ( 4054 20 21 3 3385 ));
+DATA(insert ( 4054 20 21 4 3386 ));
+DATA(insert ( 4054 20 23 1 3383 ));
+DATA(insert ( 4054 20 23 2 3384 ));
+DATA(insert ( 4054 20 23 3 3385 ));
+DATA(insert ( 4054 20 23 4 3386 ));
-DATA(insert ( 4054 21 21 1 3383 ));
-DATA(insert ( 4054 21 21 2 3384 ));
-DATA(insert ( 4054 21 21 3 3385 ));
-DATA(insert ( 4054 21 21 4 3386 ));
-DATA(insert ( 4054 21 20 1 3383 ));
-DATA(insert ( 4054 21 20 2 3384 ));
-DATA(insert ( 4054 21 20 3 3385 ));
-DATA(insert ( 4054 21 20 4 3386 ));
-DATA(insert ( 4054 21 23 1 3383 ));
-DATA(insert ( 4054 21 23 2 3384 ));
-DATA(insert ( 4054 21 23 3 3385 ));
-DATA(insert ( 4054 21 23 4 3386 ));
+DATA(insert ( 4054 21 21 1 3383 ));
+DATA(insert ( 4054 21 21 2 3384 ));
+DATA(insert ( 4054 21 21 3 3385 ));
+DATA(insert ( 4054 21 21 4 3386 ));
+DATA(insert ( 4054 21 20 1 3383 ));
+DATA(insert ( 4054 21 20 2 3384 ));
+DATA(insert ( 4054 21 20 3 3385 ));
+DATA(insert ( 4054 21 20 4 3386 ));
+DATA(insert ( 4054 21 23 1 3383 ));
+DATA(insert ( 4054 21 23 2 3384 ));
+DATA(insert ( 4054 21 23 3 3385 ));
+DATA(insert ( 4054 21 23 4 3386 ));
-DATA(insert ( 4054 23 23 1 3383 ));
-DATA(insert ( 4054 23 23 2 3384 ));
-DATA(insert ( 4054 23 23 3 3385 ));
-DATA(insert ( 4054 23 23 4 3386 ));
-DATA(insert ( 4054 23 20 1 3383 ));
-DATA(insert ( 4054 23 20 2 3384 ));
-DATA(insert ( 4054 23 20 3 3385 ));
-DATA(insert ( 4054 23 20 4 3386 ));
-DATA(insert ( 4054 23 21 1 3383 ));
-DATA(insert ( 4054 23 21 2 3384 ));
-DATA(insert ( 4054 23 21 3 3385 ));
-DATA(insert ( 4054 23 21 4 3386 ));
+DATA(insert ( 4054 23 23 1 3383 ));
+DATA(insert ( 4054 23 23 2 3384 ));
+DATA(insert ( 4054 23 23 3 3385 ));
+DATA(insert ( 4054 23 23 4 3386 ));
+DATA(insert ( 4054 23 20 1 3383 ));
+DATA(insert ( 4054 23 20 2 3384 ));
+DATA(insert ( 4054 23 20 3 3385 ));
+DATA(insert ( 4054 23 20 4 3386 ));
+DATA(insert ( 4054 23 21 1 3383 ));
+DATA(insert ( 4054 23 21 2 3384 ));
+DATA(insert ( 4054 23 21 3 3385 ));
+DATA(insert ( 4054 23 21 4 3386 ));
/* minmax text */
-DATA(insert ( 4056 25 25 1 3383 ));
-DATA(insert ( 4056 25 25 2 3384 ));
-DATA(insert ( 4056 25 25 3 3385 ));
-DATA(insert ( 4056 25 25 4 3386 ));
+DATA(insert ( 4056 25 25 1 3383 ));
+DATA(insert ( 4056 25 25 2 3384 ));
+DATA(insert ( 4056 25 25 3 3385 ));
+DATA(insert ( 4056 25 25 4 3386 ));
/* minmax oid */
-DATA(insert ( 4068 26 26 1 3383 ));
-DATA(insert ( 4068 26 26 2 3384 ));
-DATA(insert ( 4068 26 26 3 3385 ));
-DATA(insert ( 4068 26 26 4 3386 ));
+DATA(insert ( 4068 26 26 1 3383 ));
+DATA(insert ( 4068 26 26 2 3384 ));
+DATA(insert ( 4068 26 26 3 3385 ));
+DATA(insert ( 4068 26 26 4 3386 ));
/* minmax tid */
-DATA(insert ( 4069 27 27 1 3383 ));
-DATA(insert ( 4069 27 27 2 3384 ));
-DATA(insert ( 4069 27 27 3 3385 ));
-DATA(insert ( 4069 27 27 4 3386 ));
+DATA(insert ( 4069 27 27 1 3383 ));
+DATA(insert ( 4069 27 27 2 3384 ));
+DATA(insert ( 4069 27 27 3 3385 ));
+DATA(insert ( 4069 27 27 4 3386 ));
/* minmax float */
-DATA(insert ( 4070 700 700 1 3383 ));
-DATA(insert ( 4070 700 700 2 3384 ));
-DATA(insert ( 4070 700 700 3 3385 ));
-DATA(insert ( 4070 700 700 4 3386 ));
+DATA(insert ( 4070 700 700 1 3383 ));
+DATA(insert ( 4070 700 700 2 3384 ));
+DATA(insert ( 4070 700 700 3 3385 ));
+DATA(insert ( 4070 700 700 4 3386 ));
-DATA(insert ( 4070 700 701 1 3383 ));
-DATA(insert ( 4070 700 701 2 3384 ));
-DATA(insert ( 4070 700 701 3 3385 ));
-DATA(insert ( 4070 700 701 4 3386 ));
+DATA(insert ( 4070 700 701 1 3383 ));
+DATA(insert ( 4070 700 701 2 3384 ));
+DATA(insert ( 4070 700 701 3 3385 ));
+DATA(insert ( 4070 700 701 4 3386 ));
-DATA(insert ( 4070 701 701 1 3383 ));
-DATA(insert ( 4070 701 701 2 3384 ));
-DATA(insert ( 4070 701 701 3 3385 ));
-DATA(insert ( 4070 701 701 4 3386 ));
+DATA(insert ( 4070 701 701 1 3383 ));
+DATA(insert ( 4070 701 701 2 3384 ));
+DATA(insert ( 4070 701 701 3 3385 ));
+DATA(insert ( 4070 701 701 4 3386 ));
-DATA(insert ( 4070 701 700 1 3383 ));
-DATA(insert ( 4070 701 700 2 3384 ));
-DATA(insert ( 4070 701 700 3 3385 ));
-DATA(insert ( 4070 701 700 4 3386 ));
+DATA(insert ( 4070 701 700 1 3383 ));
+DATA(insert ( 4070 701 700 2 3384 ));
+DATA(insert ( 4070 701 700 3 3385 ));
+DATA(insert ( 4070 701 700 4 3386 ));
/* minmax abstime */
-DATA(insert ( 4072 702 702 1 3383 ));
-DATA(insert ( 4072 702 702 2 3384 ));
-DATA(insert ( 4072 702 702 3 3385 ));
-DATA(insert ( 4072 702 702 4 3386 ));
+DATA(insert ( 4072 702 702 1 3383 ));
+DATA(insert ( 4072 702 702 2 3384 ));
+DATA(insert ( 4072 702 702 3 3385 ));
+DATA(insert ( 4072 702 702 4 3386 ));
/* minmax reltime */
-DATA(insert ( 4073 703 703 1 3383 ));
-DATA(insert ( 4073 703 703 2 3384 ));
-DATA(insert ( 4073 703 703 3 3385 ));
-DATA(insert ( 4073 703 703 4 3386 ));
+DATA(insert ( 4073 703 703 1 3383 ));
+DATA(insert ( 4073 703 703 2 3384 ));
+DATA(insert ( 4073 703 703 3 3385 ));
+DATA(insert ( 4073 703 703 4 3386 ));
/* minmax macaddr */
-DATA(insert ( 4074 829 829 1 3383 ));
-DATA(insert ( 4074 829 829 2 3384 ));
-DATA(insert ( 4074 829 829 3 3385 ));
-DATA(insert ( 4074 829 829 4 3386 ));
+DATA(insert ( 4074 829 829 1 3383 ));
+DATA(insert ( 4074 829 829 2 3384 ));
+DATA(insert ( 4074 829 829 3 3385 ));
+DATA(insert ( 4074 829 829 4 3386 ));
/* minmax inet */
-DATA(insert ( 4075 869 869 1 3383 ));
-DATA(insert ( 4075 869 869 2 3384 ));
-DATA(insert ( 4075 869 869 3 3385 ));
-DATA(insert ( 4075 869 869 4 3386 ));
+DATA(insert ( 4075 869 869 1 3383 ));
+DATA(insert ( 4075 869 869 2 3384 ));
+DATA(insert ( 4075 869 869 3 3385 ));
+DATA(insert ( 4075 869 869 4 3386 ));
/* inclusion inet */
-DATA(insert ( 4102 869 869 1 4105 ));
-DATA(insert ( 4102 869 869 2 4106 ));
-DATA(insert ( 4102 869 869 3 4107 ));
-DATA(insert ( 4102 869 869 4 4108 ));
-DATA(insert ( 4102 869 869 11 4063 ));
-DATA(insert ( 4102 869 869 12 4071 ));
-DATA(insert ( 4102 869 869 13 930 ));
+DATA(insert ( 4102 869 869 1 4105 ));
+DATA(insert ( 4102 869 869 2 4106 ));
+DATA(insert ( 4102 869 869 3 4107 ));
+DATA(insert ( 4102 869 869 4 4108 ));
+DATA(insert ( 4102 869 869 11 4063 ));
+DATA(insert ( 4102 869 869 12 4071 ));
+DATA(insert ( 4102 869 869 13 930 ));
/* minmax character */
-DATA(insert ( 4076 1042 1042 1 3383 ));
-DATA(insert ( 4076 1042 1042 2 3384 ));
-DATA(insert ( 4076 1042 1042 3 3385 ));
-DATA(insert ( 4076 1042 1042 4 3386 ));
+DATA(insert ( 4076 1042 1042 1 3383 ));
+DATA(insert ( 4076 1042 1042 2 3384 ));
+DATA(insert ( 4076 1042 1042 3 3385 ));
+DATA(insert ( 4076 1042 1042 4 3386 ));
/* minmax time without time zone */
-DATA(insert ( 4077 1083 1083 1 3383 ));
-DATA(insert ( 4077 1083 1083 2 3384 ));
-DATA(insert ( 4077 1083 1083 3 3385 ));
-DATA(insert ( 4077 1083 1083 4 3386 ));
+DATA(insert ( 4077 1083 1083 1 3383 ));
+DATA(insert ( 4077 1083 1083 2 3384 ));
+DATA(insert ( 4077 1083 1083 3 3385 ));
+DATA(insert ( 4077 1083 1083 4 3386 ));
/* minmax datetime (date, timestamp, timestamptz) */
-DATA(insert ( 4059 1114 1114 1 3383 ));
-DATA(insert ( 4059 1114 1114 2 3384 ));
-DATA(insert ( 4059 1114 1114 3 3385 ));
-DATA(insert ( 4059 1114 1114 4 3386 ));
-DATA(insert ( 4059 1114 1184 1 3383 ));
-DATA(insert ( 4059 1114 1184 2 3384 ));
-DATA(insert ( 4059 1114 1184 3 3385 ));
-DATA(insert ( 4059 1114 1184 4 3386 ));
-DATA(insert ( 4059 1114 1082 1 3383 ));
-DATA(insert ( 4059 1114 1082 2 3384 ));
-DATA(insert ( 4059 1114 1082 3 3385 ));
-DATA(insert ( 4059 1114 1082 4 3386 ));
+DATA(insert ( 4059 1114 1114 1 3383 ));
+DATA(insert ( 4059 1114 1114 2 3384 ));
+DATA(insert ( 4059 1114 1114 3 3385 ));
+DATA(insert ( 4059 1114 1114 4 3386 ));
+DATA(insert ( 4059 1114 1184 1 3383 ));
+DATA(insert ( 4059 1114 1184 2 3384 ));
+DATA(insert ( 4059 1114 1184 3 3385 ));
+DATA(insert ( 4059 1114 1184 4 3386 ));
+DATA(insert ( 4059 1114 1082 1 3383 ));
+DATA(insert ( 4059 1114 1082 2 3384 ));
+DATA(insert ( 4059 1114 1082 3 3385 ));
+DATA(insert ( 4059 1114 1082 4 3386 ));
-DATA(insert ( 4059 1184 1184 1 3383 ));
-DATA(insert ( 4059 1184 1184 2 3384 ));
-DATA(insert ( 4059 1184 1184 3 3385 ));
-DATA(insert ( 4059 1184 1184 4 3386 ));
-DATA(insert ( 4059 1184 1114 1 3383 ));
-DATA(insert ( 4059 1184 1114 2 3384 ));
-DATA(insert ( 4059 1184 1114 3 3385 ));
-DATA(insert ( 4059 1184 1114 4 3386 ));
-DATA(insert ( 4059 1184 1082 1 3383 ));
-DATA(insert ( 4059 1184 1082 2 3384 ));
-DATA(insert ( 4059 1184 1082 3 3385 ));
-DATA(insert ( 4059 1184 1082 4 3386 ));
+DATA(insert ( 4059 1184 1184 1 3383 ));
+DATA(insert ( 4059 1184 1184 2 3384 ));
+DATA(insert ( 4059 1184 1184 3 3385 ));
+DATA(insert ( 4059 1184 1184 4 3386 ));
+DATA(insert ( 4059 1184 1114 1 3383 ));
+DATA(insert ( 4059 1184 1114 2 3384 ));
+DATA(insert ( 4059 1184 1114 3 3385 ));
+DATA(insert ( 4059 1184 1114 4 3386 ));
+DATA(insert ( 4059 1184 1082 1 3383 ));
+DATA(insert ( 4059 1184 1082 2 3384 ));
+DATA(insert ( 4059 1184 1082 3 3385 ));
+DATA(insert ( 4059 1184 1082 4 3386 ));
-DATA(insert ( 4059 1082 1082 1 3383 ));
-DATA(insert ( 4059 1082 1082 2 3384 ));
-DATA(insert ( 4059 1082 1082 3 3385 ));
-DATA(insert ( 4059 1082 1082 4 3386 ));
-DATA(insert ( 4059 1082 1114 1 3383 ));
-DATA(insert ( 4059 1082 1114 2 3384 ));
-DATA(insert ( 4059 1082 1114 3 3385 ));
-DATA(insert ( 4059 1082 1114 4 3386 ));
-DATA(insert ( 4059 1082 1184 1 3383 ));
-DATA(insert ( 4059 1082 1184 2 3384 ));
-DATA(insert ( 4059 1082 1184 3 3385 ));
-DATA(insert ( 4059 1082 1184 4 3386 ));
+DATA(insert ( 4059 1082 1082 1 3383 ));
+DATA(insert ( 4059 1082 1082 2 3384 ));
+DATA(insert ( 4059 1082 1082 3 3385 ));
+DATA(insert ( 4059 1082 1082 4 3386 ));
+DATA(insert ( 4059 1082 1114 1 3383 ));
+DATA(insert ( 4059 1082 1114 2 3384 ));
+DATA(insert ( 4059 1082 1114 3 3385 ));
+DATA(insert ( 4059 1082 1114 4 3386 ));
+DATA(insert ( 4059 1082 1184 1 3383 ));
+DATA(insert ( 4059 1082 1184 2 3384 ));
+DATA(insert ( 4059 1082 1184 3 3385 ));
+DATA(insert ( 4059 1082 1184 4 3386 ));
/* minmax interval */
-DATA(insert ( 4078 1186 1186 1 3383 ));
-DATA(insert ( 4078 1186 1186 2 3384 ));
-DATA(insert ( 4078 1186 1186 3 3385 ));
-DATA(insert ( 4078 1186 1186 4 3386 ));
+DATA(insert ( 4078 1186 1186 1 3383 ));
+DATA(insert ( 4078 1186 1186 2 3384 ));
+DATA(insert ( 4078 1186 1186 3 3385 ));
+DATA(insert ( 4078 1186 1186 4 3386 ));
/* minmax time with time zone */
-DATA(insert ( 4058 1266 1266 1 3383 ));
-DATA(insert ( 4058 1266 1266 2 3384 ));
-DATA(insert ( 4058 1266 1266 3 3385 ));
-DATA(insert ( 4058 1266 1266 4 3386 ));
+DATA(insert ( 4058 1266 1266 1 3383 ));
+DATA(insert ( 4058 1266 1266 2 3384 ));
+DATA(insert ( 4058 1266 1266 3 3385 ));
+DATA(insert ( 4058 1266 1266 4 3386 ));
/* minmax bit */
-DATA(insert ( 4079 1560 1560 1 3383 ));
-DATA(insert ( 4079 1560 1560 2 3384 ));
-DATA(insert ( 4079 1560 1560 3 3385 ));
-DATA(insert ( 4079 1560 1560 4 3386 ));
+DATA(insert ( 4079 1560 1560 1 3383 ));
+DATA(insert ( 4079 1560 1560 2 3384 ));
+DATA(insert ( 4079 1560 1560 3 3385 ));
+DATA(insert ( 4079 1560 1560 4 3386 ));
/* minmax bit varying */
-DATA(insert ( 4080 1562 1562 1 3383 ));
-DATA(insert ( 4080 1562 1562 2 3384 ));
-DATA(insert ( 4080 1562 1562 3 3385 ));
-DATA(insert ( 4080 1562 1562 4 3386 ));
+DATA(insert ( 4080 1562 1562 1 3383 ));
+DATA(insert ( 4080 1562 1562 2 3384 ));
+DATA(insert ( 4080 1562 1562 3 3385 ));
+DATA(insert ( 4080 1562 1562 4 3386 ));
/* minmax numeric */
-DATA(insert ( 4055 1700 1700 1 3383 ));
-DATA(insert ( 4055 1700 1700 2 3384 ));
-DATA(insert ( 4055 1700 1700 3 3385 ));
-DATA(insert ( 4055 1700 1700 4 3386 ));
+DATA(insert ( 4055 1700 1700 1 3383 ));
+DATA(insert ( 4055 1700 1700 2 3384 ));
+DATA(insert ( 4055 1700 1700 3 3385 ));
+DATA(insert ( 4055 1700 1700 4 3386 ));
/* minmax uuid */
-DATA(insert ( 4081 2950 2950 1 3383 ));
-DATA(insert ( 4081 2950 2950 2 3384 ));
-DATA(insert ( 4081 2950 2950 3 3385 ));
-DATA(insert ( 4081 2950 2950 4 3386 ));
+DATA(insert ( 4081 2950 2950 1 3383 ));
+DATA(insert ( 4081 2950 2950 2 3384 ));
+DATA(insert ( 4081 2950 2950 3 3385 ));
+DATA(insert ( 4081 2950 2950 4 3386 ));
/* inclusion range types */
-DATA(insert ( 4103 3831 3831 1 4105 ));
-DATA(insert ( 4103 3831 3831 2 4106 ));
-DATA(insert ( 4103 3831 3831 3 4107 ));
-DATA(insert ( 4103 3831 3831 4 4108 ));
-DATA(insert ( 4103 3831 3831 11 4057 ));
-DATA(insert ( 4103 3831 3831 13 3859 ));
-DATA(insert ( 4103 3831 3831 14 3850 ));
+DATA(insert ( 4103 3831 3831 1 4105 ));
+DATA(insert ( 4103 3831 3831 2 4106 ));
+DATA(insert ( 4103 3831 3831 3 4107 ));
+DATA(insert ( 4103 3831 3831 4 4108 ));
+DATA(insert ( 4103 3831 3831 11 4057 ));
+DATA(insert ( 4103 3831 3831 13 3859 ));
+DATA(insert ( 4103 3831 3831 14 3850 ));
/* minmax pg_lsn */
-DATA(insert ( 4082 3220 3220 1 3383 ));
-DATA(insert ( 4082 3220 3220 2 3384 ));
-DATA(insert ( 4082 3220 3220 3 3385 ));
-DATA(insert ( 4082 3220 3220 4 3386 ));
+DATA(insert ( 4082 3220 3220 1 3383 ));
+DATA(insert ( 4082 3220 3220 2 3384 ));
+DATA(insert ( 4082 3220 3220 3 3385 ));
+DATA(insert ( 4082 3220 3220 4 3386 ));
/* inclusion box */
-DATA(insert ( 4104 603 603 1 4105 ));
-DATA(insert ( 4104 603 603 2 4106 ));
-DATA(insert ( 4104 603 603 3 4107 ));
-DATA(insert ( 4104 603 603 4 4108 ));
-DATA(insert ( 4104 603 603 11 4067 ));
-DATA(insert ( 4104 603 603 13 187 ));
+DATA(insert ( 4104 603 603 1 4105 ));
+DATA(insert ( 4104 603 603 2 4106 ));
+DATA(insert ( 4104 603 603 3 4107 ));
+DATA(insert ( 4104 603 603 4 4108 ));
+DATA(insert ( 4104 603 603 11 4067 ));
+DATA(insert ( 4104 603 603 13 187 ));
#endif /* PG_AMPROC_H */
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index 87a3462353..f0b28b01eb 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -138,12 +138,12 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK
/*
* This flag specifies whether this column has ever had a local
- * definition. It is set for normal non-inherited columns, but also
- * for columns that are inherited from parents if also explicitly listed
- * in CREATE TABLE INHERITS. It is also set when inheritance is removed
- * from a table with ALTER TABLE NO INHERIT. If the flag is set, the
- * column is not dropped by a parent's DROP COLUMN even if this causes
- * the column's attinhcount to become zero.
+ * definition. It is set for normal non-inherited columns, but also for
+ * columns that are inherited from parents if also explicitly listed in
+ * CREATE TABLE INHERITS. It is also set when inheritance is removed from
+ * a table with ALTER TABLE NO INHERIT. If the flag is set, the column is
+ * not dropped by a parent's DROP COLUMN even if this causes the column's
+ * attinhcount to become zero.
*/
bool attislocal;
diff --git a/src/include/catalog/pg_cast.h b/src/include/catalog/pg_cast.h
index bf6ef10821..9f7733f584 100644
--- a/src/include/catalog/pg_cast.h
+++ b/src/include/catalog/pg_cast.h
@@ -376,6 +376,6 @@ DATA(insert ( 1700 1700 1703 i f ));
/* json to/from jsonb */
DATA(insert ( 114 3802 0 a i ));
-DATA(insert ( 3802 114 0 a i ));
+DATA(insert ( 3802 114 0 a i ));
#endif /* PG_CAST_H */
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 48a7262895..fea99c700f 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -65,7 +65,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO
bool relhasrules; /* has (or has had) any rules */
bool relhastriggers; /* has (or has had) any TRIGGERs */
bool relhassubclass; /* has (or has had) derived classes */
- bool relrowsecurity; /* row security is enabled or not */
+ bool relrowsecurity; /* row security is enabled or not */
bool relispopulated; /* matview currently holds query results */
char relreplident; /* see REPLICA_IDENTITY_xxx constants */
TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 2e4c381361..ad1eb4b9cc 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -46,8 +46,10 @@ typedef struct CheckPoint
MultiXactId oldestMulti; /* cluster-wide minimum datminmxid */
Oid oldestMultiDB; /* database with minimum datminmxid */
pg_time_t time; /* time stamp of checkpoint */
- TransactionId oldestCommitTs; /* oldest Xid with valid commit timestamp */
- TransactionId newestCommitTs; /* newest Xid with valid commit timestamp */
+ TransactionId oldestCommitTs; /* oldest Xid with valid commit
+ * timestamp */
+ TransactionId newestCommitTs; /* newest Xid with valid commit
+ * timestamp */
/*
* Oldest XID still running. This is only needed to initialize hot standby
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index 692455f361..8985aed64e 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -52,7 +52,7 @@ CATALOG(pg_description,2609) BKI_WITHOUT_OIDS
int32 objsubid; /* column number, or 0 if not used */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text description BKI_FORCE_NOT_NULL; /* description of object */
+ text description BKI_FORCE_NOT_NULL; /* description of object */
#endif
} FormData_pg_description;
diff --git a/src/include/catalog/pg_extension.h b/src/include/catalog/pg_extension.h
index 99ab35bb05..de95e481fd 100644
--- a/src/include/catalog/pg_extension.h
+++ b/src/include/catalog/pg_extension.h
@@ -37,7 +37,7 @@ CATALOG(pg_extension,3079)
#ifdef CATALOG_VARLEN /* variable-length fields start here */
/* extversion may never be null, but the others can be. */
- text extversion BKI_FORCE_NOT_NULL; /* extension version name */
+ text extversion BKI_FORCE_NOT_NULL; /* extension version name */
Oid extconfig[1]; /* dumpable configuration tables */
text extcondition[1]; /* WHERE clauses for config tables */
#endif
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index 4a33752040..d7b55faf97 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -34,7 +34,7 @@ CATALOG(pg_largeobject,2613) BKI_WITHOUT_OIDS
int32 pageno; /* Page number (starting from 0) */
/* data has variable length, but we allow direct access; see inv_api.c */
- bytea data BKI_FORCE_NOT_NULL; /* Data for page (may be zero-length) */
+ bytea data BKI_FORCE_NOT_NULL; /* Data for page (may be zero-length) */
} FormData_pg_largeobject;
/* ----------------
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index a13e082800..e7b3148980 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -238,15 +238,15 @@ DATA(insert ( 2742 jsonb_path_ops PGNSP PGUID 4037 3802 f 23 ));
/* BRIN operator classes */
/* no brin opclass for bool */
-DATA(insert ( 3580 bytea_minmax_ops PGNSP PGUID 4064 17 t 17 ));
-DATA(insert ( 3580 char_minmax_ops PGNSP PGUID 4062 18 t 18 ));
-DATA(insert ( 3580 name_minmax_ops PGNSP PGUID 4065 19 t 19 ));
-DATA(insert ( 3580 int8_minmax_ops PGNSP PGUID 4054 20 t 20 ));
-DATA(insert ( 3580 int2_minmax_ops PGNSP PGUID 4054 21 t 21 ));
-DATA(insert ( 3580 int4_minmax_ops PGNSP PGUID 4054 23 t 23 ));
-DATA(insert ( 3580 text_minmax_ops PGNSP PGUID 4056 25 t 25 ));
-DATA(insert ( 3580 oid_minmax_ops PGNSP PGUID 4068 26 t 26 ));
-DATA(insert ( 3580 tid_minmax_ops PGNSP PGUID 4069 27 t 27 ));
+DATA(insert ( 3580 bytea_minmax_ops PGNSP PGUID 4064 17 t 17 ));
+DATA(insert ( 3580 char_minmax_ops PGNSP PGUID 4062 18 t 18 ));
+DATA(insert ( 3580 name_minmax_ops PGNSP PGUID 4065 19 t 19 ));
+DATA(insert ( 3580 int8_minmax_ops PGNSP PGUID 4054 20 t 20 ));
+DATA(insert ( 3580 int2_minmax_ops PGNSP PGUID 4054 21 t 21 ));
+DATA(insert ( 3580 int4_minmax_ops PGNSP PGUID 4054 23 t 23 ));
+DATA(insert ( 3580 text_minmax_ops PGNSP PGUID 4056 25 t 25 ));
+DATA(insert ( 3580 oid_minmax_ops PGNSP PGUID 4068 26 t 26 ));
+DATA(insert ( 3580 tid_minmax_ops PGNSP PGUID 4069 27 t 27 ));
DATA(insert ( 3580 float4_minmax_ops PGNSP PGUID 4070 700 t 700 ));
DATA(insert ( 3580 float8_minmax_ops PGNSP PGUID 4070 701 t 701 ));
DATA(insert ( 3580 abstime_minmax_ops PGNSP PGUID 4072 702 t 702 ));
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index 6e260cb304..773f4fd731 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -1019,9 +1019,9 @@ DATA(insert OID = 1522 ( "<->" PGNSP PGUID b f f 600 718 701 3291 0 di
DESCR("distance between");
DATA(insert OID = 3291 ( "<->" PGNSP PGUID b f f 718 600 701 1522 0 dist_cpoint - - ));
DESCR("distance between");
-DATA(insert OID = 3276 ( "<->" PGNSP PGUID b f f 600 604 701 3289 0 dist_ppoly - - ));
+DATA(insert OID = 3276 ( "<->" PGNSP PGUID b f f 600 604 701 3289 0 dist_ppoly - - ));
DESCR("distance between");
-DATA(insert OID = 3289 ( "<->" PGNSP PGUID b f f 604 600 701 3276 0 dist_polyp - - ));
+DATA(insert OID = 3289 ( "<->" PGNSP PGUID b f f 604 600 701 3276 0 dist_polyp - - ));
DESCR("distance between");
DATA(insert OID = 1523 ( "<->" PGNSP PGUID b f f 718 604 701 0 0 dist_cpoly - - ));
DESCR("distance between");
diff --git a/src/include/catalog/pg_pltemplate.h b/src/include/catalog/pg_pltemplate.h
index 569d724036..754965a9a8 100644
--- a/src/include/catalog/pg_pltemplate.h
+++ b/src/include/catalog/pg_pltemplate.h
@@ -35,10 +35,11 @@ CATALOG(pg_pltemplate,1136) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
bool tmpldbacreate; /* PL is installable by db owner? */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text tmplhandler BKI_FORCE_NOT_NULL; /* name of call handler function */
+ text tmplhandler BKI_FORCE_NOT_NULL; /* name of call handler
+ * function */
text tmplinline; /* name of anonymous-block handler, or NULL */
text tmplvalidator; /* name of validator function, or NULL */
- text tmpllibrary BKI_FORCE_NOT_NULL; /* path of shared library */
+ text tmpllibrary BKI_FORCE_NOT_NULL; /* path of shared library */
aclitem tmplacl[1]; /* access privileges for template */
#endif
} FormData_pg_pltemplate;
diff --git a/src/include/catalog/pg_policy.h b/src/include/catalog/pg_policy.h
index ae71f3f3a2..da404c61e9 100644
--- a/src/include/catalog/pg_policy.h
+++ b/src/include/catalog/pg_policy.h
@@ -1,6 +1,6 @@
/*
* pg_policy.h
- * definition of the system "policy" relation (pg_policy)
+ * definition of the system "policy" relation (pg_policy)
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -20,14 +20,14 @@
CATALOG(pg_policy,3256)
{
- NameData polname; /* Policy name. */
- Oid polrelid; /* Oid of the relation with policy. */
- char polcmd; /* One of ACL_*_CHR, or '*' for all */
+ NameData polname; /* Policy name. */
+ Oid polrelid; /* Oid of the relation with policy. */
+ char polcmd; /* One of ACL_*_CHR, or '*' for all */
#ifdef CATALOG_VARLEN
- Oid polroles[1]; /* Roles associated with policy, not-NULL */
- pg_node_tree polqual; /* Policy quals. */
- pg_node_tree polwithcheck; /* WITH CHECK quals. */
+ Oid polroles[1]; /* Roles associated with policy, not-NULL */
+ pg_node_tree polqual; /* Policy quals. */
+ pg_node_tree polwithcheck; /* WITH CHECK quals. */
#endif
} FormData_pg_policy;
@@ -39,7 +39,7 @@ CATALOG(pg_policy,3256)
typedef FormData_pg_policy *Form_pg_policy;
/* ----------------
- * compiler constants for pg_policy
+ * compiler constants for pg_policy
* ----------------
*/
#define Natts_pg_policy 6
@@ -48,6 +48,6 @@ typedef FormData_pg_policy *Form_pg_policy;
#define Anum_pg_policy_polcmd 3
#define Anum_pg_policy_polroles 4
#define Anum_pg_policy_polqual 5
-#define Anum_pg_policy_polwithcheck 6
+#define Anum_pg_policy_polwithcheck 6
-#endif /* PG_POLICY_H */
+#endif /* PG_POLICY_H */
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 0405027e01..c0aab38292 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -66,8 +66,8 @@ CATALOG(pg_proc,1255) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81) BKI_SCHEMA_MACRO
text proargnames[1]; /* parameter names (NULL if no names) */
pg_node_tree proargdefaults;/* list of expression trees for argument
* defaults (NULL if none) */
- Oid protrftypes[1]; /* types for which to apply transforms */
- text prosrc BKI_FORCE_NOT_NULL; /* procedure source text */
+ Oid protrftypes[1]; /* types for which to apply transforms */
+ text prosrc BKI_FORCE_NOT_NULL; /* procedure source text */
text probin; /* secondary procedure info (can be NULL) */
text proconfig[1]; /* procedure-local GUC settings */
aclitem proacl[1]; /* access permissions */
@@ -216,9 +216,9 @@ DATA(insert OID = 1246 ( charlt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16
DATA(insert OID = 72 ( charle PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charle _null_ _null_ _null_ ));
DATA(insert OID = 73 ( chargt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ chargt _null_ _null_ _null_ ));
DATA(insert OID = 74 ( charge PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charge _null_ _null_ _null_ ));
-DATA(insert OID = 77 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "18" _null_ _null_ _null_ _null_ _null_ chartoi4 _null_ _null_ _null_ ));
+DATA(insert OID = 77 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "18" _null_ _null_ _null_ _null_ _null_ chartoi4 _null_ _null_ _null_ ));
DESCR("convert char to int4");
-DATA(insert OID = 78 ( char PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 18 "23" _null_ _null_ _null_ _null_ _null_ i4tochar _null_ _null_ _null_ ));
+DATA(insert OID = 78 ( char PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 18 "23" _null_ _null_ _null_ _null_ _null_ i4tochar _null_ _null_ _null_ ));
DESCR("convert int4 to char");
DATA(insert OID = 79 ( nameregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameregexeq _null_ _null_ _null_ ));
@@ -267,8 +267,8 @@ DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0
DESCR("I/O");
DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ numeric_fac _null_ _null_ _null_ ));
-DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ ));
-DATA(insert OID = 116 ( box_below_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ ));
+DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ ));
+DATA(insert OID = 116 ( box_below_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ ));
DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ _null_ ));
DESCR("I/O");
@@ -425,13 +425,13 @@ DATA(insert OID = 233 ( dexp PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701
DESCR("natural exponential (e^x)");
DATA(insert OID = 234 ( dlog1 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dlog1 _null_ _null_ _null_ ));
DESCR("natural logarithm");
-DATA(insert OID = 235 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "21" _null_ _null_ _null_ _null_ _null_ i2tod _null_ _null_ _null_ ));
+DATA(insert OID = 235 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "21" _null_ _null_ _null_ _null_ _null_ i2tod _null_ _null_ _null_ ));
DESCR("convert int2 to float8");
-DATA(insert OID = 236 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "21" _null_ _null_ _null_ _null_ _null_ i2tof _null_ _null_ _null_ ));
+DATA(insert OID = 236 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "21" _null_ _null_ _null_ _null_ _null_ i2tof _null_ _null_ _null_ ));
DESCR("convert int2 to float4");
-DATA(insert OID = 237 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "701" _null_ _null_ _null_ _null_ _null_ dtoi2 _null_ _null_ _null_ ));
+DATA(insert OID = 237 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "701" _null_ _null_ _null_ _null_ _null_ dtoi2 _null_ _null_ _null_ ));
DESCR("convert float8 to int2");
-DATA(insert OID = 238 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "700" _null_ _null_ _null_ _null_ _null_ ftoi2 _null_ _null_ _null_ ));
+DATA(insert OID = 238 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "700" _null_ _null_ _null_ _null_ _null_ ftoi2 _null_ _null_ _null_ ));
DESCR("convert float4 to int2");
DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "628 628" _null_ _null_ _null_ _null_ _null_ line_distance _null_ _null_ _null_ ));
@@ -531,14 +531,14 @@ DATA(insert OID = 311 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 70
DESCR("convert float4 to float8");
DATA(insert OID = 312 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "701" _null_ _null_ _null_ _null_ _null_ dtof _null_ _null_ _null_ ));
DESCR("convert float8 to float4");
-DATA(insert OID = 313 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "21" _null_ _null_ _null_ _null_ _null_ i2toi4 _null_ _null_ _null_ ));
+DATA(insert OID = 313 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "21" _null_ _null_ _null_ _null_ _null_ i2toi4 _null_ _null_ _null_ ));
DESCR("convert int2 to int4");
-DATA(insert OID = 314 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "23" _null_ _null_ _null_ _null_ _null_ i4toi2 _null_ _null_ _null_ ));
+DATA(insert OID = 314 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "23" _null_ _null_ _null_ _null_ _null_ i4toi2 _null_ _null_ _null_ ));
DESCR("convert int4 to int2");
DATA(insert OID = 315 ( int2vectoreq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "22 22" _null_ _null_ _null_ _null_ _null_ int2vectoreq _null_ _null_ _null_ ));
DATA(insert OID = 316 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "23" _null_ _null_ _null_ _null_ _null_ i4tod _null_ _null_ _null_ ));
DESCR("convert int4 to float8");
-DATA(insert OID = 317 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "701" _null_ _null_ _null_ _null_ _null_ dtoi4 _null_ _null_ _null_ ));
+DATA(insert OID = 317 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "701" _null_ _null_ _null_ _null_ _null_ dtoi4 _null_ _null_ _null_ ));
DESCR("convert float8 to int4");
DATA(insert OID = 318 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "23" _null_ _null_ _null_ _null_ _null_ i4tof _null_ _null_ _null_ ));
DESCR("convert int4 to float4");
@@ -787,7 +787,7 @@ DATA(insert OID = 481 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "
DESCR("convert int4 to int8");
DATA(insert OID = 482 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "20" _null_ _null_ _null_ _null_ _null_ i8tod _null_ _null_ _null_ ));
DESCR("convert int8 to float8");
-DATA(insert OID = 483 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "701" _null_ _null_ _null_ _null_ _null_ dtoi8 _null_ _null_ _null_ ));
+DATA(insert OID = 483 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "701" _null_ _null_ _null_ _null_ _null_ dtoi8 _null_ _null_ _null_ ));
DESCR("convert float8 to int8");
/* OIDS 500 - 599 */
@@ -799,7 +799,7 @@ DESCR("hash");
DATA(insert OID = 652 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "20" _null_ _null_ _null_ _null_ _null_ i8tof _null_ _null_ _null_ ));
DESCR("convert int8 to float4");
-DATA(insert OID = 653 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "700" _null_ _null_ _null_ _null_ _null_ ftoi8 _null_ _null_ _null_ ));
+DATA(insert OID = 653 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "700" _null_ _null_ _null_ _null_ _null_ ftoi8 _null_ _null_ _null_ ));
DESCR("convert float4 to int8");
DATA(insert OID = 714 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "20" _null_ _null_ _null_ _null_ _null_ int82 _null_ _null_ _null_ ));
@@ -845,7 +845,7 @@ DATA(insert OID = 723 ( get_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23
DESCR("get bit");
DATA(insert OID = 724 ( set_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_ byteaSetBit _null_ _null_ _null_ ));
DESCR("set bit");
-DATA(insert OID = 749 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i 4 0 17 "17 17 23 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay _null_ _null_ _null_ ));
+DATA(insert OID = 749 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i 4 0 17 "17 17 23 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay _null_ _null_ _null_ ));
DESCR("substitute portion of string");
DATA(insert OID = 752 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 17 "17 17 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay_no_len _null_ _null_ _null_ ));
DESCR("substitute portion of string");
@@ -857,7 +857,7 @@ DATA(insert OID = 728 ( dist_cpoly PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0
DATA(insert OID = 729 ( poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "604 604" _null_ _null_ _null_ _null_ _null_ poly_distance _null_ _null_ _null_ ));
DATA(insert OID = 3275 ( dist_ppoly PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "600 604" _null_ _null_ _null_ _null_ _null_ dist_ppoly _null_ _null_ _null_ ));
DATA(insert OID = 3292 ( dist_polyp PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "604 600" _null_ _null_ _null_ _null_ _null_ dist_polyp _null_ _null_ _null_ ));
-DATA(insert OID = 3290 ( dist_cpoint PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "718 600" _null_ _null_ _null_ _null_ _null_ dist_cpoint _null_ _null_ _null_ ));
+DATA(insert OID = 3290 ( dist_cpoint PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "718 600" _null_ _null_ _null_ _null_ _null_ dist_cpoint _null_ _null_ _null_ ));
DATA(insert OID = 740 ( text_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_lt _null_ _null_ _null_ ));
DATA(insert OID = 741 ( text_le PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_le _null_ _null_ _null_ ));
@@ -1000,7 +1000,7 @@ DATA(insert OID = 776 ( gistbulkdelete PGNSP PGUID 12 1 0 0 0 f f f f t f v
DESCR("gist(internal)");
DATA(insert OID = 2561 ( gistvacuumcleanup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gistvacuumcleanup _null_ _null_ _null_ ));
DESCR("gist(internal)");
-DATA(insert OID = 3280 ( gistcanreturn PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "2281 23" _null_ _null_ _null_ _null_ _null_ gistcanreturn _null_ _null_ _null_ ));
+DATA(insert OID = 3280 ( gistcanreturn PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "2281 23" _null_ _null_ _null_ _null_ _null_ gistcanreturn _null_ _null_ _null_ ));
DESCR("gist(internal)");
DATA(insert OID = 772 ( gistcostestimate PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gistcostestimate _null_ _null_ _null_ ));
DESCR("gist(internal)");
@@ -1054,12 +1054,12 @@ DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 7
DESCR("I/O");
DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ ));
-DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ ));
-DATA(insert OID = 890 ( cash_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_lt _null_ _null_ _null_ ));
-DATA(insert OID = 891 ( cash_le PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_le _null_ _null_ _null_ ));
-DATA(insert OID = 892 ( cash_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_gt _null_ _null_ _null_ ));
-DATA(insert OID = 893 ( cash_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ge _null_ _null_ _null_ ));
+DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ ));
+DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ ));
+DATA(insert OID = 890 ( cash_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_lt _null_ _null_ _null_ ));
+DATA(insert OID = 891 ( cash_le PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_le _null_ _null_ _null_ ));
+DATA(insert OID = 892 ( cash_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_gt _null_ _null_ _null_ ));
+DATA(insert OID = 893 ( cash_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ge _null_ _null_ _null_ ));
DATA(insert OID = 894 ( cash_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cash_pl _null_ _null_ _null_ ));
DATA(insert OID = 895 ( cash_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cash_mi _null_ _null_ _null_ ));
DATA(insert OID = 896 ( cash_mul_flt8 PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 701" _null_ _null_ _null_ _null_ _null_ cash_mul_flt8 _null_ _null_ _null_ ));
@@ -1069,16 +1069,16 @@ DESCR("larger of two");
DATA(insert OID = 899 ( cashsmaller PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cashsmaller _null_ _null_ _null_ ));
DESCR("smaller of two");
DATA(insert OID = 919 ( flt8_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "701 790" _null_ _null_ _null_ _null_ _null_ flt8_mul_cash _null_ _null_ _null_ ));
-DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ ));
+DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ ));
DESCR("output money amount as words");
DATA(insert OID = 3822 ( cash_div_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "790 790" _null_ _null_ _null_ _null_ _null_ cash_div_cash _null_ _null_ _null_ ));
DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ ));
DESCR("convert money to numeric");
DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ ));
DESCR("convert numeric to money");
-DATA(insert OID = 3811 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ ));
+DATA(insert OID = 3811 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ ));
DESCR("convert int4 to money");
-DATA(insert OID = 3812 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "20" _null_ _null_ _null_ _null_ _null_ int8_cash _null_ _null_ _null_ ));
+DATA(insert OID = 3812 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "20" _null_ _null_ _null_ _null_ _null_ int8_cash _null_ _null_ _null_ ));
DESCR("convert int8 to money");
/* OIDS 900 - 999 */
@@ -1131,8 +1131,8 @@ DESCR("read large object from offset for length");
DATA(insert OID = 3460 ( lo_put PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "26 20 17" _null_ _null_ _null_ _null_ _null_ lo_put _null_ _null_ _null_ ));
DESCR("write data at offset");
-DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "600 628" _null_ _null_ _null_ _null_ _null_ on_pl _null_ _null_ _null_ ));
-DATA(insert OID = 960 ( on_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_ on_sl _null_ _null_ _null_ ));
+DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "600 628" _null_ _null_ _null_ _null_ _null_ on_pl _null_ _null_ _null_ ));
+DATA(insert OID = 960 ( on_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_ on_sl _null_ _null_ _null_ ));
DATA(insert OID = 961 ( close_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "600 628" _null_ _null_ _null_ _null_ _null_ close_pl _null_ _null_ _null_ ));
DATA(insert OID = 962 ( close_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "601 628" _null_ _null_ _null_ _null_ _null_ close_sl _null_ _null_ _null_ ));
DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "628 603" _null_ _null_ _null_ _null_ _null_ close_lb _null_ _null_ _null_ ));
@@ -1140,7 +1140,7 @@ DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 6
DATA(insert OID = 964 ( lo_unlink PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 23 "26" _null_ _null_ _null_ _null_ _null_ lo_unlink _null_ _null_ _null_ ));
DESCR("large object unlink (delete)");
-DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_inter _null_ _null_ _null_ ));
+DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_inter _null_ _null_ _null_ ));
DATA(insert OID = 975 ( area PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "603" _null_ _null_ _null_ _null_ _null_ box_area _null_ _null_ _null_ ));
DESCR("box area");
DATA(insert OID = 976 ( width PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "603" _null_ _null_ _null_ _null_ _null_ box_width _null_ _null_ _null_ ));
@@ -1571,7 +1571,7 @@ DESCR("convert abstime to time");
DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ ));
DESCR("character length");
-DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ ));
+DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ ));
DESCR("character length");
DATA(insert OID = 1370 ( interval PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1186 "1083" _null_ _null_ _null_ _null_ _null_ time_interval _null_ _null_ _null_ ));
@@ -2046,7 +2046,7 @@ DATA(insert OID = 1716 ( pg_get_expr PGNSP PGUID 12 1 0 0 0 f f f f t f s 2
DESCR("deparse an encoded expression");
DATA(insert OID = 1665 ( pg_get_serial_sequence PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ pg_get_serial_sequence _null_ _null_ _null_ ));
DESCR("name of sequence for a serial column");
-DATA(insert OID = 2098 ( pg_get_functiondef PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_functiondef _null_ _null_ _null_ ));
+DATA(insert OID = 2098 ( pg_get_functiondef PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_functiondef _null_ _null_ _null_ ));
DESCR("definition of a function");
DATA(insert OID = 2162 ( pg_get_function_arguments PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_function_arguments _null_ _null_ _null_ ));
DESCR("argument list of a function");
@@ -2412,9 +2412,9 @@ DATA(insert OID = 1773 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "
DESCR("format int4 to text");
DATA(insert OID = 1774 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "20 25" _null_ _null_ _null_ _null_ _null_ int8_to_char _null_ _null_ _null_ ));
DESCR("format int8 to text");
-DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "700 25" _null_ _null_ _null_ _null_ _null_ float4_to_char _null_ _null_ _null_ ));
+DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "700 25" _null_ _null_ _null_ _null_ _null_ float4_to_char _null_ _null_ _null_ ));
DESCR("format float4 to text");
-DATA(insert OID = 1776 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "701 25" _null_ _null_ _null_ _null_ _null_ float8_to_char _null_ _null_ _null_ ));
+DATA(insert OID = 1776 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "701 25" _null_ _null_ _null_ _null_ _null_ float8_to_char _null_ _null_ _null_ ));
DESCR("format float8 to text");
DATA(insert OID = 1777 ( to_number PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 1700 "25 25" _null_ _null_ _null_ _null_ _null_ numeric_to_number _null_ _null_ _null_ ));
DESCR("convert text to numeric");
@@ -2552,7 +2552,7 @@ DATA(insert OID = 3388 ( numeric_poly_sum PGNSP PGUID 12 1 0 0 0 f f f f f f
DESCR("aggregate final function");
DATA(insert OID = 3389 ( numeric_poly_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
DESCR("aggregate final function");
-DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
+DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
DESCR("aggregate final function");
DATA(insert OID = 3391 ( numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
DESCR("aggregate final function");
@@ -2620,7 +2620,7 @@ DATA(insert OID = 3545 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0
DESCR("concatenate aggregate input into a bytea");
/* To ASCII conversion */
-DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ to_ascii_default _null_ _null_ _null_ ));
+DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ to_ascii_default _null_ _null_ _null_ ));
DESCR("encode text from DB encoding to ASCII text");
DATA(insert OID = 1846 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ to_ascii_enc _null_ _null_ _null_ ));
DESCR("encode text from encoding to ASCII text");
@@ -2697,21 +2697,21 @@ DESCR("current user privilege on sequence by seq name");
DATA(insert OID = 2186 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_id _null_ _null_ _null_ ));
DESCR("current user privilege on sequence by seq oid");
-DATA(insert OID = 3012 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 3012 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_name _null_ _null_ _null_ ));
DESCR("user privilege on column by username, rel name, col name");
-DATA(insert OID = 3013 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3013 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_attnum _null_ _null_ _null_ ));
DESCR("user privilege on column by username, rel name, col attnum");
-DATA(insert OID = 3014 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 3014 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_name _null_ _null_ _null_ ));
DESCR("user privilege on column by username, rel oid, col name");
-DATA(insert OID = 3015 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3015 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_attnum _null_ _null_ _null_ ));
DESCR("user privilege on column by username, rel oid, col attnum");
-DATA(insert OID = 3016 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 3016 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_name _null_ _null_ _null_ ));
DESCR("user privilege on column by user oid, rel name, col name");
-DATA(insert OID = 3017 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3017 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_attnum _null_ _null_ _null_ ));
DESCR("user privilege on column by user oid, rel name, col attnum");
-DATA(insert OID = 3018 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 3018 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_name _null_ _null_ _null_ ));
DESCR("user privilege on column by user oid, rel oid, col name");
-DATA(insert OID = 3019 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3019 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_attnum _null_ _null_ _null_ ));
DESCR("user privilege on column by user oid, rel oid, col attnum");
DATA(insert OID = 3020 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 16 "25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name _null_ _null_ _null_ ));
DESCR("current user privilege on column by rel name, col name");
@@ -2906,9 +2906,9 @@ DESCR("statistics: self execution time of function in current transaction, in ms
DATA(insert OID = 3788 ( pg_stat_get_snapshot_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_snapshot_timestamp _null_ _null_ _null_ ));
DESCR("statistics: timestamp of the current statistics snapshot");
-DATA(insert OID = 2230 ( pg_stat_clear_snapshot PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_clear_snapshot _null_ _null_ _null_ ));
+DATA(insert OID = 2230 ( pg_stat_clear_snapshot PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_clear_snapshot _null_ _null_ _null_ ));
DESCR("statistics: discard current transaction's statistics snapshot");
-DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ ));
+DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics for current database");
DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics shared across the cluster");
@@ -3071,7 +3071,7 @@ DATA(insert OID = 2078 ( set_config PGNSP PGUID 12 1 0 0 0 f f f f f f v 3 0 2
DESCR("SET X as a function");
DATA(insert OID = 2084 ( pg_show_all_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,25,25,25,25,25,25,25,25,25,25,1009,25,25,25,23,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{name,setting,unit,category,short_desc,extra_desc,context,vartype,source,min_val,max_val,enumvals,boot_val,reset_val,sourcefile,sourceline,pending_restart}" _null_ _null_ show_all_settings _null_ _null_ _null_ ));
DESCR("SHOW ALL as a function");
-DATA(insert OID = 3329 ( pg_show_all_file_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,23,23,25,25}" "{o,o,o,o,o}" "{sourcefile,sourceline,seqno,name,setting}" _null_ _null_ show_all_file_settings _null_ _null_ _null_ ));
+DATA(insert OID = 3329 ( pg_show_all_file_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,23,23,25,25}" "{o,o,o,o,o}" "{sourcefile,sourceline,seqno,name,setting}" _null_ _null_ show_all_file_settings _null_ _null_ _null_ ));
DESCR("show config file settings");
DATA(insert OID = 1371 ( pg_lock_status PGNSP PGUID 12 1 1000 0 0 f f f f t t v 0 0 2249 "" "{25,26,26,23,21,25,28,26,26,21,25,23,25,16,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{locktype,database,relation,page,tuple,virtualxid,transactionid,classid,objid,objsubid,virtualtransaction,pid,mode,granted,fastpath}" _null_ _null_ pg_lock_status _null_ _null_ _null_ ));
DESCR("view system lock information");
@@ -3092,7 +3092,7 @@ DESCR("get identification of SQL object");
DATA(insert OID = 3839 ( pg_identify_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,25,25,25}" "{i,i,i,o,o,o,o}" "{classid,objid,subobjid,type,schema,name,identity}" _null_ _null_ pg_identify_object _null_ _null_ _null_ ));
DESCR("get machine-parseable identification of SQL object");
-DATA(insert OID = 3382 ( pg_identify_object_as_address PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,1009,1009}" "{i,i,i,o,o,o}" "{classid,objid,subobjid,type,object_names,object_args}" _null_ _null_ pg_identify_object_as_address _null_ _null_ _null_ ));
+DATA(insert OID = 3382 ( pg_identify_object_as_address PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,1009,1009}" "{i,i,i,o,o,o}" "{classid,objid,subobjid,type,object_names,object_args}" _null_ _null_ pg_identify_object_as_address _null_ _null_ _null_ ));
DESCR("get identification of SQL object for pg_get_object_address()");
DATA(insert OID = 3954 ( pg_get_object_address PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "25 1009 1009" "{25,1009,1009,26,26,23}" "{i,i,i,o,o,o}" "{type,name,args,classid,objid,subobjid}" _null_ _null_ pg_get_object_address _null_ _null_ _null_ ));
@@ -3902,9 +3902,9 @@ DESCR("I/O");
DATA(insert OID = 2455 ( regtypesend PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "2206" _null_ _null_ _null_ _null_ _null_ regtypesend _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 4094 ( regrolerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4096 "2281" _null_ _null_ _null_ _null_ _null_ regrolerecv _null_ _null_ _null_ ));
+DATA(insert OID = 4094 ( regrolerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4096 "2281" _null_ _null_ _null_ _null_ _null_ regrolerecv _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 4095 ( regrolesend PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "4096" _null_ _null_ _null_ _null_ _null_ regrolesend _null_ _null_ _null_ ));
+DATA(insert OID = 4095 ( regrolesend PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "4096" _null_ _null_ _null_ _null_ _null_ regrolesend _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 4087 ( regnamespacerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4089 "2281" _null_ _null_ _null_ _null_ _null_ regnamespacerecv _null_ _null_ _null_ ));
DESCR("I/O");
@@ -4232,7 +4232,7 @@ DATA(insert OID = 4106 ( brin_inclusion_add_value PGNSP PGUID 12 1 0 0 0 f f f f
DESCR("BRIN inclusion support");
DATA(insert OID = 4107 ( brin_inclusion_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_consistent _null_ _null_ _null_ ));
DESCR("BRIN inclusion support");
-DATA(insert OID = 4108 ( brin_inclusion_union PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_union _null_ _null_ _null_ ));
+DATA(insert OID = 4108 ( brin_inclusion_union PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_union _null_ _null_ _null_ ));
DESCR("BRIN inclusion support");
/* userlock replacements */
@@ -4574,7 +4574,7 @@ DATA(insert OID = 3657 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f
DESCR("GIN tsvector support");
DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
-DATA(insert OID = 3921 ( gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 18 "2281 21 3615 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ ));
+DATA(insert OID = 3921 ( gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 18 "2281 21 3615 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
DATA(insert OID = 3724 ( gin_cmp_tslexeme PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ gin_cmp_tslexeme _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
@@ -4584,7 +4584,7 @@ DATA(insert OID = 3077 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t
DESCR("GIN tsvector support (obsolete)");
DATA(insert OID = 3087 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 2281 "3615 2281 21 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_5args _null_ _null_ _null_ ));
DESCR("GIN tsvector support (obsolete)");
-DATA(insert OID = 3088 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 16 "2281 21 3615 23 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_6args _null_ _null_ _null_ ));
+DATA(insert OID = 3088 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 16 "2281 21 3615 23 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_6args _null_ _null_ _null_ ));
DESCR("GIN tsvector support (obsolete)");
DATA(insert OID = 3662 ( tsquery_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_lt _null_ _null_ _null_ ));
@@ -4764,9 +4764,9 @@ DATA(insert OID = 3264 ( jsonb_object PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0
DESCR("map text array of key value pairs to jsonb object");
DATA(insert OID = 3787 ( to_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ to_jsonb _null_ _null_ _null_ ));
DESCR("map input to jsonb");
-DATA(insert OID = 3265 ( jsonb_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ jsonb_agg_transfn _null_ _null_ _null_ ));
+DATA(insert OID = 3265 ( jsonb_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ jsonb_agg_transfn _null_ _null_ _null_ ));
DESCR("jsonb aggregate transition function");
-DATA(insert OID = 3266 ( jsonb_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_agg_finalfn _null_ _null_ _null_ ));
+DATA(insert OID = 3266 ( jsonb_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_agg_finalfn _null_ _null_ _null_ ));
DESCR("jsonb aggregate final function");
DATA(insert OID = 3267 ( jsonb_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("aggregate input into jsonb");
@@ -4776,15 +4776,15 @@ DATA(insert OID = 3269 ( jsonb_object_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f
DESCR("jsonb object aggregate final function");
DATA(insert OID = 3270 ( jsonb_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 3802 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("aggregate inputs into jsonb object");
-DATA(insert OID = 3271 ( jsonb_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ ));
+DATA(insert OID = 3271 ( jsonb_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ ));
DESCR("build a jsonb array from any inputs");
-DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
DESCR("build an empty jsonb array");
-DATA(insert OID = 3273 ( jsonb_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ ));
+DATA(insert OID = 3273 ( jsonb_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ ));
DESCR("build a jsonb object from pairwise key/value inputs");
-DATA(insert OID = 3274 ( jsonb_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_object_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3274 ( jsonb_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_object_noargs _null_ _null_ _null_ ));
DESCR("build an empty jsonb object");
-DATA(insert OID = 3262 ( jsonb_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 3802 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_strip_nulls _null_ _null_ _null_ ));
+DATA(insert OID = 3262 ( jsonb_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 3802 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_strip_nulls _null_ _null_ _null_ ));
DESCR("remove object fields with null values from jsonb");
DATA(insert OID = 3478 ( jsonb_object_field PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ jsonb_object_field _null_ _null_ _null_ ));
@@ -4859,7 +4859,7 @@ DATA(insert OID = 3301 ( jsonb_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i 2
DATA(insert OID = 3302 ( jsonb_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 25" _null_ _null_ _null_ _null_ _null_ jsonb_delete _null_ _null_ _null_ ));
DATA(insert OID = 3303 ( jsonb_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 23" _null_ _null_ _null_ _null_ _null_ jsonb_delete_idx _null_ _null_ _null_ ));
DATA(insert OID = 3304 ( jsonb_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_delete_path _null_ _null_ _null_ ));
-DATA(insert OID = 3305 ( jsonb_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 3802 "3802 1009 3802" _null_ _null_ _null_ _null_ _null_ jsonb_replace _null_ _null_ _null_ ));
+DATA(insert OID = 3305 ( jsonb_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 3802 "3802 1009 3802" _null_ _null_ _null_ _null_ _null_ jsonb_replace _null_ _null_ _null_ ));
DESCR("Replace part of a jsonb");
DATA(insert OID = 3306 ( jsonb_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_pretty _null_ _null_ _null_ ));
DESCR("Indented text from jsonb");
@@ -5227,7 +5227,7 @@ DATA(insert OID = 3982 ( percentile_cont PGNSP PGUID 12 1 0 0 0 t f f f f f i 2
DESCR("multiple continuous percentiles");
DATA(insert OID = 3983 ( percentile_cont_interval_multi_final PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 1187 "2281 1022" _null_ _null_ _null_ _null_ _null_ percentile_cont_interval_multi_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
-DATA(insert OID = 3984 ( mode PGNSP PGUID 12 1 0 0 0 t f f f f f i 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 3984 ( mode PGNSP PGUID 12 1 0 0 0 t f f f f f i 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("most common value");
DATA(insert OID = 3985 ( mode_final PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2283 "2281 2283" _null_ _null_ _null_ _null_ _null_ mode_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
@@ -5253,11 +5253,11 @@ DESCR("aggregate final function");
/* pg_upgrade support */
DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
-DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
-DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
-DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
@@ -5265,9 +5265,9 @@ DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID
DESCR("for use by pg_upgrade");
DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
-DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
-DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID 12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID 12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
DESCR("for use by pg_upgrade");
/* replication/origin.h */
@@ -5308,11 +5308,11 @@ DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100
DESCR("get progress for all replication origins");
/* tablesample */
-DATA(insert OID = 3335 ( tsm_system_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_system_init _null_ _null_ _null_ ));
+DATA(insert OID = 3335 ( tsm_system_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_system_init _null_ _null_ _null_ ));
DESCR("tsm_system_init(internal)");
DATA(insert OID = 3336 ( tsm_system_nextblock PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nextblock _null_ _null_ _null_ ));
DESCR("tsm_system_nextblock(internal)");
-DATA(insert OID = 3337 ( tsm_system_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nexttuple _null_ _null_ _null_ ));
+DATA(insert OID = 3337 ( tsm_system_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nexttuple _null_ _null_ _null_ ));
DESCR("tsm_system_nexttuple(internal)");
DATA(insert OID = 3338 ( tsm_system_end PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_end _null_ _null_ _null_ ));
DESCR("tsm_system_end(internal)");
@@ -5321,11 +5321,11 @@ DESCR("tsm_system_reset(internal)");
DATA(insert OID = 3340 ( tsm_system_cost PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ tsm_system_cost _null_ _null_ _null_ ));
DESCR("tsm_system_cost(internal)");
-DATA(insert OID = 3341 ( tsm_bernoulli_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_init _null_ _null_ _null_ ));
+DATA(insert OID = 3341 ( tsm_bernoulli_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_init _null_ _null_ _null_ ));
DESCR("tsm_bernoulli_init(internal)");
DATA(insert OID = 3342 ( tsm_bernoulli_nextblock PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nextblock _null_ _null_ _null_ ));
DESCR("tsm_bernoulli_nextblock(internal)");
-DATA(insert OID = 3343 ( tsm_bernoulli_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nexttuple _null_ _null_ _null_ ));
+DATA(insert OID = 3343 ( tsm_bernoulli_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nexttuple _null_ _null_ _null_ ));
DESCR("tsm_bernoulli_nexttuple(internal)");
DATA(insert OID = 3344 ( tsm_bernoulli_end PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_end _null_ _null_ _null_ ));
DESCR("tsm_bernoulli_end(internal)");
diff --git a/src/include/catalog/pg_replication_origin.h b/src/include/catalog/pg_replication_origin.h
index 7610c91119..85061c3633 100644
--- a/src/include/catalog/pg_replication_origin.h
+++ b/src/include/catalog/pg_replication_origin.h
@@ -38,7 +38,7 @@ CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
* records. For this reason we don't use a normal Oid column here, since
* we need to handle allocation of new values manually.
*/
- Oid roident;
+ Oid roident;
/*
* Variable-length fields start here, but we allow direct access to
@@ -46,9 +46,9 @@ CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
*/
/* external, free-format, name */
- text roname BKI_FORCE_NOT_NULL;
+ text roname BKI_FORCE_NOT_NULL;
-#ifdef CATALOG_VARLEN /* further variable-length fields */
+#ifdef CATALOG_VARLEN /* further variable-length fields */
#endif
} FormData_pg_replication_origin;
diff --git a/src/include/catalog/pg_seclabel.h b/src/include/catalog/pg_seclabel.h
index c9f5b0cfdf..e13c48d787 100644
--- a/src/include/catalog/pg_seclabel.h
+++ b/src/include/catalog/pg_seclabel.h
@@ -27,8 +27,8 @@ CATALOG(pg_seclabel,3596) BKI_WITHOUT_OIDS
int32 objsubid; /* column number, or 0 if not used */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text provider BKI_FORCE_NOT_NULL; /* name of label provider */
- text label BKI_FORCE_NOT_NULL; /* security label of the object */
+ text provider BKI_FORCE_NOT_NULL; /* name of label provider */
+ text label BKI_FORCE_NOT_NULL; /* security label of the object */
#endif
} FormData_pg_seclabel;
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index c524099898..bff2850dba 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -44,7 +44,7 @@ CATALOG(pg_shdescription,2396) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
Oid classoid; /* OID of table containing object */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text description BKI_FORCE_NOT_NULL; /* description of object */
+ text description BKI_FORCE_NOT_NULL; /* description of object */
#endif
} FormData_pg_shdescription;
diff --git a/src/include/catalog/pg_shseclabel.h b/src/include/catalog/pg_shseclabel.h
index 3977b42f87..0ff41f34bc 100644
--- a/src/include/catalog/pg_shseclabel.h
+++ b/src/include/catalog/pg_shseclabel.h
@@ -26,8 +26,8 @@ CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
Oid classoid; /* OID of table containing the shared object */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text provider BKI_FORCE_NOT_NULL; /* name of label provider */
- text label BKI_FORCE_NOT_NULL; /* security label of the object */
+ text provider BKI_FORCE_NOT_NULL; /* name of label provider */
+ text label BKI_FORCE_NOT_NULL; /* security label of the object */
#endif
} FormData_pg_shseclabel;
diff --git a/src/include/catalog/pg_tablesample_method.h b/src/include/catalog/pg_tablesample_method.h
index 968d1e696a..b422414d08 100644
--- a/src/include/catalog/pg_tablesample_method.h
+++ b/src/include/catalog/pg_tablesample_method.h
@@ -23,21 +23,24 @@
* typedef struct FormData_pg_tablesample_method
* ----------------
*/
-#define TableSampleMethodRelationId 3330
+#define TableSampleMethodRelationId 3330
CATALOG(pg_tablesample_method,3330)
{
NameData tsmname; /* tablesample method name */
- bool tsmseqscan; /* does this method scan whole table sequentially? */
+ bool tsmseqscan; /* does this method scan whole table
+ * sequentially? */
bool tsmpagemode; /* does this method scan page at a time? */
regproc tsminit; /* init scan function */
- regproc tsmnextblock; /* function returning next block to sample
- or InvalidBlockOffset if finished */
- regproc tsmnexttuple; /* function returning next tuple offset from current block
- or InvalidOffsetNumber if end of the block was reacher */
- regproc tsmexaminetuple; /* optional function which can examine tuple contents and
- decide if tuple should be returned or not */
- regproc tsmend; /* end scan function*/
+ regproc tsmnextblock; /* function returning next block to sample or
+ * InvalidBlockOffset if finished */
+ regproc tsmnexttuple; /* function returning next tuple offset from
+ * current block or InvalidOffsetNumber if end
+ * of the block was reacher */
+ regproc tsmexaminetuple;/* optional function which can examine tuple
+ * contents and decide if tuple should be
+ * returned or not */
+ regproc tsmend; /* end scan function */
regproc tsmreset; /* reset state - used by rescan */
regproc tsmcost; /* costing function */
} FormData_pg_tablesample_method;
diff --git a/src/include/catalog/pg_transform.h b/src/include/catalog/pg_transform.h
index 0e433cf4ae..86e72b3c82 100644
--- a/src/include/catalog/pg_transform.h
+++ b/src/include/catalog/pg_transform.h
@@ -22,7 +22,7 @@
* typedef struct FormData_pg_transform
* ----------------
*/
-#define TransformRelationId 3576
+#define TransformRelationId 3576
CATALOG(pg_transform,3576)
{
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index bff8fcfdda..a2e303f996 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -57,7 +57,7 @@ CATALOG(pg_trigger,2620)
int2vector tgattr; /* column numbers, if trigger is on columns */
#ifdef CATALOG_VARLEN
- bytea tgargs BKI_FORCE_NOT_NULL; /* first\000second\000tgnargs\000 */
+ bytea tgargs BKI_FORCE_NOT_NULL; /* first\000second\000tgnargs\000 */
pg_node_tree tgqual; /* WHEN expression, or NULL if none */
#endif
} FormData_pg_trigger;
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 4284a704d3..da123f6c49 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -364,7 +364,7 @@ DATA(insert OID = 194 ( pg_node_tree PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node
DESCR("string representing an internal node tree");
#define PGNODETREEOID 194
-DATA(insert OID = 32 ( pg_ddl_command PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 32 ( pg_ddl_command PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
DESCR("internal type for passing CollectedCommand");
#define PGDDLCOMMANDOID 32
@@ -568,7 +568,7 @@ DATA(insert OID = 2206 ( regtype PGNSP PGUID 4 t b N f t \054 0 0 2211 regty
DESCR("registered type");
#define REGTYPEOID 2206
-DATA(insert OID = 4096 ( regrole PGNSP PGUID 4 t b N f t \054 0 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4096 ( regrole PGNSP PGUID 4 t b N f t \054 0 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
DESCR("registered role");
#define REGROLEOID 4096
@@ -582,7 +582,7 @@ DATA(insert OID = 2209 ( _regoperator PGNSP PGUID -1 f b A f t \054 0 2204 0 ar
DATA(insert OID = 2210 ( _regclass PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
DATA(insert OID = 2211 ( _regtype PGNSP PGUID -1 f b A f t \054 0 2206 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
#define REGTYPEARRAYOID 2211
-DATA(insert OID = 4097 ( _regrole PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4097 ( _regrole PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
/* uuid */
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index d6257250cb..dcb6c082c5 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -32,7 +32,7 @@ extern ObjectAddress DefineIndex(Oid relationId,
extern Oid ReindexIndex(RangeVar *indexRelation, int options);
extern Oid ReindexTable(RangeVar *relation, int options);
extern void ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
- int options);
+ int options);
extern char *makeObjectName(const char *name1, const char *name2,
const char *label);
extern char *ChooseRelationName(const char *name1, const char *name2,
@@ -51,13 +51,13 @@ extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType);
extern ObjectAddress AlterFunction(AlterFunctionStmt *stmt);
extern ObjectAddress CreateCast(CreateCastStmt *stmt);
extern void DropCastById(Oid castOid);
-extern Oid CreateTransform(CreateTransformStmt *stmt);
+extern Oid CreateTransform(CreateTransformStmt *stmt);
extern void DropTransformById(Oid transformOid);
extern void IsThereFunctionInNamespace(const char *proname, int pronargs,
oidvector *proargtypes, Oid nspOid);
extern void ExecuteDoStmt(DoStmt *stmt);
extern Oid get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok);
-extern Oid get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok);
+extern Oid get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok);
extern void interpret_function_parameter_list(List *parameters,
Oid languageOid,
bool is_aggregate,
diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h
index 579e1ef8bd..8ba7db92f2 100644
--- a/src/include/commands/event_trigger.h
+++ b/src/include/commands/event_trigger.h
@@ -72,7 +72,7 @@ extern void EventTriggerCollectSimpleCommand(ObjectAddress address,
extern void EventTriggerAlterTableStart(Node *parsetree);
extern void EventTriggerAlterTableRelid(Oid objectId);
extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd,
- ObjectAddress address);
+ ObjectAddress address);
extern void EventTriggerAlterTableEnd(void);
extern void EventTriggerCollectGrant(InternalGrant *istmt);
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 4df44d0242..26fcc5b643 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -84,7 +84,7 @@ extern void ExplainSeparatePlans(ExplainState *es);
extern void ExplainPropertyList(const char *qlabel, List *data,
ExplainState *es);
extern void ExplainPropertyListNested(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState *es);
extern void ExplainPropertyText(const char *qlabel, const char *value,
ExplainState *es);
extern void ExplainPropertyInteger(const char *qlabel, int value,
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 4fb91e79cb..e3a31afdf7 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -135,16 +135,16 @@ typedef struct VacAttrStats
*/
typedef struct VacuumParams
{
- int freeze_min_age; /* min freeze age, -1 to use default */
- int freeze_table_age; /* age at which to scan whole table */
- int multixact_freeze_min_age; /* min multixact freeze age,
- * -1 to use default */
- int multixact_freeze_table_age; /* multixact age at which to
- * scan whole table */
- bool is_wraparound; /* force a for-wraparound vacuum */
- int log_min_duration; /* minimum execution threshold in ms at
- * which verbose logs are activated,
- * -1 to use default */
+ int freeze_min_age; /* min freeze age, -1 to use default */
+ int freeze_table_age; /* age at which to scan whole table */
+ int multixact_freeze_min_age; /* min multixact freeze age,
+ * -1 to use default */
+ int multixact_freeze_table_age; /* multixact age at which to
+ * scan whole table */
+ bool is_wraparound; /* force a for-wraparound vacuum */
+ int log_min_duration; /* minimum execution threshold in ms
+ * at which verbose logs are
+ * activated, -1 to use default */
} VacuumParams;
/* GUC parameters */
diff --git a/src/include/common/fe_memutils.h b/src/include/common/fe_memutils.h
index 51f12eb825..36882035a1 100644
--- a/src/include/common/fe_memutils.h
+++ b/src/include/common/fe_memutils.h
@@ -13,8 +13,8 @@
* Flags for pg_malloc_extended and palloc_extended, deliberately named
* the same as the backend flags.
*/
-#define MCXT_ALLOC_HUGE 0x01 /* allow huge allocation (> 1 GB)
- * not actually used for frontends */
+#define MCXT_ALLOC_HUGE 0x01 /* allow huge allocation (> 1 GB) not
+ * actually used for frontends */
#define MCXT_ALLOC_NO_OOM 0x02 /* no failure if out-of-memory */
#define MCXT_ALLOC_ZERO 0x04 /* zero allocated memory */
diff --git a/src/include/common/pg_lzcompress.h b/src/include/common/pg_lzcompress.h
index 52bcaf14b1..dbd51d58ef 100644
--- a/src/include/common/pg_lzcompress.h
+++ b/src/include/common/pg_lzcompress.h
@@ -86,6 +86,6 @@ extern const PGLZ_Strategy *const PGLZ_strategy_always;
extern int32 pglz_compress(const char *source, int32 slen, char *dest,
const PGLZ_Strategy *strategy);
extern int32 pglz_decompress(const char *source, int32 slen, char *dest,
- int32 rawsize);
+ int32 rawsize);
#endif /* _PG_LZCOMPRESS_H_ */
diff --git a/src/include/common/restricted_token.h b/src/include/common/restricted_token.h
index e24374483c..272ad9b21d 100644
--- a/src/include/common/restricted_token.h
+++ b/src/include/common/restricted_token.h
@@ -2,8 +2,8 @@
* restricted_token.h
* helper routine to ensure restricted token on Windows
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/common/restricted_token.h
*/
@@ -14,11 +14,11 @@
* On Windows make sure that we are running with a restricted token,
* On other platforms do nothing.
*/
-void get_restricted_token(const char *progname);
+void get_restricted_token(const char *progname);
#ifdef WIN32
/* Create a restricted token and execute the specified process with it. */
-HANDLE CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char *progname);
+HANDLE CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char *progname);
#endif
#endif /* COMMON_RESTRICTED_TOKEN_H */
diff --git a/src/include/common/string.h b/src/include/common/string.h
index 023385856f..9f485c355a 100644
--- a/src/include/common/string.h
+++ b/src/include/common/string.h
@@ -2,8 +2,8 @@
* string.h
* string handling helpers
*
- * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
+ * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/common/string.h
*/
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index e60ab9fd96..193a654627 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -369,7 +369,7 @@ extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid,
EState *estate, bool noDupErr, bool *specConflict,
List *arbiterIndexes);
extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate,
- ItemPointer conflictTid, List *arbiterIndexes);
+ ItemPointer conflictTid, List *arbiterIndexes);
extern void check_exclusion_constraint(Relation heap, Relation index,
IndexInfo *indexInfo,
ItemPointer tupleid,
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 71099b15bb..9d0b85c77d 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -108,14 +108,15 @@ typedef struct HashSkewBucket
*/
typedef struct HashMemoryChunkData
{
- int ntuples; /* number of tuples stored in this chunk */
- size_t maxlen; /* size of the buffer holding the tuples */
- size_t used; /* number of buffer bytes already used */
+ int ntuples; /* number of tuples stored in this chunk */
+ size_t maxlen; /* size of the buffer holding the tuples */
+ size_t used; /* number of buffer bytes already used */
- struct HashMemoryChunkData *next; /* pointer to the next chunk (linked list) */
+ struct HashMemoryChunkData *next; /* pointer to the next chunk (linked
+ * list) */
char data[FLEXIBLE_ARRAY_MEMBER]; /* buffer allocated at the end */
-} HashMemoryChunkData;
+} HashMemoryChunkData;
typedef struct HashMemoryChunkData *HashMemoryChunk;
@@ -127,8 +128,9 @@ typedef struct HashJoinTableData
int nbuckets; /* # buckets in the in-memory hash table */
int log2_nbuckets; /* its log2 (nbuckets must be a power of 2) */
- int nbuckets_original; /* # buckets when starting the first hash */
- int nbuckets_optimal; /* optimal # buckets (per batch) */
+ int nbuckets_original; /* # buckets when starting the first
+ * hash */
+ int nbuckets_optimal; /* optimal # buckets (per batch) */
int log2_nbuckets_optimal; /* same as log2_nbuckets optimal */
/* buckets[i] is head of list of tuples in i'th in-memory bucket */
@@ -183,7 +185,7 @@ typedef struct HashJoinTableData
MemoryContext batchCxt; /* context for this-batch-only storage */
/* used for dense allocation of tuples (into linked chunks) */
- HashMemoryChunk chunks; /* one list for the whole batch */
+ HashMemoryChunk chunks; /* one list for the whole batch */
} HashJoinTableData;
#endif /* HASHJOIN_H */
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index b9a5c40f59..4e8f68c7ce 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -298,7 +298,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
#define PG_RETURN_INT32(x) return Int32GetDatum(x)
#define PG_RETURN_UINT32(x) return UInt32GetDatum(x)
#define PG_RETURN_INT16(x) return Int16GetDatum(x)
-#define PG_RETURN_UINT16(x) return UInt16GetDatum(x)
+#define PG_RETURN_UINT16(x) return UInt16GetDatum(x)
#define PG_RETURN_CHAR(x) return CharGetDatum(x)
#define PG_RETURN_BOOL(x) return BoolGetDatum(x)
#define PG_RETURN_OID(x) return ObjectIdGetDatum(x)
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 694f9ddf6d..5dd556baf9 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -176,7 +176,7 @@ extern int get_func_arg_info(HeapTuple procTup,
extern int get_func_input_arg_names(Datum proargnames, Datum proargmodes,
char ***arg_names);
-extern int get_func_trftypes(HeapTuple procTup, Oid **p_trftypes);
+extern int get_func_trftypes(HeapTuple procTup, Oid **p_trftypes);
extern char *get_func_result_name(Oid functionId);
extern TupleDesc build_function_result_tupdesc_d(Datum proallargtypes,
diff --git a/src/include/lib/bipartite_match.h b/src/include/lib/bipartite_match.h
index c80f9bfdd0..373bbede1e 100644
--- a/src/include/lib/bipartite_match.h
+++ b/src/include/lib/bipartite_match.h
@@ -39,6 +39,6 @@ typedef struct bipartite_match_state
BipartiteMatchState *BipartiteMatch(int u_size, int v_size, short **adjacency);
-void BipartiteMatchFree(BipartiteMatchState *state);
+void BipartiteMatchFree(BipartiteMatchState *state);
#endif /* BIPARTITE_MATCH_H */
diff --git a/src/include/lib/hyperloglog.h b/src/include/lib/hyperloglog.h
index a6cbffc4c3..fd8280c5b0 100644
--- a/src/include/lib/hyperloglog.h
+++ b/src/include/lib/hyperloglog.h
@@ -60,7 +60,7 @@ typedef struct hyperLogLogState
} hyperLogLogState;
extern void initHyperLogLog(hyperLogLogState *cState, uint8 bwidth);
-extern void addHyperLogLog(hyperLogLogState *cState, uint32 hash);
+extern void addHyperLogLog(hyperLogLogState *cState, uint32 hash);
extern double estimateHyperLogLog(hyperLogLogState *cState);
extern void mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState);
diff --git a/src/include/lib/pairingheap.h b/src/include/lib/pairingheap.h
index eb1856a7c1..e7713a211f 100644
--- a/src/include/lib/pairingheap.h
+++ b/src/include/lib/pairingheap.h
@@ -58,8 +58,8 @@ typedef struct pairingheap_node
* and >0 iff a > b. For a min-heap, the conditions are reversed.
*/
typedef int (*pairingheap_comparator) (const pairingheap_node *a,
- const pairingheap_node *b,
- void *arg);
+ const pairingheap_node *b,
+ void *arg);
/*
* A pairing heap.
@@ -71,12 +71,12 @@ typedef int (*pairingheap_comparator) (const pairingheap_node *a,
typedef struct pairingheap
{
pairingheap_comparator ph_compare; /* comparison function */
- void *ph_arg; /* opaque argument to ph_compare */
- pairingheap_node *ph_root; /* current root of the heap */
+ void *ph_arg; /* opaque argument to ph_compare */
+ pairingheap_node *ph_root; /* current root of the heap */
} pairingheap;
extern pairingheap *pairingheap_allocate(pairingheap_comparator compare,
- void *arg);
+ void *arg);
extern void pairingheap_free(pairingheap *heap);
extern void pairingheap_add(pairingheap *heap, pairingheap_node *node);
extern pairingheap_node *pairingheap_first(pairingheap *heap);
@@ -85,8 +85,8 @@ extern void pairingheap_remove(pairingheap *heap, pairingheap_node *node);
#ifdef PAIRINGHEAP_DEBUG
extern char *pairingheap_dump(pairingheap *heap,
- void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
- void *opaque);
+ void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+ void *opaque);
#endif
/* Resets the heap to be empty. */
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index f323ed8710..6171ef3a1f 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -207,12 +207,12 @@ typedef struct Port
* SSL implementation (e.g. be-secure-openssl.c)
*/
extern void be_tls_init(void);
-extern int be_tls_open_server(Port *port);
+extern int be_tls_open_server(Port *port);
extern void be_tls_close(Port *port);
extern ssize_t be_tls_read(Port *port, void *ptr, size_t len, int *waitfor);
extern ssize_t be_tls_write(Port *port, void *ptr, size_t len, int *waitfor);
-extern int be_tls_get_cipher_bits(Port *port);
+extern int be_tls_get_cipher_bits(Port *port);
extern bool be_tls_get_compression(Port *port);
extern void be_tls_get_version(Port *port, char *ptr, size_t len);
extern void be_tls_get_cipher(Port *port, char *ptr, size_t len);
diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h
index 8fa896eb39..c408e5b551 100644
--- a/src/include/libpq/libpq.h
+++ b/src/include/libpq/libpq.h
@@ -23,19 +23,19 @@
typedef struct
{
- void (*comm_reset)(void);
- int (*flush)(void);
- int (*flush_if_writable)(void);
- bool (*is_send_pending)(void);
- int (*putmessage)(char msgtype, const char *s, size_t len);
- void (*putmessage_noblock)(char msgtype, const char *s, size_t len);
- void (*startcopyout)(void);
- void (*endcopyout)(bool errorAbort);
+ void (*comm_reset) (void);
+ int (*flush) (void);
+ int (*flush_if_writable) (void);
+ bool (*is_send_pending) (void);
+ int (*putmessage) (char msgtype, const char *s, size_t len);
+ void (*putmessage_noblock) (char msgtype, const char *s, size_t len);
+ void (*startcopyout) (void);
+ void (*endcopyout) (bool errorAbort);
} PQcommMethods;
extern PGDLLIMPORT PQcommMethods *PqCommMethods;
-#define pq_comm_reset() (PqCommMethods->comm_reset())
+#define pq_comm_reset() (PqCommMethods->comm_reset())
#define pq_flush() (PqCommMethods->flush())
#define pq_flush_if_writable() (PqCommMethods->flush_if_writable())
#define pq_is_send_pending() (PqCommMethods->is_send_pending())
@@ -79,8 +79,8 @@ extern char *ssl_key_file;
extern char *ssl_ca_file;
extern char *ssl_crl_file;
-extern int (*pq_putmessage_hook)(char msgtype, const char *s, size_t len);
-extern int (*pq_flush_hook)(void);
+extern int (*pq_putmessage_hook) (char msgtype, const char *s, size_t len);
+extern int (*pq_flush_hook) (void);
extern int secure_initialize(void);
extern bool secure_loaded_verify_locations(void);
diff --git a/src/include/libpq/pqmq.h b/src/include/libpq/pqmq.h
index ad7589d4ed..901756596a 100644
--- a/src/include/libpq/pqmq.h
+++ b/src/include/libpq/pqmq.h
@@ -16,7 +16,7 @@
#include "lib/stringinfo.h"
#include "storage/shm_mq.h"
-extern void pq_redirect_to_shm_mq(shm_mq *, shm_mq_handle *);
+extern void pq_redirect_to_shm_mq(shm_mq *, shm_mq_handle *);
extern void pq_set_parallel_master(pid_t pid, BackendId backend_id);
extern void pq_parse_errornotice(StringInfo str, ErrorData *edata);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 0a92cc4efc..db5bd7faf0 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -68,8 +68,8 @@ typedef struct IndexInfo
Oid *ii_ExclusionProcs; /* array with one entry per column */
uint16 *ii_ExclusionStrats; /* array with one entry per column */
Oid *ii_UniqueOps; /* array with one entry per column */
- Oid *ii_UniqueProcs; /* array with one entry per column */
- uint16 *ii_UniqueStrats; /* array with one entry per column */
+ Oid *ii_UniqueProcs; /* array with one entry per column */
+ uint16 *ii_UniqueStrats; /* array with one entry per column */
bool ii_Unique;
bool ii_ReadyForInserts;
bool ii_Concurrent;
@@ -1128,11 +1128,14 @@ typedef struct ModifyTableState
List **mt_arowmarks; /* per-subplan ExecAuxRowMark lists */
EPQState mt_epqstate; /* for evaluating EvalPlanQual rechecks */
bool fireBSTriggers; /* do we need to fire stmt triggers? */
- OnConflictAction mt_onconflict; /* ON CONFLICT type */
- List *mt_arbiterindexes; /* unique index OIDs to arbitrate taking alt path */
- TupleTableSlot *mt_existing; /* slot to store existing target tuple in */
- List *mt_excludedtlist; /* the excluded pseudo relation's tlist */
- TupleTableSlot *mt_conflproj; /* CONFLICT ... SET ... projection target */
+ OnConflictAction mt_onconflict; /* ON CONFLICT type */
+ List *mt_arbiterindexes; /* unique index OIDs to arbitrate
+ * taking alt path */
+ TupleTableSlot *mt_existing; /* slot to store existing target tuple in */
+ List *mt_excludedtlist; /* the excluded pseudo relation's
+ * tlist */
+ TupleTableSlot *mt_conflproj; /* CONFLICT ... SET ... projection
+ * target */
} ModifyTableState;
/* ----------------
@@ -1828,12 +1831,13 @@ typedef struct AggState
ExprContext **aggcontexts; /* econtexts for long-lived data (per GS) */
ExprContext *tmpcontext; /* econtext for input expressions */
AggStatePerAgg curperagg; /* identifies currently active aggregate */
- bool input_done; /* indicates end of input */
+ bool input_done; /* indicates end of input */
bool agg_done; /* indicates completion of Agg scan */
int projected_set; /* The last projected grouping set */
int current_set; /* The current grouping set being evaluated */
Bitmapset *grouped_cols; /* grouped cols in current projection */
- List *all_grouped_cols; /* list of all grouped cols in DESC order */
+ List *all_grouped_cols; /* list of all grouped cols in DESC
+ * order */
/* These fields are for grouping set phase data */
int maxsets; /* The max number of sets in any phase */
AggStatePerPhase phases; /* array of all phases */
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 669a0afa09..290cdb3058 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -645,9 +645,9 @@ typedef enum JoinType
*/
typedef enum OnConflictAction
{
- ONCONFLICT_NONE, /* No "ON CONFLICT" clause */
- ONCONFLICT_NOTHING, /* ON CONFLICT ... DO NOTHING */
- ONCONFLICT_UPDATE /* ON CONFLICT ... DO UPDATE */
+ ONCONFLICT_NONE, /* No "ON CONFLICT" clause */
+ ONCONFLICT_NOTHING, /* ON CONFLICT ... DO NOTHING */
+ ONCONFLICT_UPDATE /* ON CONFLICT ... DO UPDATE */
} OnConflictAction;
#endif /* NODES_H */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 23190e1af0..868905b0c1 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -121,7 +121,7 @@ typedef struct Query
bool hasRecursive; /* WITH RECURSIVE was specified */
bool hasModifyingCTE; /* has INSERT/UPDATE/DELETE in WITH */
bool hasForUpdate; /* FOR [KEY] UPDATE/SHARE was specified */
- bool hasRowSecurity; /* row security applied? */
+ bool hasRowSecurity; /* row security applied? */
List *cteList; /* WITH list (of CommonTableExpr's) */
@@ -132,7 +132,7 @@ typedef struct Query
List *withCheckOptions; /* a list of WithCheckOption's */
- OnConflictExpr *onConflict; /* ON CONFLICT DO [NOTHING | UPDATE] */
+ OnConflictExpr *onConflict; /* ON CONFLICT DO [NOTHING | UPDATE] */
List *returningList; /* return-values list (of TargetEntry) */
@@ -294,18 +294,18 @@ typedef struct CollateClause
*/
typedef enum RoleSpecType
{
- ROLESPEC_CSTRING, /* role name is stored as a C string */
- ROLESPEC_CURRENT_USER, /* role spec is CURRENT_USER */
- ROLESPEC_SESSION_USER, /* role spec is SESSION_USER */
- ROLESPEC_PUBLIC /* role name is "public" */
+ ROLESPEC_CSTRING, /* role name is stored as a C string */
+ ROLESPEC_CURRENT_USER, /* role spec is CURRENT_USER */
+ ROLESPEC_SESSION_USER, /* role spec is SESSION_USER */
+ ROLESPEC_PUBLIC /* role name is "public" */
} RoleSpecType;
typedef struct RoleSpec
{
NodeTag type;
- RoleSpecType roletype; /* Type of this rolespec */
- char *rolename; /* filled only for ROLESPEC_CSTRING */
- int location; /* token location, or -1 if unknown */
+ RoleSpecType roletype; /* Type of this rolespec */
+ char *rolename; /* filled only for ROLESPEC_CSTRING */
+ int location; /* token location, or -1 if unknown */
} RoleSpec;
/*
@@ -568,9 +568,9 @@ typedef struct RangeTableSample
{
NodeTag type;
RangeVar *relation;
- char *method; /* sampling method */
+ char *method; /* sampling method */
Node *repeatable;
- List *args; /* arguments for sampling method */
+ List *args; /* arguments for sampling method */
} RangeTableSample;
/*
@@ -690,7 +690,7 @@ typedef struct LockingClause
NodeTag type;
List *lockedRels; /* FOR [KEY] UPDATE/SHARE relations */
LockClauseStrength strength;
- LockWaitPolicy waitPolicy; /* NOWAIT and SKIP LOCKED */
+ LockWaitPolicy waitPolicy; /* NOWAIT and SKIP LOCKED */
} LockingClause;
/*
@@ -810,7 +810,7 @@ typedef struct RangeTblEntry
*/
Oid relid; /* OID of the relation */
char relkind; /* relation kind (see pg_class.relkind) */
- TableSampleClause *tablesample; /* sampling method and parameters */
+ TableSampleClause *tablesample; /* sampling method and parameters */
/*
* Fields valid for a subquery RTE (else NULL):
@@ -1157,12 +1157,12 @@ typedef struct InferClause
*/
typedef struct OnConflictClause
{
- NodeTag type;
- OnConflictAction action; /* DO NOTHING or UPDATE? */
- InferClause *infer; /* Optional index inference clause */
- List *targetList; /* the target list (of ResTarget) */
- Node *whereClause; /* qualifications */
- int location; /* token location, or -1 if unknown */
+ NodeTag type;
+ OnConflictAction action; /* DO NOTHING or UPDATE? */
+ InferClause *infer; /* Optional index inference clause */
+ List *targetList; /* the target list (of ResTarget) */
+ Node *whereClause; /* qualifications */
+ int location; /* token location, or -1 if unknown */
} OnConflictClause;
/*
@@ -1215,7 +1215,7 @@ typedef struct InsertStmt
RangeVar *relation; /* relation to insert into */
List *cols; /* optional: names of the target columns */
Node *selectStmt; /* the source SELECT/VALUES, or NULL */
- OnConflictClause *onConflictClause; /* ON CONFLICT clause */
+ OnConflictClause *onConflictClause; /* ON CONFLICT clause */
List *returningList; /* list of expressions to return */
WithClause *withClause; /* WITH clause */
} InsertStmt;
@@ -2890,21 +2890,22 @@ typedef struct ConstraintsSetStmt
*/
/* Reindex options */
-#define REINDEXOPT_VERBOSE 1 << 0 /* print progress info */
+#define REINDEXOPT_VERBOSE 1 << 0 /* print progress info */
typedef enum ReindexObjectType
{
- REINDEX_OBJECT_INDEX, /* index */
- REINDEX_OBJECT_TABLE, /* table or materialized view */
- REINDEX_OBJECT_SCHEMA, /* schema */
- REINDEX_OBJECT_SYSTEM, /* system catalogs */
- REINDEX_OBJECT_DATABASE /* database */
+ REINDEX_OBJECT_INDEX, /* index */
+ REINDEX_OBJECT_TABLE, /* table or materialized view */
+ REINDEX_OBJECT_SCHEMA, /* schema */
+ REINDEX_OBJECT_SYSTEM, /* system catalogs */
+ REINDEX_OBJECT_DATABASE /* database */
} ReindexObjectType;
typedef struct ReindexStmt
{
NodeTag type;
- ReindexObjectType kind; /* REINDEX_OBJECT_INDEX, REINDEX_OBJECT_TABLE, etc. */
+ ReindexObjectType kind; /* REINDEX_OBJECT_INDEX, REINDEX_OBJECT_TABLE,
+ * etc. */
RangeVar *relation; /* Table or index to reindex */
const char *name; /* name of database to reindex */
int options; /* Reindex options flags */
@@ -3034,7 +3035,7 @@ typedef enum AlterTSConfigType
typedef struct AlterTSConfigurationStmt
{
NodeTag type;
- AlterTSConfigType kind; /* ALTER_TSCONFIG_ADD_MAPPING, etc */
+ AlterTSConfigType kind; /* ALTER_TSCONFIG_ADD_MAPPING, etc */
List *cfgname; /* qualified name (list of Value strings) */
/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 61c8404140..d967219c0b 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -182,10 +182,10 @@ typedef struct ModifyTable
List *fdwPrivLists; /* per-target-table FDW private data lists */
List *rowMarks; /* PlanRowMarks (non-locking only) */
int epqParam; /* ID of Param for EvalPlanQual re-eval */
- OnConflictAction onConflictAction; /* ON CONFLICT action */
- List *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
+ OnConflictAction onConflictAction; /* ON CONFLICT action */
+ List *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
List *onConflictSet; /* SET for INSERT ON CONFLICT DO UPDATE */
- Node *onConflictWhere;/* WHERE for ON CONFLICT UPDATE */
+ Node *onConflictWhere; /* WHERE for ON CONFLICT UPDATE */
Index exclRelRTI; /* RTI of the EXCLUDED pseudo relation */
List *exclRelTlist; /* tlist of the EXCLUDED pseudo relation */
} ModifyTable;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 9f3a7267a2..60c1ca2c8d 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1196,9 +1196,9 @@ typedef struct CurrentOfExpr
typedef struct InferenceElem
{
Expr xpr;
- Node *expr; /* expression to infer from, or NULL */
- Oid infercollid; /* OID of collation, or InvalidOid */
- Oid inferopclass; /* OID of att opclass, or InvalidOid */
+ Node *expr; /* expression to infer from, or NULL */
+ Oid infercollid; /* OID of collation, or InvalidOid */
+ Oid inferopclass; /* OID of att opclass, or InvalidOid */
} InferenceElem;
/*--------------------
@@ -1380,13 +1380,14 @@ typedef struct OnConflictExpr
OnConflictAction action; /* DO NOTHING or UPDATE? */
/* Arbiter */
- List *arbiterElems; /* unique index arbiter list (of InferenceElem's) */
+ List *arbiterElems; /* unique index arbiter list (of
+ * InferenceElem's) */
Node *arbiterWhere; /* unique index arbiter WHERE clause */
Oid constraint; /* pg_constraint OID for arbiter */
/* ON CONFLICT UPDATE */
List *onConflictSet; /* List of ON CONFLICT SET TargetEntrys */
- Node *onConflictWhere;/* qualifiers to restrict UPDATE to */
+ Node *onConflictWhere; /* qualifiers to restrict UPDATE to */
int exclRelIndex; /* RT index of 'excluded' relation */
List *exclRelTlist; /* tlist of the EXCLUDED pseudo relation */
} OnConflictExpr;
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 89c8deda95..161644c343 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -33,7 +33,7 @@ extern bool add_path_precheck(RelOptInfo *parent_rel,
extern Path *create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
Relids required_outer);
extern Path *create_samplescan_path(PlannerInfo *root, RelOptInfo *rel,
- Relids required_outer);
+ Relids required_outer);
extern IndexPath *create_index_path(PlannerInfo *root,
IndexOptInfo *index,
List *indexclauses,
diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h
index dcd078ee43..7b8c0a98f3 100644
--- a/src/include/optimizer/prep.h
+++ b/src/include/optimizer/prep.h
@@ -46,7 +46,7 @@ extern void expand_security_quals(PlannerInfo *root, List *tlist);
extern List *preprocess_targetlist(PlannerInfo *root, List *tlist);
extern List *preprocess_onconflict_targetlist(List *tlist,
- int result_relation, List *range_table);
+ int result_relation, List *range_table);
extern PlanRowMark *get_plan_rowmark(List *rowmarks, Index rtindex);
diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h
index b0f0f19683..95cffaa60c 100644
--- a/src/include/optimizer/tlist.h
+++ b/src/include/optimizer/tlist.h
@@ -44,7 +44,7 @@ extern List *get_sortgrouplist_exprs(List *sgClauses,
List *targetList);
extern SortGroupClause *get_sortgroupref_clause(Index sortref,
- List *clauses);
+ List *clauses);
extern Oid *extract_grouping_ops(List *groupClause);
extern AttrNumber *extract_grouping_cols(List *groupClause, List *tlist);
diff --git a/src/include/parser/parse_clause.h b/src/include/parser/parse_clause.h
index cbe5e76bb8..77619e37a0 100644
--- a/src/include/parser/parse_clause.h
+++ b/src/include/parser/parse_clause.h
@@ -27,7 +27,7 @@ extern Node *transformWhereClause(ParseState *pstate, Node *clause,
extern Node *transformLimitClause(ParseState *pstate, Node *clause,
ParseExprKind exprKind, const char *constructName);
extern List *transformGroupClause(ParseState *pstate, List *grouplist,
- List **groupingSets,
+ List **groupingSets,
List **targetlist, List *sortClause,
ParseExprKind exprKind, bool useSQL99);
extern List *transformSortClause(ParseState *pstate, List *orderlist,
diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h
index 40c007c35f..3194da4639 100644
--- a/src/include/parser/parse_func.h
+++ b/src/include/parser/parse_func.h
@@ -34,9 +34,9 @@ extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
FuncCall *fn, int location);
extern TableSampleClause *ParseTableSample(ParseState *pstate,
- char *samplemethod,
- Node *repeatable, List *args,
- int location);
+ char *samplemethod,
+ Node *repeatable, List *args,
+ int location);
extern FuncDetailCode func_get_detail(List *funcname,
List *fargs, List *fargnames,
diff --git a/src/include/parser/parse_relation.h b/src/include/parser/parse_relation.h
index ce563dea25..e2875a0adb 100644
--- a/src/include/parser/parse_relation.h
+++ b/src/include/parser/parse_relation.h
@@ -26,11 +26,11 @@
*/
typedef struct
{
- int distance; /* Weighted distance (lowest so far) */
- RangeTblEntry *rfirst; /* RTE of first */
- AttrNumber first; /* Closest attribute so far */
- RangeTblEntry *rsecond; /* RTE of second */
- AttrNumber second; /* Second closest attribute so far */
+ int distance; /* Weighted distance (lowest so far) */
+ RangeTblEntry *rfirst; /* RTE of first */
+ AttrNumber first; /* Closest attribute so far */
+ RangeTblEntry *rsecond; /* RTE of second */
+ AttrNumber second; /* Second closest attribute so far */
} FuzzyAttrMatchState;
@@ -106,7 +106,7 @@ extern void addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte,
bool addToRelNameSpace, bool addToVarNameSpace);
extern void errorMissingRTE(ParseState *pstate, RangeVar *relation) pg_attribute_noreturn();
extern void errorMissingColumn(ParseState *pstate,
- char *relname, char *colname, int location) pg_attribute_noreturn();
+ char *relname, char *colname, int location) pg_attribute_noreturn();
extern void expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
int location, bool include_dropped,
List **colnames, List **colvars);
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index e3fe06e95b..9ecc16372d 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -710,11 +710,11 @@ typedef enum BackendState
typedef struct PgBackendSSLStatus
{
/* Information about SSL connection */
- int ssl_bits;
- bool ssl_compression;
- char ssl_version[NAMEDATALEN]; /* MUST be null-terminated */
- char ssl_cipher[NAMEDATALEN]; /* MUST be null-terminated */
- char ssl_clientdn[NAMEDATALEN]; /* MUST be null-terminated */
+ int ssl_bits;
+ bool ssl_compression;
+ char ssl_version[NAMEDATALEN]; /* MUST be null-terminated */
+ char ssl_cipher[NAMEDATALEN]; /* MUST be null-terminated */
+ char ssl_clientdn[NAMEDATALEN]; /* MUST be null-terminated */
} PgBackendSSLStatus;
@@ -738,11 +738,11 @@ typedef struct PgBackendStatus
* the copy is valid; otherwise start over. This makes updates cheap
* while reads are potentially expensive, but that's the tradeoff we want.
*
- * The above protocol needs the memory barriers to ensure that
- * the apparent order of execution is as it desires. Otherwise,
- * for example, the CPU might rearrange the code so that st_changecount
- * is incremented twice before the modification on a machine with
- * weak memory ordering. This surprising result can lead to bugs.
+ * The above protocol needs the memory barriers to ensure that the
+ * apparent order of execution is as it desires. Otherwise, for example,
+ * the CPU might rearrange the code so that st_changecount is incremented
+ * twice before the modification on a machine with weak memory ordering.
+ * This surprising result can lead to bugs.
*/
int st_changecount;
@@ -793,26 +793,26 @@ typedef struct PgBackendStatus
#define pgstat_increment_changecount_before(beentry) \
do { \
beentry->st_changecount++; \
- pg_write_barrier(); \
+ pg_write_barrier(); \
} while (0)
-#define pgstat_increment_changecount_after(beentry) \
+#define pgstat_increment_changecount_after(beentry) \
do { \
- pg_write_barrier(); \
+ pg_write_barrier(); \
beentry->st_changecount++; \
- Assert((beentry->st_changecount & 1) == 0); \
+ Assert((beentry->st_changecount & 1) == 0); \
} while (0)
#define pgstat_save_changecount_before(beentry, save_changecount) \
do { \
- save_changecount = beentry->st_changecount; \
+ save_changecount = beentry->st_changecount; \
pg_read_barrier(); \
} while (0)
#define pgstat_save_changecount_after(beentry, save_changecount) \
do { \
pg_read_barrier(); \
- save_changecount = beentry->st_changecount; \
+ save_changecount = beentry->st_changecount; \
} while (0)
/* ----------
diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h
index e90d664d5a..1a4c748cb9 100644
--- a/src/include/port/atomics.h
+++ b/src/include/port/atomics.h
@@ -60,15 +60,15 @@
*/
#if defined(__arm__) || defined(__arm) || \
defined(__aarch64__) || defined(__aarch64)
-# include "port/atomics/arch-arm.h"
+#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
-# include "port/atomics/arch-x86.h"
+#include "port/atomics/arch-x86.h"
#elif defined(__ia64__) || defined(__ia64)
-# include "port/atomics/arch-ia64.h"
+#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
-# include "port/atomics/arch-ppc.h"
+#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
-# include "port/atomics/arch-hppa.h"
+#include "port/atomics/arch-hppa.h"
#endif
/*
@@ -83,15 +83,15 @@
*/
/* gcc or compatible, including clang and icc */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
-# include "port/atomics/generic-gcc.h"
+#include "port/atomics/generic-gcc.h"
#elif defined(WIN32_ONLY_COMPILER)
-# include "port/atomics/generic-msvc.h"
+#include "port/atomics/generic-msvc.h"
#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
-# include "port/atomics/generic-acc.h"
+#include "port/atomics/generic-acc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
-# include "port/atomics/generic-sunpro.h"
+#include "port/atomics/generic-sunpro.h"
#elif (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__)
-# include "port/atomics/generic-xlc.h"
+#include "port/atomics/generic-xlc.h"
#else
/*
* Unsupported compiler, we'll likely use slower fallbacks... At least
@@ -128,7 +128,7 @@ STATIC_IF_INLINE_DECLARE uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *pt
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
- uint32 *expected, uint32 newval);
+ uint32 *expected, uint32 newval);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);
STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_);
@@ -143,7 +143,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *pt
STATIC_IF_INLINE_DECLARE void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval);
STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
- uint64 *expected, uint64 newval);
+ uint64 *expected, uint64 newval);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_);
@@ -151,7 +151,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
-#endif /* PG_HAVE_64_BIT_ATOMICS */
+#endif /* PG_HAVE_64_BIT_ATOMICS */
/*
@@ -175,14 +175,14 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint6
* architectures) this requires issuing some sort of memory fencing
* instruction.
*/
-#define pg_memory_barrier() pg_memory_barrier_impl()
+#define pg_memory_barrier() pg_memory_barrier_impl()
/*
* pg_(read|write)_barrier - prevent the CPU from reordering memory access
*
* A read barrier must act as a compiler barrier, and in addition must
* guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier. Similarly, a write barrier acts
+ * any loads issued after the barrier. Similarly, a write barrier acts
* as a compiler barrier, and also orders stores. Read and write barriers
* are thus weaker than a full memory barrier, but stronger than a compiler
* barrier. In practice, on machines with strong memory ordering, read and
@@ -194,7 +194,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint6
/*
* Spinloop delay - Allow CPU to relax in busy loops
*/
-#define pg_spin_delay() pg_spin_delay_impl()
+#define pg_spin_delay() pg_spin_delay_impl()
/*
* The following functions are wrapper functions around the platform specific
@@ -522,10 +522,11 @@ pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}
-#endif /* PG_HAVE_64_BIT_ATOMICS */
+#endif /* PG_HAVE_64_BIT_ATOMICS */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
#undef INSIDE_ATOMICS_H
-#endif /* ATOMICS_H */
+#endif /* ATOMICS_H */
diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h
index 2591a0f163..3fd3918114 100644
--- a/src/include/port/atomics/arch-ia64.h
+++ b/src/include/port/atomics/arch-ia64.h
@@ -18,9 +18,9 @@
* fence.
*/
#if defined(__INTEL_COMPILER)
-# define pg_memory_barrier_impl() __mf()
+#define pg_memory_barrier_impl() __mf()
#elif defined(__GNUC__)
-# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+#define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
#elif defined(__hpux)
-# define pg_memory_barrier_impl() _Asm_mf()
+#define pg_memory_barrier_impl() _Asm_mf()
#endif
diff --git a/src/include/port/atomics/arch-x86.h b/src/include/port/atomics/arch-x86.h
index 168a49c793..d7f45f325e 100644
--- a/src/include/port/atomics/arch-x86.h
+++ b/src/include/port/atomics/arch-x86.h
@@ -78,9 +78,10 @@ typedef struct pg_atomic_uint64
} pg_atomic_uint64;
#endif
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) &&
+ * !defined(__INTEL_COMPILER) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -93,20 +94,20 @@ typedef struct pg_atomic_uint64
* PAUSE in the inner loop of a spin lock is necessary for good
* performance:
*
- * The PAUSE instruction improves the performance of IA-32
- * processors supporting Hyper-Threading Technology when
- * executing spin-wait loops and other routines where one
- * thread is accessing a shared lock or semaphore in a tight
- * polling loop. When executing a spin-wait loop, the
- * processor can suffer a severe performance penalty when
- * exiting the loop because it detects a possible memory order
- * violation and flushes the core processor's pipeline. The
- * PAUSE instruction provides a hint to the processor that the
- * code sequence is a spin-wait loop. The processor uses this
- * hint to avoid the memory order violation and prevent the
- * pipeline flush. In addition, the PAUSE instruction
- * de-pipelines the spin-wait loop to prevent it from
- * consuming execution resources excessively.
+ * The PAUSE instruction improves the performance of IA-32
+ * processors supporting Hyper-Threading Technology when
+ * executing spin-wait loops and other routines where one
+ * thread is accessing a shared lock or semaphore in a tight
+ * polling loop. When executing a spin-wait loop, the
+ * processor can suffer a severe performance penalty when
+ * exiting the loop because it detects a possible memory order
+ * violation and flushes the core processor's pipeline. The
+ * PAUSE instruction provides a hint to the processor that the
+ * code sequence is a spin-wait loop. The processor uses this
+ * hint to avoid the memory order violation and prevent the
+ * pipeline flush. In addition, the PAUSE instruction
+ * de-pipelines the spin-wait loop to prevent it from
+ * consuming execution resources excessively.
*/
#if defined(__INTEL_COMPILER)
#define PG_HAVE_SPIN_DELAY
@@ -120,8 +121,8 @@ pg_spin_delay_impl(void)
static __inline__ void
pg_spin_delay_impl(void)
{
- __asm__ __volatile__(
- " rep; nop \n");
+ __asm__ __volatile__(
+ " rep; nop \n");
}
#elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
#define PG_HAVE_SPIN_DELAY
@@ -136,10 +137,10 @@ static __forceinline void
pg_spin_delay_impl(void)
{
/* See comment for gcc code. Same code, MASM syntax */
- __asm rep nop;
+ __asm rep nop;
}
#endif
-#endif /* !defined(PG_HAVE_SPIN_DELAY) */
+#endif /* !defined(PG_HAVE_SPIN_DELAY) */
#if defined(HAVE_ATOMICS)
@@ -153,12 +154,13 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
register char _res = 1;
- __asm__ __volatile__(
- " lock \n"
- " xchgb %0,%1 \n"
-: "+q"(_res), "+m"(ptr->value)
-:
-: "memory");
+ __asm__ __volatile__(
+ " lock \n"
+ " xchgb %0,%1 \n"
+ : "+q"(_res), "+m"(ptr->value)
+ :
+ : "memory");
+
return _res == 0;
}
@@ -170,7 +172,8 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
* On a TSO architecture like x86 it's sufficient to use a compiler
* barrier to achieve release semantics.
*/
- __asm__ __volatile__("" ::: "memory");
+ __asm__ __volatile__("":::"memory");
+
ptr->value = 0;
}
@@ -179,19 +182,20 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- char ret;
+ char ret;
/*
* Perform cmpxchg and use the zero flag which it implicitly sets when
* equal to measure the success.
*/
- __asm__ __volatile__(
- " lock \n"
- " cmpxchgl %4,%5 \n"
- " setz %2 \n"
-: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
-: "a" (*expected), "r" (newval), "m"(ptr->value)
-: "memory", "cc");
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgl %4,%5 \n"
+ " setz %2 \n"
+ : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
+ : "a"(*expected), "r"(newval), "m"(ptr->value)
+ : "memory", "cc");
+
return (bool) ret;
}
@@ -199,13 +203,14 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
- uint32 res;
- __asm__ __volatile__(
- " lock \n"
- " xaddl %0,%1 \n"
-: "=q"(res), "=m"(ptr->value)
-: "0" (add_), "m"(ptr->value)
-: "memory", "cc");
+ uint32 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddl %0,%1 \n"
+ : "=q"(res), "=m"(ptr->value)
+ : "0"(add_), "m"(ptr->value)
+ : "memory", "cc");
+
return res;
}
@@ -216,19 +221,20 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- char ret;
+ char ret;
/*
* Perform cmpxchg and use the zero flag which it implicitly sets when
* equal to measure the success.
*/
- __asm__ __volatile__(
- " lock \n"
- " cmpxchgq %4,%5 \n"
- " setz %2 \n"
-: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
-: "a" (*expected), "r" (newval), "m"(ptr->value)
-: "memory", "cc");
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgq %4,%5 \n"
+ " setz %2 \n"
+ : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
+ : "a"(*expected), "r"(newval), "m"(ptr->value)
+ : "memory", "cc");
+
return (bool) ret;
}
@@ -236,20 +242,23 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
- uint64 res;
- __asm__ __volatile__(
- " lock \n"
- " xaddq %0,%1 \n"
-: "=q"(res), "=m"(ptr->value)
-: "0" (add_), "m"(ptr->value)
-: "memory", "cc");
+ uint64 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddq %0,%1 \n"
+ : "=q"(res), "=m"(ptr->value)
+ : "0"(add_), "m"(ptr->value)
+ : "memory", "cc");
+
return res;
}
-#endif /* __x86_64__ */
+#endif /* __x86_64__ */
-#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) &&
+ * !defined(__INTEL_COMPILER) */
-#endif /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h
index 4e04f9758b..01af089f7b 100644
--- a/src/include/port/atomics/fallback.h
+++ b/src/include/port/atomics/fallback.h
@@ -1,8 +1,8 @@
/*-------------------------------------------------------------------------
*
* fallback.h
- * Fallback for platforms without spinlock and/or atomics support. Slower
- * than native atomics support, but not unusably slow.
+ * Fallback for platforms without spinlock and/or atomics support. Slower
+ * than native atomics support, but not unusably slow.
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -14,7 +14,7 @@
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-# error "should be included via atomics.h"
+#error "should be included via atomics.h"
#endif
#ifndef pg_memory_barrier_impl
@@ -75,14 +75,15 @@ typedef struct pg_atomic_flag
* be content with just one byte instead of 4, but that's not too much
* waste.
*/
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
+ * compilers */
int sema[4];
#else
int sema;
#endif
} pg_atomic_flag;
-#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
@@ -92,7 +93,8 @@ typedef struct pg_atomic_flag
typedef struct pg_atomic_uint32
{
/* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
+ * compilers */
int sema[4];
#else
int sema;
@@ -100,7 +102,7 @@ typedef struct pg_atomic_uint32
volatile uint32 value;
} pg_atomic_uint32;
-#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -128,7 +130,7 @@ pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
return true;
}
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
@@ -137,12 +139,13 @@ extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
- uint32 *expected, uint32 newval);
+ uint32 *expected, uint32 newval);
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-acc.h b/src/include/port/atomics/generic-acc.h
index c5639aadda..e16cc6f7dc 100644
--- a/src/include/port/atomics/generic-acc.h
+++ b/src/include/port/atomics/generic-acc.h
@@ -10,9 +10,9 @@
*
* Documentation:
* * inline assembly for Itanium-based HP-UX:
- * http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
+ * http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
* * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
- * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
+ * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
*
* Itanium only supports a small set of numbers (6, -8, -4, -1, 1, 4, 8, 16)
* for atomic add/sub, so we just implement everything but compare_exchange
@@ -49,7 +49,7 @@ typedef struct pg_atomic_uint64
volatile uint64 value;
} pg_atomic_uint64;
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -64,23 +64,25 @@ STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
+
/*
* We want a barrier, not just release/acquire semantics.
*/
_Asm_mf();
+
/*
- * Notes:
- * DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
+ * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
+ * compiler
*/
- current = _Asm_cmpxchg(_SZ_W, /* word */
- _SEM_REL,
- &ptr->value,
- newval, _LDHINT_NONE,
- _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ current = _Asm_cmpxchg(_SZ_W, /* word */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
ret = current == *expected;
*expected = current;
return ret;
@@ -92,16 +94,16 @@ STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
_Asm_mf();
- current = _Asm_cmpxchg(_SZ_D, /* doubleword */
- _SEM_REL,
- &ptr->value,
- newval, _LDHINT_NONE,
- _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ current = _Asm_cmpxchg(_SZ_D, /* doubleword */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
ret = current == *expected;
*expected = current;
return ret;
@@ -109,6 +111,7 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
#undef MINOR_FENCE
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h
index 591c9fe1eb..301ab510bf 100644
--- a/src/include/port/atomics/generic-gcc.h
+++ b/src/include/port/atomics/generic-gcc.h
@@ -10,9 +10,9 @@
*
* Documentation:
* * Legacy __sync Built-in Functions for Atomic Memory Access
- * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
+ * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
* * Built-in functions for memory model aware atomic operations
- * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
+ * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
*
* src/include/port/atomics/generic-gcc.h
*
@@ -40,21 +40,21 @@
* definitions where possible, and use this only as a fallback.
*/
#if !defined(pg_memory_barrier_impl)
-# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-# define pg_memory_barrier_impl() __sync_synchronize()
-# endif
-#endif /* !defined(pg_memory_barrier_impl) */
+#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+#define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#define pg_memory_barrier_impl() __sync_synchronize()
+#endif
+#endif /* !defined(pg_memory_barrier_impl) */
#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
-# define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif
#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
-# define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
+#define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
#endif
#ifdef HAVE_ATOMICS
@@ -75,7 +75,7 @@ typedef struct pg_atomic_flag
#endif
} pg_atomic_flag;
-#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
+#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
@@ -87,7 +87,8 @@ typedef struct pg_atomic_uint32
volatile uint32 value;
} pg_atomic_uint32;
-#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
+ * defined(HAVE_GCC__SYNC_INT32_CAS) */
/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
@@ -101,7 +102,8 @@ typedef struct pg_atomic_uint64
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
+ * defined(HAVE_GCC__SYNC_INT64_CAS) */
/*
* Implementation follows. Inlined or directly included from atomics.c
@@ -123,7 +125,7 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
}
#endif
-#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
+#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
@@ -152,7 +154,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
}
#endif
-#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
+#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
@@ -173,8 +175,9 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
+
current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
ret = current == *expected;
*expected = current;
@@ -211,8 +214,9 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
+
current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
ret = current == *expected;
*expected = current;
@@ -229,8 +233,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
}
#endif
-#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
+#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h
index d259d6f51d..3c177fea7e 100644
--- a/src/include/port/atomics/generic-msvc.h
+++ b/src/include/port/atomics/generic-msvc.h
@@ -10,7 +10,7 @@
*
* Documentation:
* * Interlocked Variable Access
- * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
+ * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
*
* src/include/port/atomics/generic-msvc.h
*
@@ -41,12 +41,14 @@ typedef struct pg_atomic_uint32
} pg_atomic_uint32;
#define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec(align(8)) pg_atomic_uint64
+typedef struct __declspec (
+ align(8))
+pg_atomic_uint64
{
volatile uint64 value;
} pg_atomic_uint64;
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -58,8 +60,9 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
+
current = InterlockedCompareExchange(&ptr->value, newval, *expected);
ret = current == *expected;
*expected = current;
@@ -86,8 +89,9 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
+
current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
ret = current == *expected;
*expected = current;
@@ -104,8 +108,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
return _InterlockedExchangeAdd64(&ptr->value, add_);
}
-#endif /* _WIN64 */
+#endif /* _WIN64 */
-#endif /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-sunpro.h b/src/include/port/atomics/generic-sunpro.h
index d369207fb3..e74cbad502 100644
--- a/src/include/port/atomics/generic-sunpro.h
+++ b/src/include/port/atomics/generic-sunpro.h
@@ -9,8 +9,8 @@
*
* Documentation:
* * manpage for atomic_cas(3C)
- * http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
- * http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
+ * http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
+ * http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
*
* src/include/port/atomics/generic-sunpro.h
*
@@ -30,16 +30,16 @@
* membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
* respectively.
*/
-# define pg_memory_barrier_impl() __machine_rw_barrier()
+#define pg_memory_barrier_impl() __machine_rw_barrier()
#endif
#ifndef pg_read_barrier_impl
-# define pg_read_barrier_impl() __machine_r_barrier()
+#define pg_read_barrier_impl() __machine_r_barrier()
#endif
#ifndef pg_write_barrier_impl
-# define pg_write_barrier_impl() __machine_w_barrier()
+#define pg_write_barrier_impl() __machine_w_barrier()
#endif
-#endif /* HAVE_MBARRIER_H */
+#endif /* HAVE_MBARRIER_H */
/* Older versions of the compiler don't have atomic.h... */
#ifdef HAVE_ATOMIC_H
@@ -64,9 +64,9 @@ typedef struct pg_atomic_uint64
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -80,8 +80,8 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
current = atomic_cas_32(&ptr->value, *expected, newval);
ret = current == *expected;
@@ -94,8 +94,8 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
current = atomic_cas_64(&ptr->value, *expected, newval);
ret = current == *expected;
@@ -103,8 +103,9 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
return ret;
}
-#endif /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h
index 1c743f2bc8..01c19121eb 100644
--- a/src/include/port/atomics/generic-xlc.h
+++ b/src/include/port/atomics/generic-xlc.h
@@ -9,7 +9,7 @@
*
* Documentation:
* * Synchronization and atomic built-in functions
- * http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
+ * http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
*
* src/include/port/atomics/generic-xlc.h
*
@@ -35,9 +35,9 @@ typedef struct pg_atomic_uint64
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* __64BIT__ */
+#endif /* __64BIT__ */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -48,13 +48,13 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
/*
- * xlc's documentation tells us:
- * "If __compare_and_swap is used as a locking primitive, insert a call to
- * the __isync built-in function at the start of any critical sections."
+ * xlc's documentation tells us: "If __compare_and_swap is used as a
+ * locking primitive, insert a call to the __isync built-in function at
+ * the start of any critical sections."
*/
__isync();
@@ -62,8 +62,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
* XXX: __compare_and_swap is defined to take signed parameters, but that
* shouldn't matter since we don't perform any arithmetic operations.
*/
- current = (uint32)__compare_and_swap((volatile int*)ptr->value,
- (int)*expected, (int)newval);
+ current = (uint32) __compare_and_swap((volatile int *) ptr->value,
+ (int) *expected, (int) newval);
ret = current == *expected;
*expected = current;
return ret;
@@ -83,13 +83,13 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
__isync();
- current = (uint64)__compare_and_swaplp((volatile long*)ptr->value,
- (long)*expected, (long)newval);
+ current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
+ (long) *expected, (long) newval);
ret = current == *expected;
*expected = current;
return ret;
@@ -102,8 +102,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
return __fetch_and_addlp(&ptr->value, add_);
}
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic.h b/src/include/port/atomics/generic.h
index bb31df3623..9787f9ee87 100644
--- a/src/include/port/atomics/generic.h
+++ b/src/include/port/atomics/generic.h
@@ -14,7 +14,7 @@
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-# error "should be included via atomics.h"
+#error "should be included via atomics.h"
#endif
/*
@@ -22,10 +22,10 @@
* barriers.
*/
#if !defined(pg_read_barrier_impl)
-# define pg_read_barrier_impl pg_memory_barrier_impl
+#define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
-# define pg_write_barrier_impl pg_memory_barrier_impl
+#define pg_write_barrier_impl pg_memory_barrier_impl
#endif
#ifndef PG_HAVE_SPIN_DELAY
@@ -113,7 +113,8 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
- uint32 value = 0;
+ uint32 value = 0;
+
return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}
@@ -129,23 +130,23 @@ static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
/*
- * Use a memory barrier + plain write if we have a native memory
- * barrier. But don't do so if memory barriers use spinlocks - that'd lead
- * to circularity if flags are used to implement spinlocks.
+ * Use a memory barrier + plain write if we have a native memory barrier.
+ * But don't do so if memory barriers use spinlocks - that'd lead to
+ * circularity if flags are used to implement spinlocks.
*/
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
/* XXX: release semantics suffice? */
pg_memory_barrier_impl();
pg_atomic_write_u32_impl(ptr, 0);
#else
- uint32 value = 1;
+ uint32 value = 1;
pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-# error "No pg_atomic_test_and_set provided"
-#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
+#error "No pg_atomic_test_and_set provided"
+#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
#ifndef PG_HAVE_ATOMIC_INIT_U32
@@ -162,7 +163,8 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
- uint32 old;
+ uint32 old;
+
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
@@ -178,7 +180,8 @@ pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
- uint32 old;
+ uint32 old;
+
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
@@ -203,7 +206,8 @@ pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
- uint32 old;
+ uint32 old;
+
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
@@ -219,7 +223,8 @@ pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
- uint32 old;
+ uint32 old;
+
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
@@ -255,7 +260,8 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
- uint64 old;
+ uint64 old;
+
while (true)
{
old = ptr->value;
@@ -284,7 +290,7 @@ pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
- uint64 old = 0;
+ uint64 old = 0;
/*
* 64 bit reads aren't safe on all platforms. In the generic
@@ -312,7 +318,8 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
- uint64 old;
+ uint64 old;
+
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
@@ -337,7 +344,8 @@ pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
- uint64 old;
+ uint64 old;
+
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
@@ -353,7 +361,8 @@ pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
- uint64 old;
+ uint64 old;
+
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
@@ -382,6 +391,7 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
}
#endif
-#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
+#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) ||
+ * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h
index b14d194fb3..c925c56901 100644
--- a/src/include/port/pg_crc32c.h
+++ b/src/include/port/pg_crc32c.h
@@ -90,4 +90,4 @@ extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len)
#endif
-#endif /* PG_CRC32C_H */
+#endif /* PG_CRC32C_H */
diff --git a/src/include/postmaster/bgworker.h b/src/include/postmaster/bgworker.h
index de9180df91..f0a9530654 100644
--- a/src/include/postmaster/bgworker.h
+++ b/src/include/postmaster/bgworker.h
@@ -113,7 +113,7 @@ extern BgwHandleStatus
WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *
handle, pid_t *pid);
extern BgwHandleStatus
-WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *);
+ WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *);
/* Terminate a bgworker */
extern void TerminateBackgroundWorker(BackgroundWorkerHandle *handle);
diff --git a/src/include/replication/origin.h b/src/include/replication/origin.h
index b814aeb4fd..5d294de60e 100644
--- a/src/include/replication/origin.h
+++ b/src/include/replication/origin.h
@@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
* origin.h
- * Exports from replication/logical/origin.c
+ * Exports from replication/logical/origin.c
*
* Copyright (c) 2013-2015, PostgreSQL Global Development Group
*
@@ -17,13 +17,13 @@
typedef struct xl_replorigin_set
{
XLogRecPtr remote_lsn;
- RepOriginId node_id;
+ RepOriginId node_id;
bool force;
} xl_replorigin_set;
typedef struct xl_replorigin_drop
{
- RepOriginId node_id;
+ RepOriginId node_id;
} xl_replorigin_drop;
#define XLOG_REPLORIGIN_SET 0x00
@@ -41,17 +41,17 @@ extern RepOriginId replorigin_by_name(char *name, bool missing_ok);
extern RepOriginId replorigin_create(char *name);
extern void replorigin_drop(RepOriginId roident);
extern bool replorigin_by_oid(RepOriginId roident, bool missing_ok,
- char **roname);
+ char **roname);
/* API for querying & manipulating replication progress tracking */
extern void replorigin_advance(RepOriginId node,
- XLogRecPtr remote_commit,
- XLogRecPtr local_commit,
- bool go_backward, bool wal_log);
+ XLogRecPtr remote_commit,
+ XLogRecPtr local_commit,
+ bool go_backward, bool wal_log);
extern XLogRecPtr replorigin_get_progress(RepOriginId node, bool flush);
extern void replorigin_session_advance(XLogRecPtr remote_commit,
- XLogRecPtr local_commit);
+ XLogRecPtr local_commit);
extern void replorigin_session_setup(RepOriginId node);
extern void replorigin_session_reset(void);
extern XLogRecPtr replorigin_session_get_progress(bool flush);
@@ -61,9 +61,9 @@ extern void CheckPointReplicationOrigin(void);
extern void StartupReplicationOrigin(void);
/* WAL logging */
-void replorigin_redo(XLogReaderState *record);
-void replorigin_desc(StringInfo buf, XLogReaderState *record);
-const char * replorigin_identify(uint8 info);
+void replorigin_redo(XLogReaderState *record);
+void replorigin_desc(StringInfo buf, XLogReaderState *record);
+const char *replorigin_identify(uint8 info);
/* shared memory allocation */
extern Size ReplicationOriginShmemSize(void);
@@ -83,4 +83,4 @@ extern Datum pg_replication_origin_advance(PG_FUNCTION_ARGS);
extern Datum pg_replication_origin_progress(PG_FUNCTION_ARGS);
extern Datum pg_show_replication_origin_status(PG_FUNCTION_ARGS);
-#endif /* PG_ORIGIN_H */
+#endif /* PG_ORIGIN_H */
diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h
index bec1a56017..17c3de235e 100644
--- a/src/include/replication/output_plugin.h
+++ b/src/include/replication/output_plugin.h
@@ -78,7 +78,7 @@ typedef void (*LogicalDecodeCommitCB) (
*/
typedef bool (*LogicalDecodeFilterByOriginCB) (
struct LogicalDecodingContext *,
- RepOriginId origin_id);
+ RepOriginId origin_id);
/*
* Called to shutdown an output plugin.
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 666c5f2841..110e78e7a8 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -177,7 +177,7 @@ typedef struct ReorderBufferTXN
/* origin of the change that caused this transaction */
RepOriginId origin_id;
- XLogRecPtr origin_lsn;
+ XLogRecPtr origin_lsn;
/*
* Commit time, only known when we read the actual commit record.
@@ -352,7 +352,7 @@ void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
void ReorderBufferCommit(ReorderBuffer *, TransactionId,
XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
- TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn);
+ TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn);
void ReorderBufferAssignChild(ReorderBuffer *, TransactionId, TransactionId, XLogRecPtr commit_lsn);
void ReorderBufferCommitChild(ReorderBuffer *, TransactionId, TransactionId,
XLogRecPtr commit_lsn, XLogRecPtr end_lsn);
diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h
index b10e78488f..cb3f6bd21f 100644
--- a/src/include/replication/walsender.h
+++ b/src/include/replication/walsender.h
@@ -25,7 +25,7 @@ extern bool wake_wal_senders;
/* user-settable parameters */
extern int max_wal_senders;
extern int wal_sender_timeout;
-extern bool log_replication_commands;
+extern bool log_replication_commands;
extern void InitWalSender(void);
extern void exec_replication_command(const char *query_string);
diff --git a/src/include/rewrite/rowsecurity.h b/src/include/rewrite/rowsecurity.h
index eb4b20559f..523c56e598 100644
--- a/src/include/rewrite/rowsecurity.h
+++ b/src/include/rewrite/rowsecurity.h
@@ -2,8 +2,8 @@
*
* rowsecurity.h
*
- * prototypes for rewrite/rowsecurity.c and the structures for managing
- * the row security policies for relations in relcache.
+ * prototypes for rewrite/rowsecurity.c and the structures for managing
+ * the row security policies for relations in relcache.
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -19,23 +19,23 @@
typedef struct RowSecurityPolicy
{
- Oid policy_id; /* OID of the policy */
- char *policy_name; /* Name of the policy */
- char polcmd; /* Type of command policy is for */
- ArrayType *roles; /* Array of roles policy is for */
- Expr *qual; /* Expression to filter rows */
- Expr *with_check_qual; /* Expression to limit rows allowed */
- bool hassublinks; /* If either expression has sublinks */
+ Oid policy_id; /* OID of the policy */
+ char *policy_name; /* Name of the policy */
+ char polcmd; /* Type of command policy is for */
+ ArrayType *roles; /* Array of roles policy is for */
+ Expr *qual; /* Expression to filter rows */
+ Expr *with_check_qual; /* Expression to limit rows allowed */
+ bool hassublinks; /* If either expression has sublinks */
} RowSecurityPolicy;
typedef struct RowSecurityDesc
{
- MemoryContext rscxt; /* row security memory context */
- List *policies; /* list of row security policies */
+ MemoryContext rscxt; /* row security memory context */
+ List *policies; /* list of row security policies */
} RowSecurityDesc;
-typedef List *(*row_security_policy_hook_type)(CmdType cmdtype,
- Relation relation);
+typedef List *(*row_security_policy_hook_type) (CmdType cmdtype,
+ Relation relation);
extern PGDLLIMPORT row_security_policy_hook_type row_security_policy_hook_permissive;
@@ -46,4 +46,4 @@ extern void get_row_security_policies(Query *root, CmdType commandType,
List **securityQuals, List **withCheckOptions,
bool *hasRowSecurity, bool *hasSubLinks);
-#endif /* ROWSECURITY_H */
+#endif /* ROWSECURITY_H */
diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h
index 7cc75fc106..e7ccce2294 100644
--- a/src/include/storage/lmgr.h
+++ b/src/include/storage/lmgr.h
@@ -77,9 +77,9 @@ extern void WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode);
extern void WaitForLockersMultiple(List *locktags, LOCKMODE lockmode);
/* Lock an XID for tuple insertion (used to wait for an insertion to finish) */
-extern uint32 SpeculativeInsertionLockAcquire(TransactionId xid);
-extern void SpeculativeInsertionLockRelease(TransactionId xid);
-extern void SpeculativeInsertionWait(TransactionId xid, uint32 token);
+extern uint32 SpeculativeInsertionLockAcquire(TransactionId xid);
+extern void SpeculativeInsertionLockRelease(TransactionId xid);
+extern void SpeculativeInsertionWait(TransactionId xid, uint32 token);
/* Lock a general object (other than a relation) of the current database */
extern void LockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index b4eb1b4a9e..96fe3a66ab 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -176,7 +176,7 @@ typedef enum LockTagType
/* ID info for a transaction is its TransactionId */
LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
/* ID info for a virtual transaction is its VirtualTransactionId */
- LOCKTAG_SPECULATIVE_TOKEN, /* speculative insertion Xid and token */
+ LOCKTAG_SPECULATIVE_TOKEN, /* speculative insertion Xid and token */
/* ID info for a transaction is its TransactionId */
LOCKTAG_OBJECT, /* non-relation database object */
/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
diff --git a/src/include/storage/shm_mq.h b/src/include/storage/shm_mq.h
index 085a8a73e3..1a2ba040cb 100644
--- a/src/include/storage/shm_mq.h
+++ b/src/include/storage/shm_mq.h
@@ -28,8 +28,8 @@ typedef struct shm_mq_handle shm_mq_handle;
/* Descriptors for a single write spanning multiple locations. */
typedef struct
{
- const char *data;
- Size len;
+ const char *data;
+ Size len;
} shm_mq_iovec;
/* Possible results of a send or receive operation. */
@@ -69,7 +69,7 @@ extern void shm_mq_detach(shm_mq *);
extern shm_mq_result shm_mq_send(shm_mq_handle *mqh,
Size nbytes, const void *data, bool nowait);
extern shm_mq_result shm_mq_sendv(shm_mq_handle *mqh,
- shm_mq_iovec *iov, int iovcnt, bool nowait);
+ shm_mq_iovec *iov, int iovcnt, bool nowait);
extern shm_mq_result shm_mq_receive(shm_mq_handle *mqh,
Size *nbytesp, void **datap, bool nowait);
diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h
index b6bcbeb317..d276eeb228 100644
--- a/src/include/tcop/deparse_utility.h
+++ b/src/include/tcop/deparse_utility.h
@@ -37,8 +37,8 @@ typedef enum CollectedCommandType
*/
typedef struct CollectedATSubcmd
{
- ObjectAddress address; /* affected column, constraint, index, ... */
- Node *parsetree;
+ ObjectAddress address; /* affected column, constraint, index, ... */
+ Node *parsetree;
} CollectedATSubcmd;
typedef struct CollectedCommand
@@ -54,52 +54,52 @@ typedef struct CollectedCommand
{
ObjectAddress address;
ObjectAddress secondaryObject;
- } simple;
+ } simple;
/* ALTER TABLE, and internal uses thereof */
struct
{
- Oid objectId;
- Oid classId;
- List *subcmds;
- } alterTable;
+ Oid objectId;
+ Oid classId;
+ List *subcmds;
+ } alterTable;
/* GRANT / REVOKE */
struct
{
InternalGrant *istmt;
- } grant;
+ } grant;
/* ALTER OPERATOR FAMILY */
struct
{
ObjectAddress address;
- List *operators;
- List *procedures;
- } opfam;
+ List *operators;
+ List *procedures;
+ } opfam;
/* CREATE OPERATOR CLASS */
struct
{
ObjectAddress address;
- List *operators;
- List *procedures;
- } createopc;
+ List *operators;
+ List *procedures;
+ } createopc;
/* ALTER TEXT SEARCH CONFIGURATION ADD/ALTER/DROP MAPPING */
struct
{
ObjectAddress address;
- Oid *dictIds;
- int ndicts;
- } atscfg;
+ Oid *dictIds;
+ int ndicts;
+ } atscfg;
/* ALTER DEFAULT PRIVILEGES */
struct
{
GrantObjectType objtype;
- } defprivs;
- } d;
+ } defprivs;
+ } d;
} CollectedCommand;
-#endif /* DEPARSE_UTILITY_H */
+#endif /* DEPARSE_UTILITY_H */
diff --git a/src/include/tcop/fastpath.h b/src/include/tcop/fastpath.h
index 47028cb113..dc6905d48c 100644
--- a/src/include/tcop/fastpath.h
+++ b/src/include/tcop/fastpath.h
@@ -15,7 +15,7 @@
#include "lib/stringinfo.h"
-extern int GetOldFunctionMessage(StringInfo buf);
+extern int GetOldFunctionMessage(StringInfo buf);
extern int HandleFunctionRequest(StringInfo msgBuf);
#endif /* FASTPATH_H */
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index d747579e7a..915ea39be2 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -230,7 +230,7 @@ extern bool is_admin_of_role(Oid member, Oid role);
extern void check_is_member_of_role(Oid member, Oid role);
extern Oid get_role_oid(const char *rolename, bool missing_ok);
extern Oid get_role_oid_or_public(const char *rolename);
-extern Oid get_rolespec_oid(const Node *node, bool missing_ok);
+extern Oid get_rolespec_oid(const Node *node, bool missing_ok);
extern HeapTuple get_rolespec_tuple(const Node *node);
extern char *get_rolespec_name(const Node *node);
diff --git a/src/include/utils/aclchk_internal.h b/src/include/utils/aclchk_internal.h
index 0855bf1d0d..8378567578 100644
--- a/src/include/utils/aclchk_internal.h
+++ b/src/include/utils/aclchk_internal.h
@@ -42,4 +42,4 @@ typedef struct
} InternalGrant;
-#endif /* ACLCHK_INTERNAL_H */
+#endif /* ACLCHK_INTERNAL_H */
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 1140c17792..51f25a2814 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -657,7 +657,7 @@ extern List *stringToQualifiedNameList(const char *string);
extern char *format_procedure(Oid procedure_oid);
extern char *format_procedure_qualified(Oid procedure_oid);
extern void format_procedure_parts(Oid operator_oid, List **objnames,
- List **objargs);
+ List **objargs);
extern char *format_operator(Oid operator_oid);
extern char *format_operator_qualified(Oid operator_oid);
extern void format_operator_parts(Oid operator_oid, List **objnames,
@@ -804,9 +804,9 @@ extern Datum textoverlay_no_len(PG_FUNCTION_ARGS);
extern Datum name_text(PG_FUNCTION_ARGS);
extern Datum text_name(PG_FUNCTION_ARGS);
extern int varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid);
-extern int varstr_levenshtein(const char *source, int slen, const char *target,
+extern int varstr_levenshtein(const char *source, int slen, const char *target,
int tlen, int ins_c, int del_c, int sub_c);
-extern int varstr_levenshtein_less_equal(const char *source, int slen,
+extern int varstr_levenshtein_less_equal(const char *source, int slen,
const char *target, int tlen, int ins_c,
int del_c, int sub_c, int max_d);
extern List *textToQualifiedNameList(text *textval);
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index ff78b70b96..a8191c94c3 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -202,7 +202,8 @@ typedef enum
#define GUC_SUPERUSER_ONLY 0x0100 /* show only to superusers */
#define GUC_IS_NAME 0x0200 /* limit string to NAMEDATALEN-1 */
#define GUC_NOT_WHILE_SEC_REST 0x0400 /* can't set if security restricted */
-#define GUC_DISALLOW_IN_AUTO_FILE 0x0800 /* can't set in PG_AUTOCONF_FILENAME */
+#define GUC_DISALLOW_IN_AUTO_FILE 0x0800 /* can't set in
+ * PG_AUTOCONF_FILENAME */
#define GUC_UNIT_KB 0x1000 /* value is in kilobytes */
#define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */
diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h
index c0f9cb9374..7a58ddb10b 100644
--- a/src/include/utils/guc_tables.h
+++ b/src/include/utils/guc_tables.h
@@ -167,7 +167,7 @@ struct config_generic
* Caution: the GUC_IS_IN_FILE bit is transient state for ProcessConfigFile.
* Do not assume that its value represents useful information elsewhere.
*/
-#define GUC_PENDING_RESTART 0x0002
+#define GUC_PENDING_RESTART 0x0002
/* GUC records for specific variable types */
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index 1d8293b223..296d20af83 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -122,6 +122,6 @@ extern JsonLexContext *makeJsonLexContextCstringLen(char *json,
*
* str agrument does not need to be nul-terminated.
*/
-extern bool IsValidJsonNumber(const char * str, int len);
+extern bool IsValidJsonNumber(const char *str, int len);
#endif /* JSONAPI_H */
diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h
index b02934a1ae..4d614430ce 100644
--- a/src/include/utils/jsonb.h
+++ b/src/include/utils/jsonb.h
@@ -244,7 +244,7 @@ struct JsonbValue
union
{
Numeric numeric;
- bool boolean;
+ bool boolean;
struct
{
int len;
@@ -401,9 +401,9 @@ extern Datum jsonb_pretty(PG_FUNCTION_ARGS);
extern Datum jsonb_concat(PG_FUNCTION_ARGS);
/* deletion */
-Datum jsonb_delete(PG_FUNCTION_ARGS);
-Datum jsonb_delete_idx(PG_FUNCTION_ARGS);
-Datum jsonb_delete_path(PG_FUNCTION_ARGS);
+Datum jsonb_delete(PG_FUNCTION_ARGS);
+Datum jsonb_delete_idx(PG_FUNCTION_ARGS);
+Datum jsonb_delete_path(PG_FUNCTION_ARGS);
/* replacement */
extern Datum jsonb_replace(PG_FUNCTION_ARGS);
@@ -431,7 +431,7 @@ extern void JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash);
extern char *JsonbToCString(StringInfo out, JsonbContainer *in,
int estimated_len);
extern char *JsonbToCStringIndent(StringInfo out, JsonbContainer *in,
- int estimated_len);
+ int estimated_len);
#endif /* __JSONB_H__ */
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index e2e5734ea7..a40c9b1273 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -102,8 +102,8 @@ extern Oid get_rel_namespace(Oid relid);
extern Oid get_rel_type_id(Oid relid);
extern char get_rel_relkind(Oid relid);
extern Oid get_rel_tablespace(Oid relid);
-extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
-extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes);
+extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
+extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes);
extern bool get_typisdefined(Oid typid);
extern int16 get_typlen(Oid typid);
extern bool get_typbyval(Oid typid);
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index 9861f0dac7..e56f5014a3 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -72,7 +72,7 @@ extern void *MemoryContextAlloc(MemoryContext context, Size size);
extern void *MemoryContextAllocZero(MemoryContext context, Size size);
extern void *MemoryContextAllocZeroAligned(MemoryContext context, Size size);
extern void *MemoryContextAllocExtended(MemoryContext context,
- Size size, int flags);
+ Size size, int flags);
extern void *palloc(Size size);
extern void *palloc0(Size size);
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index b4efe157f1..37bb0e933b 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -93,7 +93,7 @@ do { \
\
while (__len-- > 0) \
{ \
- int __tab_index = ((int) ((crc) >> 24) ^ *__data++) & 0xFF; \
+ int __tab_index = ((int) ((crc) >> 24) ^ *__data++) & 0xFF; \
(crc) = table[__tab_index] ^ ((crc) << 8); \
} \
} while (0)
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index ef206c47ea..90a018082f 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -109,7 +109,7 @@ typedef struct CachedPlanSource
double generic_cost; /* cost of generic plan, or -1 if not known */
double total_custom_cost; /* total cost of custom plans so far */
int num_custom_plans; /* number of plans included in total */
- bool hasRowSecurity; /* planned with row security? */
+ bool hasRowSecurity; /* planned with row security? */
int row_security_env; /* row security setting when planned */
bool rowSecurityDisabled; /* is row security disabled? */
} CachedPlanSource;
diff --git a/src/include/utils/rls.h b/src/include/utils/rls.h
index 867faa05ff..3770ddc216 100644
--- a/src/include/utils/rls.h
+++ b/src/include/utils/rls.h
@@ -14,15 +14,15 @@
#define RLS_H
/* GUC variable */
-extern int row_security;
+extern int row_security;
/* Possible values for row_security GUC */
typedef enum RowSecurityConfigType
{
- ROW_SECURITY_OFF, /* RLS never applied- error thrown if no priv */
- ROW_SECURITY_ON, /* normal case, RLS applied for regular users */
- ROW_SECURITY_FORCE /* RLS applied for superusers and table owners */
-} RowSecurityConfigType;
+ ROW_SECURITY_OFF, /* RLS never applied- error thrown if no priv */
+ ROW_SECURITY_ON, /* normal case, RLS applied for regular users */
+ ROW_SECURITY_FORCE /* RLS applied for superusers and table owners */
+} RowSecurityConfigType;
/*
* Used by callers of check_enable_rls.
@@ -48,11 +48,11 @@ typedef enum RowSecurityConfigType
*/
enum CheckEnableRlsResult
{
- RLS_NONE,
- RLS_NONE_ENV,
- RLS_ENABLED
+ RLS_NONE,
+ RLS_NONE_ENV,
+ RLS_ENABLED
};
-extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
+extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
#endif /* RLS_H */
diff --git a/src/include/utils/ruleutils.h b/src/include/utils/ruleutils.h
index fed9c7b6ff..3494b13b0f 100644
--- a/src/include/utils/ruleutils.h
+++ b/src/include/utils/ruleutils.h
@@ -32,4 +32,4 @@ extern List *select_rtable_names_for_explain(List *rtable,
Bitmapset *rels_used);
extern char *generate_collation_name(Oid collid);
-#endif /* RULEUTILS_H */
+#endif /* RULEUTILS_H */
diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h
index 476bb00234..1653ed0aa4 100644
--- a/src/include/utils/sampling.h
+++ b/src/include/utils/sampling.h
@@ -20,7 +20,7 @@
typedef unsigned short SamplerRandomState[3];
extern void sampler_random_init_state(long seed,
- SamplerRandomState randstate);
+ SamplerRandomState randstate);
extern double sampler_random_fract(SamplerRandomState randstate);
/* Block sampling methods */
@@ -32,7 +32,7 @@ typedef struct
int n; /* desired sample size */
BlockNumber t; /* current block number */
int m; /* blocks selected so far */
- SamplerRandomState randstate; /* random generator state */
+ SamplerRandomState randstate; /* random generator state */
} BlockSamplerData;
typedef BlockSamplerData *BlockSampler;
@@ -46,8 +46,8 @@ extern BlockNumber BlockSampler_Next(BlockSampler bs);
typedef struct
{
- double W;
- SamplerRandomState randstate; /* random generator state */
+ double W;
+ SamplerRandomState randstate; /* random generator state */
} ReservoirStateData;
typedef ReservoirStateData *ReservoirState;
@@ -62,4 +62,4 @@ extern double anl_random_fract(void);
extern double anl_init_selection_state(int n);
extern double anl_get_next_S(double t, int n, double *stateptr);
-#endif /* SAMPLING_H */
+#endif /* SAMPLING_H */
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index fdca7130bb..b3d8017b8b 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -185,7 +185,7 @@ extern void mergejoinscansel(PlannerInfo *root, Node *clause,
Selectivity *rightstart, Selectivity *rightend);
extern double estimate_num_groups(PlannerInfo *root, List *groupExprs,
- double input_rows, List **pgset);
+ double input_rows, List **pgset);
extern Selectivity estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey,
double nbuckets);
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index a734bf0075..cbf1bbdeb1 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -118,7 +118,7 @@ typedef enum
HeapTupleSelfUpdated,
HeapTupleUpdated,
HeapTupleBeingUpdated,
- HeapTupleWouldBlock /* can be returned by heap_tuple_lock */
+ HeapTupleWouldBlock /* can be returned by heap_tuple_lock */
} HTSU_Result;
#endif /* SNAPSHOT_H */
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index 44c596f507..787404ed90 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -100,18 +100,18 @@ typedef struct SortSupportData
* INT_MIN, as callers are allowed to negate the result before using it.
*
* This may be either the authoritative comparator, or the abbreviated
- * comparator. Core code may switch this over the initial preference of an
- * opclass support function despite originally indicating abbreviation was
- * applicable, by assigning the authoritative comparator back.
+ * comparator. Core code may switch this over the initial preference of
+ * an opclass support function despite originally indicating abbreviation
+ * was applicable, by assigning the authoritative comparator back.
*/
int (*comparator) (Datum x, Datum y, SortSupport ssup);
/*
* "Abbreviated key" infrastructure follows.
*
- * All callbacks must be set by sortsupport opclasses that make use of this
- * optional additional infrastructure (unless for whatever reasons the
- * opclass doesn't proceed with abbreviation, in which case
+ * All callbacks must be set by sortsupport opclasses that make use of
+ * this optional additional infrastructure (unless for whatever reasons
+ * the opclass doesn't proceed with abbreviation, in which case
* abbrev_converter must not be set).
*
* This allows opclass authors to supply a conversion routine, used to
@@ -120,20 +120,20 @@ typedef struct SortSupportData
* pass-by-value Datum format that only the opclass has knowledge of. An
* alternative comparator, used only with this alternative representation
* must also be provided (which is assigned to "comparator"). This
- * representation is a simple approximation of the original Datum. It must
- * be possible to compare datums of this representation with each other
- * using the supplied alternative comparator, and have any non-zero return
- * value be a reliable proxy for what a proper comparison would indicate.
- * Returning zero from the alternative comparator does not indicate
- * equality, as with a conventional support routine 1, though -- it
- * indicates that it wasn't possible to determine how the two abbreviated
- * values compared. A proper comparison, using "abbrev_full_comparator"/
- * ApplySortAbbrevFullComparator() is therefore required. In many cases
- * this results in most or all comparisons only using the cheap alternative
- * comparison func, which is typically implemented as code that compiles to
- * just a few CPU instructions. CPU cache miss penalties are expensive; to
- * get good overall performance, sort infrastructure must heavily weigh
- * cache performance.
+ * representation is a simple approximation of the original Datum. It
+ * must be possible to compare datums of this representation with each
+ * other using the supplied alternative comparator, and have any non-zero
+ * return value be a reliable proxy for what a proper comparison would
+ * indicate. Returning zero from the alternative comparator does not
+ * indicate equality, as with a conventional support routine 1, though --
+ * it indicates that it wasn't possible to determine how the two
+ * abbreviated values compared. A proper comparison, using
+ * "abbrev_full_comparator"/ ApplySortAbbrevFullComparator() is therefore
+ * required. In many cases this results in most or all comparisons only
+ * using the cheap alternative comparison func, which is typically
+ * implemented as code that compiles to just a few CPU instructions. CPU
+ * cache miss penalties are expensive; to get good overall performance,
+ * sort infrastructure must heavily weigh cache performance.
*
* Opclass authors must consider the final cardinality of abbreviated keys
* when devising an encoding scheme. It's possible for a strategy to work
@@ -143,16 +143,16 @@ typedef struct SortSupportData
*/
/*
- * "abbreviate" concerns whether or not the abbreviated key optimization is
- * applicable in principle (that is, the sortsupport routine needs to know
- * if its dealing with a key where an abbreviated representation can
+ * "abbreviate" concerns whether or not the abbreviated key optimization
+ * is applicable in principle (that is, the sortsupport routine needs to
+ * know if its dealing with a key where an abbreviated representation can
* usefully be packed together. Conventionally, this is the leading
* attribute key). Note, however, that in order to determine that
* abbreviation is not in play, the core code always checks whether or not
* the opclass has set abbrev_converter. This is a one way, one time
* message to the opclass.
*/
- bool abbreviate;
+ bool abbreviate;
/*
* Converter to abbreviated format, from original representation. Core
@@ -161,24 +161,25 @@ typedef struct SortSupportData
* guaranteed NOT NULL, because it doesn't make sense to factor NULLness
* into ad-hoc cost model.
*
- * abbrev_converter is tested to see if abbreviation is in play. Core code
- * may set it to NULL to indicate abbreviation should not be used (which is
- * something sortsupport routines need not concern themselves with).
- * However, sortsupport routines must not set it when it is immediately
- * established that abbreviation should not proceed (e.g., for !abbreviate
- * calls, or due to platform-specific impediments to using abbreviation).
+ * abbrev_converter is tested to see if abbreviation is in play. Core
+ * code may set it to NULL to indicate abbreviation should not be used
+ * (which is something sortsupport routines need not concern themselves
+ * with). However, sortsupport routines must not set it when it is
+ * immediately established that abbreviation should not proceed (e.g., for
+ * !abbreviate calls, or due to platform-specific impediments to using
+ * abbreviation).
*/
- Datum (*abbrev_converter) (Datum original, SortSupport ssup);
+ Datum (*abbrev_converter) (Datum original, SortSupport ssup);
/*
- * abbrev_abort callback allows clients to verify that the current strategy
- * is working out, using a sortsupport routine defined ad-hoc cost model.
- * If there is a lot of duplicate abbreviated keys in practice, it's useful
- * to be able to abandon the strategy before paying too high a cost in
- * conversion (perhaps certain opclass-specific adaptations are useful
- * too).
+ * abbrev_abort callback allows clients to verify that the current
+ * strategy is working out, using a sortsupport routine defined ad-hoc
+ * cost model. If there is a lot of duplicate abbreviated keys in
+ * practice, it's useful to be able to abandon the strategy before paying
+ * too high a cost in conversion (perhaps certain opclass-specific
+ * adaptations are useful too).
*/
- bool (*abbrev_abort) (int memtupcount, SortSupport ssup);
+ bool (*abbrev_abort) (int memtupcount, SortSupport ssup);
/*
* Full, authoritative comparator for key that an abbreviated
@@ -200,8 +201,8 @@ extern int ApplySortComparator(Datum datum1, bool isNull1,
Datum datum2, bool isNull2,
SortSupport ssup);
extern int ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
- Datum datum2, bool isNull2,
- SortSupport ssup);
+ Datum datum2, bool isNull2,
+ SortSupport ssup);
#endif /* !PG_USE_INLINE */
#if defined(PG_USE_INLINE) || defined(SORTSUPPORT_INCLUDE_DEFINITIONS)
/*
@@ -284,6 +285,6 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
extern void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup);
extern void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup);
extern void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy,
- SortSupport ssup);
+ SortSupport ssup);
#endif /* SORTSUPPORT_H */
diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c
index 2dcb9153da..c3cd94682d 100644
--- a/src/interfaces/ecpg/ecpglib/data.c
+++ b/src/interfaces/ecpg/ecpglib/data.c
@@ -291,7 +291,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
date ddres;
timestamp tres;
interval *ires;
- char *endptr, endchar;
+ char *endptr,
+ endchar;
case ECPGt_short:
case ECPGt_int:
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 22ce55b60a..bcb38d25f8 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -499,9 +499,9 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
char *newcopy = NULL;
/*
- * arrays are not possible unless the column is an array, too
- * FIXME: we do not know if the column is an array here
- * array input to singleton column will result in a runtime error
+ * arrays are not possible unless the column is an array, too FIXME: we do
+ * not know if the column is an array here array input to singleton column
+ * will result in a runtime error
*/
/*
@@ -852,7 +852,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
mallocedval = ecpg_strdup("", lineno);
if (!mallocedval)
- return false;
+ return false;
for (element = 0; element < asize; element++)
{
@@ -915,7 +915,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
mallocedval = ecpg_strdup("", lineno);
if (!mallocedval)
- return false;
+ return false;
for (element = 0; element < asize; element++)
{
@@ -962,7 +962,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
mallocedval = ecpg_strdup("", lineno);
if (!mallocedval)
- return false;
+ return false;
for (element = 0; element < asize; element++)
{
@@ -1009,7 +1009,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
mallocedval = ecpg_strdup("", lineno);
if (!mallocedval)
- return false;
+ return false;
for (element = 0; element < asize; element++)
{
diff --git a/src/interfaces/ecpg/ecpglib/memory.c b/src/interfaces/ecpg/ecpglib/memory.c
index dffc3a7618..9c1d20efc5 100644
--- a/src/interfaces/ecpg/ecpglib/memory.c
+++ b/src/interfaces/ecpg/ecpglib/memory.c
@@ -107,7 +107,7 @@ static struct auto_mem *auto_allocs = NULL;
char *
ecpg_auto_alloc(long size, int lineno)
{
- void *ptr = (void *) ecpg_alloc(size, lineno);
+ void *ptr = (void *) ecpg_alloc(size, lineno);
if (!ptr)
return NULL;
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index 588bb63e53..74557425a9 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -42,17 +42,16 @@ my %replace_token = (
# or in the block
my %replace_string = (
- 'NOT_LA' => 'not',
- 'NULLS_LA' => 'nulls',
- 'WITH_LA' => 'with',
- 'TYPECAST' => '::',
- 'DOT_DOT' => '..',
- 'COLON_EQUALS' => ':=',
- 'EQUALS_GREATER' => '=>',
- 'LESS_EQUALS' => '<=',
- 'GREATER_EQUALS' => '>=',
- 'NOT_EQUALS' => '<>',
-);
+ 'NOT_LA' => 'not',
+ 'NULLS_LA' => 'nulls',
+ 'WITH_LA' => 'with',
+ 'TYPECAST' => '::',
+ 'DOT_DOT' => '..',
+ 'COLON_EQUALS' => ':=',
+ 'EQUALS_GREATER' => '=>',
+ 'LESS_EQUALS' => '<=',
+ 'GREATER_EQUALS' => '>=',
+ 'NOT_EQUALS' => '<>',);
# specific replace_types for specific non-terminals - never include the ':'
# ECPG-only replace_types are defined in ecpg-replace_types
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index e7c7a256e6..a45f4cba34 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -2011,7 +2011,7 @@ keep_going: /* We will come back to here until there is
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not look up local user ID %d: %s\n"),
(int) uid,
- pqStrerror(passerr, sebuf, sizeof(sebuf)));
+ pqStrerror(passerr, sebuf, sizeof(sebuf)));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("local user with ID %d does not exist\n"),
@@ -3845,7 +3845,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
if (!options[i].val)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("out of memory\n"));
+ libpq_gettext("out of memory\n"));
free(result);
return 3;
}
@@ -4085,7 +4085,7 @@ parseServiceFile(const char *serviceFile,
if (!options[i].val)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("out of memory\n"));
+ libpq_gettext("out of memory\n"));
fclose(f);
return 3;
}
@@ -4516,7 +4516,7 @@ conninfo_array_parse(const char *const * keywords, const char *const * values,
if (!options[k].val)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("out of memory\n"));
+ libpq_gettext("out of memory\n"));
PQconninfoFree(options);
PQconninfoFree(dbname_options);
return NULL;
@@ -4526,6 +4526,7 @@ conninfo_array_parse(const char *const * keywords, const char *const * values,
}
}
}
+
/*
* Forget the parsed connection string, so that any subsequent
* dbname parameters will not be expanded.
@@ -5018,7 +5019,7 @@ conninfo_uri_parse_params(char *params,
/* Insert generic message if conninfo_storeval didn't give one. */
if (errorMessage->len == 0)
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid URI query parameter: \"%s\"\n"),
+ libpq_gettext("invalid URI query parameter: \"%s\"\n"),
keyword);
/* And fail. */
if (malloced)
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index 25aecc2f14..0dbcf73222 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -744,10 +744,10 @@ retry3:
* the file selected for reading already.
*
* In SSL mode it's even worse: SSL_read() could say WANT_READ and then
- * data could arrive before we make the pqReadReady() test, but the
- * second SSL_read() could still say WANT_READ because the data received
- * was not a complete SSL record. So we must play dumb and assume there
- * is more data, relying on the SSL layer to detect true EOF.
+ * data could arrive before we make the pqReadReady() test, but the second
+ * SSL_read() could still say WANT_READ because the data received was not
+ * a complete SSL record. So we must play dumb and assume there is more
+ * data, relying on the SSL layer to detect true EOF.
*/
#ifdef USE_SSL
@@ -916,9 +916,9 @@ pqSendSome(PGconn *conn, int len)
* might not arrive until after we've gone to sleep. Therefore,
* we wait for either read ready or write ready.
*
- * In non-blocking mode, we don't wait here directly, but return
- * 1 to indicate that data is still pending. The caller should
- * wait for both read and write ready conditions, and call
+ * In non-blocking mode, we don't wait here directly, but return 1
+ * to indicate that data is still pending. The caller should wait
+ * for both read and write ready conditions, and call
* PQconsumeInput() on read ready, but just in case it doesn't, we
* call pqReadData() ourselves before returning. That's not
* enough if the data has not arrived yet, but it's the best we
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index 0cc5e8d33d..d4069b9e0b 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -64,19 +64,19 @@
static bool verify_peer_name_matches_certificate(PGconn *);
static int verify_cb(int ok, X509_STORE_CTX *ctx);
-static int verify_peer_name_matches_certificate_name(PGconn *conn,
- ASN1_STRING *name,
- char **store_name);
+static int verify_peer_name_matches_certificate_name(PGconn *conn,
+ ASN1_STRING *name,
+ char **store_name);
static void destroy_ssl_system(void);
static int initialize_SSL(PGconn *conn);
static PostgresPollingStatusType open_client_SSL(PGconn *);
static char *SSLerrmessage(void);
static void SSLerrfree(char *buf);
-static int my_sock_read(BIO *h, char *buf, int size);
-static int my_sock_write(BIO *h, const char *buf, int size);
+static int my_sock_read(BIO *h, char *buf, int size);
+static int my_sock_write(BIO *h, const char *buf, int size);
static BIO_METHOD *my_BIO_s_socket(void);
-static int my_SSL_set_fd(PGconn *conn, int fd);
+static int my_SSL_set_fd(PGconn *conn, int fd);
static bool pq_init_ssl_lib = true;
@@ -187,7 +187,7 @@ pgtls_open_client(PGconn *conn)
}
/*
- * Is there unread data waiting in the SSL read buffer?
+ * Is there unread data waiting in the SSL read buffer?
*/
bool
pgtls_read_pending(PGconn *conn)
@@ -221,7 +221,7 @@ rloop:
{
/* Not supposed to happen, so we don't translate the msg */
printfPQExpBuffer(&conn->errorMessage,
- "SSL_read failed but did not provide error information\n");
+ "SSL_read failed but did not provide error information\n");
/* assume the connection is broken */
result_errno = ECONNRESET;
}
@@ -247,7 +247,7 @@ rloop:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
+ "\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
else
printfPQExpBuffer(&conn->errorMessage,
@@ -279,12 +279,12 @@ rloop:
case SSL_ERROR_ZERO_RETURN:
/*
- * Per OpenSSL documentation, this error code is only returned
- * for a clean connection closure, so we should not report it
- * as a server crash.
+ * Per OpenSSL documentation, this error code is only returned for
+ * a clean connection closure, so we should not report it as a
+ * server crash.
*/
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL connection has been closed unexpectedly\n"));
+ libpq_gettext("SSL connection has been closed unexpectedly\n"));
result_errno = ECONNRESET;
n = -1;
break;
@@ -329,7 +329,7 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
{
/* Not supposed to happen, so we don't translate the msg */
printfPQExpBuffer(&conn->errorMessage,
- "SSL_write failed but did not provide error information\n");
+ "SSL_write failed but did not provide error information\n");
/* assume the connection is broken */
result_errno = ECONNRESET;
}
@@ -337,9 +337,8 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
case SSL_ERROR_WANT_READ:
/*
- * Returning 0 here causes caller to wait for write-ready,
- * which is not really the right thing, but it's the best we
- * can do.
+ * Returning 0 here causes caller to wait for write-ready, which
+ * is not really the right thing, but it's the best we can do.
*/
n = 0;
break;
@@ -354,7 +353,7 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
+ "\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
else
printfPQExpBuffer(&conn->errorMessage,
@@ -386,12 +385,12 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
case SSL_ERROR_ZERO_RETURN:
/*
- * Per OpenSSL documentation, this error code is only returned
- * for a clean connection closure, so we should not report it
- * as a server crash.
+ * Per OpenSSL documentation, this error code is only returned for
+ * a clean connection closure, so we should not report it as a
+ * server crash.
*/
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL connection has been closed unexpectedly\n"));
+ libpq_gettext("SSL connection has been closed unexpectedly\n"));
result_errno = ECONNRESET;
n = -1;
break;
@@ -509,7 +508,7 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry,
if (name_entry == NULL)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL certificate's name entry is missing\n"));
+ libpq_gettext("SSL certificate's name entry is missing\n"));
return -1;
}
@@ -539,7 +538,7 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry,
{
free(name);
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL certificate's name contains embedded null\n"));
+ libpq_gettext("SSL certificate's name contains embedded null\n"));
return -1;
}
@@ -574,8 +573,9 @@ verify_peer_name_matches_certificate(PGconn *conn)
bool found_match = false;
bool got_error = false;
char *first_name = NULL;
+
STACK_OF(GENERAL_NAME) *peer_san;
- int i;
+ int i;
int rc;
/*
@@ -614,7 +614,7 @@ verify_peer_name_matches_certificate(PGconn *conn)
names_examined++;
rc = verify_peer_name_matches_certificate_name(conn,
- name->d.dNSName,
+ name->d.dNSName,
&alt_name);
if (rc == -1)
got_error = true;
@@ -634,6 +634,7 @@ verify_peer_name_matches_certificate(PGconn *conn)
}
sk_GENERAL_NAME_free(peer_san);
}
+
/*
* If there is no subjectAltName extension of type dNSName, check the
* Common Name.
@@ -656,10 +657,10 @@ verify_peer_name_matches_certificate(PGconn *conn)
{
names_examined++;
rc = verify_peer_name_matches_certificate_name(
- conn,
- X509_NAME_ENTRY_get_data(
- X509_NAME_get_entry(subject_name, cn_index)),
- &first_name);
+ conn,
+ X509_NAME_ENTRY_get_data(
+ X509_NAME_get_entry(subject_name, cn_index)),
+ &first_name);
if (rc == -1)
got_error = true;
@@ -672,10 +673,10 @@ verify_peer_name_matches_certificate(PGconn *conn)
if (!found_match && !got_error)
{
/*
- * No match. Include the name from the server certificate in the
- * error message, to aid debugging broken configurations. If there
- * are multiple names, only print the first one to avoid an overly
- * long error message.
+ * No match. Include the name from the server certificate in the error
+ * message, to aid debugging broken configurations. If there are
+ * multiple names, only print the first one to avoid an overly long
+ * error message.
*/
if (names_examined > 1)
{
@@ -806,8 +807,10 @@ pgtls_init(PGconn *conn)
if (ssl_open_connections++ == 0)
{
- /* These are only required for threaded libcrypto applications, but
- * make sure we don't stomp on them if they're already set. */
+ /*
+ * These are only required for threaded libcrypto applications,
+ * but make sure we don't stomp on them if they're already set.
+ */
if (CRYPTO_get_id_callback() == NULL)
CRYPTO_set_id_callback(pq_threadidcallback);
if (CRYPTO_get_locking_callback() == NULL)
@@ -888,8 +891,10 @@ destroy_ssl_system(void)
if (pq_init_crypto_lib && ssl_open_connections == 0)
{
- /* No connections left, unregister libcrypto callbacks, if no one
- * registered different ones in the meantime. */
+ /*
+ * No connections left, unregister libcrypto callbacks, if no one
+ * registered different ones in the meantime.
+ */
if (CRYPTO_get_locking_callback() == pq_lockingcallback)
CRYPTO_set_locking_callback(NULL);
if (CRYPTO_get_id_callback() == pq_threadidcallback)
@@ -1538,6 +1543,7 @@ PQsslAttributes(PGconn *conn)
"protocol",
NULL
};
+
return result;
}
@@ -1555,7 +1561,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
if (strcmp(attribute_name, "key_bits") == 0)
{
static char sslbits_str[10];
- int sslbits;
+ int sslbits;
SSL_get_cipher_bits(conn->ssl, &sslbits);
snprintf(sslbits_str, sizeof(sslbits_str), "%d", sslbits);
@@ -1571,7 +1577,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
if (strcmp(attribute_name, "protocol") == 0)
return SSL_get_version(conn->ssl);
- return NULL; /* unknown attribute */
+ return NULL; /* unknown attribute */
}
/*
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 57c572bd09..db91e52ee9 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -251,14 +251,14 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
+ "\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
break;
#endif
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not receive data from server: %s\n"),
+ libpq_gettext("could not receive data from server: %s\n"),
SOCK_STRERROR(result_errno,
sebuf, sizeof(sebuf)));
break;
@@ -323,9 +323,9 @@ retry_masked:
result_errno = SOCK_ERRNO;
/*
- * If we see an EINVAL, it may be because MSG_NOSIGNAL isn't
- * available on this machine. So, clear sigpipe_flag so we don't
- * try the flag again, and retry the send().
+ * If we see an EINVAL, it may be because MSG_NOSIGNAL isn't available
+ * on this machine. So, clear sigpipe_flag so we don't try the flag
+ * again, and retry the send().
*/
#ifdef MSG_NOSIGNAL
if (flags != 0 && result_errno == EINVAL)
@@ -360,15 +360,15 @@ retry_masked:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
"server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
+ "\tThis probably means the server terminated abnormally\n"
"\tbefore or while processing the request.\n"));
break;
default:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not send data to server: %s\n"),
- SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(result_errno,
+ sebuf, sizeof(sebuf)));
break;
}
}
@@ -411,7 +411,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
const char **
PQsslAttributes(PGconn *conn)
{
- static const char *result[] = { NULL };
+ static const char *result[] = {NULL};
return result;
}
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 840df2ee0b..78baaac05d 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -2690,7 +2690,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger)
MemoryContext oldcxt;
protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
- Anum_pg_proc_protrftypes, &isnull);
+ Anum_pg_proc_protrftypes, &isnull);
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
prodesc->trftypes = isnull ? NIL : oid_array_to_list(protrftypes_datum);
MemoryContextSwitchTo(oldcxt);
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index d5d44f0684..16ff84560b 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -172,8 +172,9 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger)
{
MemoryContext oldcxt;
- Datum protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
- Anum_pg_proc_protrftypes, &isnull);
+ Datum protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
+ Anum_pg_proc_protrftypes, &isnull);
+
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
proc->trftypes = isnull ? NIL : oid_array_to_list(protrftypes_datum);
MemoryContextSwitchTo(oldcxt);
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
index 7b65a93183..7a45b22871 100644
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -401,18 +401,18 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup, Oid langid, List *t
arg->func = PLyObject_ToComposite;
}
else
- switch (base_type)
- {
- case BOOLOID:
- arg->func = PLyObject_ToBool;
- break;
- case BYTEAOID:
- arg->func = PLyObject_ToBytea;
- break;
- default:
- arg->func = PLyObject_ToDatum;
- break;
- }
+ switch (base_type)
+ {
+ case BOOLOID:
+ arg->func = PLyObject_ToBool;
+ break;
+ case BYTEAOID:
+ arg->func = PLyObject_ToBytea;
+ break;
+ default:
+ arg->func = PLyObject_ToDatum;
+ break;
+ }
if (element_type)
{
@@ -464,39 +464,39 @@ PLy_input_datum_func2(PLyDatumToOb *arg, Oid typeOid, HeapTuple typeTup, Oid lan
perm_fmgr_info(funcid, &arg->typtransform);
}
else
- switch (base_type)
- {
- case BOOLOID:
- arg->func = PLyBool_FromBool;
- break;
- case FLOAT4OID:
- arg->func = PLyFloat_FromFloat4;
- break;
- case FLOAT8OID:
- arg->func = PLyFloat_FromFloat8;
- break;
- case NUMERICOID:
- arg->func = PLyDecimal_FromNumeric;
- break;
- case INT2OID:
- arg->func = PLyInt_FromInt16;
- break;
- case INT4OID:
- arg->func = PLyInt_FromInt32;
- break;
- case INT8OID:
- arg->func = PLyLong_FromInt64;
- break;
- case OIDOID:
- arg->func = PLyLong_FromOid;
- break;
- case BYTEAOID:
- arg->func = PLyBytes_FromBytea;
- break;
- default:
- arg->func = PLyString_FromDatum;
- break;
- }
+ switch (base_type)
+ {
+ case BOOLOID:
+ arg->func = PLyBool_FromBool;
+ break;
+ case FLOAT4OID:
+ arg->func = PLyFloat_FromFloat4;
+ break;
+ case FLOAT8OID:
+ arg->func = PLyFloat_FromFloat8;
+ break;
+ case NUMERICOID:
+ arg->func = PLyDecimal_FromNumeric;
+ break;
+ case INT2OID:
+ arg->func = PLyInt_FromInt16;
+ break;
+ case INT4OID:
+ arg->func = PLyInt_FromInt32;
+ break;
+ case INT8OID:
+ arg->func = PLyLong_FromInt64;
+ break;
+ case OIDOID:
+ arg->func = PLyLong_FromOid;
+ break;
+ case BYTEAOID:
+ arg->func = PLyBytes_FromBytea;
+ break;
+ default:
+ arg->func = PLyString_FromDatum;
+ break;
+ }
if (element_type)
{
diff --git a/src/port/gettimeofday.c b/src/port/gettimeofday.c
index 3c60238518..af1157134b 100644
--- a/src/port/gettimeofday.c
+++ b/src/port/gettimeofday.c
@@ -38,14 +38,14 @@ static const unsigned __int64 epoch = UINT64CONST(116444736000000000);
* January 1, 1601 (UTC).
*/
#define FILETIME_UNITS_PER_SEC 10000000L
-#define FILETIME_UNITS_PER_USEC 10
+#define FILETIME_UNITS_PER_USEC 10
/*
* Both GetSystemTimeAsFileTime and GetSystemTimePreciseAsFileTime share a
* signature, so we can just store a pointer to whichever we find. This
* is the pointer's type.
*/
-typedef VOID (WINAPI *PgGetSystemTimeFn)(LPFILETIME);
+typedef VOID(WINAPI * PgGetSystemTimeFn) (LPFILETIME);
/* One-time initializer function, must match that signature. */
static void WINAPI init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime);
@@ -71,12 +71,12 @@ init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime)
*
* While we could look up the Windows version and skip this on Windows
* versions below Windows 8 / Windows Server 2012 there isn't much point,
- * and determining the windows version is its self somewhat Windows version
- * and development SDK specific...
+ * and determining the windows version is its self somewhat Windows
+ * version and development SDK specific...
*/
pg_get_system_time = (PgGetSystemTimeFn) GetProcAddress(
- GetModuleHandle(TEXT("kernel32.dll")),
- "GetSystemTimePreciseAsFileTime");
+ GetModuleHandle(TEXT("kernel32.dll")),
+ "GetSystemTimePreciseAsFileTime");
if (pg_get_system_time == NULL)
{
/*
@@ -84,15 +84,15 @@ init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime)
* the function isn't present. No other error should occur.
*
* We can't report an error here because this might be running in
- * frontend code; and even if we're in the backend, it's too early
- * to elog(...) if we get some unexpected error. Also, it's not a
+ * frontend code; and even if we're in the backend, it's too early to
+ * elog(...) if we get some unexpected error. Also, it's not a
* serious problem, so just silently fall back to
* GetSystemTimeAsFileTime irrespective of why the failure occurred.
*/
pg_get_system_time = &GetSystemTimeAsFileTime;
}
- (*pg_get_system_time)(lpSystemTimeAsFileTime);
+ (*pg_get_system_time) (lpSystemTimeAsFileTime);
}
/*
@@ -107,13 +107,13 @@ gettimeofday(struct timeval * tp, struct timezone * tzp)
FILETIME file_time;
ULARGE_INTEGER ularge;
- (*pg_get_system_time)(&file_time);
+ (*pg_get_system_time) (&file_time);
ularge.LowPart = file_time.dwLowDateTime;
ularge.HighPart = file_time.dwHighDateTime;
tp->tv_sec = (long) ((ularge.QuadPart - epoch) / FILETIME_UNITS_PER_SEC);
tp->tv_usec = (long) (((ularge.QuadPart - epoch) % FILETIME_UNITS_PER_SEC)
- / FILETIME_UNITS_PER_USEC);
+ / FILETIME_UNITS_PER_USEC);
return 0;
}
diff --git a/src/port/pg_crc32c_choose.c b/src/port/pg_crc32c_choose.c
index ba0d1670f8..5a297ae30c 100644
--- a/src/port/pg_crc32c_choose.c
+++ b/src/port/pg_crc32c_choose.c
@@ -42,7 +42,7 @@ pg_crc32c_sse42_available(void)
#error cpuid instruction not available
#endif
- return (exx[2] & (1 << 20)) != 0; /* SSE 4.2 */
+ return (exx[2] & (1 << 20)) != 0; /* SSE 4.2 */
}
/*
@@ -60,4 +60,4 @@ pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len)
return pg_comp_crc32c(crc, data, len);
}
-pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) = pg_comp_crc32c_choose;
+pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) = pg_comp_crc32c_choose;
diff --git a/src/port/pg_crc32c_sse42.c b/src/port/pg_crc32c_sse42.c
index a22a9dd78b..150d4cb15b 100644
--- a/src/port/pg_crc32c_sse42.c
+++ b/src/port/pg_crc32c_sse42.c
@@ -45,6 +45,7 @@ pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len)
p += 4;
}
#else
+
/*
* Process four bytes at a time. (The eight byte instruction is not
* available on the 32-bit x86 architecture).
@@ -54,7 +55,7 @@ pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len)
crc = _mm_crc32_u32(crc, *((const unsigned int *) p));
p += 4;
}
-#endif /* __x86_64__ */
+#endif /* __x86_64__ */
/* Process any remaining bytes one at a time. */
while (p < pend)
diff --git a/src/port/win32setlocale.c b/src/port/win32setlocale.c
index f253967c07..ca9d590697 100644
--- a/src/port/win32setlocale.c
+++ b/src/port/win32setlocale.c
@@ -40,9 +40,9 @@ struct locale_map
{
/*
* String in locale name to replace. Can be a single string (end is NULL),
- * or separate start and end strings. If two strings are given, the
- * locale name must contain both of them, and everything between them
- * is replaced. This is used for a poor-man's regexp search, allowing
+ * or separate start and end strings. If two strings are given, the locale
+ * name must contain both of them, and everything between them is
+ * replaced. This is used for a poor-man's regexp search, allowing
* replacement of "start.*end".
*/
const char *locale_name_start;
@@ -104,7 +104,7 @@ static const struct locale_map locale_map_result[] = {
#define MAX_LOCALE_NAME_LEN 100
static const char *
-map_locale(const struct locale_map *map, const char *locale)
+map_locale(const struct locale_map * map, const char *locale)
{
static char aliasbuf[MAX_LOCALE_NAME_LEN];
int i;
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
index f9ba4132e7..44a5cb0277 100644
--- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
+++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
@@ -63,8 +63,8 @@ Datum
get_altertable_subcmdtypes(PG_FUNCTION_ARGS)
{
CollectedCommand *cmd = (CollectedCommand *) PG_GETARG_POINTER(0);
- ArrayBuildState *astate = NULL;
- ListCell *cell;
+ ArrayBuildState *astate = NULL;
+ ListCell *cell;
if (cmd->type != SCT_AlterTable)
elog(ERROR, "command is not ALTER TABLE");
@@ -72,8 +72,8 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS)
foreach(cell, cmd->d.alterTable.subcmds)
{
CollectedATSubcmd *sub = lfirst(cell);
- AlterTableCmd *subcmd = (AlterTableCmd *) sub->parsetree;
- const char *strtype;
+ AlterTableCmd *subcmd = (AlterTableCmd *) sub->parsetree;
+ const char *strtype;
Assert(IsA(subcmd, AlterTableCmd));
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.c b/src/test/modules/test_rls_hooks/test_rls_hooks.c
index c2122e7981..61b62d55b4 100644
--- a/src/test/modules/test_rls_hooks/test_rls_hooks.c
+++ b/src/test/modules/test_rls_hooks/test_rls_hooks.c
@@ -35,11 +35,12 @@ PG_MODULE_MAGIC;
static row_security_policy_hook_type prev_row_security_policy_hook_permissive = NULL;
static row_security_policy_hook_type prev_row_security_policy_hook_restrictive = NULL;
-void _PG_init(void);
-void _PG_fini(void);
+void _PG_init(void);
+void _PG_fini(void);
/* Install hooks */
-void _PG_init(void)
+void
+_PG_init(void)
{
/* Save values for unload */
prev_row_security_policy_hook_permissive = row_security_policy_hook_permissive;
@@ -51,7 +52,8 @@ void _PG_init(void)
}
/* Uninstall hooks */
-void _PG_fini(void)
+void
+_PG_fini(void)
{
row_security_policy_hook_permissive = prev_row_security_policy_hook_permissive;
row_security_policy_hook_restrictive = prev_row_security_policy_hook_restrictive;
@@ -60,20 +62,20 @@ void _PG_fini(void)
/*
* Return permissive policies to be added
*/
-List*
+List *
test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
{
- List *policies = NIL;
- RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
- Datum role;
- FuncCall *n;
- Node *e;
- ColumnRef *c;
- ParseState *qual_pstate;
- RangeTblEntry *rte;
-
- if (strcmp(RelationGetRelationName(relation),"rls_test_permissive")
- && strcmp(RelationGetRelationName(relation),"rls_test_both"))
+ List *policies = NIL;
+ RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+ Datum role;
+ FuncCall *n;
+ Node *e;
+ ColumnRef *c;
+ ParseState *qual_pstate;
+ RangeTblEntry *rte;
+
+ if (strcmp(RelationGetRelationName(relation), "rls_test_permissive")
+ && strcmp(RelationGetRelationName(relation), "rls_test_both"))
return NIL;
qual_pstate = make_parsestate(NULL);
@@ -88,11 +90,11 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
policy->policy_id = InvalidOid;
policy->polcmd = '*';
policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true, 'i');
+
/*
- policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
- sizeof(bool), BoolGetDatum(true),
- false, true);
- */
+ * policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
+ * sizeof(bool), BoolGetDatum(true), false, true);
+ */
n = makeFuncCall(list_make2(makeString("pg_catalog"),
makeString("current_user")), NIL, 0);
@@ -101,11 +103,11 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
c->fields = list_make1(makeString("username"));
c->location = 0;
- e = (Node*) makeSimpleA_Expr(AEXPR_OP, "=", (Node*) n, (Node*) c, 0);
+ e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
- policy->qual = (Expr*) transformWhereClause(qual_pstate, copyObject(e),
- EXPR_KIND_WHERE,
- "POLICY");
+ policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+ EXPR_KIND_WHERE,
+ "POLICY");
policy->with_check_qual = copyObject(policy->qual);
policy->hassublinks = false;
@@ -118,21 +120,21 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
/*
* Return restrictive policies to be added
*/
-List*
+List *
test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
{
- List *policies = NIL;
- RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
- Datum role;
- FuncCall *n;
- Node *e;
- ColumnRef *c;
- ParseState *qual_pstate;
- RangeTblEntry *rte;
-
-
- if (strcmp(RelationGetRelationName(relation),"rls_test_restrictive")
- && strcmp(RelationGetRelationName(relation),"rls_test_both"))
+ List *policies = NIL;
+ RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+ Datum role;
+ FuncCall *n;
+ Node *e;
+ ColumnRef *c;
+ ParseState *qual_pstate;
+ RangeTblEntry *rte;
+
+
+ if (strcmp(RelationGetRelationName(relation), "rls_test_restrictive")
+ && strcmp(RelationGetRelationName(relation), "rls_test_both"))
return NIL;
qual_pstate = make_parsestate(NULL);
@@ -155,11 +157,11 @@ test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
c->fields = list_make1(makeString("supervisor"));
c->location = 0;
- e = (Node*) makeSimpleA_Expr(AEXPR_OP, "=", (Node*) n, (Node*) c, 0);
+ e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
- policy->qual = (Expr*) transformWhereClause(qual_pstate, copyObject(e),
- EXPR_KIND_WHERE,
- "POLICY");
+ policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+ EXPR_KIND_WHERE,
+ "POLICY");
policy->with_check_qual = copyObject(policy->qual);
policy->hassublinks = false;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index 003cd9a2cc..ef42366888 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -60,11 +60,15 @@ $ENV{PGPORT} = int($ENV{PGPORT}) % 65536;
sub tempdir
{
- return File::Temp::tempdir('tmp_testXXXX', DIR => $ENV{TESTDIR} || cwd(), CLEANUP => 1);
+ return File::Temp::tempdir(
+ 'tmp_testXXXX',
+ DIR => $ENV{TESTDIR} || cwd(),
+ CLEANUP => 1);
}
sub tempdir_short
{
+
# Use a separate temp dir outside the build tree for the
# Unix-domain socket, to avoid file name length issues.
return File::Temp::tempdir(CLEANUP => 1);
@@ -75,7 +79,7 @@ sub standard_initdb
my $pgdata = shift;
system_or_bail("initdb -D '$pgdata' -A trust -N >/dev/null");
system_or_bail("$ENV{top_builddir}/src/test/regress/pg_regress",
- '--config-auth', $pgdata);
+ '--config-auth', $pgdata);
}
my ($test_server_datadir, $test_server_logfile);
@@ -90,7 +94,7 @@ sub start_test_server
standard_initdb "$tempdir/pgdata";
$ret = system 'pg_ctl', '-D', "$tempdir/pgdata", '-s', '-w', '-l',
"$tempdir/logfile", '-o',
- "--fsync=off -k $tempdir_short --listen-addresses='' --log-statement=all",
+"--fsync=off -k $tempdir_short --listen-addresses='' --log-statement=all",
'start';
if ($ret != 0)
@@ -185,7 +189,8 @@ sub program_options_handling_ok
{
my ($cmd) = @_;
my ($stdout, $stderr);
- my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout, '2>', \$stderr;
+ my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout, '2>',
+ \$stderr;
ok(!$result, "$cmd with invalid option nonzero exit code");
isnt($stderr, '', "$cmd with invalid option prints error message");
}
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 2df5c1b5d6..cc260169a4 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1139,15 +1139,15 @@ spawn_process(const char *cmdline)
/* in parent */
return pid;
#else
- PROCESS_INFORMATION pi;
- char *cmdline2;
- HANDLE restrictedToken;
+ PROCESS_INFORMATION pi;
+ char *cmdline2;
+ HANDLE restrictedToken;
memset(&pi, 0, sizeof(pi));
cmdline2 = psprintf("cmd /c \"%s\"", cmdline);
- if((restrictedToken =
- CreateRestrictedProcess(cmdline2, &pi, progname)) == 0)
+ if ((restrictedToken =
+ CreateRestrictedProcess(cmdline2, &pi, progname)) == 0)
exit(2);
CloseHandle(pi.hThread);
@@ -1973,8 +1973,9 @@ help(void)
printf(_(" --schedule=FILE use test ordering schedule from FILE\n"));
printf(_(" (can be used multiple times to concatenate)\n"));
printf(_(" --temp-instance=DIR create a temporary instance in DIR\n"));
- printf(_(" --use-existing use an existing installation\n")); // XXX
- printf(_("\n"));
+ printf(_(" --use-existing use an existing installation\n"));
+ //XXX
+ printf(_("\n"));
printf(_("Options for \"temp-instance\" mode:\n"));
printf(_(" --no-locale use C locale\n"));
printf(_(" --port=PORT start postmaster on PORT\n"));
@@ -2446,8 +2447,8 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
/*
* If there were no errors, remove the temp instance immediately to
- * conserve disk space. (If there were errors, we leave the instance
- * in place for possible manual investigation.)
+ * conserve disk space. (If there were errors, we leave the instance in
+ * place for possible manual investigation.)
*/
if (temp_instance && fail_count == 0 && fail_ignore_count == 0)
{
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index d68c90cd9b..bd31a3d382 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -1,8 +1,8 @@
/*------------------------------------------------------------------------
*
* regress.c
- * Code for various C-language functions defined as part of the
- * regression tests.
+ * Code for various C-language functions defined as part of the
+ * regression tests.
*
* This code is released under the terms of the PostgreSQL License.
*
@@ -911,14 +911,14 @@ test_atomic_flag(void)
pg_atomic_clear_flag(&flag);
}
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
static void
test_atomic_uint32(void)
{
pg_atomic_uint32 var;
- uint32 expected;
- int i;
+ uint32 expected;
+ int i;
pg_atomic_init_u32(&var, 0);
@@ -955,7 +955,7 @@ test_atomic_uint32(void)
if (pg_atomic_fetch_add_u32(&var, INT_MAX) != INT_MAX)
elog(ERROR, "pg_atomic_add_fetch_u32() #3 wrong");
- pg_atomic_fetch_add_u32(&var, 1); /* top up to UINT_MAX */
+ pg_atomic_fetch_add_u32(&var, 1); /* top up to UINT_MAX */
if (pg_atomic_read_u32(&var) != UINT_MAX)
elog(ERROR, "atomic_read_u32() #2 wrong");
@@ -963,7 +963,7 @@ test_atomic_uint32(void)
if (pg_atomic_fetch_sub_u32(&var, INT_MAX) != UINT_MAX)
elog(ERROR, "pg_atomic_fetch_sub_u32() #2 wrong");
- if (pg_atomic_read_u32(&var) != (uint32)INT_MAX + 1)
+ if (pg_atomic_read_u32(&var) != (uint32) INT_MAX + 1)
elog(ERROR, "atomic_read_u32() #3 wrong: %u", pg_atomic_read_u32(&var));
expected = pg_atomic_sub_fetch_u32(&var, INT_MAX);
@@ -1018,8 +1018,8 @@ static void
test_atomic_uint64(void)
{
pg_atomic_uint64 var;
- uint64 expected;
- int i;
+ uint64 expected;
+ int i;
pg_atomic_init_u64(&var, 0);
@@ -1083,13 +1083,13 @@ test_atomic_uint64(void)
elog(ERROR, "pg_atomic_fetch_and_u64() #1 wrong");
if (pg_atomic_fetch_and_u64(&var, ~1) != 1)
- elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is "UINT64_FORMAT,
+ elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is " UINT64_FORMAT,
pg_atomic_read_u64(&var));
/* no bits set anymore */
if (pg_atomic_fetch_and_u64(&var, ~0) != 0)
elog(ERROR, "pg_atomic_fetch_and_u64() #3 wrong");
}
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
PG_FUNCTION_INFO_V1(test_atomic_ops);
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
index fda3afebc1..4ce4a69e74 100644
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -38,74 +38,81 @@ sub copy_files
foreach my $orig_file (@orig_files)
{
my $base_file = basename($orig_file);
- copy($orig_file, "$dest/$base_file") or die "Could not copy $orig_file to $dest";
+ copy($orig_file, "$dest/$base_file")
+ or die "Could not copy $orig_file to $dest";
}
}
sub configure_test_server_for_ssl
{
- my $tempdir = $_[0];
+ my $tempdir = $_[0];
- # Create test users and databases
- psql 'postgres', "CREATE USER ssltestuser";
- psql 'postgres', "CREATE USER anotheruser";
- psql 'postgres', "CREATE DATABASE trustdb";
- psql 'postgres', "CREATE DATABASE certdb";
+ # Create test users and databases
+ psql 'postgres', "CREATE USER ssltestuser";
+ psql 'postgres', "CREATE USER anotheruser";
+ psql 'postgres', "CREATE DATABASE trustdb";
+ psql 'postgres', "CREATE DATABASE certdb";
- # enable logging etc.
- open CONF, ">>$tempdir/pgdata/postgresql.conf";
- print CONF "fsync=off\n";
- print CONF "log_connections=on\n";
- print CONF "log_hostname=on\n";
- print CONF "log_statement=all\n";
+ # enable logging etc.
+ open CONF, ">>$tempdir/pgdata/postgresql.conf";
+ print CONF "fsync=off\n";
+ print CONF "log_connections=on\n";
+ print CONF "log_hostname=on\n";
+ print CONF "log_statement=all\n";
- # enable SSL and set up server key
- print CONF "include 'sslconfig.conf'";
+ # enable SSL and set up server key
+ print CONF "include 'sslconfig.conf'";
- close CONF;
+ close CONF;
- # Copy all server certificates and keys, and client root cert, to the data dir
- copy_files("ssl/server-*.crt", "$tempdir/pgdata");
- copy_files("ssl/server-*.key", "$tempdir/pgdata");
- system_or_bail "chmod 0600 '$tempdir'/pgdata/server-*.key";
- copy_files("ssl/root+client_ca.crt", "$tempdir/pgdata");
- copy_files("ssl/root+client.crl", "$tempdir/pgdata");
+# Copy all server certificates and keys, and client root cert, to the data dir
+ copy_files("ssl/server-*.crt", "$tempdir/pgdata");
+ copy_files("ssl/server-*.key", "$tempdir/pgdata");
+ system_or_bail "chmod 0600 '$tempdir'/pgdata/server-*.key";
+ copy_files("ssl/root+client_ca.crt", "$tempdir/pgdata");
+ copy_files("ssl/root+client.crl", "$tempdir/pgdata");
# Only accept SSL connections from localhost. Our tests don't depend on this
# but seems best to keep it as narrow as possible for security reasons.
#
# When connecting to certdb, also check the client certificate.
- open HBA, ">$tempdir/pgdata/pg_hba.conf";
- print HBA "# TYPE DATABASE USER ADDRESS METHOD\n";
- print HBA "hostssl trustdb ssltestuser 127.0.0.1/32 trust\n";
- print HBA "hostssl trustdb ssltestuser ::1/128 trust\n";
- print HBA "hostssl certdb ssltestuser 127.0.0.1/32 cert\n";
- print HBA "hostssl certdb ssltestuser ::1/128 cert\n";
- close HBA;
+ open HBA, ">$tempdir/pgdata/pg_hba.conf";
+ print HBA
+"# TYPE DATABASE USER ADDRESS METHOD\n";
+ print HBA
+"hostssl trustdb ssltestuser 127.0.0.1/32 trust\n";
+ print HBA
+"hostssl trustdb ssltestuser ::1/128 trust\n";
+ print HBA
+"hostssl certdb ssltestuser 127.0.0.1/32 cert\n";
+ print HBA
+"hostssl certdb ssltestuser ::1/128 cert\n";
+ close HBA;
}
# Change the configuration to use given server cert file, and restart
# the server so that the configuration takes effect.
sub switch_server_cert
{
- my $tempdir = $_[0];
- my $certfile = $_[1];
-
- diag "Restarting server with certfile \"$certfile\"...";
-
- open SSLCONF, ">$tempdir/pgdata/sslconfig.conf";
- print SSLCONF "ssl=on\n";
- print SSLCONF "ssl_ca_file='root+client_ca.crt'\n";
- print SSLCONF "ssl_cert_file='$certfile.crt'\n";
- print SSLCONF "ssl_key_file='$certfile.key'\n";
- print SSLCONF "ssl_crl_file='root+client.crl'\n";
- close SSLCONF;
-
- # Stop and restart server to reload the new config. We cannot use
- # restart_test_server() because that overrides listen_addresses to only all
- # Unix domain socket connections.
-
- system_or_bail 'pg_ctl', 'stop', '-s', '-D', "$tempdir/pgdata", '-w';
- system_or_bail 'pg_ctl', 'start', '-s', '-D', "$tempdir/pgdata", '-w', '-l',
- "$tempdir/logfile";
+ my $tempdir = $_[0];
+ my $certfile = $_[1];
+
+ diag "Restarting server with certfile \"$certfile\"...";
+
+ open SSLCONF, ">$tempdir/pgdata/sslconfig.conf";
+ print SSLCONF "ssl=on\n";
+ print SSLCONF "ssl_ca_file='root+client_ca.crt'\n";
+ print SSLCONF "ssl_cert_file='$certfile.crt'\n";
+ print SSLCONF "ssl_key_file='$certfile.key'\n";
+ print SSLCONF "ssl_crl_file='root+client.crl'\n";
+ close SSLCONF;
+
+ # Stop and restart server to reload the new config. We cannot use
+ # restart_test_server() because that overrides listen_addresses to only all
+ # Unix domain socket connections.
+
+ system_or_bail 'pg_ctl', 'stop', '-s', '-D', "$tempdir/pgdata", '-w';
+ system_or_bail 'pg_ctl', 'start', '-s', '-D', "$tempdir/pgdata", '-w',
+ '-l',
+ "$tempdir/logfile";
}
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index b492a56688..926b529198 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -23,9 +23,10 @@ BEGIN
# This is the hostname used to connect to the server. This cannot be a
# hostname, because the server certificate is always for the domain
# postgresql-ssl-regression.test.
-my $SERVERHOSTADDR='127.0.0.1';
+my $SERVERHOSTADDR = '127.0.0.1';
my $tempdir = TestLib::tempdir;
+
#my $tempdir = "tmp_check";
@@ -33,17 +34,17 @@ my $tempdir = TestLib::tempdir;
my $common_connstr;
-sub run_test_psql {
- my $connstr = $_[0];
+sub run_test_psql
+{
+ my $connstr = $_[0];
my $logstring = $_[1];
- my $cmd = [ 'psql',
- '-A', '-t',
- '-c', "SELECT 'connected with $connstr'",
- '-d', "$connstr"
- ];
+ my $cmd = [
+ 'psql', '-A', '-t', '-c', "SELECT 'connected with $connstr'",
+ '-d', "$connstr" ];
- open CLIENTLOG, ">>$tempdir/client-log" or die "Could not open client-log file";
+ open CLIENTLOG, ">>$tempdir/client-log"
+ or die "Could not open client-log file";
print CLIENTLOG "\n# Running test: $connstr $logstring\n";
close CLIENTLOG;
@@ -57,14 +58,17 @@ sub run_test_psql {
# which also contains a libpq connection string.
#
# The second argument is a hostname to connect to.
-sub test_connect_ok {
+sub test_connect_ok
+{
my $connstr = $_[0];
- my $result = run_test_psql("$common_connstr $connstr", "(should succeed)");
+ my $result =
+ run_test_psql("$common_connstr $connstr", "(should succeed)");
ok($result, $connstr);
}
-sub test_connect_fails {
+sub test_connect_fails
+{
my $connstr = $_[0];
my $result = run_test_psql("$common_connstr $connstr", "(should fail)");
@@ -91,7 +95,8 @@ switch_server_cert($tempdir, 'server-cn-only');
diag "running client tests...";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
# The server should not accept non-SSL connections
diag "test that the server doesn't accept non-SSL connections";
@@ -100,7 +105,7 @@ test_connect_fails("sslmode=disable");
# Try without a root cert. In sslmode=require, this should work. In verify-ca
# or verify-full mode it should fail
diag "connect without server root cert";
-test_connect_ok ("sslrootcert=invalid sslmode=require");
+test_connect_ok("sslrootcert=invalid sslmode=require");
test_connect_fails("sslrootcert=invalid sslmode=verify-ca");
test_connect_fails("sslrootcert=invalid sslmode=verify-full");
@@ -118,42 +123,50 @@ test_connect_fails("sslrootcert=ssl/server_ca.crt sslmode=verify-ca");
# And finally, with the correct root cert.
diag "connect with correct server CA cert file";
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=require");
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-full");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=require");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-full");
# Test with cert root file that contains two certificates. The client should
# be able to pick the right one, regardless of the order in the file.
-test_connect_ok ("sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca");
-test_connect_ok ("sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca");
diag "testing sslcrl option with a non-revoked cert";
# Invalid CRL filename is the same as no CRL, succeeds
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid");
+test_connect_ok(
+ "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid");
+
# A CRL belonging to a different CA is not accepted, fails
-test_connect_fails("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl");
+test_connect_fails(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl");
+
# With the correct CRL, succeeds (this cert is not revoked)
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl");
+test_connect_ok(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl"
+);
# Check that connecting with verify-full fails, when the hostname doesn't
# match the hostname in the server's certificate.
diag "test mismatch between hostname and server certificate";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok ("sslmode=require host=wronghost.test");
-test_connect_ok ("sslmode=verify-ca host=wronghost.test");
+test_connect_ok("sslmode=require host=wronghost.test");
+test_connect_ok("sslmode=verify-ca host=wronghost.test");
test_connect_fails("sslmode=verify-full host=wronghost.test");
# Test Subject Alternative Names.
switch_server_cert($tempdir, 'server-multiple-alt-names');
diag "test hostname matching with X509 Subject Alternative Names";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok ("host=dns1.alt-name.pg-ssltest.test");
-test_connect_ok ("host=dns2.alt-name.pg-ssltest.test");
-test_connect_ok ("host=foo.wildcard.pg-ssltest.test");
+test_connect_ok("host=dns1.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns2.alt-name.pg-ssltest.test");
+test_connect_ok("host=foo.wildcard.pg-ssltest.test");
test_connect_fails("host=wronghost.alt-name.pg-ssltest.test");
test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
@@ -163,9 +176,10 @@ test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
switch_server_cert($tempdir, 'server-single-alt-name');
diag "test hostname matching with a single X509 Subject Alternative Name";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok ("host=single.alt-name.pg-ssltest.test");
+test_connect_ok("host=single.alt-name.pg-ssltest.test");
test_connect_fails("host=wronghost.alt-name.pg-ssltest.test");
test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
@@ -175,48 +189,58 @@ test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
switch_server_cert($tempdir, 'server-cn-and-alt-names');
diag "test certificate with both a CN and SANs";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
-test_connect_ok ("host=dns1.alt-name.pg-ssltest.test");
-test_connect_ok ("host=dns2.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns1.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns2.alt-name.pg-ssltest.test");
test_connect_fails("host=common-name.pg-ssltest.test");
# Finally, test a server certificate that has no CN or SANs. Of course, that's
# not a very sensible certificate, but libpq should handle it gracefully.
switch_server_cert($tempdir, 'server-no-names');
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
-test_connect_ok ("sslmode=verify-ca host=common-name.pg-ssltest.test");
+test_connect_ok("sslmode=verify-ca host=common-name.pg-ssltest.test");
test_connect_fails("sslmode=verify-full host=common-name.pg-ssltest.test");
# Test that the CRL works
diag "Testing client-side CRL";
switch_server_cert($tempdir, 'server-revoked');
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
# Without the CRL, succeeds. With it, fails.
-test_connect_ok ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
-test_connect_fails("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
+test_connect_fails(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl"
+);
### Part 2. Server-side tests.
###
### Test certificate authorization.
diag "Testing certificate authorization...";
-$common_connstr="sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR";
+$common_connstr =
+"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR";
# no client cert
test_connect_fails("user=ssltestuser sslcert=invalid");
# correct client cert
-test_connect_ok ("user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client.key");
+test_connect_ok(
+ "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client.key");
# client cert belonging to another user
-test_connect_fails("user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client.key");
+test_connect_fails(
+ "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client.key");
# revoked client cert
-test_connect_fails("user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked.key");
+test_connect_fails(
+"user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked.key"
+);
# All done! Save the log, before the temporary installation is deleted
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index b617835c0c..b592f997f6 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -306,6 +306,7 @@ sub CopySolutionOutput
}
else # 'StaticLibrary'
{
+
# Static lib, such as libpgport, only used internally
# during build, don't install.
next;
@@ -438,6 +439,7 @@ sub CopyContribFiles
opendir($D, $subdir) || croak "Could not opendir on $subdir!\n";
while (my $d = readdir($D))
{
+
# These configuration-based exclusions must match vcregress.pl
next if ($d eq "uuid-ossp" && !defined($config->{uuid}));
next if ($d eq "sslinfo" && !defined($config->{openssl}));
@@ -463,7 +465,7 @@ sub CopySubdirFiles
return if ($module =~ /^\./);
return unless (-f "$subdir/$module/Makefile");
return
- if ($insttype eq "client" && !grep { $_ eq $module } @client_contribs);
+ if ($insttype eq "client" && !grep { $_ eq $module } @client_contribs);
my $mf = read_file("$subdir/$module/Makefile");
$mf =~ s{\\\r?\n}{}g;
@@ -480,18 +482,17 @@ sub CopySubdirFiles
foreach my $f (split /\s+/, $flist)
{
- lcopy(
- "$subdir/$module/$f.control",
- "$target/share/extension/$f.control"
- ) || croak("Could not copy file $f.control in contrib $module");
- print '.';
+ lcopy("$subdir/$module/$f.control",
+ "$target/share/extension/$f.control")
+ || croak("Could not copy file $f.control in contrib $module");
+ print '.';
}
}
$flist = '';
if ($mf =~ /^DATA_built\s*=\s*(.*)$/m) { $flist .= $1 }
if ($mf =~ /^DATA\s*=\s*(.*)$/m) { $flist .= " $1" }
- $flist =~ s/^\s*//; # Remove leading spaces if we had only DATA_built
+ $flist =~ s/^\s*//; # Remove leading spaces if we had only DATA_built
if ($flist ne '')
{
@@ -500,9 +501,9 @@ sub CopySubdirFiles
foreach my $f (split /\s+/, $flist)
{
lcopy("$subdir/$module/$f",
- "$target/share/$moduledir/" . basename($f))
- || croak("Could not copy file $f in contrib $module");
- print '.';
+ "$target/share/$moduledir/" . basename($f))
+ || croak("Could not copy file $f in contrib $module");
+ print '.';
}
}
@@ -533,8 +534,7 @@ sub CopySubdirFiles
if ($module eq 'spi');
foreach my $f (split /\s+/, $flist)
{
- lcopy("$subdir/$module/$f",
- "$target/doc/$moduledir/$f")
+ lcopy("$subdir/$module/$f", "$target/doc/$moduledir/$f")
|| croak("Could not copy file $f in contrib $module");
print '.';
}
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index be06898d1a..0603130c58 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -30,33 +30,30 @@ my $libpq;
# Set of variables for modules in contrib/ and src/test/modules/
my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' };
-my @contrib_uselibpq =
- ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
-my @contrib_uselibpgport = (
- 'oid2name',
- 'pg_standby',
- 'vacuumlo');
-my @contrib_uselibpgcommon = (
- 'oid2name',
- 'pg_standby',
- 'vacuumlo');
-my $contrib_extralibs = undef;
+my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
+my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo');
+my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo');
+my $contrib_extralibs = undef;
my $contrib_extraincludes =
{ 'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend'] };
my $contrib_extrasource = {
'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
- 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
+ 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
my @contrib_excludes = (
- 'commit_ts', 'hstore_plperl',
- 'hstore_plpython', 'intagg',
- 'ltree_plpython', 'pgcrypto',
- 'sepgsql');
+ 'commit_ts', 'hstore_plperl', 'hstore_plpython', 'intagg',
+ 'ltree_plpython', 'pgcrypto', 'sepgsql');
# Set of variables for frontend modules
my $frontend_defines = { 'initdb' => 'FRONTEND' };
my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql');
-my @frontend_uselibpgport = ( 'pg_archivecleanup', 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'pg_xlogdump', 'pgbench' );
-my @frontend_uselibpgcommon = ( 'pg_archivecleanup', 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'pg_xlogdump', 'pgbench' );
+my @frontend_uselibpgport = (
+ 'pg_archivecleanup', 'pg_test_fsync',
+ 'pg_test_timing', 'pg_upgrade',
+ 'pg_xlogdump', 'pgbench');
+my @frontend_uselibpgcommon = (
+ 'pg_archivecleanup', 'pg_test_fsync',
+ 'pg_test_timing', 'pg_upgrade',
+ 'pg_xlogdump', 'pgbench');
my $frontend_extralibs = {
'initdb' => ['ws2_32.lib'],
'pg_restore' => ['ws2_32.lib'],
@@ -68,10 +65,10 @@ my $frontend_extraincludes = {
my $frontend_extrasource = {
'psql' => ['src/bin/psql/psqlscan.l'],
'pgbench' =>
- [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ],
-};
-my @frontend_excludes =
- ('pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump', 'pg_xlogdump', 'scripts');
+ [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ], };
+my @frontend_excludes = (
+ 'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
+ 'pg_xlogdump', 'scripts');
sub mkvcbuild
{
@@ -104,15 +101,16 @@ sub mkvcbuild
}
else
{
- push(@pgportfiles, 'pg_crc32c_sb8.c')
+ push(@pgportfiles, 'pg_crc32c_sb8.c');
}
our @pgcommonallfiles = qw(
exec.c pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c
string.c username.c wait_error.c);
- our @pgcommonfrontendfiles = (@pgcommonallfiles, qw(fe_memutils.c
- restricted_token.c));
+ our @pgcommonfrontendfiles = (
+ @pgcommonallfiles, qw(fe_memutils.c
+ restricted_token.c));
our @pgcommonbkndfiles = @pgcommonallfiles;
@@ -467,15 +465,16 @@ sub mkvcbuild
# ltree_plpython and hstore_plperl.
if ($solution->{options}->{python})
{
+
# Attempt to get python version and location.
# Assume python.exe in specified dir.
- my $pythonprog = "import sys;print(sys.prefix);" .
- "print(str(sys.version_info[0])+str(sys.version_info[1]))";
- my $prefixcmd = $solution->{options}->{python}
- . "\\python -c \"$pythonprog\"";
+ my $pythonprog = "import sys;print(sys.prefix);"
+ . "print(str(sys.version_info[0])+str(sys.version_info[1]))";
+ my $prefixcmd =
+ $solution->{options}->{python} . "\\python -c \"$pythonprog\"";
my $pyout = `$prefixcmd`;
die "Could not query for python version!\n" if $?;
- my ($pyprefix,$pyver) = split(/\r?\n/,$pyout);
+ my ($pyprefix, $pyver) = split(/\r?\n/, $pyout);
# Sometimes (always?) if python is not present, the execution
# appears to work, but gives no data...
@@ -490,16 +489,14 @@ sub mkvcbuild
$plpython->AddReference($postgres);
# Add transform modules dependent on plpython
- AddTransformModule('hstore_plpython' . $pymajorver,
- 'contrib/hstore_plpython',
- 'plpython' . $pymajorver,
- 'src/pl/plpython', 'hstore',
- 'contrib/hstore');
- AddTransformModule('ltree_plpython' . $pymajorver,
- 'contrib/ltree_plpython',
- 'plpython' . $pymajorver,
- 'src/pl/plpython', 'ltree',
- 'contrib/ltree');
+ AddTransformModule(
+ 'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
+ 'plpython' . $pymajorver, 'src/pl/plpython',
+ 'hstore', 'contrib/hstore');
+ AddTransformModule(
+ 'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
+ 'plpython' . $pymajorver, 'src/pl/plpython',
+ 'ltree', 'contrib/ltree');
}
if ($solution->{options}->{perl})
@@ -587,10 +584,10 @@ sub mkvcbuild
}
# Add transform module dependent on plperl
- my $hstore_plperl =
- AddTransformModule('hstore_plperl', 'contrib/hstore_plperl',
- 'plperl', 'src/pl/plperl',
- 'hstore', 'contrib/hstore');
+ my $hstore_plperl = AddTransformModule(
+ 'hstore_plperl', 'contrib/hstore_plperl',
+ 'plperl', 'src/pl/plperl',
+ 'hstore', 'contrib/hstore');
$hstore_plperl->AddDefine('PLPERL_HAVE_UID_GID');
}
@@ -670,7 +667,7 @@ sub mkvcbuild
$pg_xlogdump->AddDefine('FRONTEND');
foreach my $xf (glob('src/backend/access/rmgrdesc/*desc.c'))
{
- $pg_xlogdump->AddFile($xf)
+ $pg_xlogdump->AddFile($xf);
}
$pg_xlogdump->AddFile('src/backend/access/transam/xlogreader.c');
@@ -706,12 +703,12 @@ sub AddSimpleFrontend
# Add a simple transform module
sub AddTransformModule
{
- my $n = shift;
- my $n_src = shift;
- my $pl_proj_name = shift;
- my $pl_src = shift;
+ my $n = shift;
+ my $n_src = shift;
+ my $pl_proj_name = shift;
+ my $pl_src = shift;
my $transform_name = shift;
- my $transform_src = shift;
+ my $transform_src = shift;
my $transform_proj = undef;
foreach my $proj (@{ $solution->{projects}->{'contrib'} })
@@ -723,7 +720,7 @@ sub AddTransformModule
}
}
die "could not find base module $transform_name for transform module $n"
- if (!defined($transform_proj));
+ if (!defined($transform_proj));
my $pl_proj = undef;
foreach my $proj (@{ $solution->{projects}->{'PLs'} })
@@ -735,7 +732,7 @@ sub AddTransformModule
}
}
die "could not find PL $pl_proj_name for transform module $n"
- if (!defined($pl_proj));
+ if (!defined($pl_proj));
my $p = $solution->AddProject($n, 'dll', 'contrib', $n_src);
for my $file (glob("$n_src/*.c"))
@@ -748,7 +745,7 @@ sub AddTransformModule
$p->AddIncludeDir($pl_src);
$p->AddReference($pl_proj);
$p->AddIncludeDir($pl_proj->{includes});
- foreach my $pl_lib (@{$pl_proj->{libraries}})
+ foreach my $pl_lib (@{ $pl_proj->{libraries} })
{
$p->AddLibrary($pl_lib);
}
@@ -756,7 +753,7 @@ sub AddTransformModule
# Add base module dependencies
$p->AddIncludeDir($transform_src);
$p->AddIncludeDir($transform_proj->{includes});
- foreach my $trans_lib (@{$transform_proj->{libraries}})
+ foreach my $trans_lib (@{ $transform_proj->{libraries} })
{
$p->AddLibrary($trans_lib);
}
@@ -769,14 +766,13 @@ sub AddTransformModule
sub AddContrib
{
my $subdir = shift;
- my $n = shift;
- my $mf = Project::read_file("$subdir/$n/Makefile");
+ my $n = shift;
+ my $mf = Project::read_file("$subdir/$n/Makefile");
if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
{
my $dn = $1;
- my $proj =
- $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n");
+ my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n");
$proj->AddReference($postgres);
AdjustContribProj($proj);
}
@@ -794,8 +790,7 @@ sub AddContrib
}
elsif ($mf =~ /^PROGRAM\s*=\s*(.*)$/mg)
{
- my $proj =
- $solution->AddProject($1, 'exe', 'contrib', "$subdir/$n");
+ my $proj = $solution->AddProject($1, 'exe', 'contrib', "$subdir/$n");
AdjustContribProj($proj);
}
else
@@ -841,7 +836,7 @@ sub GenerateContribSqlFiles
print "Building $out from $in (contrib/$n)...\n";
my $cont = Project::read_file("contrib/$n/$in");
my $dn = $out;
- $dn =~ s/\.sql$//;
+ $dn =~ s/\.sql$//;
$cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
my $o;
open($o, ">contrib/$n/$out")
@@ -866,10 +861,11 @@ sub AdjustContribProj
sub AdjustFrontendProj
{
my $proj = shift;
- AdjustModule($proj, $frontend_defines, \@frontend_uselibpq,
- \@frontend_uselibpgport, \@frontend_uselibpgcommon,
- $frontend_extralibs,
- $frontend_extrasource, $frontend_extraincludes);
+ AdjustModule(
+ $proj, $frontend_defines,
+ \@frontend_uselibpq, \@frontend_uselibpgport,
+ \@frontend_uselibpgcommon, $frontend_extralibs,
+ $frontend_extrasource, $frontend_extraincludes);
}
sub AdjustModule
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 362beb4218..4ce0941853 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -63,6 +63,7 @@ sub ReplaceFile
foreach my $file (keys %{ $self->{files} })
{
+
# Match complete filename
if ($filename =~ m!/!)
{
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index db95afa14b..6b16e69b69 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -36,7 +36,7 @@ sub _new
$options->{float8byval} = ($bits == 64)
unless exists $options->{float8byval};
die "float8byval not permitted on 32 bit platforms"
- if $options->{float8byval} && $bits == 32;
+ if $options->{float8byval} && $bits == 32;
if ($options->{xml})
{
if (!($options->{xslt} && $options->{iconv}))
@@ -143,16 +143,13 @@ sub GenerateFiles
confess "Unable to parse configure.in for all variables!"
if ($self->{strver} eq '' || $self->{numver} eq '');
- if (IsNewer(
- "src/include/pg_config_os.h", "src/include/port/win32.h"))
+ if (IsNewer("src/include/pg_config_os.h", "src/include/port/win32.h"))
{
print "Copying pg_config_os.h...\n";
- copyFile("src/include/port/win32.h",
- "src/include/pg_config_os.h");
+ copyFile("src/include/port/win32.h", "src/include/pg_config_os.h");
}
- if (IsNewer(
- "src/include/pg_config.h", "src/include/pg_config.h.win32"))
+ if (IsNewer("src/include/pg_config.h", "src/include/pg_config.h.win32"))
{
print "Generating pg_config.h...\n";
open(I, "src/include/pg_config.h.win32")
@@ -165,7 +162,7 @@ sub GenerateFiles
{
s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"};
s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
- s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
+s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
print O;
}
print O "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
@@ -177,10 +174,10 @@ sub GenerateFiles
if ($self->{options}->{asserts});
print O "#define USE_INTEGER_DATETIMES 1\n"
if ($self->{options}->{integer_datetimes});
- print O "#define USE_LDAP 1\n" if ($self->{options}->{ldap});
- print O "#define HAVE_LIBZ 1\n" if ($self->{options}->{zlib});
+ print O "#define USE_LDAP 1\n" if ($self->{options}->{ldap});
+ print O "#define HAVE_LIBZ 1\n" if ($self->{options}->{zlib});
print O "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
- print O "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
+ print O "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
print O "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
print O "#define RELSEG_SIZE ",
diff --git a/src/tools/msvc/VCBuildProject.pm b/src/tools/msvc/VCBuildProject.pm
index 3a24c4e52f..a8d75d88f3 100644
--- a/src/tools/msvc/VCBuildProject.pm
+++ b/src/tools/msvc/VCBuildProject.pm
@@ -112,7 +112,7 @@ EOF
my $of = $fileNameWithPath;
$of =~ s/\.y$/.c/;
$of =~
-s{^src/pl/plpgsql/src/gram.c$}{src/pl/plpgsql/src/pl_gram.c};
+ s{^src/pl/plpgsql/src/gram.c$}{src/pl/plpgsql/src/pl_gram.c};
print $f '>'
. $self->GenerateCustomTool(
'Running bison on ' . $fileNameWithPath,
diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm
index b83af4026e..fee4684b21 100644
--- a/src/tools/msvc/VSObjectFactory.pm
+++ b/src/tools/msvc/VSObjectFactory.pm
@@ -92,11 +92,14 @@ sub CreateProject
sub DetermineVisualStudioVersion
{
+
# To determine version of Visual Studio we use nmake as it has
# existed for a long time and still exists in current Visual
# Studio versions.
my $output = `nmake /? 2>&1`;
- $? >> 8 == 0 or croak "Unable to determine Visual Studio version: The nmake command wasn't found.";
+ $? >> 8 == 0
+ or croak
+"Unable to determine Visual Studio version: The nmake command wasn't found.";
if ($output =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/m)
{
return _GetVisualStudioVersion($1, $2);
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index 0bee0c0e2d..b9f2ff41f7 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -7,24 +7,24 @@ our $config = {
# integer_datetimes=>1, # --enable-integer-datetimes - on is now default
# float4byval=>1, # --disable-float4-byval, on by default
- # float8byval=> $platformbits == 64, # --disable-float8-byval,
- # off by default on 32 bit platforms, on by default on 64 bit platforms
+ # float8byval=> $platformbits == 64, # --disable-float8-byval,
+ # off by default on 32 bit platforms, on by default on 64 bit platforms
- # blocksize => 8, # --with-blocksize, 8kB by default
- # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
- # wal_segsize => 16, # --with-wal-segsize, 16MB by default
- ldap => 1, # --with-ldap
- extraver => undef, # --with-extra-version=
- nls => undef, # --enable-nls=
- tcl => undef, # --with-tls=
- perl => undef, # --with-perl
- python => undef, # --with-python=
- openssl => undef, # --with-openssl=
- uuid => undef, # --with-ossp-uuid
- xml => undef, # --with-libxml=
- xslt => undef, # --with-libxslt=
- iconv => undef, # (not in configure, path to iconv)
- zlib => undef # --with-zlib=
+ # blocksize => 8, # --with-blocksize, 8kB by default
+ # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
+ # wal_segsize => 16, # --with-wal-segsize, 16MB by default
+ ldap => 1, # --with-ldap
+ extraver => undef, # --with-extra-version=
+ nls => undef, # --enable-nls=
+ tcl => undef, # --with-tls=
+ perl => undef, # --with-perl
+ python => undef, # --with-python=
+ openssl => undef, # --with-openssl=
+ uuid => undef, # --with-ossp-uuid
+ xml => undef, # --with-libxml=
+ xslt => undef, # --with-libxslt=
+ iconv => undef, # (not in configure, path to iconv)
+ zlib => undef # --with-zlib=
};
1;
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index bfa8a3dc90..ddb628d154 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -15,7 +15,7 @@ my $startdir = getcwd();
chdir "../../.." if (-d "../../../src/tools/msvc");
-my $topdir = getcwd();
+my $topdir = getcwd();
my $tmp_installdir = "$topdir/tmp_install";
require 'src/tools/msvc/config_default.pl';
@@ -230,11 +230,11 @@ sub subdircheck
{
my $subdir = shift;
my $module = shift;
- my $mstat = 0;
+ my $mstat = 0;
- if ( ! -d "$module/sql" ||
- ! -d "$module/expected" ||
- ( ! -f "$module/GNUmakefile" && ! -f "$module/Makefile"))
+ if ( !-d "$module/sql"
+ || !-d "$module/expected"
+ || (!-f "$module/GNUmakefile" && !-f "$module/Makefile"))
{
return;
}
@@ -246,19 +246,17 @@ sub subdircheck
# Add some options for transform modules, see their respective
# Makefile for more details regarding Python-version specific
# dependencies.
- if ($module eq "hstore_plpython" ||
- $module eq "ltree_plpython")
+ if ( $module eq "hstore_plpython"
+ || $module eq "ltree_plpython")
{
die "Python not enabled in configuration"
- if !defined($config->{python});
+ if !defined($config->{python});
# Attempt to get python version and location.
# Assume python.exe in specified dir.
- my $pythonprog = "import sys;" .
- "print(str(sys.version_info[0]))";
- my $prefixcmd = $config->{python}
- . "\\python -c \"$pythonprog\"";
- my $pyver = `$prefixcmd`;
+ my $pythonprog = "import sys;" . "print(str(sys.version_info[0]))";
+ my $prefixcmd = $config->{python} . "\\python -c \"$pythonprog\"";
+ my $pyver = `$prefixcmd`;
die "Could not query for python version!\n" if $?;
chomp($pyver);
if ($pyver eq "2")
@@ -268,6 +266,7 @@ sub subdircheck
}
else
{
+
# disable tests on python3 for now.
chdir "..";
return;
@@ -275,10 +274,9 @@ sub subdircheck
}
- print
- "============================================================\n";
+ print "============================================================\n";
print "Checking $module\n";
- my @args = (
+ my @args = (
"${tmp_installdir}/bin/pg_regress",
"--bindir=${tmp_installdir}/bin",
"--dbname=contrib_regression", @opts, @tests);
@@ -295,11 +293,12 @@ sub contribcheck
chdir "$topdir/contrib";
foreach my $module (glob("*"))
{
+
# these configuration-based exclusions must match Install.pm
- next if ($module eq "uuid-ossp" && !defined($config->{uuid}));
- next if ($module eq "sslinfo" && !defined($config->{openssl}));
- next if ($module eq "xml2" && !defined($config->{xml}));
- next if ($module eq "hstore_plperl" && !defined($config->{perl}));
+ next if ($module eq "uuid-ossp" && !defined($config->{uuid}));
+ next if ($module eq "sslinfo" && !defined($config->{openssl}));
+ next if ($module eq "xml2" && !defined($config->{xml}));
+ next if ($module eq "hstore_plperl" && !defined($config->{perl}));
next if ($module eq "hstore_plpython" && !defined($config->{python}));
next if ($module eq "ltree_plpython" && !defined($config->{python}));
next if ($module eq "sepgsql");
@@ -412,6 +411,7 @@ sub fetchRegressOpts
$m =~ s{\\\r?\n}{}g;
if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m)
{
+
# Substitute known Makefile variables, then ignore options that retain
# an unhandled variable reference. Ignore anything that isn't an
# option starting with "--".
@@ -492,6 +492,6 @@ sub usage
{
print STDERR
"Usage: vcregress.pl ",
- " [schedule]\n";
+" [schedule]\n";
exit(1);
}
--
cgit v1.2.3